From 7358946bdac5944d32b5ca9802864e78693bbf20 Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Wed, 2 May 2018 10:47:02 +0200
Subject: [PATCH 01/30] Add a new `_ignored` meta field. (#29658)

This adds a new `_ignored` meta field which indexes and stores fields
that have been ignored at index time because of the `ignore_malformed`
option. It makes malformed documents easier to identify by using
`exists` or `term(s)` queries on the `_ignored` field.

Closes #29494
---
 docs/CHANGELOG.asciidoc                       |   8 +-
 docs/reference/mapping/fields.asciidoc        |  10 ++
 .../mapping/fields/ignored-field.asciidoc     |  45 +++++
 .../mapping/params/ignore-malformed.asciidoc  |  10 ++
 .../test/search/200_ignore_malformed.yml      |  92 +++++++++++
 .../index/fieldvisitor/FieldsVisitor.java     |   7 +
 .../index/mapper/DateFieldMapper.java         |   1 +
 .../index/mapper/GeoPointFieldMapper.java     |   2 +
 .../index/mapper/GeoShapeFieldMapper.java     |   1 +
 .../index/mapper/IgnoredFieldMapper.java      | 154 ++++++++++++++++++
 .../index/mapper/IpFieldMapper.java           |   1 +
 .../index/mapper/MapperService.java           |   2 +-
 .../index/mapper/NumberFieldMapper.java       |   1 +
 .../index/mapper/ParseContext.java            |  37 +++++
 .../elasticsearch/indices/IndicesModule.java  |   6 +-
 .../org/elasticsearch/search/SearchHit.java   |  10 +-
 .../index/mapper/DateFieldMapperTests.java    |   1 +
 .../index/mapper/IgnoredFieldTypeTests.java   |  29 ++++
 .../index/mapper/IpFieldMapperTests.java      |   1 +
 .../index/mapper/NumberFieldMapperTests.java  |   1 +
 .../indices/IndicesModuleTests.java           |   9 +-
 21 files changed, 418 insertions(+), 10 deletions(-)
 create mode 100644 docs/reference/mapping/fields/ignored-field.asciidoc
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml
 create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java
 create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java

diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc
index 20a792d0f78..a880a3c423e 100644
--- a/docs/CHANGELOG.asciidoc
+++ b/docs/CHANGELOG.asciidoc
@@ -71,8 +71,12 @@ written to by an older Elasticsearch after writing to it with a newer Elasticsea
 [[release-notes-6.4.0]]
 == {es} 6.4.0
 
-//[float]
-//=== New Features
+[float]
+=== New Features
+
+The new <<mapping-ignored-field,`_ignored`>> field makes it possible to know
+which fields got ignored at index time because of the
+<<ignore-malformed,`ignore_malformed`>> option. ({pull}29658[#29658])
 
 [float]
 === Enhancements

diff --git a/docs/reference/mapping/fields.asciidoc b/docs/reference/mapping/fields.asciidoc
index 155e23c9e54..d3bc90ab8b3 100644
--- a/docs/reference/mapping/fields.asciidoc
+++ b/docs/reference/mapping/fields.asciidoc
@@ -40,6 +40,14 @@ can be customised when a mapping type is created.
 
   All fields in the document which contain non-null values.
 
+[float]
+=== Indexing meta-fields
+
+<<mapping-ignored-field,`_ignored`>>::
+
+  All fields in the document that have been ignored at index time because of
+  <<ignore-malformed,`ignore_malformed`>>.
+
 [float]
 === Routing meta-field
@@ -57,6 +65,8 @@ can be customised when a mapping type is created.
 include::fields/field-names-field.asciidoc[]
 
+include::fields/ignored-field.asciidoc[]
+
 include::fields/id-field.asciidoc[]
 
 include::fields/index-field.asciidoc[]

diff --git a/docs/reference/mapping/fields/ignored-field.asciidoc b/docs/reference/mapping/fields/ignored-field.asciidoc
new file mode 100644
index 00000000000..d2776ea86b2
--- /dev/null
+++ b/docs/reference/mapping/fields/ignored-field.asciidoc
@@ -0,0 +1,45 @@
+[[mapping-ignored-field]]
+=== `_ignored` field
+
+added[6.4.0]
+
+The `_ignored` field indexes and stores the names of every field in a document
+that has been ignored because it was malformed and
+<<ignore-malformed,`ignore_malformed`>> was turned on.
+
+This field is searchable with <<query-dsl-term-query,`term`>>,
+<<query-dsl-terms-query,`terms`>> and <<query-dsl-exists-query,`exists`>>
+queries, and is returned as part of the search hits.
+
+For instance, the query below matches all documents that have one or more
+fields that got ignored:
+
+[source,js]
+--------------------------------------------------
+GET _search
+{
+  "query": {
+    "exists": {
+      "field": "_ignored"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+Similarly, the query below finds all documents whose `@timestamp` field was
+ignored at index time:
+
+[source,js]
+--------------------------------------------------
+GET _search
+{
+  "query": {
+    "term": {
+      "_ignored": "@timestamp"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE

diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc
index 9a2fef1a23e..be0bdfe4ffa 100644
--- a/docs/reference/mapping/params/ignore-malformed.asciidoc
+++ b/docs/reference/mapping/params/ignore-malformed.asciidoc
@@ -85,3 +85,13 @@ PUT my_index
 <1> The `number_one` field inherits the index-level setting.
 <2> The `number_two` field overrides the index-level setting to turn off
 `ignore_malformed`.
+
+==== Dealing with malformed fields
+
+Malformed fields are silently ignored at indexing time when `ignore_malformed`
+is turned on. Whenever possible, it is recommended to keep the number of
+documents that have a malformed field low, since queries on such a field
+would otherwise become meaningless. Elasticsearch makes it easy to check how
+many documents have malformed fields by using `exists` or `term` queries on
+the special <<mapping-ignored-field,`_ignored`>> field.
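+
+For instance, a count request such as the one below (a sketch; `my_index` is
+an illustrative index name) returns the number of documents that have at
+least one ignored field:
+
+[source,js]
+--------------------------------------------------
+GET my_index/_count
+{
+  "query": {
+    "exists": {
+      "field": "_ignored"
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE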
+ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml new file mode 100644 index 00000000000..e32b1fa1b95 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml @@ -0,0 +1,92 @@ +--- +setup: + - skip: + version: " - 6.99.99" # TODO: change to 6.3.99 after backport to 6.4 + reason: _ignored was added in 6.4.0 + + - do: + indices.create: + index: test + body: + mappings: + _doc: + properties: + my_date: + type: date + ignore_malformed: true + store: true + my_ip: + type: ip + ignore_malformed: true + + - do: + index: + index: test + type: _doc + id: 1 + body: { "my_date": "2018-05-11", "my_ip": ":::1" } + + - do: + index: + index: test + type: _doc + id: 2 + body: { "my_date": "bar", "my_ip": "192.168.1.42" } + + - do: + index: + index: test + type: _doc + id: 3 + body: { "my_date": "bar", "my_ip": "quux" } + + - do: + indices.refresh: {} + +--- +"Exists on _ignored": + + - do: + search: + body: { query: { exists: { "field": "_ignored" } } } + + - length: { hits.hits: 3 } + +--- +"Search on _ignored with term": + + - do: + search: + body: { query: { term: { "_ignored": "my_date" } } } + + - length: { hits.hits: 2 } + +--- +"Search on _ignored with terms": + + - do: + search: + body: { query: { terms: { "_ignored": [ "my_date", "my_ip" ] } } } + + - length: { hits.hits: 3 } + +--- +"_ignored is returned by default": + + - do: + search: + body: { query: { ids: { "values": [ "3" ] } } } + + - length: { hits.hits: 1 } + - length: { hits.hits.0._ignored: 2} + +--- +"_ignored is still returned with explicit list of stored fields": + + - do: + search: + stored_fields: [ "my_date" ] + body: { query: { ids: { "values": [ "3" ] } } } + + - length: { hits.hits: 1 } + - is_true: hits.hits.0._ignored diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java index aecbba766f4..0151fc9ec62 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java @@ -24,6 +24,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.RoutingFieldMapper; @@ -69,6 +70,12 @@ public class FieldsVisitor extends StoredFieldVisitor { if (requiredFields.remove(fieldInfo.name)) { return Status.YES; } + // Always load _ignored to be explicit about ignored fields + // This works because _ignored is added as the first metadata mapper, + // so its stored fields always appear first in the list. 
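+        // (IndicesModule#getMetadataMappers registers the _ignored mapper before
+        // all other metadata mappers, which is what guarantees that ordering.)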
+ if (IgnoredFieldMapper.NAME.equals(fieldInfo.name)) { + return Status.YES; + } // All these fields are single-valued so we can stop when the set is // empty return requiredFields.isEmpty() diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 7f1f0b95682..c8360e468d7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -446,6 +446,7 @@ public class DateFieldMapper extends FieldMapper { timestamp = fieldType().parse(dateAsString); } catch (IllegalArgumentException e) { if (ignoreMalformed.value()) { + context.addIgnoredField(fieldType.name()); return; } else { throw e; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 0cd20002170..2ea31f67e29 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -305,6 +305,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper if (ignoreMalformed.value() == false) { throw e; } + context.addIgnoredField(fieldType.name()); } token = context.parser().nextToken(); } @@ -352,6 +353,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper if (ignoreMalformed.value() == false) { throw e; } + context.addIgnoredField(fieldType.name()); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 753d91f7be2..c0158f61c3a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -506,6 +506,7 @@ public class GeoShapeFieldMapper extends FieldMapper { if (ignoreMalformed.value() == false) { throw new MapperParsingException("failed to parse [" + fieldType().name() + "]", e); } + context.addIgnoredField(fieldType.name()); } return null; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java new file mode 100644 index 00000000000..69f1e36664e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermRangeQuery;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryShardContext;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A field mapper that records fields that have been ignored because they were malformed.
+ */
+public final class IgnoredFieldMapper extends MetadataFieldMapper {
+
+    public static final String NAME = "_ignored";
+
+    public static final String CONTENT_TYPE = "_ignored";
+
+    public static class Defaults {
+        public static final String NAME = IgnoredFieldMapper.NAME;
+
+        public static final MappedFieldType FIELD_TYPE = new IgnoredFieldType();
+
+        static {
+            FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
+            FIELD_TYPE.setTokenized(false);
+            FIELD_TYPE.setStored(true);
+            FIELD_TYPE.setOmitNorms(true);
+            FIELD_TYPE.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
+            FIELD_TYPE.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);
+            FIELD_TYPE.setName(NAME);
+            FIELD_TYPE.freeze();
+        }
+    }
+
+    public static class Builder extends MetadataFieldMapper.Builder<Builder, IgnoredFieldMapper> {
+
+        public Builder(MappedFieldType existing) {
+            super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
+        }
+
+        @Override
+        public IgnoredFieldMapper build(BuilderContext context) {
+            return new IgnoredFieldMapper(context.indexSettings());
+        }
+    }
+
+    public static class TypeParser implements MetadataFieldMapper.TypeParser {
+        @Override
+        public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node,
+                ParserContext parserContext) throws MapperParsingException {
+            return new Builder(parserContext.mapperService().fullName(NAME));
+        }
+
+        @Override
+        public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
+            final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
+            return new IgnoredFieldMapper(indexSettings);
+        }
+    }
+
+    public static final class IgnoredFieldType extends TermBasedFieldType {
+
+        public IgnoredFieldType() {
+        }
+
+        protected IgnoredFieldType(IgnoredFieldType ref) {
+            super(ref);
+        }
+
+        @Override
+        public IgnoredFieldType clone() {
+            return new IgnoredFieldType(this);
+        }
+
+        @Override
+        public String typeName() {
+            return CONTENT_TYPE;
+        }
+
+        @Override
+        public Query existsQuery(QueryShardContext context) {
+            // This query is not performance sensitive, it only helps assess
+            // quality of the data, so we may use a slow query. It shouldn't
+            // be too slow in practice since the number of unique terms in this
+            // field is bounded by the number of fields in the mappings.
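+            // Note: a TermRangeQuery with null lower and upper bounds matches
+            // every document that has at least one indexed term for this field,
+            // which is exactly the "exists" semantics needed here.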
+            return new TermRangeQuery(name(), null, null, true, true);
+        }
+
+    }
+
+    private IgnoredFieldMapper(Settings indexSettings) {
+        super(NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE, indexSettings);
+    }
+
+    @Override
+    public void preParse(ParseContext context) throws IOException {
+    }
+
+    @Override
+    public void postParse(ParseContext context) throws IOException {
+        super.parse(context);
+    }
+
+    @Override
+    public Mapper parse(ParseContext context) throws IOException {
+        // done in post-parse
+        return null;
+    }
+
+    @Override
+    protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
+        for (String field : context.getIgnoredFields()) {
+            context.doc().add(new Field(NAME, field, fieldType()));
+        }
+    }
+
+    @Override
+    protected String contentType() {
+        return CONTENT_TYPE;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return builder;
+    }
+
+}

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
index fd5dc080011..a8ef46b9306 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
@@ -370,6 +370,7 @@ public class IpFieldMapper extends FieldMapper {
                 address = InetAddresses.forString(addressAsString);
             } catch (IllegalArgumentException e) {
                 if (ignoreMalformed.value()) {
+                    context.addIgnoredField(fieldType.name());
                     return;
                 } else {
                     throw e;

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index c72187b6497..b28ea695f82 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -112,7 +112,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     //also missing, not sure if on purpose. See IndicesModule#getMetadataMappers
     private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(
             "_id", "_type", "_routing", "_index",
-            "_size", "_timestamp", "_ttl"
+            "_size", "_timestamp", "_ttl", IgnoredFieldMapper.NAME
     );
 
     private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(MapperService.class));

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
index 69793ca89b5..9c327c5294e 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
@@ -990,6 +990,7 @@ public class NumberFieldMapper extends FieldMapper {
                 numericValue = fieldType().type.parse(parser, coerce.value());
             } catch (IllegalArgumentException e) {
                 if (ignoreMalformed.value()) {
+                    context.addIgnoredField(fieldType.name());
                     return;
                 } else {
                     throw e;

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
index 8c2eda31ca1..b77ffee05ca 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
@@ -29,9 +29,12 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentParser;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Set;
 
 public abstract class ParseContext implements Iterable<ParseContext.Document> {
 
@@ -286,6 +289,16 @@ public abstract class ParseContext implements Iterable<ParseContext.Document> {
         public Iterator<Document> iterator() {
             return in.iterator();
         }
+
+        @Override
+        public void addIgnoredField(String field) {
+            in.addIgnoredField(field);
+        }
+
+        @Override
+        public Collection<String> getIgnoredFields() {
+            return in.getIgnoredFields();
+        }
     }
 
     public static class InternalParseContext extends ParseContext {
@@ -319,6 +332,8 @@ public abstract class ParseContext implements Iterable<ParseContext.Document> {
 
         private boolean docsReversed = false;
 
+        private final Set<String> ignoredFields = new HashSet<>();
+
         public InternalParseContext(@Nullable Settings indexSettings, DocumentMapperParser docMapperParser,
                 DocumentMapper docMapper, SourceToParse source, XContentParser parser) {
             this.indexSettings = indexSettings;
@@ -453,6 +468,17 @@ public abstract class ParseContext implements Iterable<ParseContext.Document> {
         public Iterator<Document> iterator() {
             return documents.iterator();
         }
+
+        @Override
+        public void addIgnoredField(String field) {
+            ignoredFields.add(field);
+        }
+
+        @Override
+        public Collection<String> getIgnoredFields() {
+            return Collections.unmodifiableCollection(ignoredFields);
+        }
     }
 
     /**
@@ -461,6 +487,17 @@ public abstract class ParseContext implements Iterable<ParseContext.Document> {
      */
     public abstract Iterable<Document> nonRootDocuments();
 
+    /**
+     * Add the given {@code field} to the set of ignored fields.
+     */
+    public abstract void addIgnoredField(String field);
+
+    /**
+     * Return the collection of fields that have been ignored so far.
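+     * Entries are recorded during document parsing via {@link #addIgnoredField(String)}.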
+     */
+    public abstract Collection<String> getIgnoredFields();
+
     public abstract DocumentMapperParser docMapperParser();
 
     /**

diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java
index 92faa0a71fd..6c786763003 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java
@@ -35,6 +35,7 @@ import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
 import org.elasticsearch.index.mapper.GeoPointFieldMapper;
 import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
 import org.elasticsearch.index.mapper.IdFieldMapper;
+import org.elasticsearch.index.mapper.IgnoredFieldMapper;
 import org.elasticsearch.index.mapper.IndexFieldMapper;
 import org.elasticsearch.index.mapper.IpFieldMapper;
 import org.elasticsearch.index.mapper.KeywordFieldMapper;
@@ -130,7 +131,10 @@ public class IndicesModule extends AbstractModule {
         Map<String, MetadataFieldMapper.TypeParser> builtInMetadataMappers;
         // Use a LinkedHashMap for metadataMappers because iteration order matters
         builtInMetadataMappers = new LinkedHashMap<>();
-        // ID first so it will be the first stored field to load (so will benefit from "fields: []" early termination
+        // _ignored first so that we always load it, even if only _id is requested
+        builtInMetadataMappers.put(IgnoredFieldMapper.NAME, new IgnoredFieldMapper.TypeParser());
+        // ID second so it will be the first (if no ignored fields) stored field to load
+        // (so will benefit from "fields: []" early termination)
         builtInMetadataMappers.put(IdFieldMapper.NAME, new IdFieldMapper.TypeParser());
         builtInMetadataMappers.put(RoutingFieldMapper.NAME, new RoutingFieldMapper.TypeParser());
         builtInMetadataMappers.put(IndexFieldMapper.NAME, new IndexFieldMapper.TypeParser());

diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java
index da7a42b22e3..bbb88b5fff6 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchHit.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java
@@ -43,6 +43,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.index.mapper.IgnoredFieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.index.shard.ShardId;
@@ -444,8 +445,13 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<SearchHitField> {
         for (SearchHitField field : metaFields) {
-            builder.field(field.getName(), (Object) field.getValue());
+            // _ignored is the only multi-valued meta field
+            if (field.getName().equals(IgnoredFieldMapper.NAME)) {
+                builder.field(field.getName(), field.getValues());
+            } else {
+                builder.field(field.getName(), field.<Object>getValue());
+            }
         }
         if (source != null) {
             XContentHelper.writeRawField(SourceFieldMapper.NAME, source, builder, params);

diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
index 9d334cecb70..c19965ac5f7 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java
@@ -192,6 +192,7 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase {
 
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(0, fields.length);
+        assertArrayEquals(new String[] { "field" }, doc.rootDoc().getValues("_ignored"));
     }
 
     public void testChangeFormat() throws IOException {

diff --git
a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java new file mode 100644 index 00000000000..4035383893d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +public class IgnoredFieldTypeTests extends FieldTypeTestCase { + + @Override + protected MappedFieldType createDefaultFieldType() { + return new IgnoredFieldMapper.IgnoredFieldType(); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index 28a3a2f16f2..a20c88fe693 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -194,6 +194,7 @@ public class IpFieldMapperTests extends ESSingleNodeTestCase { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); + assertArrayEquals(new String[] { "field" }, doc.rootDoc().getValues("_ignored")); } public void testNullValue() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index 66b90cdca3a..9167c0d5a7d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -255,6 +255,7 @@ public class NumberFieldMapperTests extends AbstractNumericFieldMapperTestCase { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); + assertArrayEquals(new String[] { "field" }, doc.rootDoc().getValues("_ignored")); } public void testRejectNorms() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index 3ebb268a7b8..9b88c6ab8f2 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -82,9 +83,9 @@ public class IndicesModuleTests 
extends ESTestCase { } }); - private static String[] EXPECTED_METADATA_FIELDS = new String[]{IdFieldMapper.NAME, RoutingFieldMapper.NAME, - IndexFieldMapper.NAME, SourceFieldMapper.NAME, TypeFieldMapper.NAME, VersionFieldMapper.NAME, - SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME}; + private static String[] EXPECTED_METADATA_FIELDS = new String[]{IgnoredFieldMapper.NAME, IdFieldMapper.NAME, + RoutingFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, TypeFieldMapper.NAME, + VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME}; public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); @@ -106,7 +107,7 @@ public class IndicesModuleTests extends ESTestCase { greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers().size())); Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); Iterator iterator = metadataMapperParsers.keySet().iterator(); - assertEquals(IdFieldMapper.NAME, iterator.next()); + assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; while(iterator.hasNext()) { last = iterator.next(); From 231a63fdf83255182357e96000c10a32d83afcad Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 2 May 2018 11:34:15 +0200 Subject: [PATCH 02/30] Remove useless version checks in REST tests. (#30165) Many tests are added with a version check so that they do not run against a version that doesn't have the feature yet. Master is 7.0, so all tests that do not run against 6.0+ can be removed and the version check can be removed on all tests that always run on 6.0+. --- .../test/indices.analyze/10_analyze.yml | 3 - .../test/indices.analyze/10_synonyms.yml | 3 - .../rest-api-spec/test/11_parent_child.yml | 4 - .../rest-api-spec/test/20_parent_join.yml | 8 - .../test/delete_by_query/20_validation.yml | 4 - .../test/multi_cluster/30_field_caps.yml | 4 - .../rest-api-spec/test/bulk/10_basic.yml | 8 - .../test/cat.aliases/10_basic.yml | 8 - .../test/cat.indices/10_basic.yml | 3 - .../rest-api-spec/test/cat.nodes/10_basic.yml | 8 - .../test/cat.repositories/10_basic.yml | 3 - .../test/cat.segments/10_basic.yml | 4 - .../test/cat.shards/10_basic.yml | 7 - .../test/cat.templates/10_basic.yml | 26 --- .../cluster.allocation_explain/10_basic.yml | 12 -- .../test/cluster.put_settings/10_basic.yml | 5 - .../test/cluster.remote_info/10_info.yml | 3 - .../test/cluster.state/10_basic.yml | 4 - .../test/create/30_internal_version.yml | 4 - .../test/create/35_external_version.yml | 4 - .../test/field_caps/10_basic.yml | 12 -- .../test/indices.analyze/10_analyze.yml | 9 - .../test/indices.clear_cache/10_basic.yml | 4 - .../test/indices.create/10_basic.yml | 3 - .../test/indices.delete/10_basic.yml | 24 --- .../test/indices.delete_alias/10_basic.yml | 4 - .../indices.delete_alias/all_path_options.yml | 20 --- .../test/indices.exists_template/10_basic.yml | 4 - .../test/indices.exists_type/10_basic.yml | 6 - .../test/indices.get/10_basic.yml | 7 - .../test/indices.get_alias/10_basic.yml | 24 --- .../indices.get_mapping/20_missing_type.yml | 15 -- .../50_wildcard_expansion.yml | 4 - .../70_legacy_multi_type.yml | 166 ------------------ .../test/indices.get_template/10_basic.yml | 16 -- .../test/indices.open/10_basic.yml | 4 - .../test/indices.open/20_multiple_indices.yml | 12 -- .../test/indices.put_alias/10_basic.yml | 3 - .../indices.put_alias/all_path_options.yml | 12 -- .../test/indices.put_template/10_basic.yml | 24 --- .../test/indices.rollover/10_basic.yml | 8 - 
.../test/indices.segments/10_basic.yml | 4 - .../test/indices.shrink/20_source_mapping.yml | 4 - .../test/indices.sort/10_basic.yml | 4 - .../test/indices.stats/10_index.yml | 3 - .../test/indices.stats/11_metric.yml | 6 - .../test/indices.stats/20_translog.yml | 3 - .../test/indices.upgrade/10_basic.yml | 16 -- .../rest-api-spec/test/mget/15_ids.yml | 13 +- .../test/msearch/20_typed_keys.yml | 4 - .../test/nodes.stats/10_basic.yml | 3 - .../test/nodes.stats/11_indices_metrics.yml | 6 - .../rest-api-spec/test/scroll/11_clear.yml | 4 - .../test/search.aggregation/10_histogram.yml | 1 - .../test/search.aggregation/20_terms.yml | 33 ---- .../test/search.aggregation/40_range.yml | 6 - .../test/search.aggregation/50_filter.yml | 4 - .../70_adjacency_matrix.yml | 4 - .../test/search.aggregation/80_typed_keys.yml | 3 - .../test/search.aggregation/90_sig_text.yml | 8 - .../test/search.highlight/10_unified.yml | 3 - .../test/search.highlight/20_fvh.yml | 3 - .../test/search/110_field_collapsing.yml | 36 ---- .../test/search/120_batch_reduce_size.yml | 6 - .../search/140_pre_filter_search_shards.yml | 6 - .../search/150_rewrite_on_coordinator.yml | 4 - .../test/search/40_indices_boost.yml | 21 --- .../test/search/90_search_after.yml | 3 - .../test/search_shards/10_basic.yml | 4 - .../test/snapshot.get/10_basic.yml | 4 - .../test/suggest/40_typed_keys.yml | 3 - .../rest-api-spec/test/tasks.get/10_basic.yml | 3 - .../test/multi_cluster/30_field_caps.yml | 4 - .../test/mixed_cluster/20_security.yml | 3 - 74 files changed, 4 insertions(+), 734 deletions(-) delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_legacy_multi_type.yml diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index f8fc3acc02c..611c6703ebc 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -2,9 +2,6 @@ "Custom normalizer with illegal filter in request": # Tests analyze api with normalizer. This is in the analysis-common module # because there are no filters that support multiTermAware - - skip: - version: " - 5.99.99" - reason: normalizer support in 6.0.0 - do: catch: bad_request indices.analyze: diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml index 75dff3c7096..774d30b0b04 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_synonyms.yml @@ -1,9 +1,6 @@ "Synonym filter with char_filter": # Tests analyze with synonym and char_filter. This is in the analysis-common module # because there are no char filters in core. 
- - skip: - version: " - 5.99.99" - reason: to support synonym same analysis chain were added in 6.0.0 - do: indices.create: index: test_synonym_with_charfilter diff --git a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml index 2b7368f9ec4..3936e03f9b0 100644 --- a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml +++ b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml @@ -13,10 +13,6 @@ setup: --- "Parent/child inner hits": - - skip: - version: " - 5.5.99" - reason: parent-join was added in 5.6. - - do: index: index: test diff --git a/modules/parent-join/src/test/resources/rest-api-spec/test/20_parent_join.yml b/modules/parent-join/src/test/resources/rest-api-spec/test/20_parent_join.yml index 7eb364ae15b..d0e36dfd360 100644 --- a/modules/parent-join/src/test/resources/rest-api-spec/test/20_parent_join.yml +++ b/modules/parent-join/src/test/resources/rest-api-spec/test/20_parent_join.yml @@ -59,10 +59,6 @@ setup: --- "Test basic": - - skip: - version: " - 5.5.99" - reason: parent-join was added in 5.6 - - do: search: body: { sort: ["join_field", "_id"] } @@ -104,10 +100,6 @@ setup: --- "Test parent_id query": - - skip: - version: " - 5.5.99" - reason: parent-join was added in 5.6. - - do: search: body: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yml index 715e81f5ded..89ab990bf9b 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yml @@ -8,10 +8,6 @@ --- "no query fails": - - skip: - version: " - 5.99.99" - reason: explicit query is required since 6.0.0 - - do: catch: /query is missing/ delete_by_query: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml index e4463eb17a0..2144c281e40 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml @@ -1,9 +1,5 @@ --- "Get simple field caps from remote cluster": - - skip: - version: " - 5.4.99" - reason: this uses a new API functionality that has been added in 5.5.0 - - do: indices.create: index: field_caps_index_2 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml index 6bc9f0084b7..233ff32b418 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml @@ -25,9 +25,6 @@ --- "Empty _id": - - skip: - version: " - 5.3.0" - reason: empty IDs were not rejected until 5.3.1 - do: bulk: refresh: true @@ -62,11 +59,6 @@ --- "empty action": - - skip: - version: " - 5.4.99" - reason: confusing exception messaged caused by empty object fixed in 5.5.0 - features: ["headers"] - - do: catch: /Malformed action\/metadata line \[3\], expected FIELD_NAME but found \[END_OBJECT\]/ headers: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 12879fa412a..5892077236c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -129,10 +129,6 @@ --- "Multiple alias names": - - skip: - version: " - 5.99.99" - reason: multiple aliases are supported only from 6.0.0 on - - do: indices.create: index: test @@ -265,10 +261,6 @@ --- "Alias sorting": - - skip: - version: " - 5.0.99" - reason: sorting was introduced in 5.1.0 - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yml index 3e900132273..c7eddf42d1b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.indices/10_basic.yml @@ -160,9 +160,6 @@ --- "Test cat indices sort": - - skip: - version: " - 5.0.99" - reason: sorting was introduced in 5.1.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml index e1225ef5da6..7663b693105 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml @@ -60,10 +60,6 @@ --- "Additional disk information": - - skip: - version: " - 5.5.99" - reason: additional disk info added in 5.6.0 - - do: cat.nodes: h: diskAvail,diskTotal,diskUsed,diskUsedPercent @@ -92,10 +88,6 @@ --- "Test cat nodes output with full_id set": - - skip: - version: " - 5.0.0" - reason: The full_id setting was rejected in 5.0.0 see #21266 - - do: cat.nodes: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yml index 6d83274726e..ca1a03545e6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.repositories/10_basic.yml @@ -46,9 +46,6 @@ --- "Test cat repositories sort": - - skip: - version: " - 5.0.99" - reason: sorting was introduced in 5.1.0 - do: snapshot.create_repository: repository: test_cat_repo_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml index 3a05a9baa75..ba2684dc412 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.segments/10_basic.yml @@ -86,10 +86,6 @@ --- "Test cat segments on closed index behaviour": - - skip: - version: " - 5.99.99" - reason: status code on closed indices changed in 6.0.0 from 403 to 400 - - do: indices.create: index: index1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 53b9b741bdc..16551ede70b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,8 +1,5 @@ --- "Help": - - skip: - version: " - 5.99.99" - reason: seq no stats were added in 6.0.0 - do: cat.shards: @@ 
-219,10 +216,6 @@ --- "Test cat shards sort": - - skip: - version: " - 5.0.99" - reason: sorting was introduced in 5.1.0 - - do: indices.create: index: foo diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml index 7dd43e33bec..403b0b740c5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml @@ -1,8 +1,5 @@ --- "Help": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - do: cat.templates: help: true @@ -17,9 +14,6 @@ --- "No templates": - - skip: - version: " - 5.0.99" - reason: templates were introduced in 5.1.0 - do: cat.templates: {} @@ -31,10 +25,6 @@ --- "Normal templates": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.put_template: name: test @@ -83,10 +73,6 @@ --- "Filtered templates": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.put_template: name: test @@ -125,9 +111,6 @@ --- "Column headers": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - do: indices.put_template: name: test @@ -161,9 +144,6 @@ --- "Select columns": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - do: indices.put_template: name: test @@ -194,9 +174,6 @@ --- "Sort templates": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - do: indices.put_template: name: test @@ -245,9 +222,6 @@ --- "Multiple template": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - do: indices.put_template: name: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml index e88093c5c11..7dbc57dac8b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml @@ -1,8 +1,4 @@ "bad cluster shard allocation explanation request": - - skip: - version: " - 5.5.99" - reason: response status on bad request was changed starting in 5.6.0 - - do: # there aren't any unassigned shards to explain catch: /illegal_argument_exception/ @@ -10,10 +6,6 @@ --- "cluster shard allocation explanation test": - - skip: - version: " - 5.1.99" - reason: explain API response output was changed starting in 5.2.0 - - do: indices.create: index: test @@ -40,10 +32,6 @@ --- "cluster shard allocation explanation test with empty request": - - skip: - version: " - 5.1.99" - reason: explain API response output was changed starting in 5.2.0 - - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml index 9339e5797cf..d801f3aeac8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml @@ -64,11 +64,6 @@ --- "Test get a default settings": -# this can't be bumped to 5.0.2 until snapshots are published - - skip: - version: " - 
5.0.3" - reason: Fetching default group setting was buggy until 5.0.3 - - do: cluster.get_settings: include_defaults: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.remote_info/10_info.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.remote_info/10_info.yml index 4ba67f4ab0a..e11eff2b78a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.remote_info/10_info.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.remote_info/10_info.yml @@ -1,8 +1,5 @@ --- "Get an empty remote info": - - skip: - version: " - 5.3.99" - reason: this API doesn't exist in 5.3.x yet - do: cluster.remote_info: {} - is_true: '' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml index ae9637c08dd..ceed71c18e4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml @@ -7,10 +7,6 @@ --- "get cluster state returns cluster state size with human readable format": - - skip: - version: " - 5.99.99" - reason: "cluster state size is only available in v6.0.0 and higher" - - do: cluster.state: human: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml index afd5ea134fe..83772828bc8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yml @@ -21,10 +21,6 @@ --- "Internal versioning with explicit version": - - skip: - version: " - 5.1.1" - reason: validation logic only fixed from 5.1.2 onwards - - do: catch: bad_request create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml index ac1f1adcc94..cb8c041d710 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yml @@ -1,10 +1,6 @@ --- "External version": - - skip: - version: " - 5.1.1" - reason: validation logic only fixed from 5.1.2 onwards - - do: catch: bad_request create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml index f22afb91169..39320d12136 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yml @@ -75,9 +75,6 @@ setup: --- "Get simple field caps": - - skip: - version: " - 5.3.99" - reason: this uses a new API that has been added in 5.4.0 - do: field_caps: @@ -116,9 +113,6 @@ setup: - is_false: fields.geo.keyword.on_aggregatable_indices --- "Get nested field caps": - - skip: - version: " - 5.3.99" - reason: this uses a new API that has been added in 5.4.0 - do: field_caps: @@ -147,9 +141,6 @@ setup: - is_false: fields.object\.nested2.keyword.non_searchable_indices --- "Get prefix field caps": - - skip: - version: " - 5.3.99" - reason: this uses a new API that has been added in 5.4.0 - do: field_caps: @@ -168,9 +159,6 @@ setup: --- "Mix in non-existing field field caps": - - skip: - version: " - 5.4.0" - 
reason: "#24504 fixed a bug in this API in 5.4.1" - do: field_caps: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 3429de6ed45..824c48c8d99 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -58,9 +58,6 @@ --- "Custom filter in request": - - skip: - version: " - 5.99.99" - reason: token filter name changed in 6.0, so this needs to be skipped on mixed clusters - do: indices.analyze: body: @@ -81,9 +78,6 @@ --- "Synonym filter with tokenizer": - - skip: - version: " - 5.99.99" - reason: to support synonym same analysis chain were added in 6.0.0 - do: indices.create: index: test_synonym @@ -114,9 +108,6 @@ --- "Custom normalizer in request": - - skip: - version: " - 5.99.99" - reason: normalizer support in 6.0.0 - do: indices.analyze: body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yml index e1c4cac866e..099226e41e6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yml @@ -5,10 +5,6 @@ --- "clear_cache with request set to false": - - skip: - version: " - 5.3.99" - reason: this name was added in 5.4 - - do: indices.clear_cache: request: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml index b0aad6f64bc..8fafd9ef250 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -32,9 +32,6 @@ --- "Create index": - - skip: - version: " - 5.5.99" - reason: create index response contains index name since 5.6.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete/10_basic.yml index 783e65001ef..e43c835ae96 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete/10_basic.yml @@ -10,9 +10,6 @@ setup: index: index2 --- "Delete index against alias": - - skip: - version: " - 5.99.0" - reason: delete index doesn't support aliases only from 6.0.0 on - do: catch: bad_request indices.delete: @@ -24,9 +21,6 @@ setup: - is_true: index2 --- "Delete index against alias - ignore unavailable": - - skip: - version: " - 5.99.0" - reason: delete index doesn't support aliases only from 6.0.0 on - do: indices.delete: index: alias @@ -38,9 +32,6 @@ setup: - is_true: index2 --- "Delete index against alias - multiple indices": - - skip: - version: " - 5.99.0" - reason: delete index doesn't support aliases only from 6.0.0 on - do: catch: bad_request indices.delete: @@ -52,9 +43,6 @@ setup: - is_true: index2 --- "Delete index against alias - ignore unavailable - multiple indices": - - skip: - version: " - 5.99.0" - reason: delete index doesn't support aliases only from 6.0.0 on - do: indices.delete: index: alias,index2 @@ -67,9 +55,6 @@ setup: - is_false: index2 --- "Delete index against wildcard 
matching alias": - - skip: - version: " - 5.99.0" - reason: delete index doesn't support aliases only from 6.0.0 on - do: indices.delete: index: alia* @@ -80,9 +65,6 @@ setup: - is_true: index2 --- "Delete index against wildcard matching alias - disallow no indices": - - skip: - version: " - 5.99.0" - reason: delete index doesn't support aliases only from 6.0.0 on - do: catch: missing indices.delete: @@ -95,9 +77,6 @@ setup: - is_true: index2 --- "Delete index against wildcard matching alias - multiple indices": - - skip: - version: " - 5.99.0" - reason: delete index doesn't support aliases only from 6.0.0 on - do: indices.delete: index: alia*,index2 @@ -109,9 +88,6 @@ setup: - is_false: index2 --- "Delete index against wildcard matching alias - disallow no indices - multiple indices": - - skip: - version: " - 5.99.0" - reason: delete index doesn't support aliases only from 6.0.0 on - do: catch: missing indices.delete: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/10_basic.yml index e9645e69a53..74684901579 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/10_basic.yml @@ -1,9 +1,5 @@ --- "Basic test for delete alias": - - skip: - version: " - 5.4.99" - reason: Previous versions did not 404 on missing aliases - - do: indices.create: index: testind diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/all_path_options.yml index 2882250519c..d1d01cbaaa7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.delete_alias/all_path_options.yml @@ -84,10 +84,6 @@ setup: --- "check delete with index list": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.delete_alias: index: "test_index1,test_index2" @@ -110,10 +106,6 @@ setup: --- "check delete with prefix* index": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.delete_alias: index: "test_*" @@ -137,10 +129,6 @@ setup: --- "check delete with index list and * aliases": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.delete_alias: index: "test_index1,test_index2" @@ -164,10 +152,6 @@ setup: --- "check delete with index list and _all aliases": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.delete_alias: index: "test_index1,test_index2" @@ -191,10 +175,6 @@ setup: --- "check delete with index list and wildcard aliases": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.delete_alias: index: "test_index1,test_index2" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml index 0dd1a452548..67592a013e8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_template/10_basic.yml @@ -7,10 +7,6 @@ setup: --- "Test indices.exists_template": - 
- skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.exists_template: name: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_type/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_type/10_basic.yml index 813868e2201..f9b46aa800e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_type/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.exists_type/10_basic.yml @@ -1,17 +1,11 @@ --- "Exists type": - - skip: - # this will only run in a mixed cluster environment with at least 1 5.x node - version: "5.99.99 - " - reason: multiple types are not supported on 6.x indices onwards - - do: indices.create: index: test_1 body: mappings: type_1: {} - type_2: {} - do: indices.exists_type: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml index e30af208aeb..6301087f489 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get/10_basic.yml @@ -137,10 +137,6 @@ setup: --- "Should return test_index_3 if expand_wildcards=closed": - - skip: - version: " - 2.0.0" - reason: Requires fix for issue 7258 - - do: indices.get: index: test_index_* @@ -162,9 +158,6 @@ setup: --- "Should return an exception when querying invalid indices": - - skip: - version: " - 5.99.99" - reason: "bad request logic added in 6.0.0" - do: catch: bad_request diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 4e3861b059b..2c5419589ec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -42,10 +42,6 @@ setup: --- "Get aliases via /_all/_alias/": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.create: index: myindex @@ -62,10 +58,6 @@ setup: --- "Get aliases via /*/_alias/": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.create: index: myindex @@ -82,10 +74,6 @@ setup: --- "Get and index with no aliases via /{index}/_alias/": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.create: index: myindex @@ -222,10 +210,6 @@ setup: --- "Non-existent alias on an existing index returns 404": - - skip: - version: " - 5.4.99" - reason: Previous versions did not 404 on missing aliases - - do: catch: missing indices.get_alias: @@ -237,10 +221,6 @@ setup: --- "Existent and non-existent alias returns 404 and the existing alias": - - skip: - version: " - 5.4.99" - reason: Previous versions did not 404 on missing aliases - - do: catch: missing indices.get_alias: @@ -253,10 +233,6 @@ setup: --- "Existent and non-existent aliases returns 404 and the existing alias": - - skip: - version: " - 5.4.99" - reason: Previous versions did not 404 on missing aliases - - do: catch: missing indices.get_alias: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml index bb990296f7c..eecf1786e00 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/20_missing_type.yml @@ -1,8 +1,5 @@ --- "Non-existent type returns 404": - - skip: - version: " - 5.5.99" - reason: Previous versions did not 404 on missing types - do: indices.create: index: test_index @@ -25,9 +22,6 @@ --- "No type matching pattern returns 404": - - skip: - version: " - 5.5.99" - reason: Previous versions did not 404 on missing types - do: indices.create: index: test_index @@ -51,9 +45,6 @@ --- "Existent and non-existent type returns 404 and the existing type": - - skip: - version: " - 5.5.99" - reason: Previous versions did not 404 on missing types - do: indices.create: index: test_index @@ -77,9 +68,6 @@ --- "Existent and non-existent types returns 404 and the existing type": - - skip: - version: " - 5.5.99" - reason: Previous versions did not 404 on missing types - do: indices.create: index: test_index @@ -103,9 +91,6 @@ --- "Type missing when no types exist": - - skip: - version: " - 5.0.2" - reason: there was a bug prior to 5.0.2 - do: catch: missing indices.get_mapping: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml index 224cc3ec225..a0552f395ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml @@ -94,10 +94,6 @@ setup: --- "Get test-* with wildcard_expansion=none": - - skip: - version: " - 5.99.99" - reason: this was a breaking change in 6.0 - - do: catch: missing indices.get_mapping: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_legacy_multi_type.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_legacy_multi_type.yml deleted file mode 100644 index 9334afc4d76..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_legacy_multi_type.yml +++ /dev/null @@ -1,166 +0,0 @@ ---- -setup: - - - skip: - # this will only run in a mixed cluster environment with at least 1 5.x node - version: "5.99.99 - " - reason: multiple types are not supported on 6.x indices onwards - - - do: - indices.create: - index: test_1 - body: - mappings: - type_1: {} - type_2: {} - - do: - indices.create: - index: test_2 - body: - mappings: - type_2: {} - type_3: {} - ---- -"Get /_mapping": - - - do: - indices.get_mapping: {} - - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_true: test_2.mappings.type_3 - ---- -"Get /{index}/_mapping": - - - do: - indices.get_mapping: - index: test_1 - - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 - - is_false: test_2 - - ---- -"Get /{index}/_mapping/_all": - - - do: - indices.get_mapping: - index: test_1 - type: _all - - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 - - is_false: test_2 - ---- -"Get /{index}/_mapping/*": - - - do: - indices.get_mapping: - index: test_1 - type: '*' - - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 - - is_false: test_2 - ---- -"Get /{index}/_mapping/{type}": - - - do: - indices.get_mapping: - index: test_1 - type: type_1 - - - is_false: test_1.mappings.type_2 - - is_false: test_2 - ---- -"Get 
/{index}/_mapping/{type,type}": - - - do: - indices.get_mapping: - index: test_1 - type: type_1,type_2 - - - is_true: test_1.mappings.type_1 - - is_true: test_1.mappings.type_2 - - is_false: test_2 - ---- -"Get /{index}/_mapping/{type*}": - - - do: - indices.get_mapping: - index: test_1 - type: '*2' - - - is_true: test_1.mappings.type_2 - - is_false: test_1.mappings.type_1 - - is_false: test_2 - ---- -"Get /_mapping/{type}": - - - do: - indices.get_mapping: - type: type_2 - - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_false: test_1.mappings.type_1 - - is_false: test_2.mappings.type_3 - ---- -"Get /_all/_mapping/{type}": - - - do: - indices.get_mapping: - index: _all - type: type_2 - - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_false: test_1.mappings.type_1 - - is_false: test_2.mappings.type_3 - ---- -"Get /*/_mapping/{type}": - - - do: - indices.get_mapping: - index: '*' - type: type_2 - - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_false: test_1.mappings.type_1 - - is_false: test_2.mappings.type_3 - ---- -"Get /index,index/_mapping/{type}": - - - do: - indices.get_mapping: - index: test_1,test_2 - type: type_2 - - - is_true: test_1.mappings.type_2 - - is_true: test_2.mappings.type_2 - - is_false: test_2.mappings.type_3 - ---- -"Get /index*/_mapping/{type}": - - - do: - indices.get_mapping: - index: '*2' - type: type_2 - - - is_true: test_2.mappings.type_2 - - is_false: test_1 - - is_false: test_2.mappings.type_3 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml index 0746c9e805a..a03a10c1a5a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_template/10_basic.yml @@ -11,10 +11,6 @@ setup: --- "Get template": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.get_template: name: test @@ -25,10 +21,6 @@ setup: --- "Get all templates": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.put_template: name: test2 @@ -46,10 +38,6 @@ setup: --- "Get template with local flag": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.get_template: name: test @@ -60,10 +48,6 @@ setup: --- "Get template with flat settings and master timeout": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.get_template: name: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml index 2fa6b34681b..8b46aec04b2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/10_basic.yml @@ -1,9 +1,5 @@ --- "Basic test for index open/close": - - skip: - version: " - 5.99.99" - reason: status code on closed indices changed in 6.0.0 from 403 to 400 - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml index 944338123d1..1aecbcf37d7 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml @@ -23,10 +23,6 @@ setup: --- "All indices": - - skip: - version: " - 5.99.99" - reason: status code on closed indices changed in 6.0.0 from 403 to 400 - - do: indices.close: index: _all @@ -50,10 +46,6 @@ setup: --- "Trailing wildcard": - - skip: - version: " - 5.99.99" - reason: status code on closed indices changed in 6.0.0 from 403 to 400 - - do: indices.close: index: test_* @@ -77,10 +69,6 @@ setup: --- "Only wildcard": - - skip: - version: " - 5.99.99" - reason: status code on closed indices changed in 6.0.0 from 403 to 400 - - do: indices.close: index: '*' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml index 32a5be62765..9978bb219f6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/10_basic.yml @@ -30,9 +30,6 @@ --- "Can't create alias with invalid characters": - - skip: - version: " - 5.0.99" - reason: alias name validation was introduced in 5.1.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/all_path_options.yml index aa030aa5546..bef57bbddf1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_alias/all_path_options.yml @@ -14,10 +14,6 @@ setup: --- "put alias per index": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.put_alias: index: test_index1 @@ -72,10 +68,6 @@ setup: --- "put alias prefix* index": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.put_alias: index: "test_*" @@ -91,10 +83,6 @@ setup: --- "put alias in list of indices": - - skip: - version: " - 5.99.99" - reason: only requested indices are included in 6.x - - do: indices.put_alias: index: "test_index1,test_index2" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml index 3e8b3db468e..b4e66c23c60 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_template/10_basic.yml @@ -1,10 +1,6 @@ --- "Put template": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.put_template: name: test @@ -25,10 +21,6 @@ --- "Put multiple template": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.put_template: name: test @@ -49,10 +41,6 @@ --- "Put template with aliases": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.put_template: name: test @@ -77,10 +65,6 @@ --- "Put template create": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.put_template: name: test @@ -113,10 +97,6 @@ --- "Test Put Versioned Template": - - skip: - version: " - 5.99.99" - 
reason: this uses a new API that has been added in 6.0 - - do: indices.put_template: name: "my_template" @@ -214,10 +194,6 @@ --- "Put index template without index_patterns": - - skip: - version: " - 5.99.99" - reason: the error message is updated in v6.0.0 - - do: catch: /index patterns are missing/ indices.put_template: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml index a797edcfa4e..3a2b859a5b7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml @@ -76,10 +76,6 @@ --- "Rollover no condition matched": - - skip: - version: " - 5.0.0" - reason: bug fixed in 5.0.1 - # create index with alias - do: indices.create: @@ -108,10 +104,6 @@ --- "Rollover with dry-run but target index exists": - - skip: - version: " - 5.0.0" - reason: bug fixed in 5.0.1 - dry run was returning just fine even if the index exists - # create index with alias - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml index 64d94535a9c..37602774474 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.segments/10_basic.yml @@ -43,10 +43,6 @@ --- "closed segments test": - - skip: - version: " - 5.99.99" - reason: status code on closed indices changed in 6.0.0 from 403 to 400 - - do: indices.create: index: index1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index 13e52a2dc80..d96e1dbdcb9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -1,9 +1,5 @@ --- "Shrink index ignores target template mapping": - - skip: - version: " - 5.99.99" - reason: bug fixed in 5.6.0 - - do: cluster.state: {} # Get master node id diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml index 550b868ff49..9281882a70a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -1,10 +1,6 @@ --- "Index Sort": - - skip: - version: " - 5.99.99" - reason: this uses a new feature that has been added in 6.0.0 - - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml index a0e131024b6..b70fac8cf04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yml @@ -102,9 +102,6 @@ setup: --- "Indices stats unrecognized parameter": - - skip: - version: " - 5.0.99" - reason: strict stats handling does not exist in 5.0 - do: catch: bad_request indices.stats: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yml index e030a27165d..0f373b7177c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yml @@ -123,9 +123,6 @@ setup: --- "Metric - _all include_segment_file_sizes": - - skip: - version: " - 5.1.1" - reason: including segment file sizes triggered an unrecognized parameter in <= 5.1.1 - do: indices.stats: { metric: _all, include_segment_file_sizes: true } @@ -148,9 +145,6 @@ setup: --- "Metric - segments include_segment_file_sizes": - - skip: - version: " - 5.1.1" - reason: including segment file sizes triggered an unrecognized parameter in <= 5.1.1 - do: indices.stats: { metric: segments, include_segment_file_sizes: true } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index 5c9ec3e597a..15fb7c33f3c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -9,9 +9,6 @@ setup: --- "Translog retention": - - skip: - version: " - 5.99.0" - reason: translog retention was added in 6.0.0 - do: indices.stats: metric: [ translog ] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yml index abb30eb970e..55070cb8c1f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.upgrade/10_basic.yml @@ -18,10 +18,6 @@ --- "Upgrade indices ignore unavailable": - - skip: - version: " - 5.0.0" - reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 - - do: indices.create: index: test_index @@ -43,10 +39,6 @@ --- "Upgrade indices allow no indices": - - skip: - version: " - 5.0.0" - reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 - - do: indices.upgrade: index: test_index @@ -58,10 +50,6 @@ --- "Upgrade indices disallow no indices": - - skip: - version: " - 5.0.0" - reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 - - do: catch: missing indices.upgrade: @@ -72,10 +60,6 @@ --- "Upgrade indices disallow unavailable": - - skip: - version: " - 5.0.0" - reason: ignore_unavailable was added as a bugfix in 5.0.1 see #21281 - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml index 3516c2a877a..6c233e4d92a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/15_ids.yml @@ -1,10 +1,5 @@ --- "IDs": - - skip: - # this will only run in a mixed cluster environment with at least 1 5.x node - version: "5.99.99 - " - reason: multiple types are not supported on 6.x indices onwards - - do: indices.create: index: test_1 @@ -19,7 +14,7 @@ - do: index: index: test_1 - type: test_2 + type: test id: 2 body: { foo: baz } @@ -28,7 +23,7 @@ index: test_1 type: test body: - ids: [1, 2] + ids: [1, 3] - is_true: docs.0.found - match: { docs.0._index: test_1 } @@ -40,7 +35,7 @@ - is_false: docs.1.found - match: { docs.1._index: test_1 } - match: { docs.1._type: test } - - match: { 
docs.1._id: "2" } + - match: { docs.1._id: "3" } - do: mget: @@ -57,7 +52,7 @@ - is_true: docs.1.found - match: { docs.1._index: test_1 } - - match: { docs.1._type: test_2 } + - match: { docs.1._type: test } - match: { docs.1._id: "2" } - match: { docs.1._version: 1 } - match: { docs.1._source: { foo: baz }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/20_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/20_typed_keys.yml index 360405fd317..7348cd4d14b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/20_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/20_typed_keys.yml @@ -1,9 +1,5 @@ --- setup: - - skip: - version: " - 5.3.99" - reason: typed_keys parameter was added in 5.4.0 - - do: indices.create: index: test-0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml index 07f32ff4132..61614e7f8e1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml @@ -23,9 +23,6 @@ --- "Nodes stats unrecognized parameter": - - skip: - version: " - 5.0.99" - reason: strict stats handling does not exist in 5.0 - do: catch: bad_request nodes.stats: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index c9046758b35..998909dd9cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -157,9 +157,6 @@ --- "Metric - _all include_segment_file_sizes": - - skip: - version: " - 5.1.1" - reason: including segment file sizes triggered an unrecognized parameter in <= 5.1.1 - do: cluster.state: {} @@ -187,9 +184,6 @@ --- "Metric - segments include_segment_file_sizes": - - skip: - version: " - 5.1.1" - reason: including segment file sizes triggered an unrecognized parameter in <= 5.1.1 - do: cluster.state: {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/11_clear.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/11_clear.yml index c2a026df1d7..4368cf790e5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/11_clear.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/11_clear.yml @@ -79,10 +79,6 @@ --- "Body params with string param scroll id override query string": - - skip: - version: " - 5.99.99" - reason: this uses a new API that has been added in 6.0 - - do: indices.create: index: test_scroll diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index 4955dcfb4da..11aaa93aebf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -150,7 +150,6 @@ setup: "Deprecated _time order": - skip: - version: " - 5.99.99" reason: _time order deprecated in 6.0, replaced by _key features: "warnings" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 5ac79a89881..0086b3c5941 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -370,10 +370,6 @@ setup: --- "Partitioned string test": - - skip: - version: " - 5.1.99" - reason: Partitioning is a 5.2.0 feature - - do: index: index: test_1 @@ -429,10 +425,6 @@ setup: --- "Partitioned integer test": - - skip: - version: " - 5.1.99" - reason: Partitioning is a 5.2.0 feature - - do: index: index: test_1 @@ -484,10 +476,6 @@ setup: --- "Unmapped strings": - - skip: - version: " - 5.1.99" - reason: Unmapped fields handling with value_type was added in 5.2 - - do: index: index: test_1 @@ -513,10 +501,6 @@ setup: --- "Unmapped booleans": - - skip: - version: " - 5.1.99" - reason: Unmapped fields handling with value_type was added in 5.2 - - do: index: index: test_1 @@ -544,10 +528,6 @@ setup: --- "Unmapped dates": - - skip: - version: " - 5.1.99" - reason: Unmapped fields handling with value_type was added in 5.2 - - do: index: index: test_1 @@ -575,10 +555,6 @@ setup: --- "Unmapped longs": - - skip: - version: " - 5.1.99" - reason: Unmapped fields handling with value_type was added in 5.2 - - do: index: index: test_1 @@ -604,10 +580,6 @@ setup: --- "Unmapped doubles": - - skip: - version: " - 5.1.99" - reason: Unmapped fields handling with value_type was added in 5.2 - - do: index: index: test_1 @@ -633,10 +605,6 @@ setup: --- "Mixing longs and doubles": - - skip: - version: " - 5.99.99" - reason: in 6.0 longs and doubles are compatible within a terms agg (longs are promoted to doubles) - - do: index: index: test_1 @@ -699,7 +667,6 @@ setup: "Deprecated _term order": - skip: - version: " - 5.99.99" reason: _term order deprecated in 6.0, replaced by _key features: "warnings" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml index 366243c78ee..9a07e6f8ad5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -112,9 +112,6 @@ setup: --- "IP range": - - skip: - version: " - 5.1.1" - reason: IP range queries had an exclusive range bug prior to 5.1.2 - do: index: index: test @@ -228,9 +225,6 @@ setup: --- "Date range": - - skip: - version: " - 5.99.99" - reason: before 6.0, numeric date_range to/from parameters were always parsed as if they are epoch_millis (#17920) - do: index: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml index a094628ae92..14e83433ff4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/50_filter.yml @@ -34,10 +34,6 @@ setup: --- "Filter aggs with terms lookup and ensure it's cached": # Because the filter agg rewrites the terms lookup in the rewrite phase the request can be cached - - skip: - version: " - 5.99.99" - reason: This using filter aggs that are rewritten, this was added in 6.0.0 - - do: search: size: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml index dc018363bf4..9416f4c3f29 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml @@ -40,10 +40,6 @@ setup: --- "Filters intersections": - - skip: - version: " - 5.2.99" - reason: Adjacency Matrix is a 5.3.0 feature - - do: search: body: { "size": 0, "aggs": { "conns": { "adjacency_matrix": { "filters": { "1": { "term": { "num": 1 } }, "2": { "term": { "num": 2 } }, "4": { "term": { "num": 4 } } } } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml index 9ea856c856e..841d5cf611b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 5.3.99" - reason: typed_keys parameter was added in 5.4.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml index 6f368463aa0..305623e6f04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml @@ -1,10 +1,6 @@ --- "Default index": - - skip: - version: " - 5.99.99" - reason: this uses a new feature that has been added in 6.0.0 - - do: indices.create: index: goodbad @@ -86,10 +82,6 @@ --- "Dedup noise": - - skip: - version: " - 5.99.99" - reason: this uses a new feature that has been added in 6.0.0 - - do: indices.create: index: goodbad diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/10_unified.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/10_unified.yml index 644e8c4ec5a..b799fb8f3e0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/10_unified.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/10_unified.yml @@ -27,9 +27,6 @@ setup: --- "Basic": - - skip: - version: " - 5.99.99" - reason: this uses a new highlighter that has been added in 5.3 - do: search: body: { "query" : {"multi_match" : { "query" : "quick brown fox", "fields" : [ "text*"] } }, "highlight" : { "type" : "unified", "fields" : { "*" : {} } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml index d4cb980a05c..58590236b87 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/20_fvh.yml @@ -25,9 +25,6 @@ setup: --- "Highlight query": - - skip: - version: " - 5.5.99" - reason: bug fixed in 5.6 - do: search: body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index a7998a0b2f9..2dfd868d66b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -57,10 +57,6 @@ setup: --- "field collapsing": - - skip: - version: " - 5.2.99" - reason: this uses a new API that has been added in 5.3 - - do: search: index: test @@ -92,10 +88,6 @@ setup: --- "field collapsing and from": - - skip: - version: " - 5.2.99" - reason: this uses a new API that has been added in 5.3 - - do: search: index: test @@ -116,10 +108,6 @@ setup: --- "field collapsing and inner_hits": - - skip: - version: " - 5.2.99" - reason: this uses a new API that has been added in 5.3 - - do: search: index: test @@ -159,10 +147,6 @@ setup: --- "field collapsing, inner_hits and maxConcurrentGroupRequests": - - skip: - version: " - 5.2.99" - reason: this uses a new API that has been added in 5.3 - - do: search: index: test @@ -203,10 +187,6 @@ setup: --- "field collapsing and scroll": - - skip: - version: " - 5.2.99" - reason: this uses a new API that has been added in 5.3 - - do: catch: /cannot use \`collapse\` in a scroll context/ search: @@ -218,10 +198,6 @@ setup: --- "field collapsing and search_after": - - skip: - version: " - 5.2.99" - reason: this uses a new API that has been added in 5.3 - - do: catch: /cannot use \`collapse\` in conjunction with \`search_after\`/ search: @@ -234,10 +210,6 @@ setup: --- "field collapsing and rescore": - - skip: - version: " - 5.2.99" - reason: this uses a new API that has been added in 5.3 - - do: catch: /cannot use \`collapse\` in conjunction with \`rescore\`/ search: @@ -255,10 +227,6 @@ setup: --- "no hits and inner_hits": - - skip: - version: " - 5.4.0" - reason: "bug fixed in 5.4.1" - - do: search: index: test @@ -273,10 +241,6 @@ setup: --- "field collapsing and multiple inner_hits": - - skip: - version: " - 5.4.99" - reason: Multiple inner_hits is a new feature added in 5.5 - - do: search: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yml index 7444b1bba17..e57dfaa8e93 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/120_batch_reduce_size.yml @@ -14,9 +14,6 @@ setup: --- "batched_reduce_size lower limit": - - skip: - version: " - 5.3.99" - reason: this was added in 5.4.0 - do: catch: /batchedReduceSize must be >= 2/ search: @@ -26,9 +23,6 @@ setup: --- "batched_reduce_size 2 with 5 shards": - - skip: - version: " - 5.3.99" - reason: this was added in 5.4.0 - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml index 343808e3374..dc6b130b289 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml @@ -39,9 +39,6 @@ setup: --- "pre_filter_shard_size with invalid parameter": - - skip: - version: " - 5.5.99" - reason: this was added in 5.6.0 - do: catch: /preFilterShardSize must be >= 1/ search: @@ -50,9 +47,6 @@ setup: --- "pre_filter_shard_size with shards that have no hit": - - skip: - version: " - 5.5.99" - reason: this was added in 5.6.0 - do: index: index: index_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/150_rewrite_on_coordinator.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search/150_rewrite_on_coordinator.yml index fa161bd7182..deeb18ef848 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/150_rewrite_on_coordinator.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/150_rewrite_on_coordinator.yml @@ -1,8 +1,4 @@ "Ensure that we fetch the document only once": - - skip: - version: " - 5.99.99" - reason: this was added in 6.0.0 - - do: indices.create: index: search_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/40_indices_boost.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/40_indices_boost.yml index 8271b0583f7..b6793f9e225 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/40_indices_boost.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/40_indices_boost.yml @@ -37,7 +37,6 @@ setup: --- "Indices boost using object": - skip: - version: " - 5.1.99" reason: deprecation was added in 5.2.0 features: "warnings" @@ -67,10 +66,6 @@ setup: --- "Indices boost using array": - - skip: - version: " - 5.1.99" - reason: array format was added in 5.2.0 - - do: search: index: _all @@ -93,10 +88,6 @@ setup: --- "Indices boost using array with alias": - - skip: - version: " - 5.1.99" - reason: array format was added in 5.2.0 - - do: search: index: _all @@ -119,10 +110,6 @@ setup: --- "Indices boost using array with wildcard": - - skip: - version: " - 5.1.99" - reason: array format was added in 5.2.0 - - do: search: index: _all @@ -145,10 +132,6 @@ setup: --- "Indices boost using array multiple match": - - skip: - version: " - 5.1.99" - reason: array format was added in 5.2.0 - - do: search: index: _all @@ -173,10 +156,6 @@ setup: --- "Indices boost for nonexistent index/alias": - - skip: - version: " - 5.1.99" - reason: array format was added in 5.2.0 - - do: catch: /no such index/ search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 3392adb50ac..968095ae698 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -30,9 +30,6 @@ setup: --- "search with search_after parameter": - - skip: - version: " - 5.99.99" - reason: fielddata on _id is only available as of 6.0.0 - do: search: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml index c2d341e3439..b95b7c644e2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml @@ -13,10 +13,6 @@ --- "Search shards aliases with and without filters": - - skip: - version: " - 5.4.0" - reason: "#24489 fixed a bug that not all aliases where added in 5.4.1 - indices section was added in 5.1.0" - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml index d17655daad2..aa15ca34ff0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get/10_basic.yml @@ -61,10 +61,6 @@ setup: --- "Get snapshot info when 
verbose is false": - - skip: - version: " - 5.99.99" - reason: verbose mode was introduced in 6.0 - - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml index 139c972eea6..dffc1fdd770 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml @@ -1,7 +1,4 @@ setup: - - skip: - version: " - 5.3.99" - reason: typed_keys parameter was added in 5.4.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml index fdb62cdce2e..caf97b302f1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml @@ -1,8 +1,5 @@ --- "get task test": - - skip: - version: " - 5.1.99" - reason: massage was changed in 5.2.0 # Note that this gets much better testing in reindex's tests because it actually saves the task - do: cluster.state: {} diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml index 937c0ddec9a..c960f2f1432 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml @@ -39,10 +39,6 @@ teardown: ignore: 404 --- "Get simple field caps from remote cluster": - - skip: - version: " - 5.4.99" - reason: this uses a new API that has been added in 5.5.0 - - do: indices.create: index: field_caps_index_2 diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml index e9189a916bb..750bedc4c6d 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml @@ -31,9 +31,6 @@ --- "verify users for default password migration in mixed cluster": - - skip: - version: " - 5.1.1" - reason: "the rest enabled action used by the old cluster test trips an assertion. see https://github.com/elastic/x-pack/pull/4443" - do: xpack.security.get_user: username: "kibana,logstash_system" From 368ddc408f00d4cbad89fe68933d9a2b739f6cf4 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 2 May 2018 11:35:12 +0200 Subject: [PATCH 03/30] Remove MapperService#types(). (#29617) This isn't be necessary with a single type per index. 
--- .../percolator/PercolateQueryBuilder.java | 6 +----- .../TransportGetFieldMappingsIndexAction.java | 15 +++++++-------- .../index/fieldvisitor/FieldsVisitor.java | 9 ++++----- .../index/get/ShardGetService.java | 19 ++++++------------- .../index/mapper/MapperService.java | 9 --------- .../index/mapper/TypeFieldMapper.java | 17 ++++------------- .../index/query/IdsQueryBuilder.java | 12 +++++------- .../index/query/QueryShardContext.java | 9 ++++----- .../RandomScoreFunctionBuilder.java | 2 +- .../index/mapper/TypeFieldTypeTests.java | 15 +++++++-------- .../index/query/IdsQueryBuilderTests.java | 2 +- .../aggregations/AggregatorTestCase.java | 6 ++++-- 12 files changed, 44 insertions(+), 77 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 902d46c66aa..c1063795193 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -581,11 +581,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder docs = new ArrayList<>(); final DocumentMapper docMapper; final MapperService mapperService = context.getMapperService(); - Collection types = mapperService.types(); - if (types.size() != 1) { - throw new IllegalStateException("Only a single type should exist, but [" + types.size() + " types exists"); - } - String type = types.iterator().next(); + String type = mapperService.documentMapper().type(); if (documentType != null) { DEPRECATION_LOGGER.deprecated("[document_type] parameter has been deprecated because types have been deprecated"); if (documentType.equals(type) == false) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index e9551e6e69d..54079592224 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -47,13 +47,11 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; -import java.util.stream.Collectors; import static java.util.Collections.singletonMap; @@ -96,15 +94,16 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc Predicate metadataFieldPredicate = indicesService::isMetaDataField; Predicate fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName())); + DocumentMapper mapper = indexService.mapperService().documentMapper(); Collection typeIntersection; if (request.types().length == 0) { - typeIntersection = indexService.mapperService().types(); - + typeIntersection = mapper == null + ? 
Collections.emptySet() + : Collections.singleton(mapper.type()); } else { - typeIntersection = indexService.mapperService().types() - .stream() - .filter(type -> Regex.simpleMatch(request.types(), type)) - .collect(Collectors.toCollection(ArrayList::new)); + typeIntersection = mapper != null && Regex.simpleMatch(request.types(), mapper.type()) + ? Collections.singleton(mapper.type()) + : Collections.emptySet(); if (typeIntersection.isEmpty()) { throw new TypeMissingException(shardId.getIndex(), request.types()); } diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java index 0151fc9ec62..4c65635c61b 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.StoredFieldVisitor; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -34,7 +35,6 @@ import org.elasticsearch.index.mapper.Uid; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -84,10 +84,9 @@ public class FieldsVisitor extends StoredFieldVisitor { } public void postProcess(MapperService mapperService) { - final Collection types = mapperService.types(); - assert types.size() <= 1 : types; - if (types.isEmpty() == false) { - type = types.iterator().next(); + final DocumentMapper mapper = mapperService.documentMapper(); + if (mapper != null) { + type = mapper.type(); } for (Map.Entry> entry : fields().entrySet()) { MappedFieldType fieldType = mapperService.fullName(entry.getKey()); diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 50f73944d87..cb5ff580434 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -48,8 +48,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -149,23 +147,18 @@ public final class ShardGetService extends AbstractIndexShardComponent { private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean readFromTranslog) { fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); - final Collection types; if (type == null || type.equals("_all")) { - types = mapperService.types(); - } else { - types = Collections.singleton(type); + DocumentMapper mapper = mapperService.documentMapper(); + type = mapper == null ? 
null : mapper.type(); } Engine.GetResult get = null; - for (String typeX : types) { - Term uidTerm = mapperService.createUidTerm(typeX, id); + if (type != null) { + Term uidTerm = mapperService.createUidTerm(type, id); if (uidTerm != null) { - get = indexShard.get(new Engine.Get(realtime, readFromTranslog, typeX, id, uidTerm) + get = indexShard.get(new Engine.Get(realtime, readFromTranslog, type, id, uidTerm) .version(version).versionType(versionType)); - if (get.exists()) { - type = typeX; - break; - } else { + if (get.exists() == false) { get.release(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index b28ea695f82..a06288b67e3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -673,15 +673,6 @@ public class MapperService extends AbstractIndexComponent implements Closeable { return documentParser.parse(mappingType, mappingSource, applyDefault ? defaultMappingSource : null); } - /** - * Get the set of types. - * @deprecated Indices may have one type at most, use {@link #documentMapper()} instead. - */ - @Deprecated - public Set types() { - return mapper == null ? Collections.emptySet() : Collections.singleton(mapper.type()); - } - /** * Return the document mapper, or {@code null} if no mapping has been put yet. */ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index 36bd4b137cf..5172cb652e2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -114,15 +114,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { return new DocValuesIndexFieldData.Builder(); } else { // means the index has a single type and the type field is implicit - Function typeFunction = mapperService -> { - Collection types = mapperService.types(); - if (types.size() > 1) { - throw new AssertionError(); - } - // If we reach here, there is necessarily one type since we were able to find a `_type` field - String type = types.iterator().next(); - return type; - }; + Function typeFunction = mapperService -> mapperService.documentMapper().type(); return new ConstantIndexFieldData.Builder(typeFunction); } } @@ -144,12 +136,11 @@ public class TypeFieldMapper extends MetadataFieldMapper { @Override public Query termsQuery(List values, QueryShardContext context) { - Collection indexTypes = context.getMapperService().types(); - if (indexTypes.isEmpty()) { + DocumentMapper mapper = context.getMapperService().documentMapper(); + if (mapper == null) { return new MatchNoDocsQuery("No types"); } - assert indexTypes.size() == 1; - BytesRef indexType = indexedValueForSearch(indexTypes.iterator().next()); + BytesRef indexType = indexedValueForSearch(mapper.type()); if (values.stream() .map(this::indexedValueForSearch) .anyMatch(indexType::equals)) { diff --git a/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java index 68c872e9cbb..7cbd38f3398 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java @@ -31,9 +31,9 @@ import org.elasticsearch.common.lucene.search.Queries; import 
org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Uid; import java.io.IOException; import java.util.ArrayList; @@ -163,19 +163,17 @@ public class IdsQueryBuilder extends AbstractQueryBuilder { if (this.ids.isEmpty()) { return Queries.newMatchNoDocsQuery("Missing ids in \"" + this.getName() + "\" query."); } else { + final DocumentMapper mapper = context.getMapperService().documentMapper(); Collection typesForQuery; if (types.length == 0) { typesForQuery = context.queryTypes(); } else if (types.length == 1 && MetaData.ALL.equals(types[0])) { - typesForQuery = context.getMapperService().types(); + typesForQuery = Collections.singleton(mapper.type()); } else { - typesForQuery = new HashSet<>(); - Collections.addAll(typesForQuery, types); + typesForQuery = new HashSet<>(Arrays.asList(types)); } - final Collection mappingTypes = context.getMapperService().types(); - assert mappingTypes.size() == 1; - if (typesForQuery.contains(mappingTypes.iterator().next())) { + if (typesForQuery.contains(mapper.type())) { return idField.termsQuery(new ArrayList<>(ids), context); } else { return new MatchNoDocsQuery("Type mismatch"); diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 32a1f64d37b..6bb69c0cab9 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -56,6 +56,7 @@ import org.elasticsearch.transport.RemoteClusterAware; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -262,11 +263,9 @@ public class QueryShardContext extends QueryRewriteContext { */ public Collection queryTypes() { String[] types = getTypes(); - if (types == null || types.length == 0) { - return getMapperService().types(); - } - if (types.length == 1 && types[0].equals("_all")) { - return getMapperService().types(); + if (types == null || types.length == 0 || (types.length == 1 && types[0].equals("_all"))) { + DocumentMapper mapper = getMapperService().documentMapper(); + return mapper == null ? 
Collections.emptyList() : Collections.singleton(mapper.type()); } return Arrays.asList(types); } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index da2a165258f..c0a13105f84 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -172,7 +172,7 @@ public class RandomScoreFunctionBuilder extends ScoreFunctionBuilder types = Collections.emptySet(); - Mockito.when(mapperService.types()).thenReturn(types); + Mockito.when(mapperService.documentMapper()).thenReturn(null); Mockito.when(context.getMapperService()).thenReturn(mapperService); TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType(); @@ -69,8 +66,9 @@ public class TypeFieldTypeTests extends FieldTypeTestCase { Query query = ft.termQuery("my_type", context); assertEquals(new MatchNoDocsQuery(), query); - types = Collections.singleton("my_type"); - Mockito.when(mapperService.types()).thenReturn(types); + DocumentMapper mapper = Mockito.mock(DocumentMapper.class); + Mockito.when(mapper.type()).thenReturn("my_type"); + Mockito.when(mapperService.documentMapper()).thenReturn(mapper); query = ft.termQuery("my_type", context); assertEquals(new MatchAllDocsQuery(), query); @@ -78,8 +76,9 @@ public class TypeFieldTypeTests extends FieldTypeTestCase { query = ft.termQuery("my_type", context); assertEquals(Queries.newNonNestedFilter(context.indexVersionCreated()), query); - types = Collections.singleton("other_type"); - Mockito.when(mapperService.types()).thenReturn(types); + mapper = Mockito.mock(DocumentMapper.class); + Mockito.when(mapper.type()).thenReturn("other_type"); + Mockito.when(mapperService.documentMapper()).thenReturn(mapper); query = ft.termQuery("my_type", context); assertEquals(new MatchNoDocsQuery(), query); } diff --git a/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index dab25b0ce3d..504112b6649 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -84,7 +84,7 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase || context.getQueryShardContext().fieldMapper(IdFieldMapper.NAME) == null // there are types, but disjoint from the query || (allTypes == false && - Arrays.asList(queryBuilder.types()).indexOf(context.mapperService().types().iterator().next()) == -1)) { + Arrays.asList(queryBuilder.types()).indexOf(context.mapperService().documentMapper().type()) == -1)) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); } else { assertThat(query, instanceOf(TermInSetQuery.class)); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 1940c824388..010eb1d7cdc 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import 
org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.MapperService; @@ -60,7 +61,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.mock.orig.Mockito; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase; import org.elasticsearch.search.fetch.subphase.FetchSourceSubPhase; @@ -129,7 +129,9 @@ public abstract class AggregatorTestCase extends ESTestCase { MapperService mapperService = mapperServiceMock(); when(mapperService.getIndexSettings()).thenReturn(indexSettings); when(mapperService.hasNested()).thenReturn(false); - when(mapperService.types()).thenReturn(Collections.singleton(TYPE_NAME)); + DocumentMapper mapper = mock(DocumentMapper.class); + when(mapper.type()).thenReturn(TYPE_NAME); + when(mapperService.documentMapper()).thenReturn(mapper); when(searchContext.mapperService()).thenReturn(mapperService); IndexFieldDataService ifds = new IndexFieldDataService(indexSettings, new IndicesFieldDataCache(Settings.EMPTY, new IndexFieldDataCache.Listener() { From 5991ede9ef6c428954183f7d88ccb96f30474ce8 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 2 May 2018 11:31:50 +0200 Subject: [PATCH 04/30] Fix docs of the `_ignored` meta field. Relates #29658 --- docs/reference/mapping/fields.asciidoc | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/reference/mapping/fields.asciidoc b/docs/reference/mapping/fields.asciidoc index d3bc90ab8b3..f6d5f00a9b5 100644 --- a/docs/reference/mapping/fields.asciidoc +++ b/docs/reference/mapping/fields.asciidoc @@ -40,9 +40,6 @@ can be customised when a mapping type is created. All fields in the document which contain non-null values. -[float] -=== Indexing meta-fields - <>:: All fields in the document that have been ignored at index time because of From bcdf3d5c618efd746e841773e1681e6552f12208 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 2 May 2018 11:32:18 +0200 Subject: [PATCH 05/30] Post backport of #29658. 
--- .../rest-api-spec/test/search/200_ignore_malformed.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml index e32b1fa1b95..996501e7d79 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_ignore_malformed.yml @@ -1,7 +1,7 @@ --- setup: - skip: - version: " - 6.99.99" # TODO: change to 6.3.99 after backport to 6.4 + version: " - 6.3.99" reason: _ignored was added in 6.4.0 - do: From 5deda6929a3829269b499d6db6d52f7b5de0471f Mon Sep 17 00:00:00 2001 From: Conor Landry Date: Wed, 2 May 2018 05:44:53 -0400 Subject: [PATCH 06/30] [Docs] Clarify `fuzzy_like_this` redirect (#30183) --- docs/reference/redirects.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 1583726421a..a5a8e4d008a 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -245,7 +245,7 @@ as a query in ``query context'' and as a filter in ``filter context'' (see [role="exclude",id="query-dsl-flt-query"] === Fuzzy Like This Query -The `fuzzy_like_this` or `flt` query has been removed. Instead use +The `fuzzy_like_this`, alternatively known as `flt`, query has been removed. Instead use either the <> parameter with the <> or the <>. From ea35a16645ec06a710bf4ef9a01c35839a24a075 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 2 May 2018 12:11:52 +0100 Subject: [PATCH 07/30] Create default ES_TMPDIR on Windows (#30325) If the elasticsearch-env bash script chooses $ES_TMPDIR then it also creates the directory. This change makes elasticsearch-env.bat do the same thing: if %ES_TMPDIR% is chosen by the script then the script will ensure it exists, but if %ES_TMPDIR% is already set then the user is responsible for creating it. Relates #27609 Relates #28217 --- distribution/src/bin/elasticsearch-env.bat | 3 +++ 1 file changed, 3 insertions(+) diff --git a/distribution/src/bin/elasticsearch-env.bat b/distribution/src/bin/elasticsearch-env.bat index b0d015924b4..8bd5f24864e 100644 --- a/distribution/src/bin/elasticsearch-env.bat +++ b/distribution/src/bin/elasticsearch-env.bat @@ -58,4 +58,7 @@ set ES_DISTRIBUTION_TYPE=${es.distribution.type} if not defined ES_TMPDIR ( set ES_TMPDIR=!TMP!\elasticsearch + if not exist "!ES_TMPDIR!" ( + mkdir "!ES_TMPDIR!" + ) ) From 916bf9d26d6292dcb9b229123c58a86f0e741ffd Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 2 May 2018 08:08:54 -0700 Subject: [PATCH 08/30] Convert server javadoc to html5 (#30279) This commit converts the remaining javadocs in :server using html4 to html5. This was mostly converting `tt` to `{@code}`. 
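To make the conversion concrete, here is a minimal before/after sketch (the interface and method are illustrative, not taken from this patch):

    // Hypothetical example of the html4 -> html5 javadoc cleanup in :server.
    public interface AcknowledgedCheck {

        // Before (html4): * Returns <tt>true</tt> if the request has been acknowledged.

        /**
         * Returns {@code true} if the request has been acknowledged.
         */
        boolean acknowledged();
    }

The `{@code ...}` tag renders the same as `tt` but is valid html5, which is what lets the build stop special-casing `:server` for the html5 javadoc option in BuildPlugin.groovy.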
--- .../elasticsearch/gradle/BuildPlugin.groovy | 16 ---------- .../gradle/test/ClusterConfiguration.groovy | 2 +- .../queries/ExtendedCommonTermsQuery.java | 2 +- .../elasticsearch/ElasticsearchException.java | 2 +- .../action/ShardOperationFailedException.java | 4 +-- .../elasticsearch/action/ThreadingModel.java | 4 +-- .../cluster/health/ClusterHealthResponse.java | 2 +- .../reroute/ClusterRerouteRequest.java | 4 +-- .../reroute/ClusterRerouteRequestBuilder.java | 6 ++-- .../shards/ClusterSearchShardsRequest.java | 2 +- .../ClusterSearchShardsRequestBuilder.java | 2 +- .../state/ClusterStateRequestBuilder.java | 8 ++--- .../admin/indices/flush/FlushRequest.java | 6 ++-- .../indices/forcemerge/ForceMergeRequest.java | 10 +++---- .../forcemerge/ForceMergeRequestBuilder.java | 6 ++-- .../template/put/PutIndexTemplateRequest.java | 2 +- .../put/PutIndexTemplateRequestBuilder.java | 2 +- .../indices/upgrade/post/UpgradeRequest.java | 4 +-- .../upgrade/post/UpgradeRequestBuilder.java | 2 +- .../action/bulk/BulkItemResponse.java | 4 +-- .../action/bulk/BulkProcessor.java | 8 ++--- .../action/bulk/BulkRequest.java | 4 +-- .../action/bulk/BulkRequestBuilder.java | 4 +-- .../elasticsearch/action/get/GetRequest.java | 10 +++---- .../action/get/GetRequestBuilder.java | 10 +++---- .../action/get/MultiGetItemResponse.java | 2 +- .../action/get/MultiGetRequest.java | 2 +- .../action/get/MultiGetRequestBuilder.java | 6 ++-- .../action/get/MultiGetShardRequest.java | 2 +- .../action/index/IndexRequest.java | 2 +- .../action/index/IndexRequestBuilder.java | 2 +- .../action/index/TransportIndexAction.java | 6 ++-- .../action/search/InitialSearchPhase.java | 2 +- .../action/search/SearchRequest.java | 10 +++---- .../action/search/SearchRequestBuilder.java | 14 ++++----- .../master/AcknowledgedRequestBuilder.java | 2 +- .../replication/ReplicationRequest.java | 4 +-- .../ReplicationRequestBuilder.java | 4 +-- .../InstanceShardOperationRequest.java | 4 +-- .../InstanceShardOperationRequestBuilder.java | 4 +-- .../MultiTermVectorsItemResponse.java | 2 +- .../MultiTermVectorsShardRequest.java | 2 +- .../action/termvectors/TermVectorsFields.java | 2 +- .../termvectors/TermVectorsRequest.java | 2 +- .../TermVectorsRequestBuilder.java | 2 +- .../client/ClusterAdminClient.java | 2 +- .../org/elasticsearch/client/Requests.java | 28 ++++++++--------- .../cluster/block/ClusterBlocks.java | 2 +- .../cluster/metadata/AutoExpandReplicas.java | 2 +- .../cluster/metadata/IndexMetaData.java | 2 +- .../metadata/IndexNameExpressionResolver.java | 4 +-- .../cluster/node/DiscoveryNodes.java | 2 +- .../cluster/routing/IndexRoutingTable.java | 2 +- .../cluster/routing/ShardsIterator.java | 2 +- .../allocator/BalancedShardsAllocator.java | 6 ++-- .../decider/AwarenessAllocationDecider.java | 6 ++-- .../ClusterRebalanceAllocationDecider.java | 8 ++--- .../ConcurrentRebalanceAllocationDecider.java | 6 ++-- .../decider/FilterAllocationDecider.java | 16 +++++----- .../decider/MaxRetryAllocationDecider.java | 4 +-- .../decider/SameShardAllocationDecider.java | 8 ++--- .../decider/ShardsLimitAllocationDecider.java | 6 ++-- .../decider/ThrottlingAllocationDecider.java | 8 ++--- .../common/FieldMemoryStats.java | 2 +- .../common/bytes/BytesReference.java | 4 +-- .../common/collect/CopyOnWriteHashMap.java | 2 +- .../common/component/Lifecycle.java | 8 ++--- .../common/inject/internal/Strings.java | 4 +-- .../common/lease/Releasables.java | 4 +-- .../elasticsearch/common/lucene/Lucene.java | 4 +-- .../lucene/search/MoreLikeThisQuery.java | 
2 +- .../common/network/NetworkService.java | 4 +-- .../common/settings/PropertyPlaceholder.java | 2 +- .../common/settings/Setting.java | 6 ++-- .../common/settings/Settings.java | 8 ++--- .../common/unit/ByteSizeUnit.java | 4 +-- .../elasticsearch/common/unit/Fuzziness.java | 2 +- .../common/unit/MemorySizeValue.java | 4 +-- .../elasticsearch/common/util/ArrayUtils.java | 2 +- .../common/util/concurrent/RefCounted.java | 2 +- .../discovery/zen/ElectMasterService.java | 2 +- .../gateway/MetaDataStateFormat.java | 2 +- .../org/elasticsearch/index/IndexModule.java | 2 +- .../elasticsearch/index/IndexSettings.java | 2 +- .../elasticsearch/index/engine/Engine.java | 2 +- .../index/fielddata/FieldData.java | 10 +++---- .../fielddata/ordinals/OrdinalsBuilder.java | 6 ++-- .../index/mapper/ContentPath.java | 2 +- .../index/mapper/MappedFieldType.java | 2 +- .../index/query/AbstractQueryBuilder.java | 2 +- .../index/query/BoolQueryBuilder.java | 10 +++---- .../index/query/CommonTermsQueryBuilder.java | 6 ++-- .../query/MatchPhrasePrefixQueryBuilder.java | 2 +- .../index/query/MatchPhraseQueryBuilder.java | 2 +- .../index/query/MatchQueryBuilder.java | 8 ++--- .../index/query/MoreLikeThisQueryBuilder.java | 16 +++++----- .../index/query/MultiMatchQueryBuilder.java | 8 ++--- .../index/query/QueryBuilder.java | 8 ++--- .../index/query/QueryBuilders.java | 8 ++--- .../index/query/QueryStringQueryBuilder.java | 12 ++++---- .../index/query/RangeQueryBuilder.java | 4 +-- .../elasticsearch/index/query/RegexpFlag.java | 30 +++++++++---------- .../index/query/SimpleQueryStringBuilder.java | 18 +++++------ .../index/query/WildcardQueryBuilder.java | 16 +++++----- .../index/reindex/DeleteByQueryRequest.java | 10 +++---- .../search/SimpleQueryStringQueryParser.java | 2 +- .../index/search/stats/ShardSearchStats.java | 2 +- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../index/shard/InternalIndexingStats.java | 2 +- .../org/elasticsearch/index/store/Store.java | 22 +++++++------- .../index/store/StoreFileMetaData.java | 2 +- .../index/translog/MultiSnapshot.java | 2 +- .../index/translog/Translog.java | 6 ++-- .../index/translog/TranslogConfig.java | 2 +- .../elasticsearch/indices/IndicesService.java | 4 +-- .../java/org/elasticsearch/node/Node.java | 4 +-- .../blobstore/BlobStoreRepository.java | 2 +- .../org/elasticsearch/search/SearchHit.java | 4 +-- .../elasticsearch/search/SearchService.java | 2 +- .../aggregations/AggregatorFactory.java | 4 +-- .../aggregations/InternalAggregation.java | 4 +-- .../bucket/BucketsAggregator.java | 2 +- .../CompositeAggregationBuilder.java | 4 +-- .../cardinality/HyperLogLogPlusPlus.java | 2 +- .../ScriptedMetricAggregationBuilder.java | 24 +++++++-------- .../tophits/TopHitsAggregationBuilder.java | 6 ++-- .../aggregations/support/AggregationPath.java | 2 +- .../search/builder/SearchSourceBuilder.java | 10 +++---- .../highlight/AbstractHighlighterBuilder.java | 24 +++++++-------- .../subphase/highlight/HighlightBuilder.java | 10 +++---- .../subphase/highlight/HighlightField.java | 4 +-- .../search/internal/ContextIndexSearcher.java | 2 +- .../search/internal/SearchContext.java | 2 +- .../search/internal/ShardSearchRequest.java | 2 +- .../search/rescore/QueryRescorerBuilder.java | 10 +++---- .../search/sort/FieldSortBuilder.java | 6 ++-- .../CompletionSuggestionBuilder.java | 2 +- .../DirectCandidateGeneratorBuilder.java | 10 +++---- .../phrase/PhraseSuggestionBuilder.java | 18 +++++------ .../suggest/term/TermSuggestionBuilder.java | 10 +++---- 
.../transport/RemoteClusterService.java | 6 ++-- .../elasticsearch/transport/TcpTransport.java | 2 +- .../elasticsearch/transport/Transport.java | 2 +- 143 files changed, 390 insertions(+), 406 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 4e512b3cdd4..a44b9c849d3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -549,22 +549,6 @@ class BuildPlugin implements Plugin { javadoc.classpath = javadoc.getClasspath().filter { f -> return classes.contains(f) == false } - /* - * Force html5 on projects that support it to silence the warning - * that `javadoc` will change its defaults in the future. - * - * But not all of our javadoc is actually valid html5. So we - * have to become valid incrementally. We only set html5 on the - * projects we have converted so that we still get the annoying - * warning on the unconverted ones. That will give us an - * incentive to convert them.... - */ - List html4Projects = [ - ':server', - ] - if (false == html4Projects.contains(project.path)) { - javadoc.options.addBooleanOption('html5', true) - } } configureJavadocJar(project) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 5aaf54454e1..afbfc747541 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -64,7 +64,7 @@ class ClusterConfiguration { boolean debug = false /** - * Configuration of the setting discovery.zen.minimum_master_nodes on the nodes. + * Configuration of the setting {@code discovery.zen.minimum_master_nodes} on the nodes. * In case of more than one node, this defaults to the number of nodes */ @Input diff --git a/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java b/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java index 4580de4cc4a..c0c6bbb05d8 100644 --- a/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java @@ -28,7 +28,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; /** * Extended version of {@link CommonTermsQuery} that allows to pass in a - * minimumNumberShouldMatch specification that uses the actual num of high frequent terms + * {@code minimumNumberShouldMatch} specification that uses the actual num of high frequent terms * to calculate the minimum matching terms. */ public class ExtendedCommonTermsQuery extends CommonTermsQuery { diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 929c5f49e34..db8263d1513 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -668,7 +668,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte } /** - * Returns a underscore case name for the given exception. This method strips Elasticsearch prefixes from exception names. + * Returns a underscore case name for the given exception. This method strips {@code Elasticsearch} prefixes from exception names. 
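 *
 * For example (an illustrative value, not part of this patch): an
 * {@code ElasticsearchParseException} is reported as {@code parse_exception}.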
*/ public static String getExceptionName(Throwable ex) { String simpleName = ex.getClass().getSimpleName(); diff --git a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java index 2eea6d3fb99..013bf06d2f8 100644 --- a/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/ShardOperationFailedException.java @@ -31,12 +31,12 @@ import org.elasticsearch.rest.RestStatus; public interface ShardOperationFailedException extends Streamable, ToXContent { /** - * The index the operation failed on. Might return null if it can't be derived. + * The index the operation failed on. Might return {@code null} if it can't be derived. */ String index(); /** - * The index the operation failed on. Might return -1 if it can't be derived. + * The index the operation failed on. Might return {@code -1} if it can't be derived. */ int shardId(); diff --git a/server/src/main/java/org/elasticsearch/action/ThreadingModel.java b/server/src/main/java/org/elasticsearch/action/ThreadingModel.java index 996ebc9d6cc..b045e02ae36 100644 --- a/server/src/main/java/org/elasticsearch/action/ThreadingModel.java +++ b/server/src/main/java/org/elasticsearch/action/ThreadingModel.java @@ -37,7 +37,7 @@ public enum ThreadingModel { } /** - * true if the actual operation the action represents will be executed + * {@code true} if the actual operation the action represents will be executed * on a different thread than the calling thread (assuming it will be executed * on the same node). */ @@ -46,7 +46,7 @@ public enum ThreadingModel { } /** - * true if the invocation of the action result listener will be executed + * {@code true} if the invocation of the action result listener will be executed * on a different thread (than the calling thread or an "expensive" thread, like the * IO thread). */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index 1fca07fb024..9704fa482ba 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -123,7 +123,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo } /** - * true if the waitForXXX has timeout out and did not match. + * {@code true} if the waitForXXX has timeout out and did not match. */ public boolean isTimedOut() { return this.timedOut; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index bd13c000019..5bdde81e6de 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -51,7 +51,7 @@ public class ClusterRerouteRequest extends AcknowledgedRequestfalse) allowing to run the commands without + * Sets a dry run flag (defaults to {@code false}) allowing to run the commands without * actually applying them to the cluster state, and getting the resulting cluster state back. 
*/ public ClusterRerouteRequest dryRun(boolean dryRun) { @@ -78,7 +78,7 @@ public class ClusterRerouteRequest extends AcknowledgedRequestfalse). If true, the + * Sets the retry failed flag (defaults to {@code false}). If true, the * request will retry allocating shards that can't currently be allocated due to too many allocation failures. */ public ClusterRerouteRequest setRetryFailed(boolean retryFailed) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index 3a7ffbeb44d..7cdbe3c62b2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -42,7 +42,7 @@ public class ClusterRerouteRequestBuilder } /** - * Sets a dry run flag (defaults to false) allowing to run the commands without + * Sets a dry run flag (defaults to {@code false}) allowing to run the commands without * actually applying them to the cluster state, and getting the resulting cluster state back. */ public ClusterRerouteRequestBuilder setDryRun(boolean dryRun) { @@ -51,7 +51,7 @@ public class ClusterRerouteRequestBuilder } /** - * Sets the explain flag (defaults to false). If true, the + * Sets the explain flag (defaults to {@code false}). If true, the * request will include an explanation in addition to the cluster state. */ public ClusterRerouteRequestBuilder setExplain(boolean explain) { @@ -60,7 +60,7 @@ public class ClusterRerouteRequestBuilder } /** - * Sets the retry failed flag (defaults to false). If true, the + * Sets the retry failed flag (defaults to {@code false}). If true, the * request will retry allocating shards that can't currently be allocated due to too many allocation failures. */ public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index d127829fa35..3ae5c2d683a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -146,7 +146,7 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest_local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public ClusterSearchShardsRequest preference(String preference) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index da31a79fc9b..92edcc56496 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -55,7 +55,7 @@ public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRe /** * Sets the preference to execute the search. Defaults to randomize across shards. 
Can be set to - * _local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public ClusterSearchShardsRequestBuilder setPreference(String preference) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index 6ec7763e31b..524e167e3a2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -52,7 +52,7 @@ public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBu /** * Should the cluster state result include the {@link org.elasticsearch.cluster.metadata.MetaData}. Defaults - * to true. + * to {@code true}. */ public ClusterStateRequestBuilder setMetaData(boolean filter) { request.metaData(filter); @@ -61,7 +61,7 @@ public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBu /** * Should the cluster state result include the {@link org.elasticsearch.cluster.node.DiscoveryNodes}. Defaults - * to true. + * to {@code true}. */ public ClusterStateRequestBuilder setNodes(boolean filter) { request.nodes(filter); @@ -70,7 +70,7 @@ public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBu /** * Should the cluster state result include the {@link org.elasticsearch.cluster.ClusterState.Custom}. Defaults - * to true. + * to {@code true}. */ public ClusterStateRequestBuilder setCustoms(boolean filter) { request.customs(filter); @@ -79,7 +79,7 @@ public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBu /** * Should the cluster state result include the {@link org.elasticsearch.cluster.routing.RoutingTable}. Defaults - * to true. + * to {@code true}. */ public ClusterStateRequestBuilder setRoutingTable(boolean filter) { request.routingTable(filter); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java index f91b69755c1..284b98b3280 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java @@ -51,15 +51,15 @@ public class FlushRequest extends BroadcastRequest { } /** - * Returns true iff a flush should block - * if a another flush operation is already running. Otherwise false + * Returns {@code true} iff a flush should block + * if a another flush operation is already running. Otherwise {@code false} */ public boolean waitIfOngoing() { return this.waitIfOngoing; } /** - * if set to true the flush will block + * if set to {@code true} the flush will block * if a another flush operation is already running until the flush can be performed. 
* The default is true */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java index 2b8fe5d2b01..91f965d6268 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -27,7 +27,7 @@ import java.io.IOException; /** * A request to force merging the segments of one or more indices. In order to - * run a merge on all the indices, pass an empty array or null for the + * run a merge on all the indices, pass an empty array or {@code null} for the * indices. * {@link #maxNumSegments(int)} allows to control the number of segments * to force merge down to. Defaults to simply checking if a merge needs @@ -81,7 +81,7 @@ public class ForceMergeRequest extends BroadcastRequest { /** * Should the merge only expunge deletes from the index, without full merging. - * Defaults to full merging (false). + * Defaults to full merging ({@code false}). */ public boolean onlyExpungeDeletes() { return onlyExpungeDeletes; @@ -89,7 +89,7 @@ public class ForceMergeRequest extends BroadcastRequest { /** * Should the merge only expunge deletes from the index, without full merge. - * Defaults to full merging (false). + * Defaults to full merging ({@code false}). */ public ForceMergeRequest onlyExpungeDeletes(boolean onlyExpungeDeletes) { this.onlyExpungeDeletes = onlyExpungeDeletes; @@ -97,14 +97,14 @@ public class ForceMergeRequest extends BroadcastRequest { } /** - * Should flush be performed after the merge. Defaults to true. + * Should flush be performed after the merge. Defaults to {@code true}. */ public boolean flush() { return flush; } /** - * Should flush be performed after the merge. Defaults to true. + * Should flush be performed after the merge. Defaults to {@code true}. */ public ForceMergeRequest flush(boolean flush) { this.flush = flush; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index 138db7078ee..285ef99a70a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -24,7 +24,7 @@ import org.elasticsearch.client.ElasticsearchClient; /** * A request to force merge one or more indices. In order to force merge all - * indices, pass an empty array or null for the indices. + * indices, pass an empty array or {@code null} for the indices. * {@link #setMaxNumSegments(int)} allows to control the number of segments to force * merge down to. By default, will cause the force merge process to merge down * to half the configured number of segments. @@ -47,7 +47,7 @@ public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilderfalse). + * Defaults to full merging ({@code false}). */ public ForceMergeRequestBuilder setOnlyExpungeDeletes(boolean onlyExpungeDeletes) { request.onlyExpungeDeletes(onlyExpungeDeletes); @@ -55,7 +55,7 @@ public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuildertrue. + * Should flush be performed after the merge. Defaults to {@code true}. 
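 *
 * A hypothetical usage sketch (index name and values are illustrative):
 * <pre>{@code
 * client.admin().indices().prepareForceMerge("my-index")
 *     .setMaxNumSegments(1)  // merge each shard down to a single segment
 *     .setFlush(true)        // flush once the merge completes
 *     .get();
 * }</pre>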
*/ public ForceMergeRequestBuilder setFlush(boolean flush) { request.flush(flush); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 83c3f474e66..8cd1fac6f6f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -154,7 +154,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequesttrue to force only creation, not an update of an index template. If it already + * Set to {@code true} to force only creation, not an update of an index template. If it already * exists, it will fail with an {@link IllegalArgumentException}. */ public PutIndexTemplateRequest create(boolean create) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index 7b365f94ab4..5a9f359554b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -76,7 +76,7 @@ public class PutIndexTemplateRequestBuilder } /** - * Set to true to force only creation, not an update of an index template. If it already + * Set to {@code true} to force only creation, not an update of an index template. If it already * exists, it will fail with an {@link IllegalArgumentException}. */ public PutIndexTemplateRequestBuilder setCreate(boolean create) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java index be47ba22195..6a0288b78d4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java @@ -27,7 +27,7 @@ import java.io.IOException; /** * A request to upgrade one or more indices. In order to update all indices, pass an empty array or - * null for the indices. + * {@code null} for the indices. * @see org.elasticsearch.client.Requests#upgradeRequest(String...) * @see org.elasticsearch.client.IndicesAdminClient#upgrade(UpgradeRequest) * @see UpgradeResponse @@ -67,7 +67,7 @@ public class UpgradeRequest extends BroadcastRequest { /** * Should the upgrade only the ancient (older major version of Lucene) segments? - * Defaults to false. + * Defaults to {@code false}. */ public boolean upgradeOnlyAncientSegments() { return upgradeOnlyAncientSegments; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java index adc8ea5510a..e2c20a0e508 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java @@ -24,7 +24,7 @@ import org.elasticsearch.client.ElasticsearchClient; /** * A request to upgrade one or more indices. 
In order to optimize on all the indices, pass an empty array or - * null for the indices. + * {@code null} for the indices. */ public class UpgradeRequestBuilder extends BroadcastOperationRequestBuilder { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 6fd2fd2da84..fb535d312cf 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -407,7 +407,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject { } /** - * The actual response ({@link IndexResponse} or {@link DeleteResponse}). null in + * The actual response ({@link IndexResponse} or {@link DeleteResponse}). {@code null} in * case of failure. */ public T getResponse() { @@ -422,7 +422,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject { } /** - * The failure message, null if it did not fail. + * The failure message, {@code null} if it did not fail. */ public String getFailureMessage() { if (failure != null) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index fdafb3b2b80..9febbd63962 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -100,7 +100,7 @@ public class BulkProcessor implements Closeable { /** * Sets the number of concurrent requests allowed to be executed. A value of 0 means that only a single * request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed - * while accumulating new bulk requests. Defaults to 1. + * while accumulating new bulk requests. Defaults to {@code 1}. */ public Builder setConcurrentRequests(int concurrentRequests) { this.concurrentRequests = concurrentRequests; @@ -109,7 +109,7 @@ public class BulkProcessor implements Closeable { /** * Sets when to flush a new bulk request based on the number of actions currently added. Defaults to - * 1000. Can be set to -1 to disable it. + * {@code 1000}. Can be set to {@code -1} to disable it. */ public Builder setBulkActions(int bulkActions) { this.bulkActions = bulkActions; @@ -118,7 +118,7 @@ public class BulkProcessor implements Closeable { /** * Sets when to flush a new bulk request based on the size of actions currently added. Defaults to - * 5mb. Can be set to -1 to disable it. + * {@code 5mb}. Can be set to {@code -1} to disable it. */ public Builder setBulkSize(ByteSizeValue bulkSize) { this.bulkSize = bulkSize; @@ -129,7 +129,7 @@ public class BulkProcessor implements Closeable { * Sets a flush interval flushing *any* bulk actions pending if the interval passes. Defaults to not set. *
 * <p>
* Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)} - * can be set to -1 with the flush interval set allowing for complete async processing of bulk actions. + * can be set to {@code -1} with the flush interval set allowing for complete async processing of bulk actions. */ public Builder setFlushInterval(TimeValue flushInterval) { this.flushInterval = flushInterval; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index ebc095b1670..ca5d997dc38 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -493,7 +493,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques } /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ public final BulkRequest timeout(TimeValue timeout) { this.timeout = timeout; @@ -501,7 +501,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques } /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ public final BulkRequest timeout(String timeout) { return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 7d2bca54d15..ca4a5ef2cbb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -132,7 +132,7 @@ public class BulkRequestBuilder extends ActionRequestBuilder1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ public final BulkRequestBuilder setTimeout(TimeValue timeout) { request.timeout(timeout); @@ -140,7 +140,7 @@ public class BulkRequestBuilder extends ActionRequestBuilder1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ public final BulkRequestBuilder setTimeout(String timeout) { request.timeout(timeout); diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index 05100e58809..f4f7f8ba794 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -137,7 +137,7 @@ public class GetRequest extends SingleShardRequest implements Realti /** * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to - * _local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public GetRequest preference(String preference) { @@ -174,7 +174,7 @@ public class GetRequest extends SingleShardRequest implements Realti } /** - * Explicitly specify the stored fields that will be returned. 
By default, the _source + * Explicitly specify the stored fields that will be returned. By default, the {@code _source} * field will be returned. */ public GetRequest storedFields(String... fields) { @@ -183,7 +183,7 @@ public class GetRequest extends SingleShardRequest implements Realti } /** - * Explicitly specify the stored fields that will be returned. By default, the _source + * Explicitly specify the stored fields that will be returned. By default, the {@code _source} * field will be returned. */ public String[] storedFields() { @@ -192,8 +192,8 @@ public class GetRequest extends SingleShardRequest implements Realti /** * Should a refresh be executed before this get operation causing the operation to - * return the latest value. Note, heavy get should not set this to true. Defaults - * to false. + * return the latest value. Note, heavy get should not set this to {@code true}. Defaults + * to {@code false}. */ public GetRequest refresh(boolean refresh) { this.refresh = refresh; diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java index 9f59d3ecaef..37b9dbe79df 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java @@ -40,7 +40,7 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuildernull, will use just the id to fetch the + * Sets the type of the document to fetch. If set to {@code null}, will use just the id to fetch the * first document matching it. */ public GetRequestBuilder setType(@Nullable String type) { @@ -67,7 +67,7 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder_local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public GetRequestBuilder setPreference(String preference) { @@ -76,7 +76,7 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder_source + * Explicitly specify the fields that will be returned. By default, the {@code _source} * field will be returned. */ public GetRequestBuilder setStoredFields(String... fields) { @@ -123,8 +123,8 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuildertrue. Defaults - * to false. + * return the latest value. Note, heavy get should not set this to {@code true}. Defaults + * to {@code false}. */ public GetRequestBuilder setRefresh(boolean refresh) { request.refresh(refresh); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java index bdaf8cff11b..97ff2fe769f 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetItemResponse.java @@ -80,7 +80,7 @@ public class MultiGetItemResponse implements Streamable { } /** - * The actual get response, null if its a failure. + * The actual get response, {@code null} if its a failure. 
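 *
 * A hypothetical usage sketch (illustrative):
 * <pre>{@code
 * for (MultiGetItemResponse item : multiGetResponse.getResponses()) {
 *     GetResponse get = item.isFailed() ? null : item.getResponse();
 * }
 * }</pre>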
*/ public GetResponse getResponse() { return this.response; diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index b93c8a7f037..0ea564154ab 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -311,7 +311,7 @@ public class MultiGetRequest extends ActionRequest /** * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to - * _local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public MultiGetRequest preference(String preference) { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java index fd7a6ac8825..508dc99fc4d 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequestBuilder.java @@ -58,7 +58,7 @@ public class MultiGetRequestBuilder extends ActionRequestBuilder_local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public MultiGetRequestBuilder setPreference(String preference) { @@ -68,8 +68,8 @@ public class MultiGetRequestBuilder extends ActionRequestBuildertrue. Defaults - * to false. + * return the latest value. Note, heavy get should not set this to {@code true}. Defaults + * to {@code false}. */ public MultiGetRequestBuilder setRefresh(boolean refresh) { request.refresh(refresh); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java index fea3cd1043c..55f1fc6f5b9 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java @@ -64,7 +64,7 @@ public class MultiGetShardRequest extends SingleShardRequest_local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public MultiGetShardRequest preference(String preference) { diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 50a379acae5..a7438c62919 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -416,7 +416,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement /** - * Set to true to force this index to use {@link OpType#CREATE}. + * Set to {@code true} to force this index to use {@link OpType#CREATE}. 
*/ public IndexRequest create(boolean create) { if (create) { diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 1f7d5e0bca8..b81d09abda3 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -175,7 +175,7 @@ public class IndexRequestBuilder extends ReplicationRequestBuildertrue to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}. + * Set to {@code true} to force this index to use {@link org.elasticsearch.action.index.IndexRequest.OpType#CREATE}. */ public IndexRequestBuilder setCreate(boolean create) { request.create(create); diff --git a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 4d5797971ca..8480c7be3bb 100644 --- a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -37,9 +37,9 @@ import org.elasticsearch.transport.TransportService; * * Allows for the following settings: *
 * <ul>
- * <li><b>autoCreateIndex</b>: When set to <tt>true</tt>, will automatically create an index if one does not exists.
- * Defaults to <tt>true</tt>.
- * <li><b>allowIdGeneration</b>: If the id is set not, should it be generated. Defaults to <tt>true</tt>.
+ * <li><b>autoCreateIndex</b>: When set to {@code true}, will automatically create an index if one does not exist.
+ * Defaults to {@code true}.
+ * <li><b>allowIdGeneration</b>: If the id is not set, should it be generated. Defaults to {@code true}.
 * </ul>
* * Deprecated use TransportBulkAction with a single item instead diff --git a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index 0d8c0c33cc9..a3be7c39aff 100644 --- a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -41,7 +41,7 @@ import java.util.stream.Stream; * This is an abstract base class that encapsulates the logic to fan out to all shards in provided {@link GroupShardsIterator} * and collect the results. If a shard request returns a failure this class handles the advance to the next replica of the shard until * the shards replica iterator is exhausted. Each shard is referenced by position in the {@link GroupShardsIterator} which is later - * referred to as the shardIndex. + * referred to as the {@code shardIndex}. * The fan out and collect algorithm is traditionally used as the initial phase which can either be a query execution or collection * distributed frequencies */ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index a7be0f41ffb..6149503a08d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -261,7 +261,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest /** * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to - * _local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public SearchRequest preference(String preference) { @@ -399,7 +399,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest /** * Returns the number of shard requests that should be executed concurrently. This value should be used as a protection mechanism to * reduce the number of shard reqeusts fired per high level search request. Searches that hit the entire cluster can be throttled - * with this number to reduce the cluster load. The default grows with the number of nodes in the cluster but is at most 256. + * with this number to reduce the cluster load. The default grows with the number of nodes in the cluster but is at most {@code 256}. */ public int getMaxConcurrentShardRequests() { return maxConcurrentShardRequests == 0 ? 256 : maxConcurrentShardRequests; @@ -408,7 +408,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest /** * Sets the number of shard requests that should be executed concurrently. This value should be used as a protection mechanism to * reduce the number of shard requests fired per high level search request. Searches that hit the entire cluster can be throttled - * with this number to reduce the cluster load. The default grows with the number of nodes in the cluster but is at most 256. + * with this number to reduce the cluster load. The default grows with the number of nodes in the cluster but is at most {@code 256}. 
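 *
 * A hypothetical usage sketch (index pattern and value are illustrative):
 * <pre>{@code
 * SearchRequest request = new SearchRequest("logs-*");
 * request.setMaxConcurrentShardRequests(5); // throttle the per-request shard fan-out
 * }</pre>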
*/ public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) { if (maxConcurrentShardRequests < 1) { @@ -420,7 +420,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest * Sets a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard - * bounds and the query are disjoint. The default is 128 + * bounds and the query are disjoint. The default is {@code 128} */ public void setPreFilterShardSize(int preFilterShardSize) { if (preFilterShardSize < 1) { @@ -433,7 +433,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on it's rewrite method ie. if date filters are mandatory to match but the shard - * bounds and the query are disjoint. The default is 128 + * bounds and the query are disjoint. The default is {@code 128} */ public int getPreFilterShardSize() { return preFilterShardSize; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 1ddecf13315..91ac46c1d62 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -146,7 +146,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder_local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public SearchRequestBuilder setPreference(String preference) { @@ -192,7 +192,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder0. + * From index to start the search from. Defaults to {@code 0}. */ public SearchRequestBuilder setFrom(int from) { sourceBuilder().from(from); @@ -200,7 +200,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder10. + * The number of search hits to return. Defaults to {@code 10}. */ public SearchRequestBuilder setSize(int size) { sourceBuilder().size(size); @@ -351,7 +351,7 @@ public class SearchRequestBuilder extends ActionRequestBuilderfalse. + * Applies when sorting, and controls if scores will be tracked as well. Defaults to {@code false}. */ public SearchRequestBuilder setTrackScores(boolean trackScores) { sourceBuilder().trackScores(trackScores); @@ -359,7 +359,7 @@ public class SearchRequestBuilder extends ActionRequestBuildertrue + * Indicates if the total hit count for the query should be tracked. Defaults to {@code true} */ public SearchRequestBuilder setTrackTotalHits(boolean trackTotalHits) { sourceBuilder().trackTotalHits(trackTotalHits); @@ -541,7 +541,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder256. + * with this number to reduce the cluster load. 
The default grows with the number of nodes in the cluster but is at most {@code 256}. */ public SearchRequestBuilder setMaxConcurrentShardRequests(int maxConcurrentShardRequests) { this.request.setMaxConcurrentShardRequests(maxConcurrentShardRequests); @@ -552,7 +552,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder128 + * bounds and the query are disjoint. The default is {@code 128} */ public SearchRequestBuilder setPreFilterShardSize(int preFilterShardSize) { this.request.setPreFilterShardSize(preFilterShardSize); diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java index 939f47f9acf..6a4a2b9c0d7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java @@ -43,7 +43,7 @@ public abstract class AcknowledgedRequestBuilder10s. + * to {@code 10s}. */ @SuppressWarnings("unchecked") public RequestBuilder setTimeout(String timeout) { diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 3dc222f9e3a..c463ad76c85 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -79,7 +79,7 @@ public abstract class ReplicationRequest1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ @SuppressWarnings("unchecked") public final Request timeout(TimeValue timeout) { @@ -88,7 +88,7 @@ public abstract class ReplicationRequest1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ public final Request timeout(String timeout) { return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java index 4ef20fcb15b..15d36ad3467 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequestBuilder.java @@ -34,7 +34,7 @@ public abstract class ReplicationRequestBuilder1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ @SuppressWarnings("unchecked") public final RequestBuilder setTimeout(TimeValue timeout) { @@ -43,7 +43,7 @@ public abstract class ReplicationRequestBuilder1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. 
*/ @SuppressWarnings("unchecked") public final RequestBuilder setTimeout(String timeout) { diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java index 2490110927f..56645c30e2d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -86,7 +86,7 @@ public abstract class InstanceShardOperationRequest1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ @SuppressWarnings("unchecked") public final Request timeout(TimeValue timeout) { @@ -95,7 +95,7 @@ public abstract class InstanceShardOperationRequest1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ public final Request timeout(String timeout) { return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java index 7bec08ce9ec..9e7a48dc49e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java @@ -39,7 +39,7 @@ public abstract class InstanceShardOperationRequestBuilder1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ @SuppressWarnings("unchecked") public final RequestBuilder setTimeout(TimeValue timeout) { @@ -48,7 +48,7 @@ public abstract class InstanceShardOperationRequestBuilder1m. + * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. */ @SuppressWarnings("unchecked") public final RequestBuilder setTimeout(String timeout) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java index ef84ea86458..3e32af7f2c2 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsItemResponse.java @@ -81,7 +81,7 @@ public class MultiTermVectorsItemResponse implements Streamable { } /** - * The actual get response, null if its a failure. + * The actual get response, {@code null} if its a failure. 
*/ public TermVectorsResponse getResponse() { return this.response; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardRequest.java index 8fdb6398ddc..81f1b023578 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsShardRequest.java @@ -59,7 +59,7 @@ public class MultiTermVectorsShardRequest extends SingleShardRequest<MultiTermVectorsShardRequest> { - * _local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public MultiTermVectorsShardRequest preference(String preference) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 92086cdd97d..031a537c37b 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -44,7 +44,7 @@ import static org.apache.lucene.util.ArrayUtil.grow; * exactly like the {@link Fields} class except for one thing: It can return * offsets and payloads even if positions are not present. You must call * nextPosition() anyway to move the counter although this method only returns - * -1,, if no positions were returned by the {@link TermVectorsRequest}. + * {@code -1}, if no positions were returned by the {@link TermVectorsRequest}. *

* The data is stored in two byte arrays ({@code headerRef} and * {@code termVectors}, both {@link BytesRef}) that have the following format: diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 053eb6939da..f416627c1e0 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -294,7 +294,7 @@ public class TermVectorsRequest extends SingleShardRequest i /** * Sets the preference to execute the search. Defaults to randomize across - * shards. Can be set to _local to prefer local shards or a custom value, + * shards. Can be set to {@code _local} to prefer local shards or a custom value, * which guarantees that the same order will be used across different * requests. */ diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequestBuilder.java index 34ce90156d1..7faadef68d8 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequestBuilder.java @@ -90,7 +90,7 @@ public class TermVectorsRequestBuilder extends ActionRequestBuilder_local to prefer local shards or a custom value, which guarantees that the same order + * {@code _local} to prefer local shards or a custom value, which guarantees that the same order * will be used across different requests. */ public TermVectorsRequestBuilder setPreference(String preference) { diff --git a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index ccb3296a5d5..468863266fc 100644 --- a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -310,7 +310,7 @@ public interface ClusterAdminClient extends ElasticsearchClient { /** * Returns a request builder to fetch top N hot-threads samples per node. The hot-threads are only sampled - * for the node ids provided. Note: Use * to fetch samples for all nodes + * for the node ids provided. Note: Use {@code *} to fetch samples for all nodes */ NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... nodesIds); diff --git a/server/src/main/java/org/elasticsearch/client/Requests.java b/server/src/main/java/org/elasticsearch/client/Requests.java index 74f83452e51..19ad2fb397e 100644 --- a/server/src/main/java/org/elasticsearch/client/Requests.java +++ b/server/src/main/java/org/elasticsearch/client/Requests.java @@ -130,7 +130,7 @@ public class Requests { * Creates a search request against one or more indices. Note, the search source must be set either using the * actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}. * - * @param indices The indices to search against. Use null or _all to execute against all indices + * @param indices The indices to search against. Use {@code null} or {@code _all} to execute against all indices * @return The search request * @see org.elasticsearch.client.Client#search(org.elasticsearch.action.search.SearchRequest) */ @@ -220,7 +220,7 @@ public class Requests { /** * Create a create mapping request against one or more indices. 
* - * @param indices The indices to create mapping. Use null or _all to execute against all indices + * @param indices The indices to create mapping. Use {@code null} or {@code _all} to execute against all indices * @return The create mapping request * @see org.elasticsearch.client.IndicesAdminClient#putMapping(org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest) */ @@ -240,7 +240,7 @@ public class Requests { /** * Creates a refresh indices request. * - * @param indices The indices to refresh. Use null or _all to execute against all indices + * @param indices The indices to refresh. Use {@code null} or {@code _all} to execute against all indices * @return The refresh request * @see org.elasticsearch.client.IndicesAdminClient#refresh(org.elasticsearch.action.admin.indices.refresh.RefreshRequest) */ @@ -251,7 +251,7 @@ public class Requests { /** * Creates a flush indices request. * - * @param indices The indices to flush. Use null or _all to execute against all indices + * @param indices The indices to flush. Use {@code null} or {@code _all} to execute against all indices * @return The flush request * @see org.elasticsearch.client.IndicesAdminClient#flush(org.elasticsearch.action.admin.indices.flush.FlushRequest) */ @@ -262,7 +262,7 @@ public class Requests { /** * Creates a synced flush indices request. * - * @param indices The indices to sync flush. Use null or _all to execute against all indices + * @param indices The indices to sync flush. Use {@code null} or {@code _all} to execute against all indices * @return The synced flush request * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) */ @@ -273,7 +273,7 @@ public class Requests { /** * Creates a force merge request. * - * @param indices The indices to force merge. Use null or _all to execute against all indices + * @param indices The indices to force merge. Use {@code null} or {@code _all} to execute against all indices * @return The force merge request * @see org.elasticsearch.client.IndicesAdminClient#forceMerge(org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest) */ @@ -284,7 +284,7 @@ public class Requests { /** * Creates an upgrade request. * - * @param indices The indices to upgrade. Use null or _all to execute against all indices + * @param indices The indices to upgrade. Use {@code null} or {@code _all} to execute against all indices * @return The upgrade request * @see org.elasticsearch.client.IndicesAdminClient#upgrade(UpgradeRequest) */ @@ -295,7 +295,7 @@ public class Requests { /** * Creates a clean indices cache request. * - * @param indices The indices to clean their caches. Use null or _all to execute against all indices + * @param indices The indices to clean their caches. Use {@code null} or {@code _all} to execute against all indices * @return The request */ public static ClearIndicesCacheRequest clearIndicesCacheRequest(String... indices) { @@ -305,7 +305,7 @@ public class Requests { /** * A request to update indices settings. * - * @param indices The indices to update the settings for. Use null or _all to executed against all indices. + * @param indices The indices to update the settings for. Use {@code null} or {@code _all} to executed against all indices. * @return The request */ public static UpdateSettingsRequest updateSettingsRequest(String... indices) { @@ -334,7 +334,7 @@ public class Requests { * Creates a cluster health request. * * @param indices The indices to provide additional cluster health information for. 
- * Use null or _all to execute against all indices + * Use {@code null} or {@code _all} to execute against all indices * @return The cluster health request * @see org.elasticsearch.client.ClusterAdminClient#health(org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest) */ @@ -367,7 +367,7 @@ public class Requests { } /** - * Creates a nodes info request against one or more nodes. Pass null or an empty array for all nodes. + * Creates a nodes info request against one or more nodes. Pass {@code null} or an empty array for all nodes. * * @param nodesIds The nodes ids to get the status for * @return The nodes info request @@ -378,7 +378,7 @@ public class Requests { } /** - * Creates a nodes stats request against one or more nodes. Pass null or an empty array for all nodes. + * Creates a nodes stats request against one or more nodes. Pass {@code null} or an empty array for all nodes. * * @param nodesIds The nodes ids to get the stats for * @return The nodes stats request @@ -390,7 +390,7 @@ public class Requests { /** * Creates a nodes usage request against one or more nodes. Pass - * null or an empty array for all nodes. + * {@code null} or an empty array for all nodes. * * @param nodesIds * The nodes ids to get the usage for @@ -432,7 +432,7 @@ public class Requests { } /** - * Creates a nodes tasks request against one or more nodes. Pass null or an empty array for all nodes. + * Creates a nodes tasks request against one or more nodes. Pass {@code null} or an empty array for all nodes. * * @return The nodes tasks request * @see org.elasticsearch.client.ClusterAdminClient#cancelTasks(CancelTasksRequest) diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index ee4779bc8c5..6343c2f7274 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -104,7 +104,7 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> { } /** - * Returns true if one of the global blocks as its disable state persistence flag set. + * Returns {@code true} if one of the global blocks has its disable state persistence flag set. */ public boolean disableStatePersistence() { for (ClusterBlock clusterBlock : global) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index fa30abe5a73..c2a23a378f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; /** - * This class acts as a functional wrapper around the index.auto_expand_replicas setting. + * This class acts as a functional wrapper around the {@code index.auto_expand_replicas} setting. * This setting, or rather its value, is expanded into a min and max value which requires special handling * based on the number of data nodes in the cluster. This class handles all the parsing and streamlines the access to these values.
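To make the expansion behaviour described above concrete, here is a minimal sketch of supplying `index.auto_expand_replicas` through the `Settings` builder used throughout this patch; the range value is one of the documented forms, and the standalone `main` wrapper is purely illustrative:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class AutoExpandReplicasSketch {
    public static void main(String[] args) {
        // Expand the replica count between 0 and "all data nodes" as the cluster grows or shrinks
        Settings indexSettings = Settings.builder()
                .put("index.auto_expand_replicas", "0-all")
                .build();
        System.out.println(indexSettings.get("index.auto_expand_replicas")); // prints 0-all
    }
}
--------------------------------------------------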
*/ diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 414e06a2365..7af2ec2d237 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -1317,7 +1317,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragment { } /** - * Returns the routing factor for this index. The default is 1. + * Returns the routing factor for this index. The default is {@code 1}. * * @see #getRoutingFactor(int, int) for details */ diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 1f36e50ca1d..8fa3c2e0fc1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -277,7 +277,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { * Iterates through the list of indices and selects the effective list of filtering aliases for the * given index. *

Only aliases with filters are returned. If the indices list contains a non-filtering reference to - * the index itself - null is returned. Returns null if no filtering is required. + * the index itself - null is returned. Returns {@code null} if no filtering is required. */ public String[] filteringAliases(ClusterState state, String index, String... expressions) { return indexAliases(state, index, AliasMetaData::filteringRequired, false, expressions); @@ -286,7 +286,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { /** * Iterates through the list of indices and selects the effective list of required aliases for the given index. *

Only aliases where the given predicate tests successfully are returned. If the indices list contains a non-required reference to - * the index itself - null is returned. Returns null if no filtering is required. + * the index itself - null is returned. Returns {@code null} if no filtering is required. */ public String[] indexAliases(ClusterState state, String index, Predicate<AliasMetaData> requiredAlias, boolean skipIdentity, String... expressions) { diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 5522e37f71a..c657af38703 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -84,7 +84,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements Iterable<DiscoveryNode> { } /** - * Returns true if the local node is the elected master node. + * Returns {@code true} if the local node is the elected master node. */ public boolean isLocalNodeElectedMaster() { if (localNodeId == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 5a4e0c78414..4e7e81def87 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -52,7 +52,7 @@ import java.util.Set; * single shard in this context has one or more instances, namely exactly one * {@link ShardRouting#primary() primary} and 1 or more replicas. In other * words, each instance of a shard is considered a replica while only one - * replica per shard is a primary replica. The primary replica + * replica per shard is a {@code primary} replica. The {@code primary} replica * can be seen as the "leader" of the shard acting as the primary entry point * for operations on a specific shard. *

diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java index 638875ea071..dacf49cb736 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardsIterator.java @@ -45,7 +45,7 @@ public interface ShardsIterator extends Iterable<ShardRouting> { int sizeActive(); /** - * Returns the next shard, or null if none available. + * Returns the next shard, or {@code null} if none available. */ ShardRouting nextOrNull(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index a60287a8487..7998a1d27dd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -173,7 +173,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { *

* Each of these properties is expressed as a factor such that the property's factor defines the relative importance of the property for the * weight function. For example, if the weight function should calculate the weights only based on a global (shard) balance, the index balance - * can be set to 0.0 and will in turn have no effect on the distribution. + * can be set to {@code 0.0} and will in turn have no effect on the distribution. *
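As an illustration of the "index balance set to 0.0" case in the hunk above, a short sketch using the corresponding `cluster.routing.allocation.balance.*` keys; the concrete values are arbitrary:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class BalanceFactorsSketch {
    public static void main(String[] args) {
        // Ignore the per-index balance entirely and weigh only the global shard count
        Settings settings = Settings.builder()
                .put("cluster.routing.allocation.balance.index", 0.0f)
                .put("cluster.routing.allocation.balance.shard", 1.0f)
                .build();
        System.out.println(settings.getAsFloat("cluster.routing.allocation.balance.index", null));
    }
}
--------------------------------------------------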

* The weight per index is calculated based on the following formula: *
@@ -476,9 +476,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { * {@link WeightFunction}. This weight is calculated per index to * distribute shards evenly per index. The balancer tries to relocate * shards only if the delta exceeds the threshold. In the default case - * the threshold is set to 1.0 to enforce gaining relocation + * the threshold is set to {@code 1.0} to enforce gaining relocation * only, or in other words relocations that move the weight delta closer - * to 0.0 + * to {@code 0.0} */ private void balanceByWeights() { final AllocationDeciders deciders = allocation.deciders(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index f00e9cdc3ce..e7e538ae371 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -36,7 +36,7 @@ import org.elasticsearch.common.settings.Settings; /** * This {@link AllocationDecider} controls shard allocation based on - * awareness key-value pairs defined in the node configuration. + * {@code awareness} key-value pairs defined in the node configuration. * Awareness explicitly controls where replicas should be allocated based on * attributes like node or physical rack locations. Awareness attributes accept * arbitrary configuration keys like a rack or data-center identifier. For example @@ -48,7 +48,7 @@ import org.elasticsearch.common.settings.Settings; * will cause allocations to be distributed over different racks such that * ideally at least one replica of each shard is available on the same rack. * To enable allocation awareness in this example nodes should contain a value - * for the rack_id key like: + * for the {@code rack_id} key like: *
      * node.attr.rack_id:1
      * 
    @@ -67,7 +67,7 @@ import org.elasticsearch.common.settings.Settings; * *

    * In contrast to regular awareness this setting will prevent over-allocation on - * zone1 even if zone2 fails partially or becomes entirely + * {@code zone1} even if {@code zone2} fails partially or becomes entirely * unavailable. Nodes that belong to a certain zone / group should be started * with the zone id configured on the node-level settings like: *

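The node-level snippet the javadoc points to pairs with a cluster-level awareness configuration; a minimal sketch of both halves follows (the `zone` attribute and zone names are hypothetical):

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class AwarenessSettingsSketch {
    public static void main(String[] args) {
        // Node-level attribute, normally written in elasticsearch.yml as node.attr.zone: zone1
        Settings nodeSettings = Settings.builder()
                .put("node.attr.zone", "zone1")
                .build();

        // Cluster-level awareness with forced zones, preventing over-allocation to the surviving zone
        Settings clusterSettings = Settings.builder()
                .put("cluster.routing.allocation.awareness.attributes", "zone")
                .put("cluster.routing.allocation.awareness.force.zone.values", "zone1,zone2")
                .build();

        System.out.println(nodeSettings.get("node.attr.zone"));
        System.out.println(clusterSettings.get("cluster.routing.allocation.awareness.force.zone.values"));
    }
}
--------------------------------------------------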
    diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
    index 281f6a603c3..ea945c23c72 100644
    --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
    +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
    @@ -32,16 +32,16 @@ import org.elasticsearch.common.settings.Settings;
      * This {@link AllocationDecider} controls re-balancing operations based on the
* cluster wide active shard state. This decider cannot be configured in
* real-time and should be set prior to cluster start via
    - * cluster.routing.allocation.allow_rebalance. This setting respects the following
    + * {@code cluster.routing.allocation.allow_rebalance}. This setting respects the following
      * values:
* <ul>
- * <li><tt>indices_primaries_active</tt> - Re-balancing is allowed only once all
+ * <li>{@code indices_primaries_active} - Re-balancing is allowed only once all
* primary shards on all indices are active.
* </li>
- * <li><tt>indices_all_active</tt> - Re-balancing is allowed only once all
+ * <li>{@code indices_all_active} - Re-balancing is allowed only once all
* shards on all indices are active.
* </li>
- * <li><tt>always</tt> - Re-balancing is allowed once a shard replication group
+ * <li>{@code always} - Re-balancing is allowed once a shard replication group
* is active
* </li>
* </ul>
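For readers unfamiliar with the three values above, a short sketch of supplying one of them via the `Settings` API; the chosen value is arbitrary:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class RebalanceSettingSketch {
    public static void main(String[] args) {
        // Defer re-balancing until every shard, primary and replica alike, is active
        Settings settings = Settings.builder()
                .put("cluster.routing.allocation.allow_rebalance", "indices_all_active")
                .build();
        System.out.println(settings.get("cluster.routing.allocation.allow_rebalance"));
    }
}
--------------------------------------------------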
    */ diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 63fbad59b92..05351109c86 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -31,11 +31,11 @@ import org.elasticsearch.common.settings.Settings; * {@link AllocationDecider} controls the number of currently in-progress * re-balance (relocation) operations and restricts node allocations if the * configured threshold is reached. The default number of concurrent rebalance - * operations is set to 2 + * operations is set to {@code 2} *

    * Re-balance operations can be controlled in real-time via the cluster update API using - * cluster.routing.allocation.cluster_concurrent_rebalance. Iff this - * setting is set to -1 the number of concurrent re-balance operations + * {@code cluster.routing.allocation.cluster_concurrent_rebalance}. Iff this + * setting is set to {@code -1} the number of concurrent re-balance operations * are unlimited. */ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index 06a0859cee7..f3146f6f771 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -44,8 +44,8 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; * This filter is used to make explicit decision on which nodes certain shard * can / should be allocated. The decision if a shard can be allocated, must not * be allocated or should be allocated is based on either cluster wide dynamic - * settings (cluster.routing.allocation.*) or index specific dynamic - * settings (index.routing.allocation.*). All of those settings can be + * settings ({@code cluster.routing.allocation.*}) or index specific dynamic + * settings ({@code index.routing.allocation.*}). All of those settings can be * changed at runtime via the cluster or the index update settings API. *

* Note: Cluster settings are applied first and will override index specific @@ -53,14 +53,14 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; * settings it won't be allocated on a node if the cluster specific settings * would disallow the allocation. Filters are applied in the following order: *
* <ol>
- * <li><tt>required</tt> - filters required allocations.
- * If any required filters are set the allocation is denied if the index is not in the set of required to allocate
+ * <li>{@code required} - filters required allocations.
+ * If any {@code required} filters are set the allocation is denied if the index is not in the set of {@code required} to allocate
* on the filtered node</li>
- * <li><tt>include</tt> - filters "allowed" allocations.
- * If any include filters are set the allocation is denied if the index is not in the set of include filters for
+ * <li>{@code include} - filters "allowed" allocations.
+ * If any {@code include} filters are set the allocation is denied if the index is not in the set of {@code include} filters for
* the filtered node</li>
- * <li><tt>exclude</tt> - filters "prohibited" allocations.
- * If any exclude filters are set the allocation is denied if the index is in the set of exclude filters for the
+ * <li>{@code exclude} - filters "prohibited" allocations.
+ * If any {@code exclude} filters are set the allocation is denied if the index is in the set of {@code exclude} filters for the
* filtered node</li>
* </ol>
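A sketch of the three filter flavours as index-level settings; note that the concrete key for "required" filters is `index.routing.allocation.require.*`, and the `rack` attribute and its values are hypothetical:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class FilterAllocationSketch {
    public static void main(String[] args) {
        Settings indexFilters = Settings.builder()
                .put("index.routing.allocation.require.rack", "rack1")        // must match
                .put("index.routing.allocation.include.rack", "rack1,rack2")  // allowed to match
                .put("index.routing.allocation.exclude.rack", "rack3")        // must not match
                .build();
        indexFilters.keySet().forEach(System.out::println);
    }
}
--------------------------------------------------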
*/ diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java index c3817b429bb..4c580509e92 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java @@ -30,8 +30,8 @@ import org.elasticsearch.common.settings.Settings; /** * An allocation decider that prevents shards from being allocated on any node if the shard's allocation has been retried N times without * success. This means if a shard has been INITIALIZING N times in a row without being moved to STARTED the shard will be ignored until - * the setting for index.allocation.max_retry is raised. The default value is 5. - * Note: This allocation decider also allows allocation of repeatedly failing shards when the /_cluster/reroute?retry_failed=true + * the setting for {@code index.allocation.max_retry} is raised. The default value is {@code 5}. - * Note: This allocation decider also allows allocation of repeatedly failing shards when the {@code /_cluster/reroute?retry_failed=true} * API is manually invoked. This allows single retries without raising the limits. * */ diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java index 1bf77642252..cc2d488974b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java @@ -30,17 +30,17 @@ import org.elasticsearch.common.settings.Settings; /** * An allocation decider that prevents multiple instances of the same shard from - * being allocated on the same node. + * being allocated on the same {@code node}. * * The {@link #CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING} setting allows performing a check to prevent - * allocation of multiple instances of the same shard on a single host, + * allocation of multiple instances of the same shard on a single {@code host}, * based on host name and host address. Defaults to `false`, meaning that no * check is performed by default. * *
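A minimal sketch of enabling the optional host-level check described in the `SameShardAllocationDecider` hunk, using the setting key behind `CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING`:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class SameShardHostSketch {
    public static void main(String[] args) {
        // Also prevent two copies of one shard from landing on the same physical host
        Settings settings = Settings.builder()
                .put("cluster.routing.allocation.same_shard.host", true)
                .build();
        System.out.println(settings.getAsBoolean("cluster.routing.allocation.same_shard.host", false));
    }
}
--------------------------------------------------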

    * Note: this setting only applies if multiple nodes are started on the same - * host. Allocations of multiple copies of the same shard on the same - * node are not allowed independently of this setting. + * {@code host}. Allocations of multiple copies of the same shard on the same + * {@code node} are not allowed independently of this setting. *

*/ public class SameShardAllocationDecider extends AllocationDecider { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 2118d37fe47..398ee0a17ad 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -34,12 +34,12 @@ import java.util.function.BiPredicate; /** * This {@link AllocationDecider} limits the number of shards per node on a per * index or node-wide basis. The allocator prevents a single node from holding more - * than index.routing.allocation.total_shards_per_node per index and - * cluster.routing.allocation.total_shards_per_node globally during the allocation + * than {@code index.routing.allocation.total_shards_per_node} per index and + * {@code cluster.routing.allocation.total_shards_per_node} globally during the allocation * process. The limits of this decider can be changed in real-time via the * index settings API. *
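A short sketch of both shard-count limits; the values are arbitrary, and a negative value (the default) lifts the limit as the following hunk explains:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class ShardsPerNodeSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("index.routing.allocation.total_shards_per_node", 2)     // per index
                .put("cluster.routing.allocation.total_shards_per_node", 10)  // cluster wide
                .build();
        System.out.println(settings.getAsInt("index.routing.allocation.total_shards_per_node", -1));
    }
}
--------------------------------------------------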

    - * If index.routing.allocation.total_shards_per_node is reset to a negative value shards + * If {@code index.routing.allocation.total_shards_per_node} is reset to a negative value shards * per index are unlimited per node. Shards currently in the * {@link ShardRoutingState#RELOCATING relocating} state are ignored by this * {@link AllocationDecider} until the shard changed its state to either diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index e52102774dd..7821ad11a52 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -37,12 +37,12 @@ import static org.elasticsearch.cluster.routing.allocation.decider.Decision.YES; * the cluster. It exposes two settings via the cluster update API that allow * changes in real-time: *

* <ul>
- * <li><tt>cluster.routing.allocation.node_initial_primaries_recoveries</tt> -
+ * <li>{@code cluster.routing.allocation.node_initial_primaries_recoveries} -
* restricts the number of initial primary shard recovery operations on a single
- * node. The default is <tt>4</tt></li>
- * <li><tt>cluster.routing.allocation.node_concurrent_recoveries</tt> -
+ * node. The default is {@code 4}</li>
+ * <li>{@code cluster.routing.allocation.node_concurrent_recoveries} -
* restricts the number of total concurrent shards initializing on a single node. The
- * default is <tt>2</tt></li>
+ * default is {@code 2}</li>
* </ul>

    * If one of the above thresholds is exceeded per node this allocation decider diff --git a/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java b/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java index 7f63784d92a..9f4090d5c6b 100644 --- a/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java +++ b/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java @@ -32,7 +32,7 @@ import java.util.Iterator; import java.util.Objects; /** - * A reusable class to encode field -> memory size mappings + * A reusable class to encode {@code field -> memory size} mappings */ public final class FieldMemoryStats implements Writeable, Iterable>{ diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index abf832296c0..7147045c414 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -67,7 +67,7 @@ public abstract class BytesReference implements Accountable, Comparablefrom index up to length. + * Slice the bytes from the {@code from} index up to {@code length}. */ public abstract BytesReference slice(int from, int length); @@ -158,7 +158,7 @@ public abstract class BytesReference implements Accountable, ComparableBytesRef.deepCopyOf(reference.toBytesRef() instead + * to modify the returned array use {@code BytesRef.deepCopyOf(reference.toBytesRef()} instead */ public static byte[] toBytes(BytesReference reference) { final BytesRef bytesRef = reference.toBytesRef(); diff --git a/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java b/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java index 85d7eda8363..04713f249e8 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java +++ b/server/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java @@ -85,7 +85,7 @@ public final class CopyOnWriteHashMap extends AbstractMap { /** * Recursively add a new entry to this node. hashBits is * the number of bits that are still set in the hash. When this value - * reaches a number that is less than or equal to 0, a leaf + * reaches a number that is less than or equal to {@code 0}, a leaf * node needs to be created since it means that a collision occurred * on the 32 bits of the hash. */ diff --git a/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java b/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java index 4f0ef4c6887..e71c9b03899 100644 --- a/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java +++ b/server/src/main/java/org/elasticsearch/common/component/Lifecycle.java @@ -69,28 +69,28 @@ public class Lifecycle { } /** - * Returns true if the state is initialized. + * Returns {@code true} if the state is initialized. */ public boolean initialized() { return state == State.INITIALIZED; } /** - * Returns true if the state is started. + * Returns {@code true} if the state is started. */ public boolean started() { return state == State.STARTED; } /** - * Returns true if the state is stopped. + * Returns {@code true} if the state is stopped. */ public boolean stopped() { return state == State.STOPPED; } /** - * Returns true if the state is closed. + * Returns {@code true} if the state is closed. 
*/ public boolean closed() { return state == State.CLOSED; diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Strings.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Strings.java index 40215810607..29a9ce10391 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Strings.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Strings.java @@ -41,9 +41,9 @@ public class Strings { *

    * * @param s the string whose first character is to be uppercased - * @return a string equivalent to s with its first character + * @return a string equivalent to {@code s} with its first character * converted to uppercase - * @throws NullPointerException if s is null + * @throws NullPointerException if {@code s} is null */ public static String capitalize(String s) { if (s.length() == 0) { diff --git a/server/src/main/java/org/elasticsearch/common/lease/Releasables.java b/server/src/main/java/org/elasticsearch/common/lease/Releasables.java index 6c928b29a84..e053f2fd200 100644 --- a/server/src/main/java/org/elasticsearch/common/lease/Releasables.java +++ b/server/src/main/java/org/elasticsearch/common/lease/Releasables.java @@ -61,7 +61,7 @@ public enum Releasables { closeWhileHandlingException(Arrays.asList(releasables)); } - /** Release the provided {@link Releasable}s, ignoring exceptions if success is false. */ + /** Release the provided {@link Releasable}s, ignoring exceptions if success is {@code false}. */ public static void close(boolean success, Iterable releasables) { if (success) { close(releasables); @@ -70,7 +70,7 @@ public enum Releasables { } } - /** Release the provided {@link Releasable}s, ignoring exceptions if success is false. */ + /** Release the provided {@link Releasable}s, ignoring exceptions if success is {@code false}. */ public static void close(boolean success, Releasable... releasables) { close(success, Arrays.asList(releasables)); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 5bef7bee4f1..ebd0d5ba2ef 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -637,9 +637,9 @@ public class Lucene { } /** - * Returns true iff the given exception or + * Returns {@code true} iff the given exception or * one of it's causes is an instance of {@link CorruptIndexException}, - * {@link IndexFormatTooOldException}, or {@link IndexFormatTooNewException} otherwise false. + * {@link IndexFormatTooOldException}, or {@link IndexFormatTooNewException} otherwise {@code false}. */ public static boolean isCorruptionException(Throwable t) { return ExceptionsHelper.unwrapCorruption(t) != null; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java index f79f45f3b62..394b8bbe65d 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java @@ -287,7 +287,7 @@ public class MoreLikeThisQuery extends Query { /** * Number of terms that must match the generated query expressed in the - * common syntax for minimum should match. Defaults to 30%. + * common syntax for minimum should match. Defaults to {@code 30%}. 
* * @see org.elasticsearch.common.lucene.search.Queries#calculateMinShouldMatch(int, String) */ diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java index b9440edd5cf..7dab3e52566 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -66,12 +66,12 @@ public final class NetworkService { */ public interface CustomNameResolver { /** - * Resolves the default value if possible. If not, return null. + * Resolves the default value if possible. If not, return {@code null}. */ InetAddress[] resolveDefault(); /** - * Resolves a custom value handling, return null if can't handle it. + * Resolves a custom value handling, return {@code null} if can't handle it. */ InetAddress[] resolveIfPossible(String value) throws IOException; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/PropertyPlaceholder.java b/server/src/main/java/org/elasticsearch/common/settings/PropertyPlaceholder.java index 2eb7e2b8e70..52333d108cf 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/PropertyPlaceholder.java +++ b/server/src/main/java/org/elasticsearch/common/settings/PropertyPlaceholder.java @@ -28,7 +28,7 @@ import java.util.Set; /** * Utility class for working with Strings that have placeholder values in them. A placeholder takes the form - * ${name}. Using PropertyPlaceholder these placeholders can be substituted for + * {@code ${name}}. Using {@code PropertyPlaceholder} these placeholders can be substituted for * user-supplied values. *

* Values for substitution can be supplied using a {@link Properties} instance or using a diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index d9e42a67671..f45f4bda9c9 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -312,7 +312,7 @@ public class Setting<T> implements ToXContentObject { /** * Returns true iff this setting is a group setting. Group settings represent a set of settings rather than a single value. - * The key, see {@link #getKey()}, in contrast to non-group settings is a prefix like cluster.store. that matches all settings + * The key, see {@link #getKey()}, in contrast to non-group settings is a prefix like {@code cluster.store.} that matches all settings * with this prefix. */ boolean isGroupSetting() { @@ -716,8 +716,8 @@ public class Setting<T> implements ToXContentObject { } /** - * Returns the namespace for a concrete setting. Ie. an affix setting with prefix: search. and suffix: username - * will return remote as a namespace for the setting search.remote.username + * Returns the namespace for a concrete setting. E.g. an affix setting with prefix: {@code search.} and suffix: {@code username} + * will return {@code remote} as a namespace for the setting {@code search.remote.username} */ public String getNamespace(Setting concreteSetting) { return key.getNamespace(concreteSetting.getKey()); diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 95beadd66a6..2eb14f7ac65 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -230,7 +230,7 @@ public final class Settings implements ToXContentFragment { * Returns the setting value associated with the setting key. * * @param setting The setting key - * @return The setting value, null if it does not exists. + * @return The setting value, {@code null} if it does not exist. */ public String get(String setting) { return toString(settings.get(setting)); @@ -739,8 +739,8 @@ public final class Settings implements ToXContentFragment { Collections.unmodifiableSet(new HashSet<>(Arrays.asList("settings_filter", "flat_settings"))); /** - * Returns true if this settings object contains no settings - * @return true if this settings object contains no settings + * Returns {@code true} if this settings object contains no settings + * @return {@code true} if this settings object contains no settings */ public boolean isEmpty() { return this.settings.isEmpty() && (secureSettings == null || secureSettings.getSettingNames().isEmpty()); @@ -1176,7 +1176,7 @@ /** * Runs across all the settings set on this builder and - * replaces ${...} elements in each setting with + * replaces {@code ${...}} elements in each setting with * another setting already set on this builder.
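A small sketch of the builder-level placeholder substitution described in the last hunk above; the keys are made up, and resolution happens against settings already present on the same builder:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class PlaceholderSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("app.name", "demo")
                .put("app.greeting", "hello ${app.name}")
                .replacePropertyPlaceholders()
                .build();
        System.out.println(settings.get("app.greeting")); // prints "hello demo"
    }
}
--------------------------------------------------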
*/ public Builder replacePropertyPlaceholders() { diff --git a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java index ec6de176c2f..1c0ac613144 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java +++ b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeUnit.java @@ -26,9 +26,9 @@ import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; /** - * A SizeUnit represents size at a given unit of + * A {@code SizeUnit} represents size at a given unit of * granularity and provides utility methods to convert across units. - * A SizeUnit does not maintain size information, but only + * A {@code SizeUnit} does not maintain size information, but only * helps organize and use size representations that may be maintained * separately across various contexts. */ diff --git a/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java index 179870f8653..6fc863ee9e4 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java @@ -105,7 +105,7 @@ public final class Fuzziness implements ToXContentFragment, Writeable { } /** - * Creates a {@link Fuzziness} instance from an edit distance. The value must be one of [0, 1, 2] + * Creates a {@link Fuzziness} instance from an edit distance. The value must be one of {@code [0, 1, 2]} * * Note: Using this method only makes sense if the field you are applying Fuzziness to is some sort of string. */ diff --git a/server/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java index 2830d8318a3..939664dd4d6 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/MemorySizeValue.java @@ -31,8 +31,8 @@ public enum MemorySizeValue { ; /** Parse the provided string as a memory size. This method either accepts absolute values such as - * 42 (default assumed unit is byte) or 2mb, or percentages of the heap size: if - * the heap is 1G, 10% will be parsed as 100mb. */ + * {@code 42} (default assumed unit is byte) or {@code 2mb}, or percentages of the heap size: if + * the heap is 1G, {@code 10%} will be parsed as {@code 100mb}. */ public static ByteSizeValue parseBytesSizeValueOrHeapRatio(String sValue, String settingName) { settingName = Objects.requireNonNull(settingName); if (sValue != null && sValue.endsWith("%")) { diff --git a/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java b/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java index 20c12d564da..1d641a4a754 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java @@ -27,7 +27,7 @@ public class ArrayUtils { private ArrayUtils() {} /** - * Return the index of value in array, or -1 if there is no such index. + * Return the index of value in array, or {@code -1} if there is no such index. * If there are several values that are within tolerance or less of value, this method will return the * index of the closest value. In case of several values being as close to value, there is no guarantee which index * will be returned.
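A quick sketch of the parsing behaviour documented in the `MemorySizeValue` hunk above, using the exact method signature shown there; the setting name is arbitrary:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.MemorySizeValue;

public class HeapRatioSketch {
    public static void main(String[] args) {
        // "10%" resolves against the current JVM heap; "2mb" is an absolute value
        ByteSizeValue tenPercentOfHeap = MemorySizeValue.parseBytesSizeValueOrHeapRatio("10%", "my.setting");
        ByteSizeValue twoMegabytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio("2mb", "my.setting");
        System.out.println(tenPercentOfHeap + " / " + twoMegabytes);
    }
}
--------------------------------------------------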
diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java index 8ffbbc9b6e4..b2cc8b99c63 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java @@ -49,7 +49,7 @@ public interface RefCounted { void incRef(); /** - * Tries to increment the refCount of this instance. This method will return true iff the refCount was + * Tries to increment the refCount of this instance. This method will return {@code true} iff the refCount was successfully incremented. * * @see #decRef() * @see #incRef() diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index 024c50fb6e0..ebce175e981 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -134,7 +134,7 @@ public class ElectMasterService extends AbstractComponent { } /** - * Elects a new master out of the possible nodes, returning it. Returns null + * Elects a new master out of the possible nodes, returning it. Returns {@code null} * if no master has been elected. */ public MasterCandidate electMaster(Collection<MasterCandidate> candidates) { diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index b6c8d411474..f6c190fee09 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -90,7 +90,7 @@ public abstract class MetaDataStateFormat<T> { * Writes the given state to the given directories. The state is written to a * state directory ({@value #STATE_DIR_NAME}) underneath each of the given file locations and is created if it * doesn't exist. The state is serialized to a temporary file in that directory and is then atomically moved to - * it's target filename of the pattern {prefix}{version}.st. + * its target filename of the pattern {@code {prefix}{version}.st}. * * @param state the state object to write * @param locations the locations where the state should be written to. diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 767ef487339..99a5f73c150 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -72,7 +72,7 @@ import java.util.function.Function; *

* <ul>
* <li>{@link Similarity} - New {@link Similarity} implementations can be registered through
* {@link #addSimilarity(String, TriFunction)} while existing Providers can be referenced through Settings under the
* {@link IndexModule#SIMILARITY_SETTINGS_PREFIX} prefix along with the "type" value. For example, to reference the
- * {@link BM25Similarity}, the configuration "index.similarity.my_similarity.type : "BM25" can be used.
+ * {@link BM25Similarity}, the configuration {@code "index.similarity.my_similarity.type : "BM25"} can be used.
* </li>
* <li>{@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link #addIndexStore(String, Function)}</li>
* <li>{@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered via
* {@link #addIndexEventListener(IndexEventListener)}</li>
* </ul>
diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 12ded42033e..08cacee6ae0 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -606,7 +606,7 @@ public final class IndexSettings { } /** - * Returns this interval in which the shards of this index are asynchronously refreshed. -1 means async refresh is disabled. + * Returns the interval at which the shards of this index are asynchronously refreshed. {@code -1} means async refresh is disabled. */ public TimeValue getRefreshInterval() { return refreshInterval; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index e1ebd0ecc29..7faaf51f4de 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -112,7 +112,7 @@ public abstract class Engine implements Closeable { protected final ReleasableLock writeLock = new ReleasableLock(rwl.writeLock()); protected final SetOnce failedEngine = new SetOnce<>(); /* - * on lastWriteNanos we use System.nanoTime() to initialize this since: + * on {@code lastWriteNanos} we use System.nanoTime() to initialize this since: * - we use the value for figuring out if the shard / engine is active so if we startup and no write has happened yet we still consider it active * for the duration of the configured active to inactive period. If we initialize to 0 or Long.MAX_VALUE we either immediately or never mark it * inactive if no writes at all happen to the shard. diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/FieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/FieldData.java index eb569aa4c2b..b5e1608957e 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/FieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/FieldData.java @@ -285,7 +285,7 @@ public enum FieldData { /** * Returns whether the provided values *might* be multi-valued. There is no - * guarantee that this method will return false in the single-valued case. + * guarantee that this method will return {@code false} in the single-valued case. */ public static boolean isMultiValued(SortedSetDocValues values) { return DocValues.unwrapSingleton(values) == null; @@ -293,7 +293,7 @@ public enum FieldData { /** * Returns whether the provided values *might* be multi-valued. There is no - * guarantee that this method will return false in the single-valued case. + * guarantee that this method will return {@code false} in the single-valued case. */ public static boolean isMultiValued(SortedNumericDocValues values) { return DocValues.unwrapSingleton(values) == null; @@ -301,7 +301,7 @@ public enum FieldData { /** * Returns whether the provided values *might* be multi-valued. There is no - * guarantee that this method will return false in the single-valued case. + * guarantee that this method will return {@code false} in the single-valued case. */ public static boolean isMultiValued(SortedNumericDoubleValues values) { return unwrapSingleton(values) == null; @@ -309,7 +309,7 @@ public enum FieldData { /** * Returns whether the provided values *might* be multi-valued. There is no - * guarantee that this method will return false in the single-valued case.
+ * guarantee that this method will return {@code false} in the single-valued case. */ public static boolean isMultiValued(SortedBinaryDocValues values) { return unwrapSingleton(values) != null; @@ -317,7 +317,7 @@ public enum FieldData { /** * Returns whether the provided values *might* be multi-valued. There is no - * guarantee that this method will return false in the single-valued case. + * guarantee that this method will return {@code false} in the single-valued case. */ public static boolean isMultiValued(MultiGeoPointValues values) { return unwrapSingleton(values) == null; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java index b2e1b9e3104..a2baf1fee6c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java @@ -38,7 +38,7 @@ import java.util.Arrays; /** * Simple class to build document ID <-> ordinal mapping. Note: Ordinals are - * 1 based monotonically increasing positive integers. 0 + * {@code 1} based monotonically increasing positive integers. {@code 0} * denotes the missing value in this context. */ public final class OrdinalsBuilder implements Closeable { @@ -318,7 +318,7 @@ public final class OrdinalsBuilder implements Closeable { } /** - * Returns the current ordinal or 0 if this build has not been advanced via + * Returns the current ordinal or {@code 0} if this builder has not been advanced via * {@link #nextOrdinal()}. */ public long currentOrdinal() { @@ -423,7 +423,7 @@ public final class OrdinalsBuilder implements Closeable { * This method iterates all terms in the given {@link TermsEnum} and * associates each term's ordinal with the term's documents. The caller must * exhaust the returned {@link BytesRefIterator} which returns all values - * where the first returned value is associated with the ordinal 1 + * where the first returned value is associated with the ordinal {@code 1} * etc. */ public BytesRefIterator buildFromTerms(final TermsEnum termsEnum) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ContentPath.java b/server/src/main/java/org/elasticsearch/index/mapper/ContentPath.java index 6ab9019e849..3c67d3ee7f3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ContentPath.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ContentPath.java @@ -36,7 +36,7 @@ public final class ContentPath { } /** - * Constructs a json path with an offset. The offset will result an offset + * Constructs a json path with an offset. The offset will result in an {@code offset} * number of path elements to not be included in {@link #pathAsText(String)}. */ public ContentPath(int offset) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 69189ab1297..fb92c576e01 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -382,7 +382,7 @@ public abstract class MappedFieldType extends FieldType { return Relation.INTERSECTS; } - /** A term query to use when parsing a query string. Can return null. */ + /** A term query to use when parsing a query string. Can return {@code null}.
*/ @Nullable public Query queryStringTermQuery(Term term) { return null; diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index 942c72f2293..7e667af0544 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -226,7 +226,7 @@ public abstract class AbstractQueryBuilder> /** * Helper method to convert collection of {@link QueryBuilder} instances to lucene - * {@link Query} instances. {@link QueryBuilder} that return null calling + * {@link Query} instances. {@link QueryBuilder} that return {@code null} calling * their {@link QueryBuilder#toQuery(QueryShardContext)} method are not added to the * resulting collection. */ diff --git a/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index 58697d9ada5..be5fca365d8 100644 --- a/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -112,7 +112,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { /** * Adds a query that must appear in the matching documents and will - * contribute to scoring. No null value allowed. + * contribute to scoring. No {@code null} value allowed. */ public BoolQueryBuilder must(QueryBuilder queryBuilder) { if (queryBuilder == null) { @@ -131,7 +131,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { /** * Adds a query that must appear in the matching documents but will - * not contribute to scoring. No null value allowed. + * not contribute to scoring. No {@code null} value allowed. */ public BoolQueryBuilder filter(QueryBuilder queryBuilder) { if (queryBuilder == null) { @@ -150,7 +150,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { /** * Adds a query that must not appear in the matching documents. - * No null value allowed. + * No {@code null} value allowed. */ public BoolQueryBuilder mustNot(QueryBuilder queryBuilder) { if (queryBuilder == null) { @@ -169,8 +169,8 @@ public class BoolQueryBuilder extends AbstractQueryBuilder { /** * Adds a clause that should be matched by the returned documents. For a boolean query with no - * MUST clauses one or more SHOULD clauses must match a document - * for the BooleanQuery to match. No null value allowed. + * {@code MUST} clauses one or more SHOULD clauses must match a document + * for the BooleanQuery to match. No {@code null} value allowed. * * @see #minimumShouldMatch(int) */ diff --git a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index c0c08e65480..974466a3161 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -143,7 +143,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilderAND. + * {@code AND}. */ public CommonTermsQueryBuilder highFreqOperator(Operator operator) { this.highFreqOperator = (operator == null) ? DEFAULT_HIGH_FREQ_OCCUR : operator; @@ -156,7 +156,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilderAND. + * than {@link #cutoffFrequency(float)}. Defaults to {@code AND}. 
*/ public CommonTermsQueryBuilder lowFreqOperator(Operator operator) { this.lowFreqOperator = (operator == null) ? DEFAULT_LOW_FREQ_OCCUR : operator; @@ -185,7 +185,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder{@value #DEFAULT_CUTOFF_FREQ} + * {@code {@value #DEFAULT_CUTOFF_FREQ}} */ public CommonTermsQueryBuilder cutoffFrequency(float cutoffFrequency) { this.cutoffFrequency = cutoffFrequency; diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java index 0e90ba5ae57..8c24c3eea7a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java @@ -101,7 +101,7 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuildernull */ + /** Get the analyzer to use, if previously set, otherwise {@code null} */ public String analyzer() { return this.analyzer; } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java index ef88db6c12c..7dc01bb3450 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java @@ -108,7 +108,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuildernull */ + /** Get the analyzer to use, if previously set, otherwise {@code null} */ public String analyzer() { return this.analyzer; } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java index 74ccf423ffb..dc2054eda47 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java @@ -171,7 +171,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { return this.value; } - /** Sets the operator to use when using a boolean query. Defaults to OR. */ + /** Sets the operator to use when using a boolean query. Defaults to {@code OR}. */ public MatchQueryBuilder operator(Operator operator) { if (operator == null) { throw new IllegalArgumentException("[" + NAME + "] requires operator to be non-null"); @@ -194,7 +194,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { return this; } - /** Get the analyzer to use, if previously set, otherwise null */ + /** Get the analyzer to use, if previously set, otherwise {@code null} */ public String analyzer() { return this.analyzer; } @@ -258,7 +258,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { return this; } - /** Gets the optional cutoff value, can be null if not set previously */ + /** Gets the optional cutoff value, can be {@code null} if not set previously */ public Float cutoffFrequency() { return this.cutoffFrequency; } @@ -357,7 +357,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { /** * Whether phrase queries should be automatically generated for multi terms synonyms. - * Defaults to true. + * Defaults to {@code true}. 
*/ public boolean autoGenerateSynonymsPhraseQuery() { return autoGenerateSynonymsPhraseQuery; diff --git a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index b5e3272ab0a..0de474f8b99 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -593,7 +593,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder25. + * Defaults to {@code 25}. */ public MoreLikeThisQueryBuilder maxQueryTerms(int maxQueryTerms) { this.maxQueryTerms = maxQueryTerms; @@ -606,7 +606,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder2. + * frequency is {@code 2}. */ public MoreLikeThisQueryBuilder minTermFreq(int minTermFreq) { this.minTermFreq = minTermFreq; @@ -619,7 +619,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder5. + * many docs. Defaults to {@code 5}. */ public MoreLikeThisQueryBuilder minDocFreq(int minDocFreq) { this.minDocFreq = minDocFreq; @@ -645,7 +645,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder0. + * to {@code 0}. */ public MoreLikeThisQueryBuilder minWordLength(int minWordLength) { this.minWordLength = minWordLength; @@ -658,7 +658,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder0). + * unbounded ({@code 0}). */ public MoreLikeThisQueryBuilder maxWordLength(int maxWordLength) { this.maxWordLength = maxWordLength; @@ -707,7 +707,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder30%. + * common syntax for minimum should match. Defaults to {@code 30%}. * * @see org.elasticsearch.common.lucene.search.Queries#calculateMinShouldMatch(int, String) */ @@ -724,7 +724,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder0 (deactivated). + * Sets the boost factor to use when boosting terms. Defaults to {@code 0} (deactivated). */ public MoreLikeThisQueryBuilder boostTerms(float boostTerms) { this.boostTerms = boostTerms; @@ -736,7 +736,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilderfalse + * Whether to include the input documents. Defaults to {@code false} */ public MoreLikeThisQueryBuilder include(boolean include) { this.include = include; diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index 156e6cca48f..6aa3e418bf5 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -337,7 +337,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilderOR. + * Sets the operator to use when using a boolean query. Defaults to {@code OR}. */ public MultiMatchQueryBuilder operator(Operator operator) { if (operator == null) { @@ -460,7 +460,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder * - *

A tie-breaker value of 1.0 is interpreted as a signal to score queries as
+ * A tie-breaker value of {@code 1.0} is interpreted as a signal to score queries as
 * "most-match" queries where all matching query clauses are considered for scoring.

    * * @see Type @@ -477,7 +477,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder * - *

A tie-breaker value of 1.0 is interpreted as a signal to score queries as
+ * A tie-breaker value of {@code 1.0} is interpreted as a signal to score queries as
 * "most-match" queries where all matching query clauses are considered for scoring.
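As an editor's illustration of the tie-breaker contract these two hunks document (not part of the patch; the query text and field names are assumptions), a minimal client-side sketch:

[source,java]
----
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class TieBreakerExample {
    public static void main(String[] args) {
        // 0.0f keeps pure "best-match" scoring; 1.0f scores every matching
        // field clause, the "most-match" behaviour described above.
        MultiMatchQueryBuilder query = QueryBuilders
                .multiMatchQuery("quick brown fox", "title", "body")
                .tieBreaker(1.0f);
        System.out.println(query); // query builders render themselves as JSON
    }
}
----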

    * * @see Type @@ -546,7 +546,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuildertrue. + * Defaults to {@code true}. */ public boolean autoGenerateSynonymsPhraseQuery() { return autoGenerateSynonymsPhraseQuery; diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryBuilder.java index f8f5b68be9a..5b765c5cbda 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryBuilder.java @@ -29,21 +29,21 @@ public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewritea /** * Converts this QueryBuilder to a lucene {@link Query}. - * Returns null if this query should be ignored in the context of + * Returns {@code null} if this query should be ignored in the context of * parent queries. * * @param context additional information needed to construct the queries - * @return the {@link Query} or null if this query should be ignored upstream + * @return the {@link Query} or {@code null} if this query should be ignored upstream */ Query toQuery(QueryShardContext context) throws IOException; /** * Converts this QueryBuilder to an unscored lucene {@link Query} that acts as a filter. - * Returns null if this query should be ignored in the context of + * Returns {@code null} if this query should be ignored in the context of * parent queries. * * @param context additional information needed to construct the queries - * @return the {@link Query} or null if this query should be ignored upstream + * @return the {@link Query} or {@code null} if this query should be ignored upstream */ Query toFilter(QueryShardContext context) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 54c1384cf9d..bac514311a3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -240,12 +240,12 @@ public final class QueryBuilders { } /** - * Implements the wildcard search query. Supported wildcards are *, which - * matches any character sequence (including the empty one), and ?, + * Implements the wildcard search query. Supported wildcards are {@code *}, which + * matches any character sequence (including the empty one), and {@code ?}, * which matches any single character. Note this query can be slow, as it * needs to iterate over many terms. In order to prevent extremely slow WildcardQueries, - * a Wildcard term should not start with one of the wildcards * or - * ?. + * a Wildcard term should not start with one of the wildcards {@code *} or + * {@code ?}. * * @param name The field name * @param query The wildcard query string diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 3920b730d7a..e9d53d8e829 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -443,7 +443,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuildertrue. + * Should leading wildcards be allowed or not. Defaults to {@code true}. 
*/ public QueryStringQueryBuilder allowLeadingWildcard(Boolean allowLeadingWildcard) { this.allowLeadingWildcard = allowLeadingWildcard; @@ -455,8 +455,8 @@ public class QueryStringQueryBuilder extends AbstractQueryBuildertrue to enable position increments in result query. Defaults to - * true. + * Set to {@code true} to enable position increments in result query. Defaults to + * {@code true}. *

    * When set, result phrase and multi-phrase queries will be aware of position increments. * Useful when e.g. a StopFilter increases the position increment of the token that follows an omitted token. @@ -531,7 +531,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuildertrue to enable analysis on wildcard and prefix queries. + * Set to {@code true} to enable analysis on wildcard and prefix queries. */ public QueryStringQueryBuilder analyzeWildcard(Boolean analyzeWildcard) { this.analyzeWildcard = analyzeWildcard; @@ -602,7 +602,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuildertrue to enable escaping of the query string + * Set to {@code true} to enable escaping of the query string */ public QueryStringQueryBuilder escape(boolean escape) { this.escape = escape; @@ -636,7 +636,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuildertrue. + * Defaults to {@code true}. */ public boolean autoGenerateSynonymsPhraseQuery() { return autoGenerateSynonymsPhraseQuery; diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 48f7f9335ab..6223254874d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -236,7 +236,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i } /** - * Should the lower bound be included or not. Defaults to true. + * Should the lower bound be included or not. Defaults to {@code true}. */ public RangeQueryBuilder includeLower(boolean includeLower) { this.includeLower = includeLower; @@ -251,7 +251,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i } /** - * Should the upper bound be included or not. Defaults to true. + * Should the upper bound be included or not. Defaults to {@code true}. */ public RangeQueryBuilder includeUpper(boolean includeUpper) { this.includeUpper = includeUpper; diff --git a/server/src/main/java/org/elasticsearch/index/query/RegexpFlag.java b/server/src/main/java/org/elasticsearch/index/query/RegexpFlag.java index e00c19b68b5..669c885276f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RegexpFlag.java +++ b/server/src/main/java/org/elasticsearch/index/query/RegexpFlag.java @@ -26,13 +26,13 @@ import java.util.Locale; /** * Regular expression syntax flags. Each flag represents optional syntax support in the regular expression: *

- * <li>INTERSECTION - Support for intersection notation: <expression> & <expression></li>
- * <li>COMPLEMENT - Support for complement notation: <expression> & <expression></li>
- * <li>EMPTY - Support for the empty language symbol: #</li>
- * <li>ANYSTRING - Support for the any string symbol: @</li>
- * <li>INTERVAL - Support for numerical interval notation: <n-m></li>
- * <li>NONE - Disable support for all syntax options</li>
- * <li>ALL - Enables support for all syntax options</li>
+ * <li>{@code INTERSECTION} - Support for intersection notation: {@code <expression> & <expression>}</li>
+ * <li>{@code COMPLEMENT} - Support for complement notation: {@code <expression> & <expression>}</li>
+ * <li>{@code EMPTY} - Support for the empty language symbol: {@code #}</li>
+ * <li>{@code ANYSTRING} - Support for the any string symbol: {@code @}</li>
+ * <li>{@code INTERVAL} - Support for numerical interval notation: {@code <n-m>}</li>
+ * <li>{@code NONE} - Disable support for all syntax options</li>
+ * <li>{@code ALL} - Enables support for all syntax options</li>
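A short sketch of how these flags are meant to be used from the query DSL; the field name and pattern are illustrative assumptions, and only calls present in this codebase (QueryBuilders.regexpQuery, RegexpQueryBuilder.flags, RegexpFlag.resolveValue) are assumed:

[source,java]
----
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.RegexpFlag;
import org.elasticsearch.index.query.RegexpQueryBuilder;

public class RegexpFlagsExample {
    public static void main(String[] args) {
        // Opt in to interval (<n-m>) and intersection (&) syntax for one query.
        RegexpQueryBuilder query = QueryBuilders.regexpQuery("model", "rev-<1-100>&.*beta.*")
                .flags(RegexpFlag.INTERSECTION, RegexpFlag.INTERVAL);
        System.out.println(query);

        // resolveValue parses the pipe-separated form into the combined OR'ed value.
        int combined = RegexpFlag.resolveValue("INTERSECTION|COMPLEMENT|EMPTY");
        System.out.println(combined);
    }
}
----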
    * * @see RegexpQueryBuilder#flags(RegexpFlag...) @@ -41,27 +41,27 @@ import java.util.Locale; public enum RegexpFlag { /** - * Enables intersection of the form: <expression> & <expression> + * Enables intersection of the form: {@code <expression> & <expression>} */ INTERSECTION(RegExp.INTERSECTION), /** - * Enables complement expression of the form: ~<expression> + * Enables complement expression of the form: {@code ~<expression>} */ COMPLEMENT(RegExp.COMPLEMENT), /** - * Enables empty language expression: # + * Enables empty language expression: {@code #} */ EMPTY(RegExp.EMPTY), /** - * Enables any string expression: @ + * Enables any string expression: {@code @} */ ANYSTRING(RegExp.ANYSTRING), /** - * Enables numerical interval expression: <n-m> + * Enables numerical interval expression: {@code <n-m>} */ INTERVAL(RegExp.INTERVAL), @@ -90,9 +90,9 @@ public enum RegexpFlag { * Resolves the combined OR'ed value for the given list of regular expression flags. The given flags must follow the * following syntax: *

- * flag_name(|flag_name)*
+ * {@code flag_name}(|{@code flag_name})*
 *
- * Where flag_name is one of the following:
+ * Where {@code flag_name} is one of the following:
 *
 * <li>INTERSECTION</li>
 * <li>COMPLEMENT</li>
@@ -103,7 +103,7 @@ public enum RegexpFlag {
 * <li>ALL</li>
 *

    - * Example: INTERSECTION|COMPLEMENT|EMPTY + * Example: {@code INTERSECTION|COMPLEMENT|EMPTY} * * @param flags A string representing a list of regular expression flags * @return The combined OR'ed value for all the flags diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index e5172219539..46a958b58fe 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -50,14 +50,14 @@ import java.util.Objects; * won't throw exceptions for any weird string syntax. It supports * the following: *

- * <li>'{@code +}' specifies {@code AND} operation: token1+token2</li>
- * <li>'{@code |}' specifies {@code OR} operation: token1|token2</li>
- * <li>'{@code -}' negates a single token: -token0</li>
- * <li>'{@code "}' creates phrases of terms: "term1 term2 ..."</li>
- * <li>'{@code *}' at the end of terms specifies prefix query: term*</li>
- * <li>'{@code (}' and '{@code)}' specifies precedence: token1 + (token2 | token3)</li>
- * <li>'{@code ~}N' at the end of terms specifies fuzzy query: term~1</li>
- * <li>'{@code ~}N' at the end of phrases specifies near/slop query: "term1 term2"~5</li>
+ * <li>'{@code +}' specifies {@code AND} operation: {@code token1+token2}</li>
+ * <li>'{@code |}' specifies {@code OR} operation: {@code token1|token2}</li>
+ * <li>'{@code -}' negates a single token: {@code -token0}</li>
+ * <li>'{@code "}' creates phrases of terms: {@code "term1 term2 ..."}</li>
+ * <li>'{@code *}' at the end of terms specifies prefix query: {@code term*}</li>
+ * <li>'{@code (}' and '{@code)}' specifies precedence: {@code token1 + (token2 | token3)}</li>
+ * <li>'{@code ~}N' at the end of terms specifies fuzzy query: {@code term~1}</li>
+ * <li>'{@code ~}N' at the end of phrases specifies near/slop query: {@code "term1 term2"~5}</li>
    *
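To make the supported syntax concrete, a minimal sketch that feeds such a string into the builder this class documents; the query text, field names and boost are assumptions, not part of the patch:

[source,java]
----
import org.elasticsearch.index.query.Operator;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.SimpleQueryStringBuilder;

public class SimpleQueryStringExample {
    public static void main(String[] args) {
        // Exercises the syntax above: a phrase, the + operator, negation,
        // a prefix term and a fuzzy term.
        SimpleQueryStringBuilder query = QueryBuilders
                .simpleQueryStringQuery("\"quick brown\" +fox -news ela* jmups~1")
                .field("title")
                .field("body", 2.0f) // boost matches on "body"
                .defaultOperator(Operator.AND);
        System.out.println(query);
    }
}
----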

    * See: {@link SimpleQueryStringQueryParser} for more information. @@ -405,7 +405,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuildertrue. + * Defaults to {@code true}. */ public boolean autoGenerateSynonymsPhraseQuery() { return settings.autoGenerateSynonymsPhraseQuery(); diff --git a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java index 351cddd5900..96782dac0b8 100644 --- a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java @@ -42,12 +42,12 @@ import java.io.IOException; import java.util.Objects; /** - * Implements the wildcard search query. Supported wildcards are *, which - * matches any character sequence (including the empty one), and ?, + * Implements the wildcard search query. Supported wildcards are {@code *}, which + * matches any character sequence (including the empty one), and {@code ?}, * which matches any single character. Note this query can be slow, as it * needs to iterate over many terms. In order to prevent extremely slow WildcardQueries, - * a Wildcard term should not start with one of the wildcards * or - * ?. + * a Wildcard term should not start with one of the wildcards {@code *} or + * {@code ?}. */ public class WildcardQueryBuilder extends AbstractQueryBuilder implements MultiTermQueryBuilder { public static final String NAME = "wildcard"; @@ -63,12 +63,12 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder*, which - * matches any character sequence (including the empty one), and ?, + * Implements the wildcard search query. Supported wildcards are {@code *}, which + * matches any character sequence (including the empty one), and {@code ?}, * which matches any single character. Note this query can be slow, as it * needs to iterate over many terms. In order to prevent extremely slow WildcardQueries, - * a Wildcard term should not start with one of the wildcards * or - * ?. + * a Wildcard term should not start with one of the wildcards {@code *} or + * {@code ?}. * * @param fieldName The field name * @param value The wildcard query string diff --git a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java index 20f87e047b6..aa8543175d9 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/DeleteByQueryRequest.java @@ -33,13 +33,13 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * * Delete-by-query now has the following semantics: *

- * <li>it's non-atomic, a delete-by-query may fail at any time while some documents matching the query have already been
+ * <li>it's {@code non-atomic}, a delete-by-query may fail at any time while some documents matching the query have already been
 * deleted</li>
- * <li>it's syntactic sugar, a delete-by-query is equivalent to a scroll search and corresponding bulk-deletes by ID</li>
+ * <li>it's {@code syntactic sugar}, a delete-by-query is equivalent to a scroll search and corresponding bulk-deletes by ID</li>
- * <li>it's executed on a point-in-time snapshot, a delete-by-query will only delete the documents that are visible at the
+ * <li>it's executed on a {@code point-in-time} snapshot, a delete-by-query will only delete the documents that are visible at the
 * point in time the delete-by-query was started, equivalent to the scroll API</li>
- * <li>it's consistent, a delete-by-query will yield consistent results across all replicas of a shard</li>
+ * <li>it's {@code consistent}, a delete-by-query will yield consistent results across all replicas of a shard</li>
- * <li>it's forward-compatible, a delete-by-query will only send IDs to the shards as deletes such that no queries are
+ * <li>it's {@code forward-compatible}, a delete-by-query will only send IDs to the shards as deletes such that no queries are
 * stored in the transaction logs that might not be supported in the future.</li>
 * <li>it's results won't be visible until the index is refreshed.</li>
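A hedged usage sketch of the request type whose semantics the list above documents; the index name, query and refresh choice are assumptions, not part of the patch:

[source,java]
----
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class DeleteByQueryExample {
    public static void main(String[] args) {
        // A delete-by-query is a scroll search plus bulk deletes by ID,
        // so it is configured through the embedded search request.
        SearchRequest search = new SearchRequest("my_index");
        search.source(new SearchSourceBuilder()
                .query(QueryBuilders.termQuery("user", "kimchy")));
        DeleteByQueryRequest request = new DeleteByQueryRequest(search);
        request.setRefresh(true); // deletions only become visible once the index refreshes
        System.out.println(request);
    }
}
----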
    diff --git a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java index 2f221aa0244..912e03ca799 100644 --- a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java @@ -359,7 +359,7 @@ public class SimpleQueryStringQueryParser extends SimpleQueryParser { /** * Whether phrase queries should be automatically generated for multi terms synonyms. - * Defaults to true. + * Defaults to {@code true}. */ public boolean autoGenerateSynonymsPhraseQuery() { return autoGenerateSynonymsPhraseQuery; diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java index 63697702910..342b638db31 100644 --- a/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java @@ -42,7 +42,7 @@ public final class ShardSearchStats implements SearchOperationListener { /** * Returns the stats, including group specific stats. If the groups are null/0 length, then nothing * is returned for them. If they are set, then only groups provided will be returned, or - * _all for all groups. + * {@code _all} for all groups. */ public SearchStats stats(String... groups) { SearchStats.Stats total = totalStats.stats(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 92240e9b463..60392ab7990 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1381,7 +1381,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * Returns true if this shard can ignore a recovery attempt made to it (since the already doing/done it) + * Returns {@code true} if this shard can ignore a recovery attempt made to it (since the already doing/done it) */ public boolean ignoreRecoveryAttempt() { IndexShardState state = state(); // one time volatile read diff --git a/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java index e7b7b719aed..6579fca8996 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java @@ -42,7 +42,7 @@ final class InternalIndexingStats implements IndexingOperationListener { /** * Returns the stats, including type specific stats. If the types are null/0 length, then nothing * is returned for them. If they are set, then only types provided will be returned, or - * _all for all types. + * {@code _all} for all types. */ IndexingStats stats(boolean isThrottled, long currentThrottleInMillis, String... 
types) { IndexingStats.Stats total = totalStats.stats(isThrottled, currentThrottleInMillis); diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 83fded4a1f1..de29386022c 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -399,8 +399,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } /** - * Tries to increment the refCount of this Store instance. This method will return true iff the refCount was - * incremented successfully otherwise false. RefCounts are used to determine when a + * Tries to increment the refCount of this Store instance. This method will return {@code true} iff the refCount was + * incremented successfully otherwise {@code false}. RefCounts are used to determine when a * Store can be closed safely, i.e. as soon as there are no more references. Be sure to always call a * corresponding {@link #decRef}, in a finally clause; otherwise the store may never be closed. Note that * {@link #close} simply calls decRef(), which means that the Store will not really be closed until {@link @@ -767,7 +767,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref * For backwards compatibility the snapshot might include legacy checksums that * are derived from a dedicated checksum file written by older elasticsearch version pre 1.3 *

    - * Note: This class will ignore the segments.gen file since it's optional and might + * Note: This class will ignore the {@code segments.gen} file since it's optional and might * change concurrently for safety reasons. * * @see StoreFileMetaData @@ -977,22 +977,22 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref *

 *
 * <li>all files in this segment have the same checksum</li>
 * <li>all files in this segment have the same length</li>
- * <li>the segments .si files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the .si file content as it's hash</li>
+ * <li>the segments {@code .si} files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the {@code .si} file content as it's hash</li>
 *
    *

    - * The .si file contains a lot of diagnostics including a timestamp etc. in the future there might be + * The {@code .si} file contains a lot of diagnostics including a timestamp etc. in the future there might be * unique segment identifiers in there hardening this method further. *

    - * The per-commit files handles very similar. A commit is composed of the segments_N files as well as generational files like - * deletes (_x_y.del) or field-info (_x_y.fnm) files. On a per-commit level files for a commit are treated + * The per-commit files handles very similar. A commit is composed of the {@code segments_N} files as well as generational files like + * deletes ({@code _x_y.del}) or field-info ({@code _x_y.fnm}) files. On a per-commit level files for a commit are treated * as identical iff: *

 *
 * <li>all files belonging to this commit have the same checksum</li>
 * <li>all files belonging to this commit have the same length</li>
- * <li>the segments file segments_N files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the segments_N file content as it's hash</li>
+ * <li>the segments file {@code segments_N} files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the {@code segments_N} file content as it's hash</li>
 *
    *
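Since the identity rules above feed Store.MetadataSnapshot#recoveryDiff, here is a sketch of how a caller might consume the resulting RecoveryDiff; obtaining the two snapshots is deliberately elided, as in production they come from the recovery source and target shards:

[source,java]
----
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreFileMetaData;

public class RecoveryDiffSketch {
    static void printDiff(Store.MetadataSnapshot source, Store.MetadataSnapshot target) {
        Store.RecoveryDiff diff = source.recoveryDiff(target);
        for (StoreFileMetaData meta : diff.identical) {
            System.out.println("reuse   " + meta.name()); // same checksum, length and hash
        }
        for (StoreFileMetaData meta : diff.different) {
            System.out.println("recover " + meta.name()); // content differs, must be copied
        }
        for (StoreFileMetaData meta : diff.missing) {
            System.out.println("missing " + meta.name()); // absent on the target entirely
        }
    }
}
----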

    - * NOTE: this diff will not contain the segments.gen file. This file is omitted on recovery. + * NOTE: this diff will not contain the {@code segments.gen} file. This file is omitted on recovery. */ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { final List identical = new ArrayList<>(); @@ -1390,8 +1390,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } /** - * Marks this store as corrupted. This method writes a corrupted_${uuid} file containing the given exception - * message. If a store contains a corrupted_${uuid} file {@link #isMarkedCorrupted()} will return true. + * Marks this store as corrupted. This method writes a {@code corrupted_${uuid}} file containing the given exception + * message. If a store contains a {@code corrupted_${uuid}} file {@link #isMarkedCorrupted()} will return true. */ public void markStoreCorrupted(IOException exception) throws IOException { ensureOpen(); diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/server/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java index 908063173c2..97310c0f65d 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java +++ b/server/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java @@ -126,7 +126,7 @@ public class StoreFileMetaData implements Writeable { /** * Returns a variable length hash of the file represented by this metadata object. This can be the file - * itself if the file is small enough. If the length of the hash is 0 no hash value is available + * itself if the file is small enough. If the length of the hash is {@code 0} no hash value is available */ public BytesRef hash() { return hash; diff --git a/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java index 7ea241958f8..1b095beddb4 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/MultiSnapshot.java @@ -87,7 +87,7 @@ final class MultiSnapshot implements Translog.Snapshot { private final LongObjectHashMap bitSets = new LongObjectHashMap<>(); /** - * Marks this sequence number and returns true if it is seen before. + * Marks this sequence number and returns {@code true} if it is seen before. */ boolean getAndSet(long value) { assert value >= 0; diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index cc5041bf244..9d8c6c7c093 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -78,15 +78,15 @@ import java.util.stream.Stream; * different engine. *

    * Each Translog has only one translog file open for writes at any time referenced by a translog generation ID. This ID is written to a - * translog.ckp file that is designed to fit in a single disk block such that a write of the file is atomic. The checkpoint file + * {@code translog.ckp} file that is designed to fit in a single disk block such that a write of the file is atomic. The checkpoint file * is written on each fsync operation of the translog and records the number of operations written, the current translog's file generation, * its fsynced offset in bytes, and other important statistics. *

    *

    * When the current translog file reaches a certain size ({@link IndexSettings#INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING}, or when * a clear separation between old and new operations (upon change in primary term), the current file is reopened for read only and a new - * write only file is created. Any non-current, read only translog file always has a translog-${gen}.ckp associated with it - * which is an fsynced copy of its last translog.ckp such that in disaster recovery last fsynced offsets, number of + * write only file is created. Any non-current, read only translog file always has a {@code translog-${gen}.ckp} associated with it + * which is an fsynced copy of its last {@code translog.ckp} such that in disaster recovery last fsynced offsets, number of * operation etc. are still preserved. *

    */ diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java index 61b009d9b41..a8acf586839 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java @@ -89,7 +89,7 @@ public final class TranslogConfig { } /** - * The translog buffer size. Default is 8kb + * The translog buffer size. Default is {@code 8kb} */ public ByteSizeValue getBufferSize() { return bufferSize; diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 83470312a96..5cdac123655 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -249,7 +249,7 @@ public class IndicesService extends AbstractLifecycleComponent } /** - * Returns the node stats indices stats. The includePrevious flag controls + * Returns the node stats indices stats. The {@code includePrevious} flag controls * if old shards stats will be aggregated as well (only for relevant stats, such as * refresh and indexing, not for docs/store). */ @@ -1248,7 +1248,7 @@ public class IndicesService extends AbstractLifecycleComponent } /** - * Returns a new {@link QueryRewriteContext} with the given now provider + * Returns a new {@link QueryRewriteContext} with the given {@code now} provider */ public QueryRewriteContext getRewriteContext(LongSupplier nowInMillis) { return new QueryRewriteContext(xContentRegistry, namedWriteableRegistry, client, nowInMillis); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index cb7586b3d02..b54f63f635f 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -173,7 +173,7 @@ import java.util.stream.Stream; import static java.util.stream.Collectors.toList; /** - * A node represent a node within a cluster (cluster.name). The {@link #client()} can be used + * A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used * in order to use a {@link Client} to perform actions/operations against the cluster. */ public class Node implements Closeable { @@ -849,7 +849,7 @@ public class Node implements Closeable { /** - * Returns true if the node is closed. + * Returns {@code true} if the node is closed. */ public boolean isClosed() { return lifecycle.closed(); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index d4e2323451a..df660c02e18 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -1324,7 +1324,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp /** * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them. 
- * The new logic for StoreFileMetaData reads the entire .si and segments.n files to strengthen the + * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the * comparison of the files on a per-segment / per-commit level. */ private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store.MetadataSnapshot snapshot) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index bbb88b5fff6..cd8ea4f8c34 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -227,7 +227,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterablenull). + * The source of the document as string (can be {@code null}). */ public String getSourceAsString() { if (source == null) { @@ -242,7 +242,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterablenull). + * The source of the document as a map (can be {@code null}). */ public Map getSourceAsMap() { if (source == null) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index ed7f98c3b0b..8b4a3795c07 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1031,7 +1031,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } /** - * Returns a new {@link QueryRewriteContext} with the given now provider + * Returns a new {@link QueryRewriteContext} with the given {@code now} provider */ public QueryRewriteContext getRewriteContext(LongSupplier nowInMillis) { return indicesService.getRewriteContext(nowInMillis); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 9a47635416a..88cc7319948 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -206,7 +206,7 @@ public abstract class AggregatorFactory> { * parent will be {@code null}) * @param collectsFromSingleBucket * If true then the created aggregator will only be collected - * with 0 as a bucket ordinal. Some factories can take + * with {@code 0} as a bucket ordinal. Some factories can take * advantage of this in order to return more optimized * implementations. * @@ -222,7 +222,7 @@ public abstract class AggregatorFactory> { /** * Utility method. Given an {@link AggregatorFactory} that creates - * {@link Aggregator}s that only know how to collect bucket 0, this + * {@link Aggregator}s that only know how to collect bucket {@code 0}, this * returns an aggregator that can collect any bucket. 
*/ protected static Aggregator asMultiBucketAggregator(final AggregatorFactory factory, final SearchContext context, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 7f6e74e68b2..98cadd4aefa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -61,7 +61,7 @@ public abstract class InternalAggregation implements Aggregation, NamedWriteable /** * Returns true iff the current reduce phase is the final reduce phase. This indicates if operations like - * pipeline aggregations should be applied or if specific features like minDocCount should be taken into account. + * pipeline aggregations should be applied or if specific features like {@code minDocCount} should be taken into account. * Operations that are potentially loosing information can only be applied during the final reduce phase. */ public boolean isFinalReduce() { @@ -77,7 +77,7 @@ public abstract class InternalAggregation implements Aggregation, NamedWriteable } /** - * Adds count buckets to the global count for the request and fails if this number is greater than + * Adds {@code count} buckets to the global count for the request and fails if this number is greater than * the maximum number of buckets allowed in a response */ public void consumeBucketsAndMaybeBreak(int size) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index 02cf3adf88a..504758e7a4e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -112,7 +112,7 @@ public abstract class BucketsAggregator extends AggregatorBase { } /** - * Adds count buckets to the global count for the request and fails if this number is greater than + * Adds {@code count} buckets to the global count for the request and fails if this number is greater than * the maximum number of buckets allowed in a response */ protected final void consumeBucketsAndMaybeBreak(int count) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 0912555ea71..8a0b4eedfed 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -129,7 +129,7 @@ public class CompositeAggregationBuilder extends AbstractAggregationBuildernull. + * Defaults to {@code null}. */ public CompositeAggregationBuilder aggregateAfter(Map afterKey) { this.after = afterKey; @@ -137,7 +137,7 @@ public class CompositeAggregationBuilder extends AbstractAggregationBuilder10. + * The number of composite buckets to return. Defaults to {@code 10}. 
*/ public CompositeAggregationBuilder size(int size) { this.size = size; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java index d8f4c28fee4..1dfe70d4b7f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java @@ -517,7 +517,7 @@ public final class HyperLogLogPlusPlus implements Releasable { /** * Add k to the hash table associated with bucket. - * Return -1 if the value was already in the set or the new set size if it was added. + * Return {@code -1} if the value was already in the set or the new set size if it was added. */ public int add(long bucket, int k) { sizes = bigArrays.grow(sizes, bucket + 1); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index c11c68f9b25..0bc759a0d47 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -103,7 +103,7 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } /** - * Set the init script. + * Set the {@code init} script. */ public ScriptedMetricAggregationBuilder initScript(Script initScript) { if (initScript == null) { @@ -114,14 +114,14 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } /** - * Get the init script. + * Get the {@code init} script. */ public Script initScript() { return initScript; } /** - * Set the map script. + * Set the {@code map} script. */ public ScriptedMetricAggregationBuilder mapScript(Script mapScript) { if (mapScript == null) { @@ -132,14 +132,14 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } /** - * Get the map script. + * Get the {@code map} script. */ public Script mapScript() { return mapScript; } /** - * Set the combine script. + * Set the {@code combine} script. */ public ScriptedMetricAggregationBuilder combineScript(Script combineScript) { if (combineScript == null) { @@ -150,14 +150,14 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } /** - * Get the combine script. + * Get the {@code combine} script. */ public Script combineScript() { return combineScript; } /** - * Set the reduce script. + * Set the {@code reduce} script. */ public ScriptedMetricAggregationBuilder reduceScript(Script reduceScript) { if (reduceScript == null) { @@ -168,15 +168,15 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } /** - * Get the reduce script. + * Get the {@code reduce} script. */ public Script reduceScript() { return reduceScript; } /** - * Set parameters that will be available in the init, - * map and combine phases. + * Set parameters that will be available in the {@code init}, + * {@code map} and {@code combine} phases. 
*/ public ScriptedMetricAggregationBuilder params(Map params) { if (params == null) { @@ -187,8 +187,8 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } /** - * Get parameters that will be available in the init, - * map and combine phases. + * Get parameters that will be available in the {@code init}, + * {@code map} and {@code combine} phases. */ public Map params() { return params; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java index 39e184f557d..a528814e289 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java @@ -176,7 +176,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder0. + * From index to start the search from. Defaults to {@code 0}. */ public TopHitsAggregationBuilder from(int from) { if (from < 0) { @@ -194,7 +194,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder10. + * The number of search hits to return. Defaults to {@code 10}. */ public TopHitsAggregationBuilder size(int size) { if (size < 0) { @@ -535,7 +535,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilderfalse. + * Defaults to {@code false}. */ public TopHitsAggregationBuilder trackScores(boolean trackScores) { this.trackScores = trackScores; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java index 995381373ab..3566792b497 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java @@ -41,7 +41,7 @@ import java.util.List; * the sort-by value. *

 * The path has the following form:
- * <pre>{@code ['>'*]['.']}</pre>
+ * {@code ['>'*]['.']}

    * Examples: * diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 582c6ca28f8..c4a6b3da6b1 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -330,7 +330,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R } /** - * From index to start the search from. Defaults to 0. + * From index to start the search from. Defaults to {@code 0}. */ public SearchSourceBuilder from(int from) { if (from < 0) { @@ -348,7 +348,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R } /** - * The number of search hits to return. Defaults to 10. + * The number of search hits to return. Defaults to {@code 10}. */ public SearchSourceBuilder size(int size) { if (size < 0) { @@ -496,7 +496,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R /** * Applies when sorting, and controls if scores will be tracked as well. - * Defaults to false. + * Defaults to {@code false}. */ public SearchSourceBuilder trackScores(boolean trackScores) { this.trackScores = trackScores; @@ -526,7 +526,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R * The sort values that indicates which docs this request should "search after". * The sort values of the search_after must be equal to the number of sort fields in the query and they should be * of the same type (or parsable as such). - * Defaults to null. + * Defaults to {@code null}. */ public Object[] searchAfter() { if (searchAfterBuilder == null) { @@ -639,7 +639,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R } /** - * Should the query be profiled. Defaults to false + * Should the query be profiled. Defaults to {@code false} */ public SearchSourceBuilder profile(boolean profile) { this.profile = profile; diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index f8618db3048..e59bd718d32 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -286,8 +286,8 @@ public abstract class AbstractHighlighterBuilderunified, plain and fvj. - * Defaults to unified. + * are {@code unified}, {@code plain} and {@code fvj}. + * Defaults to {@code unified}. * Details of the different highlighter types are covered in the reference guide. */ @SuppressWarnings("unchecked") @@ -305,7 +305,7 @@ public abstract class AbstractHighlighterBuilderhighlighter. + * This option is only applicable when using the plain highlighterType {@code highlighter}. * Permitted values are "simple" or "span" relating to {@link SimpleFragmenter} and * {@link SimpleSpanFragmenter} implementations respectively with the default being "span" */ @@ -340,8 +340,8 @@ public abstract class AbstractHighlighterBuilderscore, which then it will be ordered - * by score of the fragments, or none. + * highlighted text. Can be {@code score}, which then it will be ordered + * by score of the fragments, or {@code none}. 
*/ public HB order(String order) { return order(Order.fromString(order)); @@ -365,9 +365,9 @@ public abstract class AbstractHighlighterBuilderfvh + * Set this to true when using the highlighterType {@code fvh} * and you want to provide highlighting on filter clauses in your - * query. Default is false. + * query. Default is {@code false}. */ @SuppressWarnings("unchecked") public HB highlightFilter(Boolean highlightFilter) { @@ -383,7 +383,7 @@ public abstract class AbstractHighlighterBuilderfvh this setting + * When using the highlighterType {@code fvh} this setting * controls which scanner to use for fragment boundaries, and defaults to "simple". */ @SuppressWarnings("unchecked") @@ -393,7 +393,7 @@ public abstract class AbstractHighlighterBuilderfvh this setting + * When using the highlighterType {@code fvh} this setting * controls which scanner to use for fragment boundaries, and defaults to "simple". */ @SuppressWarnings("unchecked") @@ -410,7 +410,7 @@ public abstract class AbstractHighlighterBuilderfvh this setting + * When using the highlighterType {@code fvh} this setting * controls how far to look for boundary characters, and defaults to 20. */ @SuppressWarnings("unchecked") @@ -427,7 +427,7 @@ public abstract class AbstractHighlighterBuilderfvh this setting + * When using the highlighterType {@code fvh} this setting * defines what constitutes a boundary for highlighting. It’s a single string with * each boundary character defined in it. It defaults to .,!? \t\n */ @@ -445,7 +445,7 @@ public abstract class AbstractHighlighterBuilderfvh and boundaryScannerType break_iterator, this setting + * When using the highlighterType {@code fvh} and boundaryScannerType {@code break_iterator}, this setting * controls the locale to use by the BreakIterator, defaults to "root". */ @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java index ff332c7d734..049de439ac7 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -60,7 +60,7 @@ public class HighlightBuilder extends AbstractHighlighterBuilderfvh should provide highlighting on filter clauses */ + /** default for whether {@code fvh} should provide highlighting on filter clauses */ public static final boolean DEFAULT_HIGHLIGHT_FILTER = false; /** default for highlight fragments being ordered by score */ public static final boolean DEFAULT_SCORE_ORDERED = false; @@ -79,14 +79,14 @@ public class HighlightBuilder extends AbstractHighlighterBuilder"}; - /** the default opening tags when tag_schema = "styled" */ + /** the default opening tags when {@code tag_schema = "styled"} */ public static final String[] DEFAULT_STYLED_PRE_TAG = { "", "", "", "", "", "", "", "", "", "" }; - /** the default closing tags when tag_schema = "styled" */ + /** the default closing tags when {@code tag_schema = "styled"} */ public static final String[] DEFAULT_STYLED_POST_TAGS = {""}; /** @@ -198,7 +198,7 @@ public class HighlightBuilder extends AbstractHighlighterBuilderstyled and default. + * are {@code styled} and {@code default}. * * @param schemaName The tag scheme name */ @@ -220,7 +220,7 @@ public class HighlightBuilder extends AbstractHighlighterBuilderstyled and default. + * are {@code styled} and {@code default}. 
* * @param encoder name */ diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java index 6030d3863d8..8b839a7e8c0 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightField.java @@ -68,14 +68,14 @@ public class HighlightField implements ToXContentFragment, Streamable { } /** - * The highlighted fragments. null if failed to highlight (for example, the field is not stored). + * The highlighted fragments. {@code null} if failed to highlight (for example, the field is not stored). */ public Text[] fragments() { return fragments; } /** - * The highlighted fragments. null if failed to highlight (for example, the field is not stored). + * The highlighted fragments. {@code null} if failed to highlight (for example, the field is not stored). */ public Text[] getFragments() { return fragments(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index fce1e323fa7..a7eb0a953ba 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -52,7 +52,7 @@ import java.util.Set; */ public class ContextIndexSearcher extends IndexSearcher implements Releasable { - /** The wrapped {@link IndexSearcher}. The reason why we sometimes prefer delegating to this searcher instead of super is that + /** The wrapped {@link IndexSearcher}. The reason why we sometimes prefer delegating to this searcher instead of {@code super} is that * this instance may have more assertions, for example if it comes from MockInternalEngine which wraps the IndexSearcher into an * AssertingIndexSearcher. */ private final IndexSearcher in; diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 75290a75a8a..70a52c39ee1 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -243,7 +243,7 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas public abstract SearchContext trackTotalHits(boolean trackTotalHits); /** - * Indicates if the total hit count for the query should be tracked. Defaults to true + * Indicates if the total hit count for the query should be tracked. Defaults to {@code true} */ public abstract boolean trackTotalHits(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 0a1513e17d0..6f00740487e 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -103,7 +103,7 @@ public interface ShardSearchRequest { * Returns the filter associated with listed filtering aliases. *

    * The list of filtering aliases should be obtained by calling MetaData.filteringAliases. - * Returns null if no filtering is required. + * Returns {@code null} if no filtering is required.

    */ static QueryBuilder parseAliasFilter(CheckedFunction filterParser, IndexMetaData metaData, String... aliasNames) { diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java index da506959ce4..5690e584eac 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java @@ -110,7 +110,7 @@ public class QueryRescorerBuilder extends RescorerBuilder } /** - * Sets the original query weight for rescoring. The default is 1.0 + * Sets the original query weight for rescoring. The default is {@code 1.0} */ public QueryRescorerBuilder setQueryWeight(float queryWeight) { this.queryWeight = queryWeight; @@ -119,14 +119,14 @@ public class QueryRescorerBuilder extends RescorerBuilder /** - * Gets the original query weight for rescoring. The default is 1.0 + * Gets the original query weight for rescoring. The default is {@code 1.0} */ public float getQueryWeight() { return this.queryWeight; } /** - * Sets the original query weight for rescoring. The default is 1.0 + * Sets the rescore query weight. The default is {@code 1.0} */ public QueryRescorerBuilder setRescoreQueryWeight(float rescoreQueryWeight) { this.rescoreQueryWeight = rescoreQueryWeight; @@ -134,7 +134,7 @@ public class QueryRescorerBuilder extends RescorerBuilder } /** - * Gets the original query weight for rescoring. The default is 1.0 + * Gets the rescore query weight. The default is {@code 1.0} */ public float getRescoreQueryWeight() { return this.rescoreQueryWeight; @@ -149,7 +149,7 @@ public class QueryRescorerBuilder extends RescorerBuilder } /** - * Gets the original query score mode. The default is total + * Gets the original query score mode. The default is {@code total} */ public QueryRescoreMode getScoreMode() { return this.scoreMode; diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 529cb4e86ac..6a64b1c0cc9 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -145,8 +145,8 @@ public class FieldSortBuilder extends SortBuilder { } /** - * Sets the value when a field is missing in a doc. Can also be set to _last or - * _first to sort missing last or first respectively. + * Sets the value when a field is missing in a doc. Can also be set to {@code _last} or + * {@code _first} to sort missing last or first respectively. */ public FieldSortBuilder missing(Object missing) { this.missing = missing; @@ -162,7 +162,7 @@ public class FieldSortBuilder extends SortBuilder { * Set the type to use in case the current field is not mapped in an index. * Specifying a type tells Elasticsearch what type the sort values should * have, which is important for cross-index search, if there are sort fields - * that exist on some indices only. If the unmapped type is null + * that exist on some indices only. If the unmapped type is {@code null} * then query execution will fail if one or more indices don't have a * mapping for the current field.
*/ diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java index 4f5c3b789f8..3102ddd0e77 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java @@ -231,7 +231,7 @@ public class CompletionSuggestionBuilder extends SuggestionBuilderfalse. + * Should duplicates be filtered or not. Defaults to {@code false}. */ public CompletionSuggestionBuilder skipDuplicates(boolean skipDuplicates) { this.skipDuplicates = skipDuplicates; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java index 41de330bed6..7b7584f4674 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java @@ -156,7 +156,7 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator * specified. This value will be compared to the string distance result * of each candidate spelling correction. *

    - * Default is 0.5 + * Default is {@code 0.5} */ public DirectCandidateGeneratorBuilder accuracy(float accuracy) { this.accuracy = accuracy; @@ -233,7 +233,7 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator * Sets the maximum edit distance candidate suggestions can have in * order to be considered as a suggestion. Can only be a value between 1 * and 2. Any other value results in a bad request error being thrown. - * Defaults to 2. + * Defaults to {@code 2}. */ public DirectCandidateGeneratorBuilder maxEdits(Integer maxEdits) { if (maxEdits < 1 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) { @@ -250,7 +250,7 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator /** * A factor that is used to multiply with the size in order to inspect * more candidate suggestions. Can improve accuracy at the cost of - * performance. Defaults to 5. + * performance. Defaults to {@code 5}. */ public DirectCandidateGeneratorBuilder maxInspections(Integer maxInspections) { this.maxInspections = maxInspections; @@ -266,7 +266,7 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator * can exist in order to be corrected. Can be a relative percentage * number (e.g. 0.4) or an absolute number to represent document * frequencies. If a value higher than 1 is specified then fractional - * can not be specified. Defaults to 0.01. + * can not be specified. Defaults to {@code 0.01}. *

    * This can be used to exclude high frequency terms from being * suggested. High frequency terms are usually spelled correctly on top @@ -298,7 +298,7 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator /** * The minimum length a suggest text term must have in order to be - * corrected. Defaults to 4. + * corrected. Defaults to {@code 4}. */ public DirectCandidateGeneratorBuilder minWordLength(int minWordLength) { this.minWordLength = minWordLength; diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 4f20fad38cf..9b87e442ad6 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -55,7 +55,7 @@ import java.util.Objects; import java.util.Set; /** - * Defines the actual suggest command for phrase suggestions ( phrase). + * Defines the actual suggest command for phrase suggestions ({@code phrase}). */ public class PhraseSuggestionBuilder extends SuggestionBuilder { @@ -185,8 +185,8 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder1 corresponding to unigrams. Use - * 2 for bigrams and 3 for trigrams. + * default value is {@code 1} corresponding to {@code unigrams}. Use + * {@code 2} for {@code bigrams} and {@code 3} for {@code trigrams}. */ public PhraseSuggestionBuilder gramSize(int gramSize) { if (gramSize < 1) { @@ -207,9 +207,9 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder>=1 as an absolute number of query terms. + * number {@code >=1} as an absolute number of query terms. * - * The default is set to 1.0 which corresponds to that only + * The default is set to {@code 1.0} which means that only * corrections with at most 1 misspelled term are returned. */ public PhraseSuggestionBuilder maxErrors(float maxErrors) { @@ -246,7 +246,7 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder0.95 corresponding to 5% or + * in the dictionary. The default is {@code 0.95} corresponding to 5% of * the real words being misspelled. */ public PhraseSuggestionBuilder realWordErrorLikelihood(float realWordErrorLikelihood) { @@ -269,9 +269,9 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder1.0 will only return suggestions that score - * higher than the input phrase. If set to 0.0 the top N candidates - * are returned. The default is 1.0 + * confidence level of {@code 1.0} will only return suggestions that score + * higher than the input phrase. If set to {@code 0.0} the top N candidates + * are returned.
The default is {@code 1.0} */ public PhraseSuggestionBuilder confidence(float confidence) { if (confidence < 0.0) { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java index 5d004dc8247..5d880c55ff0 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java @@ -164,7 +164,7 @@ public class TermSuggestionBuilder extends SuggestionBuilder - * Default is 0.5 + * Default is {@code 0.5} */ public TermSuggestionBuilder accuracy(float accuracy) { if (accuracy < 0.0f || accuracy > 1.0f) { @@ -240,7 +240,7 @@ public class TermSuggestionBuilder extends SuggestionBuilder2. + * {@code 2}. */ public TermSuggestionBuilder maxEdits(int maxEdits) { if (maxEdits < 1 || maxEdits > 2) { @@ -260,7 +260,7 @@ public class TermSuggestionBuilder extends SuggestionBuilder5. + * Defaults to {@code 5}. */ public TermSuggestionBuilder maxInspections(int maxInspections) { if (maxInspections < 0) { @@ -282,7 +282,7 @@ public class TermSuggestionBuilder extends SuggestionBuilder0.01. + * to {@code 0.01}. *

    * This can be used to exclude high frequency terms from being suggested. * High frequency terms are usually spelled correctly on top of this, this @@ -329,7 +329,7 @@ public class TermSuggestionBuilder extends SuggestionBuilder4. + * corrected. Defaults to {@code 4}. */ public TermSuggestionBuilder minWordLength(int minWordLength) { if (minWordLength < 1) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 5de0d5e62dd..fe001e5ad19 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -78,9 +78,9 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl /** * The name of a node attribute to select nodes that should be connected to in the remote cluster. - * For instance a node can be configured with node.attr.gateway: true in order to be eligible as a gateway node between - * clusters. In that case search.remote.node.attr: gateway can be used to filter out other nodes in the remote cluster. - * The value of the setting is expected to be a boolean, true for nodes that can become gateways, false otherwise. + * For instance a node can be configured with {@code node.attr.gateway: true} in order to be eligible as a gateway node between + * clusters. In that case {@code search.remote.node.attr: gateway} can be used to filter out other nodes in the remote cluster. + * The value of the setting is expected to be a boolean, {@code true} for nodes that can become gateways, {@code false} otherwise. */ public static final Setting REMOTE_NODE_ATTRIBUTE = Setting.simpleString("search.remote.node.attr", Setting.Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 4697ee6fbdd..033b72d04d9 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -1812,7 +1812,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements } /** - * Representation of a transport profile settings for a transport.profiles.$profilename.* + * Representation of the transport profile settings for {@code transport.profiles.$profilename.*} */ public static final class ProfileSettings { public final String profileName; diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index b3471b942da..6ef698f1740 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -59,7 +59,7 @@ public interface Transport extends LifecycleComponent { TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException; /** - * Returns true if the node is connected. + * Returns {@code true} if the node is connected. */ boolean nodeConnected(DiscoveryNode node); From 62f2918abce67af35cb7a2fd08f8b143e3cd21b6 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 2 May 2018 09:00:35 -0700 Subject: [PATCH 09/30] Added changelog entry for deb prerelease version change (#30184) This commit adds a changelog entry for the change in #29000.
    --- docs/CHANGELOG.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index a880a3c423e..7fc5c48d73a 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -59,6 +59,10 @@ ones that the user is authorized to access in case field level security is enabl [float] === Bug Fixes +Fixed prerelease version of elasticsearch in the `deb` package to sort before GA versions +({pull}29000[#29000]) + +=== Regressions Fail snapshot operations early when creating or deleting a snapshot on a repository that has been written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140]) From af45b4dee4110e3e350a77adfe12ee0ba40fd6d1 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 2 May 2018 18:01:29 +0200 Subject: [PATCH 10/30] Cancelling a peer recovery on the source can leak a primary permit (#30318) The code in `RecoverySourceHandler` runs under a `CancellableThreads` instance in order to allow long running operations to be interrupted when the recovery is cancelled. Sadly if this happens at just the wrong moment while acquiring a permit from the primary, that permit can be leaked and never be freed. Note that this is slightly better than it sounds - we only cancel recoveries on the source side if the primary shard itself is closed. Relates to https://github.com/elastic/elasticsearch/pull/30316 --- .../recovery/RecoverySourceHandler.java | 50 ++++++++++++++----- .../recovery/RecoverySourceHandlerTests.java | 25 ++++++++++ 2 files changed, 63 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 78f44ee7231..4c543aeeb22 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -29,10 +29,9 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; -import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; @@ -44,6 +43,8 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreads; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.RecoveryEngineException; @@ -67,6 +68,7 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.List; import java.util.Locale; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.function.Supplier; @@ -142,7 +144,7 @@ public class RecoverySourceHandler { throw new DelayRecoveryException("source node does not have the shard listed in its state
as allocated on the node"); } assert targetShardRouting.initializing() : "expected recovery target to be initializing but was " + targetShardRouting; - }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] registered "); + }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] registered ", shard, cancellableThreads, logger); try (Closeable ignored = shard.acquireTranslogRetentionLock()) { final long startingSeqNo; @@ -196,7 +198,7 @@ public class RecoverySourceHandler { * all documents up to maxSeqNo in phase2. */ runUnderPrimaryPermit(() -> shard.initiateTracking(request.targetAllocationId()), - shardId + " initiating tracking of " + request.targetAllocationId()); + shardId + " initiating tracking of " + request.targetAllocationId(), shard, cancellableThreads, logger); final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); /* @@ -227,17 +229,41 @@ public class RecoverySourceHandler { return targetHistoryUUID != null && targetHistoryUUID.equals(shard.getHistoryUUID()); } - private void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable, String reason) { + static void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable, String reason, + IndexShard primary, CancellableThreads cancellableThreads, Logger logger) { cancellableThreads.execute(() -> { - final PlainActionFuture onAcquired = new PlainActionFuture<>(); - shard.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME, reason); - try (Releasable ignored = onAcquired.actionGet()) { + CompletableFuture permit = new CompletableFuture<>(); + final ActionListener onAcquired = new ActionListener() { + @Override + public void onResponse(Releasable releasable) { + if (permit.complete(releasable) == false) { + releasable.close(); + } + } + + @Override + public void onFailure(Exception e) { + permit.completeExceptionally(e); + } + }; + primary.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME, reason); + try (Releasable ignored = FutureUtils.get(permit)) { // check that the IndexShard still has the primary authority. This needs to be checked under operation permit to prevent // races, as IndexShard will switch its authority only when it holds all operation permits, see IndexShard.relocated() - if (shard.isPrimaryMode() == false) { - throw new IndexShardRelocatedException(shard.shardId()); + if (primary.isPrimaryMode() == false) { + throw new IndexShardRelocatedException(primary.shardId()); } runnable.run(); + } finally { + // just in case we got an exception (likely interrupted) while waiting for the get + permit.whenComplete((r, e) -> { + if (r != null) { + r.close(); + } + if (e != null) { + logger.trace("suppressing exception on completion (it was already bubbled up or the operation was aborted)", e); + } + }); } }); } @@ -489,11 +515,11 @@ public class RecoverySourceHandler { * the permit then the state of the shard will be relocated and this recovery will fail. 
*/ runUnderPrimaryPermit(() -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint), - shardId + " marking " + request.targetAllocationId() + " as in sync"); + shardId + " marking " + request.targetAllocationId() + " as in sync", shard, cancellableThreads, logger); final long globalCheckpoint = shard.getGlobalCheckpoint(); cancellableThreads.executeIO(() -> recoveryTarget.finalizeRecovery(globalCheckpoint)); runUnderPrimaryPermit(() -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint), - shardId + " updating " + request.targetAllocationId() + "'s global checkpoint"); + shardId + " updating " + request.targetAllocationId() + "'s global checkpoint", shard, cancellableThreads, logger); if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index babf8518d44..5ade55ef534 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; @@ -439,6 +440,30 @@ public class RecoverySourceHandlerTests extends ESTestCase { assertFalse(phase2Called.get()); } + public void testCancellationsDoesNotLeakPrimaryPermits() throws Exception { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final IndexShard shard = mock(IndexShard.class); + final AtomicBoolean freed = new AtomicBoolean(true); + when(shard.isPrimaryMode()).thenReturn(true); + doAnswer(invocation -> { + freed.set(false); + ((ActionListener)invocation.getArguments()[0]).onResponse(() -> freed.set(true)); + return null; + }).when(shard).acquirePrimaryOperationPermit(any(), anyString(), anyObject()); + + Thread cancelingThread = new Thread(() -> cancellableThreads.cancel("test")); + cancelingThread.start(); + try { + RecoverySourceHandler.runUnderPrimaryPermit(() -> {}, "test", shard, cancellableThreads, logger); + } catch (CancellableThreads.ExecutionCancelledException e) { + // expected. + } + cancelingThread.join(); + // we have to use assert busy as we may be interrupted while acquiring the permit, if so we want to check + // that the permit is released. + assertBusy(() -> assertTrue(freed.get())); + } + private Store newStore(Path path) throws IOException { return newStore(path, true); } From f0e92676b13f0f5dd259f42bfec38a831a40cc2e Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 2 May 2018 09:29:35 -0700 Subject: [PATCH 11/30] Tests: Simplify VersionUtils released version splitting (#30322) This commit refactors VersionUtils.resolveReleasedVersions to be simpler, and in the process fixes the behavior to match that of VersionCollection.groovy. 
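To make the new behavior concrete, here is a minimal sketch (it simply mirrors the TestStableBranch case in the updated VersionUtilsTests below, so the version constants are hypothetical test fixtures rather than real releases):

    // Sketch only -- declared constants: 4.0.0, 4.0.1, 5.0.0, 5.0.1, 5.0.2, 5.1.0; CURRENT = 5.1.0
    Tuple<List<Version>, List<Version>> t =
            VersionUtils.resolveReleasedVersions(TestStableBranch.CURRENT, TestStableBranch.class);
    // t.v1() (released):   4.0.0, 5.0.0, 5.0.1
    // t.v2() (unreleased): 4.0.1 (maintenance bugfix), 5.0.2 (next bugfix), 5.1.0 (current)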
closes #30133 --- .../org/elasticsearch/test/VersionUtils.java | 138 ++++++++---------- .../elasticsearch/test/VersionUtilsTests.java | 66 ++++----- 2 files changed, 95 insertions(+), 109 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index 766fc80ba56..792f3fba123 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -19,26 +19,25 @@ package org.elasticsearch.test; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Random; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; +import java.util.stream.Stream; + import org.elasticsearch.Version; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.Random; -import java.util.stream.Collectors; - -import static java.util.Collections.singletonList; -import static java.util.Collections.unmodifiableList; - /** Utilities for selecting versions in tests */ public class VersionUtils { + /** * Sort versions that have backwards compatibility guarantees from * those that don't. Doesn't actually check whether or not the versions @@ -50,73 +49,65 @@ public class VersionUtils { * guarantees in v1 and versions without the guranteees in v2 */ static Tuple, List> resolveReleasedVersions(Version current, Class versionClass) { - List versions = Version.getDeclaredVersions(versionClass); + // group versions into major version + Map> majorVersions = Version.getDeclaredVersions(versionClass).stream() + .collect(Collectors.groupingBy(v -> (int)v.major)); + // this breaks b/c 5.x is still in version list but master doesn't care about it! + //assert majorVersions.size() == 2; + // TODO: remove oldVersions, we should only ever have 2 majors in Version + List oldVersions = majorVersions.getOrDefault((int)current.major - 2, Collections.emptyList()); + List> previousMajor = splitByMinor(majorVersions.get((int)current.major - 1)); + List> currentMajor = splitByMinor(majorVersions.get((int)current.major)); - Version last = versions.remove(versions.size() - 1); - assert last.equals(current) : "The highest version must be the current one " - + "but was [" + last + "] and current was [" + current + "]"; - - if (current.revision != 0) { - /* If we are in a stable branch there should be no unreleased version constants - * because we don't expect to release any new versions in older branches. If there - * are extra constants then gradle will yell about it. 
*/ - return new Tuple<>(unmodifiableList(versions), singletonList(current)); + List unreleasedVersions = new ArrayList<>(); + final List> stableVersions; + if (currentMajor.size() == 1) { + // on master branch + stableVersions = previousMajor; + // remove current + moveLastToUnreleased(currentMajor, unreleasedVersions); + } else { + // on a stable or release branch, ie N.x + stableVersions = currentMajor; + // remove the next maintenance bugfix + moveLastToUnreleased(previousMajor, unreleasedVersions); } - /* If we are on a patch release then we know that at least the version before the - * current one is unreleased. If it is released then gradle would be complaining. */ - int unreleasedIndex = versions.size() - 1; - while (true) { - if (unreleasedIndex < 0) { - throw new IllegalArgumentException("Couldn't find first non-alpha release"); + // remove next minor + Version lastMinor = moveLastToUnreleased(stableVersions, unreleasedVersions); + if (lastMinor.revision == 0) { + if (stableVersions.get(stableVersions.size() - 1).size() == 1) { + // a minor is being staged, which is also unreleased + moveLastToUnreleased(stableVersions, unreleasedVersions); } - /* We don't support backwards compatibility for alphas, betas, and rcs. But - * they were released so we add them to the released list. Usually this doesn't - * matter to consumers, but consumers that do care should filter non-release - * versions. */ - if (versions.get(unreleasedIndex).isRelease()) { - break; - } - unreleasedIndex--; + // remove the next bugfix + moveLastToUnreleased(stableVersions, unreleasedVersions); } - Version unreleased = versions.remove(unreleasedIndex); - if (unreleased.revision == 0) { - /* - * If the last unreleased version is itself a patch release then Gradle enforces that there is yet another unreleased version - * before that. However, we have to skip alpha/betas/RCs too (e.g., consider when the version constants are ..., 5.6.3, 5.6.4, - * 6.0.0-alpha1, ..., 6.0.0-rc1, 6.0.0-rc2, 6.0.0, 6.1.0 on the 6.x branch. In this case, we will have pruned 6.0.0 and 6.1.0 as - * unreleased versions, but we also need to prune 5.6.4. At this point though, unreleasedIndex will be pointing to 6.0.0-rc2, so - * we have to skip backwards until we find a non-alpha/beta/RC again. Then we can prune that version as an unreleased version - * too. - */ - do { - unreleasedIndex--; - } while (versions.get(unreleasedIndex).isRelease() == false); - Version earlierUnreleased = versions.remove(unreleasedIndex); + List releasedVersions = Stream.concat(oldVersions.stream(), + Stream.concat(previousMajor.stream(), currentMajor.stream()).flatMap(List::stream)) + .collect(Collectors.toList()); + Collections.sort(unreleasedVersions); // we add unreleased out of order, so need to sort here + return new Tuple<>(Collections.unmodifiableList(releasedVersions), Collections.unmodifiableList(unreleasedVersions)); + } - // This earlierUnreleased is either the snapshot on the minor branch lower, or its possible its a staged release. If it is a - // staged release, remove it and return it in unreleased as well. 
- if (earlierUnreleased.revision == 0) { - unreleasedIndex--; - Version actualUnreleasedPreviousMinor = versions.remove(unreleasedIndex); - return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(actualUnreleasedPreviousMinor, - earlierUnreleased, unreleased, current))); - } + // split the given versions into sub lists grouped by minor version + private static List> splitByMinor(List versions) { + Map> byMinor = versions.stream().collect(Collectors.groupingBy(v -> (int)v.minor)); + return byMinor.entrySet().stream().sorted(Map.Entry.comparingByKey()).map(Map.Entry::getValue).collect(Collectors.toList()); + } - return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(earlierUnreleased, unreleased, current))); - } else if (unreleased.major == current.major) { - // need to remove one more of the last major's minor set - do { - unreleasedIndex--; - } while (unreleasedIndex > 0 && versions.get(unreleasedIndex).major == current.major); - if (unreleasedIndex > 0) { - // some of the test cases return very small lists, so its possible this is just the end of the list, if so, dont include it - Version earlierMajorsMinor = versions.remove(unreleasedIndex); - return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(earlierMajorsMinor, unreleased, current))); - } + // move the last version of the last minor in versions to the unreleased versions + private static Version moveLastToUnreleased(List> versions, List unreleasedVersions) { + List lastMinor = new ArrayList<>(versions.get(versions.size() - 1)); + Version lastVersion = lastMinor.remove(lastMinor.size() - 1); + if (lastMinor.isEmpty()) { + versions.remove(versions.size() - 1); + } else { + versions.set(versions.size() - 1, lastMinor); } - return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(unreleased, current))); + unreleasedVersions.add(lastVersion); + return lastVersion; } private static final List RELEASED_VERSIONS; @@ -131,7 +122,7 @@ public class VersionUtils { allVersions.addAll(RELEASED_VERSIONS); allVersions.addAll(UNRELEASED_VERSIONS); Collections.sort(allVersions); - ALL_VERSIONS = unmodifiableList(allVersions); + ALL_VERSIONS = Collections.unmodifiableList(allVersions); } /** @@ -244,5 +235,4 @@ public class VersionUtils { assert compatible.size() > 0; return compatible.get(compatible.size() - 1); } - } diff --git a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java index 3c8b349792b..fea2fb9ad68 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java @@ -49,6 +49,7 @@ public class VersionUtilsTests extends ESTestCase { } public void testRandomVersionBetween() { + // TODO: rework this test to use a dummy Version class so these don't need to change with each release // full range Version got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), Version.CURRENT); assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); @@ -61,10 +62,10 @@ public class VersionUtilsTests extends ESTestCase { assertTrue(got.onOrBefore(Version.CURRENT)); // sub range - got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, - Version.V_6_0_0_beta1); - assertTrue(got.onOrAfter(Version.V_5_0_0)); - assertTrue(got.onOrBefore(Version.V_6_0_0_beta1)); + got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1, + 
Version.V_6_2_4); + assertTrue(got.onOrAfter(Version.V_6_0_0_alpha1)); + assertTrue(got.onOrBefore(Version.V_6_2_4)); // unbounded lower got = VersionUtils.randomVersionBetween(random(), null, Version.V_6_0_0_beta1); @@ -75,8 +76,8 @@ public class VersionUtilsTests extends ESTestCase { assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0))); // unbounded upper - got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, null); - assertTrue(got.onOrAfter(Version.V_5_0_0)); + got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, null); + assertTrue(got.onOrAfter(Version.V_6_0_0)); assertTrue(got.onOrBefore(Version.CURRENT)); got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null); assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion())); @@ -107,6 +108,8 @@ public class VersionUtilsTests extends ESTestCase { } public static class TestReleaseBranch { + public static final Version V_4_0_0 = Version.fromString("4.0.0"); + public static final Version V_4_0_1 = Version.fromString("4.0.1"); public static final Version V_5_3_0 = Version.fromString("5.3.0"); public static final Version V_5_3_1 = Version.fromString("5.3.1"); public static final Version V_5_3_2 = Version.fromString("5.3.2"); @@ -120,19 +123,24 @@ public class VersionUtilsTests extends ESTestCase { List unreleased = t.v2(); assertThat(released, equalTo(Arrays.asList( + TestReleaseBranch.V_4_0_0, TestReleaseBranch.V_5_3_0, TestReleaseBranch.V_5_3_1, TestReleaseBranch.V_5_3_2, TestReleaseBranch.V_5_4_0))); - assertThat(unreleased, equalTo(Collections.singletonList(TestReleaseBranch.V_5_4_1))); + assertThat(unreleased, equalTo(Arrays.asList( + TestReleaseBranch.V_4_0_1, + TestReleaseBranch.V_5_4_1))); } public static class TestStableBranch { - public static final Version V_5_3_0 = Version.fromString("5.3.0"); - public static final Version V_5_3_1 = Version.fromString("5.3.1"); - public static final Version V_5_3_2 = Version.fromString("5.3.2"); - public static final Version V_5_4_0 = Version.fromString("5.4.0"); - public static final Version CURRENT = V_5_4_0; + public static final Version V_4_0_0 = Version.fromString("4.0.0"); + public static final Version V_4_0_1 = Version.fromString("4.0.1"); + public static final Version V_5_0_0 = Version.fromString("5.0.0"); + public static final Version V_5_0_1 = Version.fromString("5.0.1"); + public static final Version V_5_0_2 = Version.fromString("5.0.2"); + public static final Version V_5_1_0 = Version.fromString("5.1.0"); + public static final Version CURRENT = V_5_1_0; } public void testResolveReleasedVersionsForUnreleasedStableBranch() { Tuple, List> t = VersionUtils.resolveReleasedVersions(TestStableBranch.CURRENT, @@ -141,14 +149,18 @@ public class VersionUtilsTests extends ESTestCase { List unreleased = t.v2(); assertThat(released, equalTo(Arrays.asList( - TestStableBranch.V_5_3_0, - TestStableBranch.V_5_3_1))); + TestStableBranch.V_4_0_0, + TestStableBranch.V_5_0_0, + TestStableBranch.V_5_0_1))); assertThat(unreleased, equalTo(Arrays.asList( - TestStableBranch.V_5_3_2, - TestStableBranch.V_5_4_0))); + TestStableBranch.V_4_0_1, + TestStableBranch.V_5_0_2, + TestStableBranch.V_5_1_0))); } public static class TestStableBranchBehindStableBranch { + public static final Version V_4_0_0 = Version.fromString("4.0.0"); + public static final Version V_4_0_1 = Version.fromString("4.0.1"); public static final Version V_5_3_0 = Version.fromString("5.3.0"); public static final Version V_5_3_1 = Version.fromString("5.3.1"); public 
    static final Version V_5_3_2 = Version.fromString("5.3.2"); @@ -163,9 +175,11 @@ public class VersionUtilsTests extends ESTestCase { List unreleased = t.v2(); assertThat(released, equalTo(Arrays.asList( + TestStableBranchBehindStableBranch.V_4_0_0, TestStableBranchBehindStableBranch.V_5_3_0, TestStableBranchBehindStableBranch.V_5_3_1))); assertThat(unreleased, equalTo(Arrays.asList( + TestStableBranchBehindStableBranch.V_4_0_1, TestStableBranchBehindStableBranch.V_5_3_2, TestStableBranchBehindStableBranch.V_5_4_0, TestStableBranchBehindStableBranch.V_5_5_0))); @@ -221,13 +235,13 @@ public class VersionUtilsTests extends ESTestCase { assertThat(released, equalTo(Arrays.asList( TestNewMajorRelease.V_5_6_0, TestNewMajorRelease.V_5_6_1, - TestNewMajorRelease.V_5_6_2, TestNewMajorRelease.V_6_0_0_alpha1, TestNewMajorRelease.V_6_0_0_alpha2, TestNewMajorRelease.V_6_0_0_beta1, TestNewMajorRelease.V_6_0_0_beta2, TestNewMajorRelease.V_6_0_0))); assertThat(unreleased, equalTo(Arrays.asList( + TestNewMajorRelease.V_5_6_2, TestNewMajorRelease.V_6_0_1))); } @@ -305,24 +319,6 @@ public class VersionUtilsTests extends ESTestCase { TestNewMinorBranchIn6x.V_6_2_0))); } - public static class TestIncorrectCurrentVersion { - public static final Version V_5_3_0 = Version.fromString("5.3.0"); - public static final Version V_5_3_1 = Version.fromString("5.3.1"); - public static final Version V_5_4_0 = Version.fromString("5.4.0"); - public static final Version V_5_4_1 = Version.fromString("5.4.1"); - public static final Version CURRENT = V_5_4_1; - } - - public void testIncorrectCurrentVersion() { - Version previousVersion = TestIncorrectCurrentVersion.V_5_4_0; - AssertionError error = expectThrows(AssertionError.class, () -> - VersionUtils.resolveReleasedVersions(previousVersion, TestIncorrectCurrentVersion.class)); - - String message = error.getMessage(); - assertThat(message, containsString(TestIncorrectCurrentVersion.CURRENT.toString())); - assertThat(message, containsString(previousVersion.toString())); - } - /** * Tests that {@link Version#minimumCompatibilityVersion()} and {@link VersionUtils#allReleasedVersions()} * agree with the list of wire and index compatible versions we build in gradle. From 7790cb5fa93561e2e0ea1250134bef89bd798630 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Wed, 2 May 2018 19:35:01 +0300 Subject: [PATCH 12/30] SQL: Reduce number of ranges generated for comparisons (#30267) * SQL: Reduce number of ranges generated for comparisons Rewrote optimization rule for combining ranges by improving the detection of binary comparisons in a tree to better combine them in a range, regardless of their place inside an expression. Additionally, improve the comparisons of Numbers of different types. Also, improve reassembly of conjunction/disjunction into balanced trees.
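For example (an illustrative sketch only, in the spirit of the new OptimizerTests; `loc`, `a` and `L(...)` stand in for a source location, a field attribute and a literal helper, and are not part of the patch), a conjunction of comparisons on the same field is now folded into a single range:

    // a > 2 AND a <= 5 AND a > 3  -- expected to combine into the single range  3 < a <= 5
    Expression and = new And(loc, new GreaterThan(loc, a, L(2)),
            new And(loc, new LessThanOrEqual(loc, a, L(5)), new GreaterThan(loc, a, L(3))));
    Expression combined = new CombineBinaryComparisons().rule(and);
    // expected: combined is equivalent to new Range(loc, a, L(3), false, L(5), true)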
    Do not promote BinaryComparisons to Ranges since it introduces NULL boundaries and thus a corner-case that needs too much handling. Compare BinaryComparisons directly between themselves and to Ranges. Fix #30017 --- .../predicate/BinaryComparison.java | 38 +- .../sql/expression/predicate/Predicates.java | 53 +- .../xpack/sql/expression/predicate/Range.java | 23 +- .../xpack/sql/optimizer/Optimizer.java | 498 ++++++++++++++++- .../elasticsearch/xpack/sql/tree/Node.java | 5 + .../sql/optimizer/OptimizerRunTests.java | 49 ++ .../xpack/sql/optimizer/OptimizerTests.java | 524 +++++++++++++++++- .../sql/querydsl/query/MatchQueryTests.java | 13 +- 8 files changed, 1155 insertions(+), 48 deletions(-) create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerRunTests.java diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java index 0fe94feba16..db1ba1d3cdf 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.sql.expression.BinaryOperator; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.type.DataType; -import org.elasticsearch.xpack.sql.type.DataTypes; // marker class to indicate operations that rely on values public abstract class BinaryComparison extends BinaryOperator { @@ -33,11 +32,42 @@ public abstract class BinaryComparison extends BinaryOperator { return DataType.BOOLEAN; } + /** + * Compares two expression arguments (typically Numbers), if possible. + * Otherwise returns null (the arguments are not comparable or at least + * one of them is null).
+ */ @SuppressWarnings({ "rawtypes", "unchecked" }) - static Integer compare(Object left, Object right) { - if (left instanceof Comparable && right instanceof Comparable) { - return Integer.valueOf(((Comparable) left).compareTo(right)); + public static Integer compare(Object l, Object r) { + // typical number comparison + if (l instanceof Number && r instanceof Number) { + return compare((Number) l, (Number) r); } + + if (l instanceof Comparable && r instanceof Comparable) { + try { + return Integer.valueOf(((Comparable) l).compareTo(r)); + } catch (ClassCastException cce) { + // when types are not compatible, cce is thrown + // fall back to null + return null; + } + } + return null; } + + static Integer compare(Number l, Number r) { + if (l instanceof Double || r instanceof Double) { + return Double.compare(l.doubleValue(), r.doubleValue()); + } + if (l instanceof Float || r instanceof Float) { + return Float.compare(l.floatValue(), r.floatValue()); + } + if (l instanceof Long || r instanceof Long) { + return Long.compare(l.longValue(), r.longValue()); + } + + return Integer.valueOf(Integer.compare(l.intValue(), r.intValue())); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java index 7439f6def14..6fb26cb6dcc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java @@ -9,8 +9,11 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import java.util.ArrayList; -import java.util.Collections; import java.util.List; +import java.util.function.BiFunction; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; public abstract class Predicates { @@ -22,7 +25,7 @@ public abstract class Predicates { list.addAll(splitAnd(and.right())); return list; } - return Collections.singletonList(exp); + return singletonList(exp); } public static List splitOr(Expression exp) { @@ -33,15 +36,51 @@ public abstract class Predicates { list.addAll(splitOr(or.right())); return list; } - return Collections.singletonList(exp); + return singletonList(exp); } public static Expression combineOr(List exps) { - return exps.stream().reduce((l, r) -> new Or(l.location(), l, r)).orElse(null); + return combine(exps, (l, r) -> new Or(l.location(), l, r)); } public static Expression combineAnd(List exps) { - return exps.stream().reduce((l, r) -> new And(l.location(), l, r)).orElse(null); + return combine(exps, (l, r) -> new And(l.location(), l, r)); + } + + /** + * Build a binary 'pyramid' from the given list: + *

    +     *       AND
    +     *      /   \
    +     *   AND     AND
    +     *  /   \   /   \
    +     * A     B C     D
    +     * 
    + * + * using the given combiner. + * + * While a bit longer, this method creates a balanced tree as opposed to a plain + * recursive approach which creates an unbalanced one (either to the left or right). + */ + private static Expression combine(List exps, BiFunction combiner) { + if (exps.isEmpty()) { + return null; + } + + // clone the list (to modify it) + List result = new ArrayList<>(exps); + + while (result.size() > 1) { + // combine (in place) expressions in pairs + // NB: this loop modifies the list (just like an array) + for (int i = 0; i < result.size() - 1; i++) { + Expression l = result.remove(i); + Expression r = result.remove(i); + result.add(i, combiner.apply(l, r)); + } + } + + return result.get(0); } public static List inCommon(List l, List r) { @@ -53,7 +92,7 @@ public abstract class Predicates { } } } - return common.isEmpty() ? Collections.emptyList() : common; + return common.isEmpty() ? emptyList() : common; } public static List subtract(List from, List r) { @@ -65,7 +104,7 @@ public abstract class Predicates { } } } - return diff.isEmpty() ? Collections.emptyList() : diff; + return diff.isEmpty() ? emptyList() : diff; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java index 96e427e90f1..54d541ab406 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; -import org.elasticsearch.xpack.sql.type.DataTypes; import java.util.Arrays; import java.util.List; @@ -66,11 +65,19 @@ public class Range extends Expression { @Override public boolean foldable() { - return value.foldable() && lower.foldable() && upper.foldable(); + if (lower.foldable() && upper.foldable()) { + return areBoundariesInvalid() || value.foldable(); + } + + return false; } @Override public Object fold() { + if (areBoundariesInvalid()) { + return Boolean.FALSE; + } + Object val = value.fold(); Integer lowerCompare = BinaryComparison.compare(lower.fold(), val); Integer upperCompare = BinaryComparison.compare(val, upper().fold()); @@ -79,6 +86,16 @@ return lowerComparsion && upperComparsion; } + /** + * Check whether the boundaries are invalid (upper < lower) or not. + * If they are, the value does not have to be evaluated.
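    + * For example (illustrative only), a range like {@code 5 < a < 2} has its upper bound below + * its lower bound and therefore folds directly to {@code FALSE} without evaluating {@code a}.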
+ */ + private boolean areBoundariesInvalid() { + Integer compare = BinaryComparison.compare(lower.fold(), upper.fold()); + // upper < lower OR upper == lower and the range doesn't contain any equals + return compare != null && (compare > 0 || (compare == 0 && (!includeLower || !includeUpper))); + } + @Override public boolean nullable() { return value.nullable() && lower.nullable() && upper.nullable(); @@ -122,4 +139,4 @@ public class Range extends Expression { sb.append(upper); return sb.toString(); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index a56be01c955..de4c0644b79 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.sql.expression.predicate.LessThan; import org.elasticsearch.xpack.sql.expression.predicate.LessThanOrEqual; import org.elasticsearch.xpack.sql.expression.predicate.Not; import org.elasticsearch.xpack.sql.expression.predicate.Or; +import org.elasticsearch.xpack.sql.expression.predicate.Predicates; import org.elasticsearch.xpack.sql.expression.predicate.Range; import org.elasticsearch.xpack.sql.plan.logical.Aggregate; import org.elasticsearch.xpack.sql.plan.logical.Filter; @@ -60,6 +61,7 @@ import org.elasticsearch.xpack.sql.rule.Rule; import org.elasticsearch.xpack.sql.rule.RuleExecutor; import org.elasticsearch.xpack.sql.session.EmptyExecutable; import org.elasticsearch.xpack.sql.session.SingletonExecutable; +import org.elasticsearch.xpack.sql.util.CollectionUtils; import java.util.ArrayList; import java.util.Arrays; @@ -121,9 +123,11 @@ public class Optimizer extends RuleExecutor { new ConstantFolding(), // boolean new BooleanSimplification(), - new BinaryComparisonSimplification(), new BooleanLiteralsOnTheRight(), - new CombineComparisonsIntoRange(), + new BinaryComparisonSimplification(), + // needs to occur before BinaryComparison combinations (see class) + new PropagateEquals(), + new CombineBinaryComparisons(), // prune/elimination new PruneFilters(), new PruneOrderBy(), @@ -1231,7 +1235,7 @@ public class Optimizer extends RuleExecutor { static class BinaryComparisonSimplification extends OptimizerExpressionRule { BinaryComparisonSimplification() { - super(TransformDirection.UP); + super(TransformDirection.DOWN); } @Override @@ -1277,47 +1281,483 @@ public class Optimizer extends RuleExecutor { } } - static class CombineComparisonsIntoRange extends OptimizerExpressionRule { + /** + * Propagate Equals to eliminate conjuncted Ranges. + * When encountering a different Equals or non-containing {@link Range}, the conjunction becomes false. + * When encountering a containing {@link Range}, the range gets eliminated by the equality. + * + * This rule doesn't perform any promotion of {@link BinaryComparison}s, that is handled by + * {@link CombineBinaryComparisons} on purpose as the resulting Range might be foldable + * (which is picked by the folding rule on the next run). + */ + static class PropagateEquals extends OptimizerExpressionRule { - CombineComparisonsIntoRange() { - super(TransformDirection.UP); + PropagateEquals() { + super(TransformDirection.DOWN); } @Override protected Expression rule(Expression e) { - return e instanceof And ? 
combine((And) e) : e; + if (e instanceof And) { + return propagate((And) e); + } + return e; } - private Expression combine(And and) { - Expression l = and.left(); - Expression r = and.right(); + // combine conjunction + private Expression propagate(And and) { + List ranges = new ArrayList<>(); + List equals = new ArrayList<>(); + List exps = new ArrayList<>(); - if (l instanceof BinaryComparison && r instanceof BinaryComparison) { - // if the same operator is used - BinaryComparison lb = (BinaryComparison) l; - BinaryComparison rb = (BinaryComparison) r; + boolean changed = false; - - if (lb.left().equals(((BinaryComparison) r).left()) && lb.right() instanceof Literal && rb.right() instanceof Literal) { - // >/>= AND />= - else if ((r instanceof GreaterThan || r instanceof GreaterThanOrEqual) - && (l instanceof LessThan || l instanceof LessThanOrEqual)) { - return new Range(and.location(), rb.left(), rb.right(), r instanceof GreaterThanOrEqual, lb.right(), - l instanceof LessThanOrEqual); + for (Expression ex : Predicates.splitAnd(and)) { + if (ex instanceof Range) { + ranges.add((Range) ex); + } else if (ex instanceof Equals) { + Equals otherEq = (Equals) ex; + // equals on different values evaluate to FALSE + if (otherEq.right().foldable()) { + for (Equals eq : equals) { + // cannot evaluate equals so skip it + if (!eq.right().foldable()) { + continue; + } + if (otherEq.left().semanticEquals(eq.left())) { + if (eq.right().foldable() && otherEq.right().foldable()) { + Integer comp = BinaryComparison.compare(eq.right().fold(), otherEq.right().fold()); + if (comp != null) { + // var cannot be equal to two different values at the same time + if (comp != 0) { + return FALSE; + } + } + } + } + } } + equals.add(otherEq); + } else { + exps.add(ex); } } - return and; + // check + for (Equals eq : equals) { + // cannot evaluate equals so skip it + if (!eq.right().foldable()) { + continue; + } + Object eqValue = eq.right().fold(); + + for (int i = 0; i < ranges.size(); i++) { + Range range = ranges.get(i); + + if (range.value().semanticEquals(eq.left())) { + // if equals is outside the interval, evaluate the whole expression to FALSE + if (range.lower().foldable()) { + Integer compare = BinaryComparison.compare(range.lower().fold(), eqValue); + if (compare != null && ( + // eq outside the lower boundary + compare > 0 || + // eq matches the boundary but should not be included + (compare == 0 && !range.includeLower())) + ) { + return FALSE; + } + } + if (range.upper().foldable()) { + Integer compare = BinaryComparison.compare(range.upper().fold(), eqValue); + if (compare != null && ( + // eq outside the upper boundary + compare < 0 || + // eq matches the boundary but should not be included + (compare == 0 && !range.includeUpper())) + ) { + return FALSE; + } + } + + // it's in the range and thus, remove it + ranges.remove(i); + changed = true; + } + } + } + + return changed ? 
Predicates.combineAnd(CollectionUtils.combine(exps, equals, ranges)) : and; } } + static class CombineBinaryComparisons extends OptimizerExpressionRule { + + CombineBinaryComparisons() { + super(TransformDirection.DOWN); + } + + @Override + protected Expression rule(Expression e) { + if (e instanceof And) { + return combine((And) e); + } else if (e instanceof Or) { + return combine((Or) e); + } + return e; + } + + // combine conjunction + private Expression combine(And and) { + List ranges = new ArrayList<>(); + List bcs = new ArrayList<>(); + List exps = new ArrayList<>(); + + boolean changed = false; + + for (Expression ex : Predicates.splitAnd(and)) { + if (ex instanceof Range) { + Range r = (Range) ex; + if (findExistingRange(r, ranges, true)) { + changed = true; + } else { + ranges.add(r); + } + } else if (ex instanceof BinaryComparison && !(ex instanceof Equals)) { + BinaryComparison bc = (BinaryComparison) ex; + + if (bc.right().foldable() && (findConjunctiveComparisonInRange(bc, ranges) || findExistingComparison(bc, bcs, true))) { + changed = true; + } else { + bcs.add(bc); + } + } else { + exps.add(ex); + } + } + + // finally try combining any left BinaryComparisons into possible Ranges + // this could be a different rule but it's clearer here wrt the order of comparisons + + for (int i = 0; i < bcs.size() - 1; i++) { + BinaryComparison main = bcs.get(i); + + for (int j = i + 1; j < bcs.size(); j++) { + BinaryComparison other = bcs.get(j); + + if (main.left().semanticEquals(other.left())) { + // >/>= AND />= + else if ((other instanceof GreaterThan || other instanceof GreaterThanOrEqual) + && (main instanceof LessThan || main instanceof LessThanOrEqual)) { + bcs.remove(j); + bcs.remove(i); + + ranges.add(new Range(and.location(), main.left(), + other.right(), other instanceof GreaterThanOrEqual, + main.right(), main instanceof LessThanOrEqual)); + + changed = true; + } + } + } + } + + + return changed ? Predicates.combineAnd(CollectionUtils.combine(exps, bcs, ranges)) : and; + } + + // combine disjunction + private Expression combine(Or or) { + List bcs = new ArrayList<>(); + List ranges = new ArrayList<>(); + List exps = new ArrayList<>(); + + boolean changed = false; + + for (Expression ex : Predicates.splitOr(or)) { + if (ex instanceof Range) { + Range r = (Range) ex; + if (findExistingRange(r, ranges, false)) { + changed = true; + } else { + ranges.add(r); + } + } else if (ex instanceof BinaryComparison) { + BinaryComparison bc = (BinaryComparison) ex; + if (bc.right().foldable() && findExistingComparison(bc, bcs, false)) { + changed = true; + } else { + bcs.add(bc); + } + } else { + exps.add(ex); + } + } + + return changed ? 
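+            // rebuild the disjunction only when an existing range or comparison was subsumed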
Predicates.combineOr(CollectionUtils.combine(exps, bcs, ranges)) : or; + } + + private static boolean findExistingRange(Range main, List ranges, boolean conjunctive) { + if (!main.lower().foldable() && !main.upper().foldable()) { + return false; + } + // NB: the loop modifies the list (hence why the int is used) + for (int i = 0; i < ranges.size(); i++) { + Range other = ranges.get(i); + + if (main.value().semanticEquals(other.value())) { + + // make sure the comparison was done + boolean compared = false; + + boolean lower = false; + boolean upper = false; + // boundary equality (useful to differentiate whether a range is included or not) + // and thus whether it should be preserved or ignored + boolean lowerEq = false; + boolean upperEq = false; + + // evaluate lower + if (main.lower().foldable() && other.lower().foldable()) { + compared = true; + + Integer comp = BinaryComparison.compare(main.lower().fold(), other.lower().fold()); + // values are comparable + if (comp != null) { + // boundary equality + lowerEq = comp == 0 && main.includeLower() == other.includeLower(); + // AND + if (conjunctive) { + // (2 < a < 3) AND (1 < a < 3) -> (1 < a < 3) + lower = comp > 0 || + // (2 < a < 3) AND (2 < a <= 3) -> (2 < a < 3) + (comp == 0 && !main.includeLower() && other.includeLower()); + } + // OR + else { + // (1 < a < 3) OR (2 < a < 3) -> (1 < a < 3) + lower = comp < 0 || + // (2 <= a < 3) OR (2 < a < 3) -> (2 <= a < 3) + (comp == 0 && main.includeLower() && !other.includeLower()) || lowerEq; + } + } + } + // evaluate upper + if (main.upper().foldable() && other.upper().foldable()) { + compared = true; + + Integer comp = BinaryComparison.compare(main.upper().fold(), other.upper().fold()); + // values are comparable + if (comp != null) { + // boundary equality + upperEq = comp == 0 && main.includeUpper() == other.includeUpper(); + + // AND + if (conjunctive) { + // (1 < a < 2) AND (1 < a < 3) -> (1 < a < 2) + upper = comp < 0 || + // (1 < a < 2) AND (1 < a <= 2) -> (1 < a < 2) + (comp == 0 && !main.includeUpper() && other.includeUpper()); + } + // OR + else { + // (1 < a < 3) OR (1 < a < 2) -> (1 < a < 3) + upper = comp > 0 || + // (1 < a <= 3) OR (1 < a < 3) -> (2 < a < 3) + (comp == 0 && main.includeUpper() && !other.includeUpper()) || upperEq; + } + } + } + + // AND - at least one of lower or upper + if (conjunctive) { + // can tighten range + if (lower || upper) { + ranges.remove(i); + ranges.add(i, + new Range(main.location(), main.value(), + lower ? main.lower() : other.lower(), + lower ? main.includeLower() : other.includeLower(), + upper ? main.upper() : other.upper(), + upper ? main.includeUpper() : other.includeUpper())); + } + + // range was comparable + return compared; + } + // OR - needs both upper and lower to loosen range + else { + // can loosen range + if (lower && upper) { + ranges.remove(i); + ranges.add(i, + new Range(main.location(), main.value(), + lower ? main.lower() : other.lower(), + lower ? main.includeLower() : other.includeLower(), + upper ? main.upper() : other.upper(), + upper ? 
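+                                // when widening, take the looser boundary from each side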
main.includeUpper() : other.includeUpper())); + return true; + } + + // if the range in included, no need to add it + return compared && (!((lower && !lowerEq) || (upper && !upperEq))); + } + } + } + return false; + } + + private boolean findConjunctiveComparisonInRange(BinaryComparison main, List ranges) { + Object value = main.right().fold(); + + // NB: the loop modifies the list (hence why the int is used) + for (int i = 0; i < ranges.size(); i++) { + Range other = ranges.get(i); + + if (main.left().semanticEquals(other.value())) { + + if (main instanceof GreaterThan || main instanceof GreaterThanOrEqual) { + if (other.lower().foldable()) { + Integer comp = BinaryComparison.compare(value, other.lower().fold()); + if (comp != null) { + // 2 < a AND (2 <= a < 3) -> 2 < a < 3 + boolean lowerEq = comp == 0 && other.includeLower() && main instanceof GreaterThan; + // 2 < a AND (1 < a < 3) -> 2 < a < 3 + boolean lower = comp > 0 || lowerEq; + + if (lower) { + ranges.remove(i); + ranges.add(i, + new Range(other.location(), other.value(), + main.right(), lowerEq ? true : other.includeLower(), + other.upper(), other.includeUpper())); + } + + // found a match + return true; + } + } + } else if (main instanceof LessThan || main instanceof LessThanOrEqual) { + if (other.lower().foldable()) { + Integer comp = BinaryComparison.compare(value, other.lower().fold()); + if (comp != null) { + // a < 2 AND (1 < a <= 2) -> 1 < a < 2 + boolean upperEq = comp == 0 && other.includeUpper() && main instanceof LessThan; + // a < 2 AND (1 < a < 3) -> 1 < a < 2 + boolean upper = comp > 0 || upperEq; + + if (upper) { + ranges.remove(i); + ranges.add(i, new Range(other.location(), other.value(), + other.lower(), other.includeLower(), + main.right(), upperEq ? true : other.includeUpper())); + } + + // found a match + return true; + } + } + } + + return false; + } + } + return false; + } + + /** + * Find commonalities between the given comparison in the given list. + * The method can be applied both for conjunctive (AND) or disjunctive purposes (OR). 
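+     * Returns true when a comparable match is found; in that case the existing entry may be replaced in place.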
+ */ + private static boolean findExistingComparison(BinaryComparison main, List bcs, boolean conjunctive) { + Object value = main.right().fold(); + + // NB: the loop modifies the list (hence why the int is used) + for (int i = 0; i < bcs.size(); i++) { + BinaryComparison other = bcs.get(i); + // skip if cannot evaluate + if (!other.right().foldable()) { + continue; + } + // if bc is a higher/lower value or gte vs gt, use it instead + if ((other instanceof GreaterThan || other instanceof GreaterThanOrEqual) && + (main instanceof GreaterThan || main instanceof GreaterThanOrEqual)) { + + if (main.left().semanticEquals(other.left())) { + Integer compare = BinaryComparison.compare(value, other.right().fold()); + + if (compare != null) { + // AND + if ((conjunctive && + // a > 3 AND a > 2 -> a > 3 + (compare > 0 || + // a > 2 AND a >= 2 -> a > 2 + (compare == 0 && main instanceof GreaterThan && other instanceof GreaterThanOrEqual))) + || + // OR + (!conjunctive && + // a > 2 OR a > 3 -> a > 2 + (compare < 0 || + // a >= 2 OR a > 2 -> a >= 2 + (compare == 0 && main instanceof GreaterThanOrEqual && other instanceof GreaterThan)))) { + bcs.remove(i); + bcs.add(i, main); + } + // found a match + return true; + } + + return false; + } + } + // if bc is a lower/higher value or lte vs lt, use it instead + else if ((other instanceof LessThan || other instanceof LessThanOrEqual) && + (main instanceof LessThan || main instanceof LessThanOrEqual)) { + + if (main.left().semanticEquals(other.left())) { + Integer compare = BinaryComparison.compare(value, other.right().fold()); + + if (compare != null) { + // AND + if ((conjunctive && + // a < 2 AND a < 3 -> a < 2 + (compare < 0 || + // a < 2 AND a <= 2 -> a < 2 + (compare == 0 && main instanceof LessThan && other instanceof LessThanOrEqual))) + || + // OR + (!conjunctive && + // a < 2 OR a < 3 -> a < 3 + (compare > 0 || + // a <= 2 OR a < 2 -> a <= 2 + (compare == 0 && main instanceof LessThanOrEqual && other instanceof LessThan)))) { + bcs.remove(i); + bcs.add(i, main); + + } + // found a match + return true; + } + + return false; + } + } + } + + return false; + } + } static class SkipQueryOnLimitZero extends OptimizerRule { @Override @@ -1435,4 +1875,4 @@ public class Optimizer extends RuleExecutor { enum TransformDirection { UP, DOWN }; -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Node.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Node.java index c0d885c6dcc..13b17791475 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Node.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Node.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.sql.tree; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + import java.util.ArrayList; import java.util.BitSet; import java.util.List; @@ -37,6 +39,9 @@ public abstract class Node> { public Node(Location location, List children) { this.location = (location != null ? 
location : Location.EMPTY); + if (children.contains(null)) { + throw new SqlIllegalArgumentException("Null children are not allowed"); + } this.children = children; } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerRunTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerRunTests.java new file mode 100644 index 00000000000..9e2d139460e --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerRunTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.optimizer; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.TypesTests; + +import java.util.Map; +import java.util.TimeZone; + +public class OptimizerRunTests extends ESTestCase { + + private final SqlParser parser; + private final IndexResolution getIndexResult; + private final FunctionRegistry functionRegistry; + private final Analyzer analyzer; + private final Optimizer optimizer; + + public OptimizerRunTests() { + parser = new SqlParser(); + functionRegistry = new FunctionRegistry(); + + Map mapping = TypesTests.loadMapping("mapping-multi-field-variation.json"); + + EsIndex test = new EsIndex("test", mapping); + getIndexResult = IndexResolution.valid(test); + analyzer = new Analyzer(functionRegistry, getIndexResult, TimeZone.getTimeZone("UTC")); + optimizer = new Optimizer(); + } + + private LogicalPlan plan(String sql) { + return optimizer.optimize(analyzer.analyze(parser.createStatement(sql))); + } + + public void testWhereClause() { + LogicalPlan p = plan("SELECT some.string l FROM test WHERE int IS NOT NULL AND int < 10005 ORDER BY int"); + assertNotNull(p); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index a49538c8d53..9f95712b5d4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.expression.Alias; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.NamedExpression; import org.elasticsearch.xpack.sql.expression.Order; @@ -49,8 +50,10 @@ import org.elasticsearch.xpack.sql.expression.regex.RLike; import org.elasticsearch.xpack.sql.optimizer.Optimizer.BinaryComparisonSimplification; import org.elasticsearch.xpack.sql.optimizer.Optimizer.BooleanLiteralsOnTheRight; 
import org.elasticsearch.xpack.sql.optimizer.Optimizer.BooleanSimplification; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.CombineBinaryComparisons; import org.elasticsearch.xpack.sql.optimizer.Optimizer.CombineProjections; import org.elasticsearch.xpack.sql.optimizer.Optimizer.ConstantFolding; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.PropagateEquals; import org.elasticsearch.xpack.sql.optimizer.Optimizer.PruneDuplicateFunctions; import org.elasticsearch.xpack.sql.optimizer.Optimizer.PruneSubqueryAliases; import org.elasticsearch.xpack.sql.optimizer.Optimizer.ReplaceFoldableAttributes; @@ -65,7 +68,7 @@ import org.elasticsearch.xpack.sql.session.EmptyExecutable; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; -import org.joda.time.DateTimeZone; +import org.elasticsearch.xpack.sql.type.EsField; import java.util.Arrays; import java.util.Collections; @@ -74,6 +77,7 @@ import java.util.TimeZone; import static java.util.Arrays.asList; import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; @@ -217,6 +221,10 @@ public class OptimizerTests extends ESTestCase { assertEquals(10, lt.right().fold()); } + // + // Constant folding + // + public void testConstantFolding() { Expression exp = new Add(EMPTY, L(2), L(3)); @@ -314,6 +322,10 @@ public class OptimizerTests extends ESTestCase { return l.value(); } + // + // Logical simplifications + // + public void testBinaryComparisonSimplification() { assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, L(5), L(5)))); assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, L(5), L(5)))); @@ -369,4 +381,512 @@ public class OptimizerTests extends ESTestCase { assertEquals(expected, simplification.rule(actual)); } -} + + // + // Range optimization + // + + // 6 < a <= 5 -> FALSE + public void testFoldExcludingRangeToFalse() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r = new Range(EMPTY, fa, L(6), false, L(5), true); + assertTrue(r.foldable()); + assertEquals(Boolean.FALSE, r.fold()); + } + + // 6 < a <= 5.5 -> FALSE + public void testFoldExcludingRangeWithDifferentTypesToFalse() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r = new Range(EMPTY, fa, L(6), false, L(5.5d), true); + assertTrue(r.foldable()); + assertEquals(Boolean.FALSE, r.fold()); + } + + // Conjunction + + public void testCombineBinaryComparisonsNotComparable() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(6)); + LessThan lt = new LessThan(EMPTY, fa, Literal.FALSE); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + And and = new And(EMPTY, lte, lt); + Expression exp = rule.rule(and); + assertEquals(exp, and); + } + + // a <= 6 AND a < 5 -> a < 5 + public void testCombineBinaryComparisonsUpper() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(6)); + LessThan lt = new LessThan(EMPTY, fa, L(5)); + + CombineBinaryComparisons rule = new 
CombineBinaryComparisons(); + + Expression exp = rule.rule(new And(EMPTY, lte, lt)); + assertEquals(LessThan.class, exp.getClass()); + LessThan r = (LessThan) exp; + assertEquals(L(5), r.right()); + } + + // 6 <= a AND 5 < a -> 6 <= a + public void testCombineBinaryComparisonsLower() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(6)); + GreaterThan gt = new GreaterThan(EMPTY, fa, L(5)); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + + Expression exp = rule.rule(new And(EMPTY, gte, gt)); + assertEquals(GreaterThanOrEqual.class, exp.getClass()); + GreaterThanOrEqual r = (GreaterThanOrEqual) exp; + assertEquals(L(6), r.right()); + } + + // 5 <= a AND 5 < a -> 5 < a + public void testCombineBinaryComparisonsInclude() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(5)); + GreaterThan gt = new GreaterThan(EMPTY, fa, L(5)); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + + Expression exp = rule.rule(new And(EMPTY, gte, gt)); + assertEquals(GreaterThan.class, exp.getClass()); + GreaterThan r = (GreaterThan) exp; + assertEquals(L(5), r.right()); + } + + // 3 <= a AND 4 < a AND a <= 7 AND a < 6 -> 4 < a < 6 + public void testCombineMultipleBinaryComparisons() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(3)); + GreaterThan gt = new GreaterThan(EMPTY, fa, L(4)); + LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(7)); + LessThan lt = new LessThan(EMPTY, fa, L(6)); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + + Expression exp = rule.rule(new And(EMPTY, gte, new And(EMPTY, gt, new And(EMPTY, lt, lte)))); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(L(4), r.lower()); + assertFalse(r.includeLower()); + assertEquals(L(6), r.upper()); + assertFalse(r.includeUpper()); + } + + // 3 <= a AND TRUE AND 4 < a AND a != 5 AND a <= 7 -> 4 < a <= 7 AND a != 5 AND TRUE + public void testCombineMixedMultipleBinaryComparisons() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(3)); + GreaterThan gt = new GreaterThan(EMPTY, fa, L(4)); + LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(7)); + Expression ne = new Not(EMPTY, new Equals(EMPTY, fa, L(5))); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + + // TRUE AND a != 5 AND 4 < a <= 7 + Expression exp = rule.rule(new And(EMPTY, gte, new And(EMPTY, Literal.TRUE, new And(EMPTY, gt, new And(EMPTY, ne, lte))))); + assertEquals(And.class, exp.getClass()); + And and = ((And) exp); + assertEquals(Range.class, and.right().getClass()); + Range r = (Range) and.right(); + assertEquals(L(4), r.lower()); + assertFalse(r.includeLower()); + assertEquals(L(7), r.upper()); + assertTrue(r.includeUpper()); + } + + // 1 <= a AND a < 5 -> 1 <= a < 5 + public void testCombineComparisonsIntoRange() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(1)); + LessThan lt = new LessThan(EMPTY, fa, L(5)); + + CombineBinaryComparisons rule = 
new CombineBinaryComparisons(); + Expression exp = rule.rule(new And(EMPTY, gte, lt)); + assertEquals(Range.class, rule.rule(exp).getClass()); + + Range r = (Range) exp; + assertEquals(L(1), r.lower()); + assertTrue(r.includeLower()); + assertEquals(L(5), r.upper()); + assertFalse(r.includeUpper()); + } + + // a != NULL AND a > 1 AND a < 5 AND a == 10 -> (a != NULL AND a == 10) AND 1 <= a < 5 + public void testCombineUnbalancedComparisonsMixedWithEqualsIntoRange() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + IsNotNull isn = new IsNotNull(EMPTY, fa); + GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(1)); + + Equals eq = new Equals(EMPTY, fa, L(10)); + LessThan lt = new LessThan(EMPTY, fa, L(5)); + + And and = new And(EMPTY, new And(EMPTY, isn, gte), new And(EMPTY, lt, eq)); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(and); + assertEquals(And.class, exp.getClass()); + And a = (And) exp; + assertEquals(Range.class, a.right().getClass()); + + Range r = (Range) a.right(); + assertEquals(L(1), r.lower()); + assertTrue(r.includeLower()); + assertEquals(L(5), r.upper()); + assertFalse(r.includeUpper()); + } + + // (2 < a < 3) AND (1 < a < 4) -> (2 < a < 3) + public void testCombineBinaryComparisonsConjunctionOfIncludedRange() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(1), false, L(4), false); + + And and = new And(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(and); + assertEquals(r1, exp); + } + + // (2 < a < 3) AND a < 2 -> 2 < a < 2 + public void testCombineBinaryComparisonsConjunctionOfNonOverlappingBoundaries() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(1), false, L(2), false); + + And and = new And(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(and); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(L(2), r.lower()); + assertFalse(r.includeLower()); + assertEquals(L(2), r.upper()); + assertFalse(r.includeUpper()); + assertEquals(Boolean.FALSE, r.fold()); + } + + // (2 < a < 3) AND (2 < a <= 3) -> 2 < a < 3 + public void testCombineBinaryComparisonsConjunctionOfUpperEqualsOverlappingBoundaries() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true); + + And and = new And(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(and); + assertEquals(r1, exp); + } + + // (2 < a < 3) AND (1 < a < 3) -> 2 < a < 3 + public void testCombineBinaryComparisonsConjunctionOverlappingUpperBoundary() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r2 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false); + + And and = new And(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = 
rule.rule(and); + assertEquals(r2, exp); + } + + // (2 < a <= 3) AND (1 < a < 3) -> 2 < a < 3 + public void testCombineBinaryComparisonsConjunctionWithDifferentUpperLimitInclusion() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true); + + And and = new And(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(and); + assertEquals(Range.class, exp.getClass()); + Range r = (Range) exp; + assertEquals(L(2), r.lower()); + assertFalse(r.includeLower()); + assertEquals(L(3), r.upper()); + assertFalse(r.includeUpper()); + } + + // (0 < a <= 1) AND (0 <= a < 2) -> 0 < a <= 1 + public void testRangesOverlappingConjunctionNoLowerBoundary() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(0), false, L(1), true); + Range r2 = new Range(EMPTY, fa, L(0), true, L(2), false); + + And and = new And(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(and); + assertEquals(r1, exp); + } + + // Disjunction + + public void testCombineBinaryComparisonsDisjunctionNotComparable() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1)); + GreaterThan gt2 = new GreaterThan(EMPTY, fa, Literal.FALSE); + + Or or = new Or(EMPTY, gt1, gt2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(exp, or); + } + + + // 2 < a OR 1 < a OR 3 < a -> 1 < a + public void testCombineBinaryComparisonsDisjunctionLowerBound() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1)); + GreaterThan gt2 = new GreaterThan(EMPTY, fa, L(2)); + GreaterThan gt3 = new GreaterThan(EMPTY, fa, L(3)); + + Or or = new Or(EMPTY, gt1, new Or(EMPTY, gt2, gt3)); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(GreaterThan.class, exp.getClass()); + + GreaterThan gt = (GreaterThan) exp; + assertEquals(L(1), gt.right()); + } + + // 2 < a OR 1 < a OR 3 <= a -> 1 < a + public void testCombineBinaryComparisonsDisjunctionIncludeLowerBounds() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1)); + GreaterThan gt2 = new GreaterThan(EMPTY, fa, L(2)); + GreaterThanOrEqual gte3 = new GreaterThanOrEqual(EMPTY, fa, L(3)); + + Or or = new Or(EMPTY, new Or(EMPTY, gt1, gt2), gte3); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(GreaterThan.class, exp.getClass()); + + GreaterThan gt = (GreaterThan) exp; + assertEquals(L(1), gt.right()); + } + + // a < 1 OR a < 2 OR a < 3 -> a < 3 + public void testCombineBinaryComparisonsDisjunctionUpperBound() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + LessThan lt1 = new LessThan(EMPTY, fa, L(1)); + LessThan lt2 = new LessThan(EMPTY, fa, L(2)); + LessThan lt3 = new LessThan(EMPTY, fa, L(3)); + + Or or = new Or(EMPTY, new Or(EMPTY, lt1, lt2), lt3); 
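+        // only the loosest upper bound (a < 3) should survive the disjunction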
+ + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(LessThan.class, exp.getClass()); + + LessThan lt = (LessThan) exp; + assertEquals(L(3), lt.right()); + } + + // a < 2 OR a <= 2 OR a < 1 -> a <= 2 + public void testCombineBinaryComparisonsDisjunctionIncludeUpperBounds() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + LessThan lt1 = new LessThan(EMPTY, fa, L(1)); + LessThan lt2 = new LessThan(EMPTY, fa, L(2)); + LessThanOrEqual lte2 = new LessThanOrEqual(EMPTY, fa, L(2)); + + Or or = new Or(EMPTY, lt2, new Or(EMPTY, lte2, lt1)); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(LessThanOrEqual.class, exp.getClass()); + + LessThanOrEqual lte = (LessThanOrEqual) exp; + assertEquals(L(2), lte.right()); + } + + // a < 2 OR 3 < a OR a < 1 OR 4 < a -> a < 2 OR 3 < a + public void testCombineBinaryComparisonsDisjunctionOfLowerAndUpperBounds() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + LessThan lt1 = new LessThan(EMPTY, fa, L(1)); + LessThan lt2 = new LessThan(EMPTY, fa, L(2)); + + GreaterThan gt3 = new GreaterThan(EMPTY, fa, L(3)); + GreaterThan gt4 = new GreaterThan(EMPTY, fa, L(4)); + + Or or = new Or(EMPTY, new Or(EMPTY, lt2, gt3), new Or(EMPTY, lt1, gt4)); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(Or.class, exp.getClass()); + + Or ro = (Or) exp; + + assertEquals(LessThan.class, ro.left().getClass()); + LessThan lt = (LessThan) ro.left(); + assertEquals(L(2), lt.right()); + assertEquals(GreaterThan.class, ro.right().getClass()); + GreaterThan gt = (GreaterThan) ro.right(); + assertEquals(L(3), gt.right()); + } + + // (2 < a < 3) OR (1 < a < 4) -> (1 < a < 4) + public void testCombineBinaryComparisonsDisjunctionOfIncludedRangeNotComparable() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(1), false, Literal.FALSE, false); + + Or or = new Or(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(or, exp); + } + + + // (2 < a < 3) OR (1 < a < 4) -> (1 < a < 4) + public void testCombineBinaryComparisonsDisjunctionOfIncludedRange() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(1), false, L(4), false); + + Or or = new Or(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(Range.class, exp.getClass()); + + Range r = (Range) exp; + assertEquals(L(1), r.lower()); + assertFalse(r.includeLower()); + assertEquals(L(4), r.upper()); + assertFalse(r.includeUpper()); + } + + // (2 < a < 3) OR (1 < a < 2) -> same + public void testCombineBinaryComparisonsDisjunctionOfNonOverlappingBoundaries() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(1), false, L(2), false); + + Or or = new Or(EMPTY, r1, r2); + + CombineBinaryComparisons rule = 
new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(or, exp); + } + + // (2 < a < 3) OR (2 < a <= 3) -> 2 < a <= 3 + public void testCombineBinaryComparisonsDisjunctionOfUpperEqualsOverlappingBoundaries() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true); + + Or or = new Or(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(r2, exp); + } + + // (2 < a < 3) OR (1 < a < 3) -> 1 < a < 3 + public void testCombineBinaryComparisonsOverlappingUpperBoundary() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r2 = new Range(EMPTY, fa, L(2), false, L(3), false); + Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false); + + Or or = new Or(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(r1, exp); + } + + // (2 < a <= 3) OR (1 < a < 3) -> same (the <= prevents the ranges from being combined) + public void testCombineBinaryComparisonsWithDifferentUpperLimitInclusion() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false); + Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true); + + Or or = new Or(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(or, exp); + } + + // (0 < a <= 1) OR (0 < a < 2) -> 0 < a < 2 + public void testRangesOverlappingNoLowerBoundary() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + + Range r2 = new Range(EMPTY, fa, L(0), false, L(2), false); + Range r1 = new Range(EMPTY, fa, L(0), false, L(1), true); + + Or or = new Or(EMPTY, r1, r2); + + CombineBinaryComparisons rule = new CombineBinaryComparisons(); + Expression exp = rule.rule(or); + assertEquals(r2, exp); + } + + // Equals + + // a == 1 AND a == 2 -> FALSE + public void testDualEqualsConjunction() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + Equals eq1 = new Equals(EMPTY, fa, L(1)); + Equals eq2 = new Equals(EMPTY, fa, L(2)); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, eq2)); + assertEquals(Literal.FALSE, rule.rule(exp)); + } + + // 1 <= a < 10 AND a == 1 -> a == 1 + public void testEliminateRangeByEqualsInInterval() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + Equals eq1 = new Equals(EMPTY, fa, L(1)); + Range r = new Range(EMPTY, fa, L(1), true, L(10), false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, r)); + assertEquals(eq1, rule.rule(exp)); + } + + // 1 < a < 10 AND a == 10 -> FALSE + public void testEliminateRangeByEqualsOutsideInterval() { + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + Equals eq1 = new Equals(EMPTY, fa, L(10)); + Range r = new Range(EMPTY, fa, L(1), false, L(10), false); + + PropagateEquals rule = new PropagateEquals(); + Expression exp = rule.rule(new And(EMPTY, eq1, r)); + 
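+        // 10 falls on the excluded upper bound of (1, 10), so the conjunction folds to FALSE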
assertEquals(Literal.FALSE, rule.rule(exp)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQueryTests.java index 431a6a146ae..b8b4b2fb32b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQueryTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQueryTests.java @@ -8,16 +8,21 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.LocationTests; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; import java.util.Arrays; import java.util.List; import java.util.function.Function; -import static org.hamcrest.Matchers.equalTo; +import static java.util.Collections.emptyMap; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; +import static org.hamcrest.Matchers.equalTo; public class MatchQueryTests extends ESTestCase { static MatchQuery randomMatchQuery() { @@ -62,14 +67,16 @@ public class MatchQueryTests extends ESTestCase { private static MatchQueryBuilder getBuilder(String options) { final Location location = new Location(1, 1); - final MatchQueryPredicate mmqp = new MatchQueryPredicate(location, null, "eggplant", options); + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.KEYWORD, emptyMap(), true)); + final MatchQueryPredicate mmqp = new MatchQueryPredicate(location, fa, "eggplant", options); final MatchQuery mmq = new MatchQuery(location, "eggplant", "foo", mmqp); return (MatchQueryBuilder) mmq.asBuilder(); } public void testToString() { final Location location = new Location(1, 1); - final MatchQueryPredicate mmqp = new MatchQueryPredicate(location, null, "eggplant", ""); + FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.KEYWORD, emptyMap(), true)); + final MatchQueryPredicate mmqp = new MatchQueryPredicate(location, fa, "eggplant", ""); final MatchQuery mmq = new MatchQuery(location, "eggplant", "foo", mmqp); assertEquals("MatchQuery@1:2[eggplant:foo]", mmq.toString()); } From fba2f00a73750698a5629d57182bd6b212a86387 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 2 May 2018 09:48:49 -0700 Subject: [PATCH 13/30] Packaging: Unmark systemd service file as a config file (#29004) Systemd overrides should happen through /etc/systemd/system, not directly editing the service file. This commit removes marking the service file as configuration for rpm and deb packages. 
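For example, rather than editing the packaged unit file, a deployment can
place a drop-in override (a sketch; the directive shown is illustrative) at
/etc/systemd/system/elasticsearch.service.d/override.conf:

    [Service]
    LimitMEMLOCK=infinity

and then run `systemctl daemon-reload` so systemd picks up the override.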
--- distribution/packages/build.gradle | 1 - docs/CHANGELOG.asciidoc | 1 + docs/reference/migration/migrate_7_0.asciidoc | 2 ++ .../reference/migration/migrate_7_0/packaging.asciidoc | 10 ++++++++++ .../test/resources/packaging/tests/30_deb_package.bats | 9 ++++++--- 5 files changed, 19 insertions(+), 4 deletions(-) create mode 100644 docs/reference/migration/migrate_7_0/packaging.asciidoc diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 33f98386a89..a6759a2e4f1 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -178,7 +178,6 @@ Closure commonPackageConfig(String type, boolean oss) { } // ========= systemd ========= - configurationFile '/usr/lib/systemd/system/elasticsearch.service' into('/usr/lib/tmpfiles.d') { from "${packagingFiles}/systemd/elasticsearch.conf" } diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 7fc5c48d73a..9755acfcb52 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -26,6 +26,7 @@ This section summarizes the changes in each release. <> ({pull}29609[#29609]) +<> ({pull}29004[#29004]) <> ({pull}29635[#29635]) <> ({pull}30185[#30185]) diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 130f0ca80cc..aea6d14fac9 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -28,6 +28,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x. * <> * <> * <> +* <> * <> * <> * <> @@ -41,6 +42,7 @@ include::migrate_7_0/cluster.asciidoc[] include::migrate_7_0/indices.asciidoc[] include::migrate_7_0/mappings.asciidoc[] include::migrate_7_0/search.asciidoc[] +include::migrate_7_0/packaging.asciidoc[] include::migrate_7_0/plugins.asciidoc[] include::migrate_7_0/api.asciidoc[] include::migrate_7_0/java.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/packaging.asciidoc b/docs/reference/migration/migrate_7_0/packaging.asciidoc new file mode 100644 index 00000000000..4070d680733 --- /dev/null +++ b/docs/reference/migration/migrate_7_0/packaging.asciidoc @@ -0,0 +1,10 @@ +[[breaking_70_packaging_changes]] +=== Packaging changes + +[[systemd-service-file-config]] +==== systemd service file is no longer configuration + +The systemd service file `/usr/lib/systemd/system/elasticsearch.service` +was previously marked as a configuration file in rpm and deb packages. +Overrides to the systemd elasticsearch service should be made +in `/etc/systemd/system/elasticsearch.service.d/override.conf`. 
diff --git a/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats index 59aaa3e8a07..397660b239a 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats @@ -131,9 +131,13 @@ setup() { # The removal must disable the service # see prerm file if is_systemd; then - # Debian systemd distros usually returns exit code 3 + missing_exit_code=4 + if [ $(systemctl --version | head -1 | awk '{print $2}') -lt 231 ]; then + # systemd before version 231 used exit code 3 when the service did not exist + missing_exit_code=3 + fi run systemctl status elasticsearch.service - [ "$status" -eq 3 ] + [ "$status" -eq $missing_exit_code ] run systemctl is-enabled elasticsearch.service [ "$status" -eq 1 ] @@ -166,7 +170,6 @@ setup() { # The service files are still here assert_file_exist "/etc/init.d/elasticsearch" - assert_file_exist "/usr/lib/systemd/system/elasticsearch.service" } @test "[DEB] purge package" { From 0d7ac9a74c1d4d6f03629c3acef5f98be1f1a482 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 2 May 2018 10:13:42 -0700 Subject: [PATCH 14/30] [DOCS] Enables edit links for X-Pack pages (#30278) --- docs/reference/modules/node.asciidoc | 1 - docs/reference/setup/install.asciidoc | 3 --- x-pack/docs/en/index.asciidoc | 11 ----------- 3 files changed, 15 deletions(-) diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index 85917ff6754..cf053df1818 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -325,6 +325,5 @@ the <>, the <> and the <>. ifdef::include-xpack[] -:edit_url!: include::{xes-repo-dir}/node.asciidoc[] endif::include-xpack[] diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index 44c806be195..7668d45ee35 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -66,8 +66,5 @@ include::install/rpm.asciidoc[] include::install/windows.asciidoc[] ifdef::include-xpack[] -:edit_url!: include::{xes-repo-dir}/setup/docker.asciidoc[] - -:edit_url: endif::include-xpack[] diff --git a/x-pack/docs/en/index.asciidoc b/x-pack/docs/en/index.asciidoc index bf884cf3324..d19737c05ef 100644 --- a/x-pack/docs/en/index.asciidoc +++ b/x-pack/docs/en/index.asciidoc @@ -1,35 +1,24 @@ include::{es-repo-dir}/index-shared1.asciidoc[] -:edit_url!: include::setup/setup-xes.asciidoc[] -:edit_url: include::{es-repo-dir}/index-shared2.asciidoc[] -:edit_url!: include::release-notes/xpack-breaking.asciidoc[] -:edit_url: include::{es-repo-dir}/index-shared3.asciidoc[] -:edit_url!: include::sql/index.asciidoc[] -:edit_url!: include::monitoring/index.asciidoc[] -:edit_url!: include::rollup/index.asciidoc[] -:edit_url!: include::rest-api/index.asciidoc[] -:edit_url!: include::commands/index.asciidoc[] -:edit_url: include::{es-repo-dir}/index-shared4.asciidoc[] -:edit_url: include::{es-repo-dir}/index-shared5.asciidoc[] From 13917162ad5c59a96ccb4d6a81a5044546c45c22 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 2 May 2018 19:40:29 +0200 Subject: [PATCH 15/30] ReplicationTracker.markAllocationIdAsInSync may hang if allocation is cancelled (#30316) At the end of recovery, we mark the recovering shard as "in sync" on the primary. From this point on the primary will treat any replication failure on it as critical and will reach out to the master to fail the shard. 
To do so, we wait for the local checkpoint of the recovered shard to be above the global checkpoint (in order to maintain global checkpoint invariant). If the master decides to cancel the allocation of the recovering shard while we wait, the method can currently hang and fail to return. It will also ignore the interrupts that are triggered by the cancelled recovery due to the primary closing. Note that this is crucial as this method is called while holding a primary permit. Since the method never comes back, the permit is never released. The unreleased permit will then block any primary relocation *and* while the primary is trying to relocate all indexing will be blocked for 30m as it waits to acquire the missing permit. --- .../index/seqno/ReplicationTracker.java | 10 ++++++++ .../index/seqno/ReplicationTrackerTests.java | 24 +++++++++++++------ 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index dcca3d48254..6548aad7670 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -339,6 +339,11 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L "shard copy " + entry.getKey() + " is in-sync but not tracked"; } + // all pending in sync shards are tracked + for (String aId : pendingInSync) { + assert checkpoints.get(aId) != null : "aId [" + aId + "] is pending in sync but isn't tracked"; + } + return true; } @@ -521,6 +526,9 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, inSync, inSync)); } } + if (removedEntries) { + pendingInSync.removeIf(aId -> checkpoints.containsKey(aId) == false); + } } else { for (String initializingId : initializingAllocationIds) { if (shardAllocationId.equals(initializingId) == false) { @@ -549,6 +557,8 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L replicationGroup = calculateReplicationGroup(); if (primaryMode && removedEntries) { updateGlobalCheckpointOnPrimary(); + // notify any waiter for local checkpoint advancement to recheck that their shard is still being tracked. 
+ notifyAllWaiters(); } } assert invariant(); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java index d89e4289e1a..6fdce76912e 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java @@ -305,7 +305,8 @@ public class ReplicationTrackerTests extends ESTestCase { final AllocationId inSyncAllocationId = AllocationId.newInitializing(); final AllocationId trackingAllocationId = AllocationId.newInitializing(); final ReplicationTracker tracker = newTracker(inSyncAllocationId); - tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(inSyncAllocationId.getId()), + final long clusterStateVersion = randomNonNegativeLong(); + tracker.updateFromMaster(clusterStateVersion, Collections.singleton(inSyncAllocationId.getId()), routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId), emptySet()); tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { @@ -336,13 +337,22 @@ public class ReplicationTrackerTests extends ESTestCase { assertBusy(() -> assertTrue(tracker.pendingInSync.contains(trackingAllocationId.getId()))); } - tracker.updateLocalCheckpoint(trackingAllocationId.getId(), randomIntBetween(globalCheckpoint, 64)); - // synchronize with the waiting thread to mark that it is complete - barrier.await(); - assertTrue(complete.get()); - assertTrue(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId()).inSync); + if (randomBoolean()) { + // normal path, shard catches up + tracker.updateLocalCheckpoint(trackingAllocationId.getId(), randomIntBetween(globalCheckpoint, 64)); + // synchronize with the waiting thread to mark that it is complete + barrier.await(); + assertTrue(complete.get()); + assertTrue(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId()).inSync); + } else { + // master changes its mind and cancels the allocation + tracker.updateFromMaster(clusterStateVersion + 1, Collections.singleton(inSyncAllocationId.getId()), + routingTable(emptySet(), inSyncAllocationId), emptySet()); + barrier.await(); + assertTrue(complete.get()); + assertNull(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId())); + } assertFalse(tracker.pendingInSync.contains(trackingAllocationId.getId())); - thread.join(); } From 5064ff6ad43d4a1189c7283948e2a272f8b2ee02 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 2 May 2018 10:56:31 -0700 Subject: [PATCH 16/30] [DOCS] Adds native realm configuration details (#30215) --- .../configuring-native-realm.asciidoc | 43 +++++++++++++++++++ .../authentication/native-realm.asciidoc | 41 ++---------------- .../docs/en/security/configuring-es.asciidoc | 2 + 3 files changed, 48 insertions(+), 38 deletions(-) create mode 100644 x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc diff --git a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc new file mode 100644 index 00000000000..3c4f44fdfc9 --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc @@ -0,0 +1,43 @@ +[role="xpack"] +[[configuring-native-realm]] +=== Configuring a Native Realm + +The easiest way to manage and authenticate users is with the internal `native` +realm. 
+ +The native realm is available by default when no other realms are +configured. If other realm settings have been configured in `elasticsearch.yml`, +you must add the native realm to the realm chain. + +You can configure options for the `native` realm in the +`xpack.security.authc.realms` namespace in `elasticsearch.yml`. Explicitly +configuring a native realm enables you to set the order in which it appears in +the realm chain, temporarily disable the realm, and control its cache options. + +. Add a realm configuration of type `native` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm +`type` to `native`. If you are configuring multiple realms, you should also +explicitly set the `order` attribute. ++ +-- +See <> for all of the options you can set for the `native` realm. +For example, the following snippet shows a `native` realm configuration that +sets the `order` to zero so the realm is checked first: + +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + native1: + type: native + order: 0 +------------------------------------------------------------ +-- + +. Restart {es}. + +. Manage your users in {kib} on the *Management / Security / Users* page. +Alternatively, use the <>. + diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc index 1c3afdacdc5..3643e42e02a 100644 --- a/x-pack/docs/en/security/authentication/native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc @@ -9,47 +9,12 @@ manage user passwords. [float] ==== Configuring a native realm -The native realm is added to the realm chain by default. You don't need to -explicitly configure a native realm to manage users through the REST APIs. - - -IMPORTANT: When you configure realms in `elasticsearch.yml`, only the -realms you specify are used for authentication. To use the -`native` realm as a fallback, you must include it in the realm chain. - -You can, however, configure options for the `native` realm in the -`xpack.security.authc.realms` namespace in `elasticsearch.yml`. Explicitly -configuring a native realm enables you to set the order in which it appears in -the realm chain, temporary disable the realm, and control its cache options. - -To configure a native realm: - -. Add a realm configuration of type `native` to `elasticsearch.yml` under the -`xpack.security.authc.realms` namespace. At a minimum, you must set the realm -`type` to `native`. If you are configuring multiple realms, you should also -explicitly set the `order` attribute. See <> -for all of the options you can set for the `native` realm. -+ -For example, the following snippet shows a `native` realm configuration that -sets the `order` to zero so the realm is checked first: -+ -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - native1: - type: native - order: 0 ------------------------------------------------------------- - -. Restart Elasticsearch. +See {ref}/[Configuring a native realm]. [[native-settings]] ==== Native realm settings -See {ref}/security-settings.html#ref-native-settings[Native Realm Settings]. +See {ref}/security-settings.html#ref-native-settings[Native realm settings]. [[managing-native-users]] ==== Managing native users @@ -58,7 +23,7 @@ See {ref}/security-settings.html#ref-native-settings[Native Realm Settings]. 
*Management / Security / Users* page. Alternatively, you can manage users through the `user` API. For more -information and examples, see {ref}/security-api-users.html[User Management APIs]. +information and examples, see {ref}/security-api-users.html[User management APIs]. [[migrating-from-file]] NOTE: To migrate file-based users to the `native` realm, use the diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 114fd1cdc4f..9bcae7fe80d 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -73,6 +73,7 @@ user API. . Choose which types of realms you want to use to authenticate users. ** <>. ** <>. +** <>. ** <>. . Set up roles and users to control access to {es}. @@ -135,6 +136,7 @@ include::securing-communications/enabling-cipher-suites.asciidoc[] include::securing-communications/separating-node-client-traffic.asciidoc[] include::authentication/configuring-active-directory-realm.asciidoc[] include::authentication/configuring-file-realm.asciidoc[] +include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::{xes-repo-dir}/settings/security-settings.asciidoc[] include::{xes-repo-dir}/settings/audit-settings.asciidoc[] From 383856a175802e415f928e2f65f0378ff4c1c23c Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 2 May 2018 11:22:32 -0700 Subject: [PATCH 17/30] [DOCS] Adds LDAP realm configuration details (#30214) --- .../configuring-ldap-realm.asciidoc | 214 +++++++++++++++ .../authentication/ldap-realm.asciidoc | 246 ++---------------- .../docs/en/security/configuring-es.asciidoc | 2 + .../securing-elasticsearch.asciidoc | 6 +- .../securing-communications/tls-ldap.asciidoc | 55 ++++ 5 files changed, 292 insertions(+), 231 deletions(-) create mode 100644 x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc create mode 100644 x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc new file mode 100644 index 00000000000..b43a0911e04 --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -0,0 +1,214 @@ +[role="xpack"] +[[configuring-ldap-realm]] +=== Configuring an LDAP realm + +You can configure {security} to communicate with a Lightweight Directory Access +Protocol (LDAP) server to authenticate users. To integrate with LDAP, you +configure an `ldap` realm and map LDAP groups to user roles. + +For more information about LDAP realms, see +{xpack-ref}/ldap-realm.html[LDAP User Authentication]. + +. Determine which mode you want to use. The `ldap` realm supports two modes of +operation, a user search mode and a mode with specific templates for user DNs. ++ +-- +LDAP user search is the most common mode of operation. In this mode, a specific +user with permission to search the LDAP directory is used to search for the DN +of the authenticating user based on the provided username and an LDAP attribute. +Once found, the user is authenticated by attempting to bind to the LDAP server +using the found DN and the provided password. + +If your LDAP environment uses a few specific standard naming conditions for +users, you can use user DN templates to configure the realm. The advantage of +this method is that a search does not have to be performed to find the user DN. 
+However, multiple bind operations might be needed to find the correct user DN. +-- + +. To configure an `ldap` realm with user search: + +.. Add a realm configuration of type `ldap` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm +`type` to `ldap`, specify the `url` of the LDAP server, and set +`user_search.base_dn` to the container DN where the users are searched for. If +you are configuring multiple realms, you should also explicitly set the `order` +attribute to control the order in which the realms are consulted during +authentication. See <> for all of the options you can set for +an `ldap` realm. ++ +-- +For example, the following snippet shows an LDAP realm configured with a user search: + +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + ldap1: + type: ldap + order: 0 + url: "ldaps://ldap.example.com:636" + bind_dn: "cn=ldapuser, ou=users, o=services, dc=example, dc=com" + user_search: + base_dn: "dc=example,dc=com" + attribute: cn + group_search: + base_dn: "dc=example,dc=com" + files: + role_mapping: "CONFIG_DIR/x-pack/role_mapping.yml" + unmapped_groups_as_roles: false +------------------------------------------------------------ + +The password for the `bind_dn` user should be configured by adding the appropriate +`secure_bind_password` setting to the {es} keystore. +For example, the following command adds the password for the example realm above: + +[source, shell] +------------------------------------------------------------ +bin/elasticsearch-keystore add \ +xpack.security.authc.realms.ldap1.secure_bind_password +------------------------------------------------------------ + +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. If you also want to use the +`native` or `file` realms, you must include them in the realm chain. + +-- + +. To configure an `ldap` realm with user DN templates: + +.. Add a realm configuration of type `ldap` to `elasticsearch.yml` in the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm +`type` to `ldap`, specify the `url` of the LDAP server, and specify at least one +template with the `user_dn_templates` option. If you are configuring multiple +realms, you should also explicitly set the `order` attribute to control the +order in which the realms are consulted during authentication. See +<> for all of the options you can set for an `ldap` realm. ++ +-- +For example, the following snippet shows an LDAP realm configured with user DN +templates: + +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + ldap1: + type: ldap + order: 0 + url: "ldaps://ldap.example.com:636" + user_dn_templates: + - "cn={0}, ou=users, o=marketing, dc=example, dc=com" + - "cn={0}, ou=users, o=engineering, dc=example, dc=com" + group_search: + base_dn: "dc=example,dc=com" + files: + role_mapping: "/mnt/elasticsearch/group_to_role_mapping.yml" + unmapped_groups_as_roles: false +------------------------------------------------------------ + +IMPORTANT: The `bind_dn` setting is not used in template mode. +All LDAP operations run as the authenticating user. + +-- + +. (Optional) Configure how {security} should interact with multiple LDAP servers. ++ +-- +The `load_balance.type` setting can be used at the realm level. {security} +supports both failover and load balancing modes of operation. 
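For illustration only, and not part of this patch: a minimal sketch of a realm
that spreads connections across two directory servers, assuming the `url`
setting accepts a list of LDAP URLs and the `failover` and `round_robin`
values of the `load_balance.type` setting described above:

[source, yaml]
------------------------------------------------------------
xpack:
  security:
    authc:
      realms:
        ldap1:
          type: ldap
          order: 0
          url: ["ldaps://ldap1.example.com:636", "ldaps://ldap2.example.com:636"]
          load_balance.type: failover
------------------------------------------------------------

With `failover`, connections prefer the first server in the list and move to
the next entry only when it is unreachable; `round_robin` instead rotates
connections across all listed servers.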
See
+<>.
+--
+
+. (Optional) To protect passwords,
+<>.
+
+. Restart {es}.
+
+. Map LDAP groups to roles.
++
+--
+The `ldap` realm enables you to map LDAP users to roles via their LDAP
+groups, or other metadata. This role mapping can be configured via the
+{ref}/security-api-role-mapping.html[role-mapping API] or by using a file stored
+on each node. When a user authenticates with LDAP, the privileges
+for that user are the union of all privileges defined by the roles to which
+the user is mapped.
+
+Within a mapping definition, you specify groups using their distinguished
+names. For example, the following mapping configuration maps the LDAP
+`admins` group to both the `monitoring` and `user` roles, and maps the
+`users` group to the `user` role.
+
+Configured via the role-mapping API:
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/admins
+{
+  "roles" : [ "monitoring" , "user" ],
+  "rules" : { "field" : {
+    "groups" : "cn=admins,dc=example,dc=com" <1>
+  } },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
+<1> The LDAP distinguished name (DN) of the `admins` group.
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/basic_users
+{
+  "roles" : [ "user" ],
+  "rules" : { "field" : {
+    "groups" : "cn=users,dc=example,dc=com" <1>
+  } },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
+<1> The LDAP distinguished name (DN) of the `users` group.
+
+Alternatively, configured via the role-mapping file:
+[source, yaml]
+------------------------------------------------------------
+monitoring: <1>
+  - "cn=admins,dc=example,dc=com" <2>
+user:
+  - "cn=users,dc=example,dc=com" <3>
+  - "cn=admins,dc=example,dc=com"
+------------------------------------------------------------
+<1> The name of the mapped role.
+<2> The LDAP distinguished name (DN) of the `admins` group.
+<3> The LDAP distinguished name (DN) of the `users` group.
+
+For more information, see
+{xpack-ref}/ldap-realm.html#mapping-roles-ldap[Mapping LDAP Groups to Roles]
+and
+{xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles].
+--
+
+. (Optional) Configure the `metadata` setting on the LDAP realm to include extra
+fields in the user's metadata.
++
+--
+By default, `ldap_dn` and `ldap_groups` are populated in the user's metadata.
+For more information, see
+{xpack-ref}/ldap-realm.html#ldap-user-metadata[User Metadata in LDAP Realms].
+
+The example below includes the user's common name (`cn`) as an additional
+field in their metadata.
+[source,yaml]
+--------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        ldap1:
+          type: ldap
+          metadata: cn
+--------------------------------------------------
+--
\ No newline at end of file
diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
index 15b014183aa..4e280c313d8 100644
--- a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
@@ -1,19 +1,11 @@
 [[ldap-realm]]
-=== LDAP User Authentication
+=== LDAP user authentication
 
 You can configure {security} to communicate with a Lightweight Directory Access
 Protocol (LDAP) server to authenticate users. To integrate with LDAP, you
 configure an `ldap` realm and map LDAP groups to user roles in the
 <>.
-To protect passwords, communications between Elasticsearch and the LDAP server -should be encrypted using SSL/TLS. Clients and nodes that connect via SSL/TLS to -the LDAP server need to have the LDAP server's certificate or the server's root -CA certificate installed in their _keystore_ or _truststore_. For more information -about installing certificates, see <>. - -==== Configuring an LDAP Realm - LDAP stores users and groups hierarchically, similar to the way folders are grouped in a file system. An LDAP directory's hierarchy is built from containers such as the _organizational unit_ (`ou`), _organization_ (`o`), and @@ -25,128 +17,28 @@ _common name_ (`cn`) or _unique ID_ (`uid`). A DN is specified as a string, for example `"cn=admin,dc=example,dc=com"` (white spaces are ignored). The `ldap` realm supports two modes of operation, a user search mode -and a mode with specific templates for user DNs. See -<> for all of the options you can set for an -`ldap` realm. +and a mode with specific templates for user DNs. [[ldap-user-search]] -===== User Search Mode -LDAP user search is the most common mode of operation. In this mode, a specific -user with permission to search the LDAP directory is used to search for the -authenticating user DN based on its username and an LDAP attribute. Once found, -the user will be authenticated by attempting to bind to the LDAP server using the -found DN and the provided password. - -To configure an `ldap` Realm with User Search: - -. Add a realm configuration of type `ldap` to `elasticsearch.yml` under the -`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` -to `ldap`, specify the `url` of the LDAP server, and set `user_search.base_dn` -to the container DN where the users are searched for. If you are configuring -multiple realms, you should also explicitly set the `order` attribute to control -the order in which the realms are consulted during authentication. See -<> for all of the options you can set for an -`ldap` realm. -+ -For example, the following snippet shows an LDAP realm configured with a user search: -+ -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - ldap1: - type: ldap - order: 0 - url: "ldaps://ldap.example.com:636" - bind_dn: "cn=ldapuser, ou=users, o=services, dc=example, dc=com" - user_search: - base_dn: "dc=example,dc=com" - attribute: cn - group_search: - base_dn: "dc=example,dc=com" - files: - role_mapping: "CONFIG_DIR/x-pack/role_mapping.yml" - unmapped_groups_as_roles: false ------------------------------------------------------------- -+ -The password for the `bind_dn` user should be configured by adding the appropriate -`secure_bind_password` setting to the {es} keystore. -For example, the following command adds the password for the example realm above: -+ -[source, shell] ------------------------------------------------------------- -bin/elasticsearch-keystore add xpack.security.authc.realms.ldap1.secure_bind_password ------------------------------------------------------------- -+ -IMPORTANT: When you configure realms in `elasticsearch.yml`, only the -realms you specify are used for authentication. If you also want to use the -`native` or `file` realms, you must include them in the realm chain. - -. Restart Elasticsearch - - -===== User DN Templates Mode -If your LDAP environment uses a few specific standard naming conditions for -users, you can use User DN templates to configure the realm. 
The advantage of -this method is that a search does not have to be performed to find the user DN. -However, multiple bind operations might be needed to find the correct user DN. - -To configure an `ldap` Realm with User DN templates: - -. Add a realm configuration of type `ldap` to `elasticsearch.yml` in the -`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to -`ldap`, specify the `url` of the LDAP server, and specify at least one template -with the `user_dn_templates` option. If you are configuring multiple realms, you -should also explicitly set the `order` attribute to control the order in which -the realms are consulted during authentication. See <> -for all of the options you can set for an `ldap` realm. -+ -For example, the following snippet shows an LDAP realm configured with User DN templates: -+ -[source, yaml] ------------------------------------------------------------- -xpack: - security: - authc: - realms: - ldap1: - type: ldap - order: 0 - url: "ldaps://ldap.example.com:636" - user_dn_templates: - - "cn={0}, ou=users, o=marketing, dc=example, dc=com" - - "cn={0}, ou=users, o=engineering, dc=example, dc=com" - group_search: - base_dn: "dc=example,dc=com" - files: - role_mapping: "/mnt/elasticsearch/group_to_role_mapping.yml" - unmapped_groups_as_roles: false ------------------------------------------------------------- - -. Restart Elasticsearch - -IMPORTANT: The `bind_dn` setting is not used in template mode. -All LDAP operations will execute as the authenticating user. +===== User search mode and user DN templates mode +See {ref}/configuring-ldap-realm.html[Configuring an LDAP Realm]. [[ldap-load-balancing]] -===== Load Balancing and Failover +===== Load balancing and failover The `load_balance.type` setting can be used at the realm level to configure how {security} should interact with multiple LDAP servers. {security} supports both failover and load balancing modes of operation. See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings]. - [[ldap-settings]] -===== LDAP Realm Settings +===== LDAP realm settings See {ref}/security-settings.html#ref-ldap-settings[LDAP Realm Settings]. [[mapping-roles-ldap]] -==== Mapping LDAP Groups to Roles +==== Mapping LDAP groups to roles An integral part of a realm authentication process is to resolve the roles associated with the authenticated user. Roles define the privileges a user has @@ -162,63 +54,13 @@ groups, or other metadata. This role mapping can be configured via the {ref}/security-api-role-mapping.html[role-mapping API], or by using a file stored on each node. When a user authenticates with LDAP, the privileges for that user are the union of all privileges defined by the roles to which -the user is mapped. - -Within a mapping definition, you specify groups using their distinguished -names. For example, the following mapping configuration maps the LDAP -`admins` group to both the `monitoring` and `user` roles, and maps the -`users` group to the `user` role. - -Configured via the role-mapping API: -[source,js] --------------------------------------------------- -PUT _xpack/security/role_mapping/admins -{ - "roles" : [ "monitoring" , "user" ], - "rules" : { "field" : { - "groups" : "cn=admins,dc=example,dc=com" <1> - } }, - "enabled": true -} --------------------------------------------------- -// CONSOLE -<1> The LDAP distinguished name (DN) of the `admins` group. 
- -[source,js] --------------------------------------------------- -PUT _xpack/security/role_mapping/basic_users -{ - "roles" : [ "user" ], - "rules" : { "field" : { - "groups" : "cn=users,dc=example,dc=com" <1> - } }, - "enabled": true -} --------------------------------------------------- -// CONSOLE -<1> The LDAP distinguished name (DN) of the `users` group. - -Or, alternatively, configured via the role-mapping file: -[source, yaml] ------------------------------------------------------------- -monitoring: <1> - - "cn=admins,dc=example,dc=com" <2> -user: - - "cn=users,dc=example,dc=com" <3> - - "cn=admins,dc=example,dc=com" ------------------------------------------------------------- -<1> The name of the mapped role. -<2> The LDAP distinguished name (DN) of the `admins` group. -<3> The LDAP distinguished name (DN) of the `users` group. - -For more information, see <>. +the user is mapped. For more information, see +{ref}/configuring-ldap-realm.html[Configuring an LDAP Realm]. [[ldap-user-metadata]] -==== User Metadata in LDAP Realms +==== User metadata in LDAP realms When a user is authenticated via an LDAP realm, the following properties are -populated in user's _metadata_. This metadata is returned in the -{ref}/security-api-authenticate.html[authenticate API], and can be used with -<> in roles. +populated in the user's _metadata_: |======================= | Field | Description @@ -228,72 +70,16 @@ populated in user's _metadata_. This metadata is returned in the groups were mapped to a role). |======================= +This metadata is returned in the +{ref}/security-api-authenticate.html[authenticate API], and can be used with +<> in roles. + Additional fields can be included in the user's metadata by configuring the `metadata` setting on the LDAP realm. This metadata is available for use with the <> or in <>. -The example below includes the user's common name (`cn`) as an additional -field in their metadata. -[source,yaml] --------------------------------------------------- -xpack: - security: - authc: - realms: - ldap1: - type: ldap - metadata: cn --------------------------------------------------- - [[ldap-ssl]] ==== Setting up SSL Between Elasticsearch and LDAP -To protect the user credentials that are sent for authentication, it's highly -recommended to encrypt communications between Elasticsearch and your LDAP server. -Connecting via SSL/TLS ensures that the identity of the LDAP server is -authenticated before {security} transmits the user credentials and the contents -of the connection are encrypted. - -To encrypt communications between Elasticsearch and your LDAP server: - -. Configure the realm's SSL settings on each node to trust certificates signed by the CA that signed your -LDAP server certificates. The following example demonstrates how to trust a CA certificate, -`cacert.pem`, located within the {xpack} configuration directory: -+ -[source,shell] --------------------------------------------------- -xpack: - security: - authc: - realms: - ldap1: - type: ldap - order: 0 - url: "ldaps://ldap.example.com:636" - ssl: - certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ] --------------------------------------------------- -+ -The CA cert must be a PEM encoded certificate. -+ -[NOTE] -=============================== -You can also specify the individual server certificates rather than the CA -certificate, but this is only recommended if you have a single LDAP server -or the certificates are self-signed. -=============================== - -. 
Set the `url` attribute in the realm configuration to specify the LDAPS -protocol and the secure port number. For example, `url: ldaps://ldap.example.com:636`. - -. Restart Elasticsearch. - -NOTE: By default, when you configure {security} to connect to an LDAP server - using SSL/TLS, {security} attempts to verify the hostname or IP address - specified with the `url` attribute in the realm configuration with the - values in the certificate. If the values in the certificate and realm - configuration do not match, {security} does not allow a connection to the - LDAP server. This is done to protect against man-in-the-middle attacks. If - necessary, you can disable this behavior by setting the - `ssl.verification_mode` property to `certificate`. +See {ref}/tls-ldap.html[Encrypting Communications Between {es} and LDAP]. \ No newline at end of file diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 9bcae7fe80d..fa3a6da801f 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -73,6 +73,7 @@ user API. . Choose which types of realms you want to use to authenticate users. ** <>. ** <>. +** <>. ** <>. ** <>. @@ -136,6 +137,7 @@ include::securing-communications/enabling-cipher-suites.asciidoc[] include::securing-communications/separating-node-client-traffic.asciidoc[] include::authentication/configuring-active-directory-realm.asciidoc[] include::authentication/configuring-file-realm.asciidoc[] +include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::{xes-repo-dir}/settings/security-settings.asciidoc[] diff --git a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc index e5c1187264f..da4e3a40b7d 100644 --- a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc +++ b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[configuring-tls]] -=== Encrypting Communications in {es} +=== Encrypting communications in {es} {security} enables you to encrypt traffic to, from, and within your {es} cluster. Connections are secured using Transport Layer Security (TLS/SSL). @@ -23,6 +23,9 @@ information, see <>. . If you are using Active Directory user authentication, <>. +. If you are using LDAP user authentication, +<>. + For more information about encrypting communications across the Elastic Stack, see {xpack-ref}/encrypting-communications.html[Encrypting Communications]. @@ -30,3 +33,4 @@ include::node-certificates.asciidoc[] include::tls-transport.asciidoc[] include::tls-http.asciidoc[] include::tls-ad.asciidoc[] +include::tls-ldap.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc new file mode 100644 index 00000000000..f10ced77f71 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc @@ -0,0 +1,55 @@ +[role="xpack"] +[[tls-ldap]] +==== Encrypting communications between {es} and LDAP + +To protect the user credentials that are sent for authentication in an LDAP +realm, it's highly recommended to encrypt communications between {es} and your +LDAP server. 
Connecting via SSL/TLS ensures that the identity of the LDAP server
+is authenticated before {security} transmits the user credentials, and that the
+contents of the connection are encrypted. Clients and nodes that connect via
+TLS to the LDAP server need to have the LDAP server's certificate or the
+server's root CA certificate installed in their keystore or truststore.
+
+For more information, see <>.
+
+. Configure the realm's TLS settings on each node to trust certificates signed
+by the CA that signed your LDAP server certificates. The following example
+demonstrates how to trust a CA certificate, `cacert.pem`, located within the
+{xpack} configuration directory:
++
+--
+[source,yaml]
+--------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        ldap1:
+          type: ldap
+          order: 0
+          url: "ldaps://ldap.example.com:636"
+          ssl:
+            certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ]
+--------------------------------------------------
+
+The CA certificate must be PEM encoded.
+
+NOTE: You can also specify the individual server certificates rather than the CA
+certificate, but this is only recommended if you have a single LDAP server or
+the certificates are self-signed.
+
+--
+
+. Set the `url` attribute in the realm configuration to specify the LDAPS
+protocol and the secure port number. For example, `url: ldaps://ldap.example.com:636`.
+
+. Restart {es}.
+
+NOTE: By default, when you configure {security} to connect to an LDAP server
+      using SSL/TLS, {security} attempts to verify the hostname or IP address
+      specified with the `url` attribute in the realm configuration with the
+      values in the certificate. If the values in the certificate and realm
+      configuration do not match, {security} does not allow a connection to the
+      LDAP server. This is done to protect against man-in-the-middle attacks. If
+      necessary, you can disable this behavior by setting the
+      `ssl.verification_mode` property to `certificate`.
\ No newline at end of file
From 6d6da7c66175d30bb3edc7c7762c281693ee1880 Mon Sep 17 00:00:00 2001
From: James Baiera
Date: Wed, 2 May 2018 14:40:57 -0400
Subject: [PATCH 18/30] Fix merging logic of Suggester Options (#29514)

Suggester Options have a collate match field that is returned when the
prune option is set to true. These values should be merged together in
the query reduce phase; otherwise, good suggestions that result in rare
hits on shards whose results do not arrive first may be incorrectly
marked as not matching the collate query.
---
 .../elasticsearch/search/suggest/Suggest.java |  7 ++++++
 .../search/suggest/SuggestTests.java          | 24 ++++++++++++++++++-
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java
index a54f1193df0..e4099193359 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java
@@ -742,6 +742,13 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? extends Option>>>
         protected void mergeInto(Option otherOption) {
             score = Math.max(score, otherOption.score);
+            if (otherOption.collateMatch != null) {
+                if (collateMatch == null) {
+                    collateMatch = otherOption.collateMatch;
+                } else {
+                    collateMatch |= otherOption.collateMatch;
+                }
+            }
         }
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java
--- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java
+    public void testMergingSuggestionOptions() {
+        String suggestedWord = randomAlphaOfLength(10);
+        String secondWord = randomAlphaOfLength(10);
+        Text suggestionText = new Text(suggestedWord + " " + secondWord);
+        Text highlighted = new Text("<em>" + suggestedWord + "</em> " + secondWord);
+        PhraseSuggestion.Entry.Option option1 = new Option(suggestionText, highlighted, 0.7f, false);
+        PhraseSuggestion.Entry.Option option2 = new Option(suggestionText, highlighted, 0.8f, true);
+        PhraseSuggestion.Entry.Option option3 = new Option(suggestionText, highlighted, 0.6f);
+        assertEquals(suggestionText, option1.getText());
+        assertEquals(highlighted, option1.getHighlighted());
+        assertFalse(option1.collateMatch());
+        assertTrue(option1.getScore() > 0.6f);
+        option1.mergeInto(option2);
+        assertEquals(suggestionText, option1.getText());
+        assertEquals(highlighted, option1.getHighlighted());
+        assertTrue(option1.collateMatch());
+        assertTrue(option1.getScore() > 0.7f);
+        option1.mergeInto(option3);
+        assertEquals(suggestionText, option1.getText());
+        assertEquals(highlighted, option1.getHighlighted());
+        assertTrue(option1.getScore() > 0.7f);
+        assertTrue(option1.collateMatch());
+    }
 }
From fb0aa562a5e1c4916c9e5338eb57b815b9064df8 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Wed, 2 May 2018 11:42:05 -0700
Subject: [PATCH 19/30] Network: Remove http.enabled setting (#29601)

This commit removes the http.enabled setting. While all real nodes (started
with bin/elasticsearch) will always have an http binding, there are many
tests that rely on the quickness of not actually needing to bind to 2 ports.
For this case, the MockHttpTransport.TestPlugin provides a dummy http
transport implementation which is used by default in ESIntegTestCase.

closes #12792
---
 docs/CHANGELOG.asciidoc                       |  3 +
 .../migration/migrate_7_0/settings.asciidoc   |  7 ++
 docs/reference/modules/http.asciidoc          | 13 ----
 .../ReindexFromRemoteWithAuthTests.java       |  6 +-
 .../index/reindex/RetryTests.java             |  7 +-
 .../netty4/Netty4HttpRequestSizeLimitIT.java  |  6 +-
 .../netty4/Netty4PipeliningDisabledIT.java    |  6 +-
 .../netty4/Netty4PipeliningEnabledIT.java     |  6 +-
 .../http/ContextAndHeaderTransportIT.java     |  1 -
 .../org/elasticsearch/http/CorsRegexIT.java   |  1 -
 .../elasticsearch/http/DeprecationHttpIT.java |  6 +-
 .../http/DetailedErrorsDisabledIT.java        |  2 +-
 .../elasticsearch/http/HttpSmokeTestCase.java |  8 +-
 .../http/ResponseHeaderPluginIT.java          |  8 +-
 .../common/network/NetworkModule.java         | 12 +--
 .../common/settings/ClusterSettings.java      |  1 -
 .../java/org/elasticsearch/node/Node.java     | 46 ++++------
 .../client/transport/TransportClientIT.java   |  5 +-
 .../common/network/NetworkModuleTests.java    | 16 +---
 .../single/SingleNodeDiscoveryIT.java         |  5 +-
 .../org/elasticsearch/node/NodeTests.java     | 26 ++++---
 .../node/service/NodeServiceTests.java        | 43 -----------
 .../java/org/elasticsearch/node/MockNode.java | 12 +++
 .../elasticsearch/test/ESIntegTestCase.java   | 10 ++-
 .../test/ESSingleNodeTestCase.java            |  9 ++-
 .../elasticsearch/test/MockHttpTransport.java | 76 +++++++++++++++++++
 .../ClusterDiscoveryConfiguration.java        |  1 -
 .../org/elasticsearch/node/MockNodeTests.java |  4 +-
 .../test/test/InternalTestClusterTests.java   | 35 ++++-----
 .../en/settings/security-settings.asciidoc    |  4 +-
 .../en/setup/bootstrap-checks-xes.asciidoc    |  2 +-
 .../xpack/core/XPackSettings.java             |  9 +--
 ...icenseServiceClusterNotRecoveredTests.java |  8 +-
 .../license/LicenseServiceClusterTests.java   |  8 +-
 .../license/StartBasicLicenseTests.java       |  8 +-
 .../license/StartTrialLicenseTests.java       |  8 +-
 .../xpack/ml/support/BaseMlIntegTestCase.java |  3 +-
 .../local/LocalExporterIntegTestCase.java     |  1 -
 .../security/TokenSSLBootstrapCheck.java      |  3 +-
 .../integration/BulkUpdateTests.java          |  6 +-
 .../integration/ClearRealmsCacheTests.java    |  7 +-
.../integration/ClearRolesCacheTests.java | 7 +- .../integration/ClusterPrivilegeTests.java | 6 +- .../integration/IndexPrivilegeTests.java | 6 +- .../elasticsearch/license/LicensingTests.java | 6 +- .../test/NativeRealmIntegTestCase.java | 7 +- .../xpack/security/SecurityPluginTests.java | 7 +- .../security/TokenSSLBootsrapCheckTests.java | 13 ---- .../security/audit/index/AuditTrailTests.java | 6 +- .../xpack/security/authc/RunAsIntegTests.java | 7 +- .../esnative/ESNativeMigrateToolTests.java | 6 +- .../authc/pki/PkiAuthenticationTests.java | 6 +- .../authc/pki/PkiOptionalClientAuthTests.java | 5 +- .../action/RestAuthenticateActionTests.java | 9 ++- ...ServerTransportFilterIntegrationTests.java | 13 +++- .../filter/IpFilteringIntegrationTests.java | 6 +- .../filter/IpFilteringUpdateTests.java | 6 +- .../transport/ssl/SslIntegrationTests.java | 7 +- .../user/AnonymousUserIntegTests.java | 6 +- .../xpack/ssl/SSLClientAuthTests.java | 7 +- .../xpack/sql/action/SqlLicenseIT.java | 6 +- .../webhook/WebhookIntegrationTests.java | 4 +- .../input/chain/ChainIntegrationTests.java | 6 +- 63 files changed, 340 insertions(+), 260 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/node/service/NodeServiceTests.java create mode 100644 test/framework/src/main/java/org/elasticsearch/test/MockHttpTransport.java diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 9755acfcb52..f80c135f882 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -35,6 +35,9 @@ Machine Learning:: * The `max_running_jobs` node property is removed in this release. Use the `xpack.ml.max_open_jobs` setting instead. For more information, see <>. +* <> ({pull}29601[#29601]) + +=== Deprecations Monitoring:: * The `xpack.monitoring.collection.interval` setting can no longer be set to `-1` to disable monitoring data collection. Use `xpack.monitoring.collection.enabled` diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index b09cecf5a48..d62d7e6065d 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -25,3 +25,10 @@ the system property `es.thread_pool.write.use_bulk_as_display_name` was available to keep the display output in APIs as `bulk` instead of `write`. These fallback settings and this system property have been removed. + +[[remove-http-enabled]] +==== Http enabled setting removed + +The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing +use of the transport client. This setting has been removed, as the transport client +will be removed in the future, thus requiring HTTP to always be enabled. diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index c69d4991583..7f29a9db7f6 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -110,16 +110,3 @@ client HTTP responses, defaults to unbounded. It also uses the common <>. - -[float] -=== Disable HTTP - -The http module can be completely disabled and not started by setting -`http.enabled` to `false`. Elasticsearch nodes (and Java clients) communicate -internally using the <>, not HTTP. It -might make sense to disable the `http` layer entirely on nodes which are not -meant to serve REST requests directly. For instance, you could disable HTTP on -<> if you also have -<> which are intended to serve all REST requests. 
-Be aware, however, that you will not be able to send any REST requests (eg to -retrieve node stats) directly to nodes which have HTTP disabled. diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java index 31077c405d8..f829c8f22d7 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -75,10 +75,14 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase { ReindexPlugin.class); } + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings() { Settings.Builder settings = Settings.builder().put(super.nodeSettings()); - settings.put(NetworkModule.HTTP_ENABLED.getKey(), true); // Whitelist reindexing from the http host we're going to use settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*"); settings.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index 131c959af8a..298b0604cb0 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -91,10 +91,13 @@ public class RetryTests extends ESIntegTestCase { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(nodeSettings()).build(); } + @Override + protected boolean addMockHttpTransport() { + return false; // enable HTTP so we can test retries on reindex from remote; in this case the "remote" cluster is just this cluster + } + final Settings nodeSettings() { return Settings.builder() - // enable HTTP so we can test retries on reindex from remote; in this case the "remote" cluster is just this cluster - .put(NetworkModule.HTTP_ENABLED.getKey(), true) // whitelist reindexing from the HTTP host we're going to use .put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "127.0.0.1:*") .build(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index d99820bb864..e8d6d30b02b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -50,11 +50,15 @@ public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase { private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB); + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), LIMIT) .build(); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java 
b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java index 9f117d4ee21..af0e7c85a8f 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java @@ -38,11 +38,15 @@ import static org.hamcrest.Matchers.hasSize; @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class Netty4PipeliningDisabledIT extends ESNetty4IntegTestCase { + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .put("http.pipelining", false) .build(); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java index cc3f22be453..9723ee93faf 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java @@ -37,11 +37,15 @@ import static org.hamcrest.Matchers.is; @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class Netty4PipeliningEnabledIT extends ESNetty4IntegTestCase { + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .put("http.pipelining", true) .build(); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 9d05ef3f05d..965f94607ae 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -90,7 +90,6 @@ public class ContextAndHeaderTransportIT extends HttpSmokeTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .build(); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java index 441f56a8631..47215ae669b 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java @@ -49,7 +49,6 @@ public class CorsRegexIT extends HttpSmokeTestCase { .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post") .put(SETTING_CORS_ENABLED.getKey(), true) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .build(); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java index a795c295d2b..260041fdbda 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java +++ 
b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java @@ -57,11 +57,15 @@ import static org.hamcrest.Matchers.hasSize; */ public class DeprecationHttpIT extends HttpSmokeTestCase { + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put("force.http.enabled", true) // change values of deprecated settings so that accessing them is logged .put(TEST_DEPRECATED_SETTING_TRUE1.getKey(), ! TEST_DEPRECATED_SETTING_TRUE1.getDefault(Settings.EMPTY)) .put(TEST_DEPRECATED_SETTING_TRUE2.getKey(), ! TEST_DEPRECATED_SETTING_TRUE2.getDefault(Settings.EMPTY)) diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java index 380937ed010..fa71822e79e 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java @@ -38,12 +38,12 @@ import static org.hamcrest.Matchers.is; */ @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) public class DetailedErrorsDisabledIT extends HttpSmokeTestCase { + // Build our cluster settings @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .put(HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED.getKey(), false) .build(); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java index 52004277c71..316acb02a75 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -55,13 +55,17 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase { } } + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(NetworkModule.TRANSPORT_TYPE_KEY, nodeTransportTypeKey) - .put(NetworkModule.HTTP_TYPE_KEY, nodeHttpTypeKey) - .put(NetworkModule.HTTP_ENABLED.getKey(), true).build(); + .put(NetworkModule.HTTP_TYPE_KEY, nodeHttpTypeKey).build(); } @Override diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java index ffb23f31f40..7d413cca977 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java @@ -37,12 +37,10 @@ import static org.hamcrest.Matchers.equalTo; */ @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) public class ResponseHeaderPluginIT extends HttpSmokeTestCase { + @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("force.http.enabled", true) - .build(); + protected boolean addMockHttpTransport() { + return false; // enable http } @Override diff --git 
a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java
index 15332d4317f..70d26770a7b 100644
--- a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java
+++ b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java
@@ -71,8 +71,6 @@ public final class NetworkModule {
         Property.NodeScope);
     public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
     public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
-    public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true,
-        Property.NodeScope, Property.Deprecated);
     public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope);
 
     private final Settings settings;
@@ -117,9 +115,9 @@ public final class NetworkModule {
         this.settings = settings;
         this.transportClient = transportClient;
         for (NetworkPlugin plugin : plugins) {
-            if (transportClient == false && HTTP_ENABLED.get(settings)) {
-                Map<String, Supplier<HttpServerTransport>> httpTransportFactory = plugin.getHttpTransports(settings, threadPool, bigArrays,
-                    circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher);
+            Map<String, Supplier<HttpServerTransport>> httpTransportFactory = plugin.getHttpTransports(settings, threadPool, bigArrays,
+                circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher);
+            if (transportClient == false) {
                 for (Map.Entry<String, Supplier<HttpServerTransport>> entry : httpTransportFactory.entrySet()) {
                     registerHttpTransport(entry.getKey(), entry.getValue());
                 }
@@ -197,10 +195,6 @@ public final class NetworkModule {
         return factory;
     }
 
-    public boolean isHttpEnabled() {
-        return transportClient == false && HTTP_ENABLED.get(settings);
-    }
-
     public Supplier<Transport> getTransportSupplier() {
         final String name;
         if (TRANSPORT_TYPE_SETTING.exists(settings)) {
diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 45eb3cf45ef..c19cbe4687c 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -219,7 +219,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
             GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
             GatewayService.RECOVER_AFTER_NODES_SETTING,
             GatewayService.RECOVER_AFTER_TIME_SETTING,
-            NetworkModule.HTTP_ENABLED,
             NetworkModule.HTTP_DEFAULT_TYPE_SETTING,
             NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING,
             NetworkModule.HTTP_TYPE_SETTING,
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index b54f63f635f..054b91dc511 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -440,19 +440,7 @@ public class Node implements Closeable {
             final ResponseCollectorService responseCollectorService = new ResponseCollectorService(this.settings, clusterService);
             final SearchTransportService searchTransportService = new SearchTransportService(settings, transportService,
                 SearchExecutionStatsCollector.makeWrapper(responseCollectorService));
-            final Consumer<Binder> httpBind;
-            final HttpServerTransport httpServerTransport;
-            if (networkModule.isHttpEnabled()) {
-                httpServerTransport = networkModule.getHttpServerTransportSupplier().get();
-                httpBind = b -> {
- b.bind(HttpServerTransport.class).toInstance(httpServerTransport); - }; - } else { - httpBind = b -> { - b.bind(HttpServerTransport.class).toProvider(Providers.of(null)); - }; - httpServerTransport = null; - } + final HttpServerTransport httpServerTransport = newHttpTransport(networkModule); final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry, networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(), @@ -519,7 +507,7 @@ public class Node implements Closeable { b.bind(PeerRecoveryTargetService.class).toInstance(new PeerRecoveryTargetService(settings, threadPool, transportService, recoverySettings, clusterService)); } - httpBind.accept(b); + b.bind(HttpServerTransport.class).toInstance(httpServerTransport); pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p)); b.bind(PersistentTasksService.class).toInstance(persistentTasksService); b.bind(PersistentTasksClusterService.class).toInstance(persistentTasksClusterService); @@ -541,10 +529,8 @@ public class Node implements Closeable { client.initialize(injector.getInstance(new Key>() {}), () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); - if (NetworkModule.HTTP_ENABLED.get(settings)) { - logger.debug("initializing HTTP handlers ..."); - actionModule.initRestHandlers(() -> clusterService.state().nodes()); - } + logger.debug("initializing HTTP handlers ..."); + actionModule.initRestHandlers(() -> clusterService.state().nodes()); logger.info("initialized"); success = true; @@ -704,18 +690,13 @@ public class Node implements Closeable { } } - - if (NetworkModule.HTTP_ENABLED.get(settings)) { - injector.getInstance(HttpServerTransport.class).start(); - } + injector.getInstance(HttpServerTransport.class).start(); if (WRITE_PORTS_FILE_SETTING.get(settings)) { - if (NetworkModule.HTTP_ENABLED.get(settings)) { - HttpServerTransport http = injector.getInstance(HttpServerTransport.class); - writePortsFile("http", http.boundAddress()); - } TransportService transport = injector.getInstance(TransportService.class); writePortsFile("transport", transport.boundAddress()); + HttpServerTransport http = injector.getInstance(HttpServerTransport.class); + writePortsFile("http", http.boundAddress()); } logger.info("started"); @@ -733,9 +714,7 @@ public class Node implements Closeable { logger.info("stopping ..."); injector.getInstance(ResourceWatcherService.class).stop(); - if (NetworkModule.HTTP_ENABLED.get(settings)) { - injector.getInstance(HttpServerTransport.class).stop(); - } + injector.getInstance(HttpServerTransport.class).stop(); injector.getInstance(SnapshotsService.class).stop(); injector.getInstance(SnapshotShardsService.class).stop(); @@ -781,9 +760,7 @@ public class Node implements Closeable { toClose.add(() -> stopWatch.start("node_service")); toClose.add(nodeService); toClose.add(() -> stopWatch.stop().start("http")); - if (NetworkModule.HTTP_ENABLED.get(settings)) { - toClose.add(injector.getInstance(HttpServerTransport.class)); - } + toClose.add(injector.getInstance(HttpServerTransport.class)); toClose.add(() -> stopWatch.stop().start("snapshot_service")); toClose.add(injector.getInstance(SnapshotsService.class)); toClose.add(injector.getInstance(SnapshotShardsService.class)); @@ -963,6 +940,11 @@ public class Node implements Closeable { return new InternalClusterInfoService(settings, clusterService, threadPool, client, listeners); } + /** Constructs a {@link 
org.elasticsearch.http.HttpServerTransport} which may be mocked for tests. */ + protected HttpServerTransport newHttpTransport(NetworkModule networkModule) { + return networkModule.getHttpServerTransportSupplier().get(); + } + private static class LocalNodeFactory implements Function { private final SetOnce localNode = new SetOnce<>(); private final String persistentNodeId; diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java index f37634eed54..ac667d09163 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; @@ -62,10 +63,10 @@ public class TransportClientIT extends ESIntegTestCase { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .put("node.name", "testNodeVersionIsUpdated") .put("transport.type", getTestTransportType()) - .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false) .put("cluster.name", "foobar") - .build(), Arrays.asList(getTestTransportPlugin(), TestZenDiscovery.TestPlugin.class)).start()) { + .build(), Arrays.asList(getTestTransportPlugin(), TestZenDiscovery.TestPlugin.class, + MockHttpTransport.TestPlugin.class)).start()) { TransportAddress transportAddress = node.injector().getInstance(TransportService.class).boundAddress().publishAddress(); client.addTransportAddress(transportAddress); // since we force transport clients there has to be one node started that we connect to. 
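The same opt-in pattern recurs in the remaining test diffs below. As a minimal
sketch (not itself part of the patch), a test class that wants a real HTTP port
after this change would override the `addMockHttpTransport()` hook that this
commit adds to `ESIntegTestCase`; the class name here is hypothetical:

[source,java]
--------------------------------------------------
import org.elasticsearch.test.ESIntegTestCase;

// Hypothetical test class: after this patch, the test cluster installs
// MockHttpTransport.TestPlugin by default, so binding a real HTTP port
// is opt-in rather than controlled by the removed http.enabled setting.
public class MyRealHttpIT extends ESIntegTestCase {

    @Override
    protected boolean addMockHttpTransport() {
        return false; // enable http, as in the tests modified in this patch
    }
}
--------------------------------------------------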
diff --git a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index 09dc8607bc4..ba74e373f88 100644 --- a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.util.BigArrays; @@ -128,9 +127,7 @@ public class NetworkModuleTests extends ModuleTestCase { } public void testRegisterTransport() { - Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom") - .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .build(); + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom").build(); Supplier custom = () -> null; // content doesn't matter we check reference equality NetworkPlugin plugin = new NetworkPlugin() { @Override @@ -144,15 +141,12 @@ public class NetworkModuleTests extends ModuleTestCase { }; NetworkModule module = newNetworkModule(settings, false, plugin); assertFalse(module.isTransportClient()); - assertFalse(module.isHttpEnabled()); assertSame(custom, module.getTransportSupplier()); // check it works with transport only as well module = newNetworkModule(settings, true, plugin); assertSame(custom, module.getTransportSupplier()); assertTrue(module.isTransportClient()); - assertFalse(module.isHttpEnabled()); - assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testRegisterHttpTransport() { @@ -175,15 +169,11 @@ public class NetworkModuleTests extends ModuleTestCase { }); assertSame(custom, module.getHttpServerTransportSupplier()); assertFalse(module.isTransportClient()); - assertTrue(module.isHttpEnabled()); - settings = Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); NetworkModule newModule = newNetworkModule(settings, false); assertFalse(newModule.isTransportClient()); - assertFalse(newModule.isHttpEnabled()); expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier()); - assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testOverrideDefault() { @@ -261,7 +251,6 @@ public class NetworkModuleTests extends ModuleTestCase { public void testRegisterInterceptor() { Settings settings = Settings.builder() - .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); AtomicInteger called = new AtomicInteger(0); @@ -309,7 +298,6 @@ public class NetworkModuleTests extends ModuleTestCase { }); }); assertEquals("interceptor must not be null", nullPointerException.getMessage()); - assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } private NetworkModule newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... 
plugins) { diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 9527afed5fe..fc284b9f5e8 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.ZenPing; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; @@ -40,6 +41,7 @@ import org.elasticsearch.transport.TransportService; import java.io.Closeable; import java.io.IOException; import java.nio.file.Path; +import java.util.Arrays; import java.util.Collections; import java.util.Stack; import java.util.concurrent.CompletableFuture; @@ -123,7 +125,6 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase { return Settings .builder() .put("discovery.type", "single-node") - .put("http.enabled", false) .put("transport.type", getTestTransportType()) /* * We align the port ranges of the two as then with zen discovery these two @@ -151,7 +152,7 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase { 0, false, "other", - Collections.singletonList(getTestTransportPlugin()), + Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity())) { other.beforeTest(random(), 0); final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 254823791d5..1fa86d3f4b8 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -32,10 +32,13 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.MockHttpTransport; import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; @@ -55,7 +58,7 @@ public class NodeTests extends ESTestCase { if (name != null) { settings.put(Node.NODE_NAME_SETTING.getKey(), name); } - try (Node node = new MockNode(settings.build(), Collections.singleton(getTestTransportPlugin()))) { + try (Node node = new MockNode(settings.build(), basePlugins())) { final Settings nodeSettings = randomBoolean() ? 
diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java
index 254823791d5..1fa86d3f4b8 100644
--- a/server/src/test/java/org/elasticsearch/node/NodeTests.java
+++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java
@@ -32,10 +32,13 @@ import org.elasticsearch.env.Environment;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.MockHttpTransport;
 
 import java.io.IOException;
 import java.nio.file.Path;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -55,7 +58,7 @@ public class NodeTests extends ESTestCase {
         if (name != null) {
             settings.put(Node.NODE_NAME_SETTING.getKey(), name);
         }
-        try (Node node = new MockNode(settings.build(), Collections.singleton(getTestTransportPlugin()))) {
+        try (Node node = new MockNode(settings.build(), basePlugins())) {
             final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings();
             if (name == null) {
                 assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(node.getNodeEnvironment().nodeId().substring(0, 7)));
@@ -63,7 +66,6 @@
                 assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(name));
             }
         }
-        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
     }
 
     public static class CheckPlugin extends Plugin {
@@ -75,6 +77,13 @@
         }
     }
 
+    private List<Class<? extends Plugin>> basePlugins() {
+        List<Class<? extends Plugin>> plugins = new ArrayList<>();
+        plugins.add(getTestTransportPlugin());
+        plugins.add(MockHttpTransport.TestPlugin.class);
+        return plugins;
+    }
+
     public void testLoadPluginBootstrapChecks() throws IOException {
         final String name = randomBoolean() ? randomAlphaOfLength(10) : null;
         Settings.Builder settings = baseSettings();
@@ -82,7 +91,9 @@
             settings.put(Node.NODE_NAME_SETTING.getKey(), name);
         }
         AtomicBoolean executed = new AtomicBoolean(false);
-        try (Node node = new MockNode(settings.build(), Arrays.asList(getTestTransportPlugin(), CheckPlugin.class)) {
+        List<Class<? extends Plugin>> plugins = basePlugins();
+        plugins.add(CheckPlugin.class);
+        try (Node node = new MockNode(settings.build(), plugins) {
             @Override
             protected void validateNodeBeforeAcceptingRequests(BootstrapContext context, BoundTransportAddress boundTransportAddress,
                                                                List<BootstrapCheck> bootstrapChecks) throws NodeValidationException {
@@ -95,7 +106,6 @@
             expectThrows(NodeValidationException.class, () -> node.start());
             assertTrue(executed.get());
         }
-        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
     }
 
     public void testWarnIfPreRelease() {
@@ -125,7 +135,7 @@
     public void testNodeAttributes() throws IOException {
         String attr = randomAlphaOfLength(5);
         Settings.Builder settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + "test_attr", attr);
-        try (Node node = new MockNode(settings.build(), Collections.singleton(getTestTransportPlugin()))) {
+        try (Node node = new MockNode(settings.build(), basePlugins())) {
            final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings();
            assertEquals(attr, Node.NODE_ATTRIBUTES.getAsMap(nodeSettings).get("test_attr"));
        }
@@ -133,7 +143,7 @@
         // leading whitespace not allowed
         attr = " leading";
         settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + "test_attr", attr);
-        try (Node node = new MockNode(settings.build(), Collections.singleton(getTestTransportPlugin()))) {
+        try (Node node = new MockNode(settings.build(), basePlugins())) {
             fail("should not allow a node attribute with leading whitespace");
         } catch (IllegalArgumentException e) {
             assertEquals("node.attr.test_attr cannot have leading or trailing whitespace [ leading]", e.getMessage());
@@ -142,12 +152,11 @@
         // trailing whitespace not allowed
         attr = "trailing ";
         settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + "test_attr", attr);
-        try (Node node = new MockNode(settings.build(), Collections.singleton(getTestTransportPlugin()))) {
+        try (Node node = new MockNode(settings.build(), basePlugins())) {
             fail("should not allow a node attribute with trailing whitespace");
         } catch (IllegalArgumentException e) {
             assertEquals("node.attr.test_attr cannot have leading or trailing whitespace [trailing ]", e.getMessage());
         }
-        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
     }
 
     private static Settings.Builder baseSettings() {
@@ -155,7 +164,6 @@
         return Settings.builder()
                 .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong()))
                 .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
-                .put(NetworkModule.HTTP_ENABLED.getKey(), false)
                 .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                 .put(Node.NODE_DATA_SETTING.getKey(), true);
     }
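A note on the pattern above: `NodeTests` now funnels every `MockNode` through `basePlugins()`, so each node gets the in-memory test transport plus the `MockHttpTransport.TestPlugin` marker instead of binding a real HTTP port. A minimal sketch of the resulting test shape, assuming an `ESTestCase` subclass in which `getTestTransportPlugin()` and the `baseSettings()` builder from the diff above are in scope:

[source,java]
----
public void testNodeStartsWithDummyHttp() throws IOException, NodeValidationException {
    List<Class<? extends Plugin>> plugins = new ArrayList<>();
    plugins.add(getTestTransportPlugin());           // in-memory transport for the test
    plugins.add(MockHttpTransport.TestPlugin.class); // marker: use the dummy http transport
    try (Node node = new MockNode(baseSettings().build(), plugins)) {
        node.start(); // starts without binding a real http port
    }
}
----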
diff --git a/server/src/test/java/org/elasticsearch/node/service/NodeServiceTests.java b/server/src/test/java/org/elasticsearch/node/service/NodeServiceTests.java
deleted file mode 100644
index e9097b81426..00000000000
--- a/server/src/test/java/org/elasticsearch/node/service/NodeServiceTests.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.node.service;
-
-import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
-import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
-import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ESSingleNodeTestCase;
-
-import static org.hamcrest.Matchers.hasSize;
-
-public class NodeServiceTests extends ESSingleNodeTestCase {
-
-    @Override
-    protected Settings nodeSettings() {
-        return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false).build();
-    }
-
-    public void testHttpServerDisabled() {
-        // test for a bug where if HTTP stats were requested but HTTP was disabled, NodeService would hit a NullPointerException
-        NodesStatsResponse response = client().admin().cluster().nodesStats(new NodesStatsRequest().http(true)).actionGet();
-        assertThat(response.getNodes(), hasSize(1));
-    }
-
-}
diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
index 8d2f6e89550..79694f8050a 100644
--- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
+++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.MockInternalClusterInfoService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
@@ -33,6 +34,7 @@ import org.elasticsearch.common.util.MockBigArrays;
 import org.elasticsearch.common.util.MockPageCacheRecycler;
 import org.elasticsearch.common.util.PageCacheRecycler;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.indices.recovery.RecoverySettings;
@@ -41,6 +43,7 @@ import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.MockSearchService;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.fetch.FetchPhase;
+import org.elasticsearch.test.MockHttpTransport;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.Transport;
@@ -144,5 +147,14 @@ public class MockNode extends Node {
             return new MockInternalClusterInfoService(settings, clusterService, threadPool, client, listener);
         }
     }
+
+    @Override
+    protected HttpServerTransport newHttpTransport(NetworkModule networkModule) {
+        if (getPluginsService().filterPlugins(MockHttpTransport.TestPlugin.class).isEmpty()) {
+            return super.newHttpTransport(networkModule);
+        } else {
+            return new MockHttpTransport();
+        }
+    }
 }
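The `newHttpTransport` override above is the dispatch point: the presence of the `MockHttpTransport.TestPlugin` marker in the node's plugin list selects the dummy transport, otherwise the real one is built. A hedged sketch of how a test could observe this, assuming `HttpServerTransport` is retrievable from the node injector (as the nodes stats plumbing expects) and reusing the plugin list from the previous sketch:

[source,java]
----
try (Node node = new MockNode(baseSettings().build(), plugins)) {
    node.start();
    // with the marker plugin present, the dummy implementation is wired in
    HttpServerTransport http = node.injector().getInstance(HttpServerTransport.class);
    assertThat(http, instanceOf(MockHttpTransport.class));
}
----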
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index a7fd6768064..7210fadd7ea 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -1842,7 +1842,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
             @Override
             public Settings nodeSettings(int nodeOrdinal) {
                 return Settings.builder()
-                    .put(NetworkModule.HTTP_ENABLED.getKey(), false)
                     .put(networkSettings.build())
                     .put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build();
             }
@@ -1892,6 +1891,11 @@ public abstract class ESIntegTestCase extends ESTestCase {
         return true;
     }
 
+    /** Returns {@code true} iff this test cluster should use a dummy http transport */
+    protected boolean addMockHttpTransport() {
+        return true;
+    }
+
     /**
      * Returns a function that allows to wrap / filter all clients that are exposed by the test cluster. This is useful
      * for debugging or request / response pre and post processing. It also allows to intercept all calls done by the test
@@ -1928,10 +1932,12 @@ public abstract class ESIntegTestCase extends ESTestCase {
         if (addMockTransportService()) {
             mocks.add(getTestTransportPlugin());
         }
-
         if (addTestZenDiscovery()) {
             mocks.add(TestZenDiscovery.TestPlugin.class);
         }
+        if (addMockHttpTransport()) {
+            mocks.add(MockHttpTransport.TestPlugin.class);
+        }
         mocks.add(TestSeedPlugin.class);
         return Collections.unmodifiableList(mocks);
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
index cd1aa6b020d..554243d2b22 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -161,6 +161,11 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
         return Settings.EMPTY;
     }
 
+    /** True if a dummy http transport should be used, or false if the real http transport should be used. */
+    protected boolean addMockHttpTransport() {
+        return true;
+    }
+
     private Node newNode() {
         final Path tempDir = createTempDir();
         Settings settings = Settings.builder()
@@ -173,7 +178,6 @@
             .put("node.name", "node_s_0")
             .put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE.getKey(), "1000/1m")
             .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created
-            .put(NetworkModule.HTTP_ENABLED.getKey(), false)
             .put("transport.type", getTestTransportType())
             .put(Node.NODE_DATA_SETTING.getKey(), true)
             .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong())
@@ -192,6 +196,9 @@
             plugins = new ArrayList<>(plugins);
             plugins.add(TestZenDiscovery.TestPlugin.class);
         }
+        if (addMockHttpTransport()) {
+            plugins.add(MockHttpTransport.TestPlugin.class);
+        }
         Node build = new MockNode(settings, plugins);
         try {
             build.start();
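Both base test classes default `addMockHttpTransport()` to `true`, so HTTP is mocked out everywhere unless a suite opts back in. A suite that exercises the REST layer re-enables the real transport by overriding the hook — exactly the pattern applied to the x-pack tests later in this patch (`MyRestLayerIT` is a hypothetical name used only for illustration):

[source,java]
----
public class MyRestLayerIT extends ESIntegTestCase {
    @Override
    protected boolean addMockHttpTransport() {
        return false; // this suite issues real REST requests, so bind a real http transport
    }
}
----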
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.http.HttpInfo;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.http.HttpStats;
+import org.elasticsearch.plugins.Plugin;
+
+/**
+ * A dummy http transport used by tests when not wanting to actually bind to a real address.
+ */
+public class MockHttpTransport extends AbstractLifecycleComponent implements HttpServerTransport {
+
+    /**
+     * Marker plugin used by {@link org.elasticsearch.node.MockNode} to enable {@link MockHttpTransport}.
+     */
+    public static class TestPlugin extends Plugin {}
+
+    // dummy address/info that can be read by code expecting objects from the relevant methods,
+    // but not actually used for a real connection
+    private static final TransportAddress DUMMY_TRANSPORT_ADDRESS = new TransportAddress(TransportAddress.META_ADDRESS, 0);
+    private static final BoundTransportAddress DUMMY_BOUND_ADDRESS = new BoundTransportAddress(
+        new TransportAddress[] { DUMMY_TRANSPORT_ADDRESS }, DUMMY_TRANSPORT_ADDRESS);
+    private static final HttpInfo DUMMY_HTTP_INFO = new HttpInfo(DUMMY_BOUND_ADDRESS, 0);
+    private static final HttpStats DUMMY_HTTP_STATS = new HttpStats(0, 0);
+
+    public MockHttpTransport() {
+        super(Settings.EMPTY);
+    }
+
+    @Override
+    protected void doStart() {}
+
+    @Override
+    protected void doStop() {}
+
+    @Override
+    protected void doClose() {}
+
+    @Override
+    public BoundTransportAddress boundAddress() {
+        return DUMMY_BOUND_ADDRESS;
+    }
+
+    @Override
+    public HttpInfo info() {
+        return DUMMY_HTTP_INFO;
+    }
+
+    @Override
+    public HttpStats stats() {
+        return DUMMY_HTTP_STATS;
+    }
+}
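`MockHttpTransport` is deliberately inert: its lifecycle methods are no-ops and its accessors return fixed dummy objects, so consumers such as the nodes info and stats APIs keep working without a bound socket. A small sketch of that contract (the `getPort()` and `getServerOpen()` reads are assumptions about the dummy objects' getters, not part of this patch):

[source,java]
----
MockHttpTransport http = new MockHttpTransport();
http.start();                                               // no-op
assert http.boundAddress().publishAddress().getPort() == 0; // fixed dummy address
assert http.stats().getServerOpen() == 0;                   // fixed dummy stats
http.close();                                               // no-op
----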
diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
index a93d2b364d5..80f47ae6e52 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
@@ -137,7 +137,6 @@ public class ClusterDiscoveryConfiguration extends NodeConfigurationSource {
             // we need to pin the node port & host so we'd know where to point things
             builder.put(TcpTransport.PORT.getKey(), unicastHostPorts[nodeOrdinal]);
             builder.put(TcpTransport.HOST.getKey(), IP_ADDR); // only bind on one IF we use v4 here by default
-            builder.put(NetworkModule.HTTP_ENABLED.getKey(), false);
             for (int i = 0; i < unicastHostOrdinals.length; i++) {
                 unicastHosts[i] = IP_ADDR + ":" + (unicastHostPorts[unicastHostOrdinals[i]]);
             }
diff --git a/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java b/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java
index a0cdb8c3168..3976d88887f 100644
--- a/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java
@@ -29,6 +29,7 @@ import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.search.MockSearchService;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.MockHttpTransport;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -43,10 +44,10 @@ public class MockNodeTests extends ESTestCase {
         Settings settings = Settings.builder() // All these are required or MockNode will fail to build.
                 .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
                 .put("transport.type", getTestTransportType())
-                .put("http.enabled", false)
                 .build();
         List<Class<? extends Plugin>> plugins = new ArrayList<>();
         plugins.add(getTestTransportPlugin());
+        plugins.add(MockHttpTransport.TestPlugin.class);
         boolean useMockBigArrays = randomBoolean();
         boolean useMockSearchService = randomBoolean();
         if (useMockBigArrays) {
@@ -69,6 +70,5 @@ public class MockNodeTests extends ESTestCase {
                 assertSame(searchService.getClass(), SearchService.class);
             }
         }
-        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
     }
 }
diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
index 23c665af30a..c70708c73ac 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
@@ -34,6 +34,7 @@ import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.MockHttpTransport;
 import org.elasticsearch.test.NodeConfigurationSource;
 import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.transport.TcpTransport;
@@ -43,6 +44,7 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -72,6 +74,10 @@ import static org.hamcrest.Matchers.not;
 @LuceneTestCase.SuppressFileSystems("ExtrasFS") // doesn't work with potential multi data path from test cluster yet
 public class InternalTestClusterTests extends ESTestCase {
 
+    private static Collection<Class<? extends Plugin>> mockPlugins() {
+        return Arrays.asList(getTestTransportPlugin(), TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class);
+    }
+
     public void testInitializiationIsConsistent() {
         long clusterSeed = randomLong();
         boolean masterNodes = randomBoolean();
@@ -184,7 +190,6 @@
             .put(
                 NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
                 2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), false)
             .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType());
         if (autoManageMinMasterNodes == false) {
             assert minNumDataNodes == maxNumDataNodes;
@@ -210,17 +215,15 @@
         String nodePrefix = "foobar";
 
         Path baseDir = createTempDir();
-        final List<Class<? extends Plugin>> mockPlugins = Arrays.asList(getTestTransportPlugin(), TestZenDiscovery.TestPlugin.class);
         InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
             autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
-            enableHttpPipelining, nodePrefix, mockPlugins, Function.identity());
+            enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity());
         InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
             autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes,
-            enableHttpPipelining, nodePrefix, mockPlugins, Function.identity());
+            enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity());
 
         assertClusters(cluster0, cluster1, false);
         long seed = randomLong();
-        boolean shouldAssertSettingsDeprecationsAndWarnings = false;
         try {
             {
                 Random random = new Random(seed);
@@ -231,10 +234,6 @@
                 cluster1.beforeTest(random, random.nextDouble());
             }
             assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames());
-            if (cluster0.getNodeNames().length > 0) {
-                shouldAssertSettingsDeprecationsAndWarnings = true;
-                assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED});
-            }
             Iterator<Client> iterator1 = cluster1.getClients().iterator();
             for (Client client : cluster0.getClients()) {
                 assertTrue(iterator1.hasNext());
@@ -247,9 +246,6 @@
             cluster1.afterTest();
         } finally {
             IOUtils.close(cluster0, cluster1);
-            if (shouldAssertSettingsDeprecationsAndWarnings) {
-                assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED});
-            }
         }
     }
 
@@ -265,7 +261,7 @@
         NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
             @Override
             public Settings nodeSettings(int nodeOrdinal) {
-                return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false)
+                return Settings.builder()
                     .put(
                         NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
                         2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)
@@ -289,8 +285,7 @@
         Path baseDir = createTempDir();
         InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes, true, minNumDataNodes,
             maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
-            enableHttpPipelining, nodePrefix, Arrays.asList(getTestTransportPlugin(), TestZenDiscovery.TestPlugin.class),
-            Function.identity());
+            enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity());
         try {
             cluster.beforeTest(random(), 0.0);
             final int originalMasterCount = cluster.numMasterNodes();
@@ -355,7 +350,6 @@
         } finally {
             cluster.close();
         }
-        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
     }
 
     private Path[] getNodePaths(InternalTestCluster cluster, String name) {
@@ -378,7 +372,6 @@
             public Settings nodeSettings(int nodeOrdinal) {
                 return Settings.builder()
                     .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes)
-                    .put(NetworkModule.HTTP_ENABLED.getKey(), false)
                     .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                     .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0)
                     // speedup join timeout as setting initial state timeout to 0 makes split
@@ -397,7 +390,7 @@
                 return Settings.builder()
                     .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build();
             }
-        }, 0, randomBoolean(), "", Arrays.asList(getTestTransportPlugin(), TestZenDiscovery.TestPlugin.class), Function.identity());
+        }, 0, randomBoolean(), "", mockPlugins(), Function.identity());
         cluster.beforeTest(random(), 0.0);
         List<DiscoveryNode.Role> roles = new ArrayList<>();
         for (int i = 0; i < numNodes; i++) {
@@ -456,7 +449,6 @@
         } finally {
             cluster.close();
         }
-        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
     }
 
     public void testTwoNodeCluster() throws Exception {
@@ -464,7 +456,7 @@
         NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
             @Override
             public Settings nodeSettings(int nodeOrdinal) {
-                return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false)
+                return Settings.builder()
                     .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
                     .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
                     .build();
@@ -486,7 +478,7 @@
         Path baseDir = createTempDir();
         InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, true, 2, 2,
             "test", nodeConfigurationSource, 0, enableHttpPipelining, nodePrefix,
-            Arrays.asList(getTestTransportPlugin(), TestZenDiscovery.TestPlugin.class), Function.identity());
+            mockPlugins(), Function.identity());
         try {
             cluster.beforeTest(random(), 0.0);
             assertMMNinNodeSetting(cluster, 2);
@@ -516,6 +508,5 @@
         } finally {
             cluster.close();
         }
-        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
     }
 }
diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc
index 38ceda7d07f..2c4df292857 100644
--- a/x-pack/docs/en/settings/security-settings.asciidoc
+++ b/x-pack/docs/en/settings/security-settings.asciidoc
@@ -89,8 +89,8 @@ You can set the following token service settings in
 `xpack.security.authc.token.enabled`::
 Set to `false` to disable the built-in token service. Defaults to `true` unless
-`xpack.security.http.ssl.enabled` is `false` and `http.enabled` is `true`.
-This prevents sniffing the token from a connection over plain http.
+`xpack.security.http.ssl.enabled` is `false`. This prevents sniffing the token
+from a connection over plain http.
 
 `xpack.security.authc.token.timeout`::
 The length of time that a token is valid for. By default this value is `20m` or
diff --git a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc b/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc
index 6d831b81ddb..a9150ec056c 100644
--- a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc
+++ b/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc
@@ -76,7 +76,7 @@ If you use {security} and the built-in token service is enabled, you must
 configure your cluster to use SSL/TLS for the HTTP interface. HTTPS is required
 in order to use the token service.
 
-In particular, if `xpack.security.authc.token.enabled` and `http.enabled` are
+In particular, if `xpack.security.authc.token.enabled` is
 set to `true` in the `elasticsearch.yml` file, you must also set
 `xpack.security.http.ssl.enabled` to `true`. For more information about these
 settings, see <> and <>.
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
index a88d423be95..aa7b2b216b2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java
@@ -73,13 +73,8 @@
         true, Setting.Property.NodeScope);
 
     /** Setting for enabling or disabling the token service. Defaults to true */
-    public static final Setting<Boolean> TOKEN_SERVICE_ENABLED_SETTING = Setting.boolSetting("xpack.security.authc.token.enabled", (s) -> {
-        if (NetworkModule.HTTP_ENABLED.get(s)) {
-            return XPackSettings.HTTP_SSL_ENABLED.getRaw(s);
-        } else {
-            return Boolean.TRUE.toString();
-        }
-    }, Setting.Property.NodeScope);
+    public static final Setting<Boolean> TOKEN_SERVICE_ENABLED_SETTING = Setting.boolSetting("xpack.security.authc.token.enabled",
+        XPackSettings.HTTP_SSL_ENABLED::getRaw, Setting.Property.NodeScope);
 
     /** Setting for enabling or disabling sql. Defaults to true. */
     public static final Setting<Boolean> SQL_ENABLED = Setting.boolSetting("xpack.sql.enabled", true, Setting.Property.NodeScope);
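The `XPackSettings` change collapses the old two-way default into one rule: the token service now simply follows `xpack.security.http.ssl.enabled`, since HTTP can no longer be disabled. A hedged sketch of the resulting defaults (assuming `HTTP_SSL_ENABLED` still defaults to `false`):

[source,java]
----
Settings https = Settings.builder()
    .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true)
    .build();
assert XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.get(https);                   // enabled over HTTPS
assert XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.get(Settings.EMPTY) == false; // disabled over plain http
----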
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java
index 4e7356ad63d..d253656fd9c 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java
@@ -27,12 +27,16 @@ public class LicenseServiceClusterNotRecoveredTests extends AbstractLicensesInte
         return nodeSettingsBuilder(nodeOrdinal).build();
     }
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false;
+    }
+
     private Settings.Builder nodeSettingsBuilder(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
             .put("node.data", true)
-            .put("resource.reload.interval.high", "500ms") // for license mode file watcher
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true);
+            .put("resource.reload.interval.high", "500ms"); // for license mode file watcher
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java
index ad508ddb7bc..6bf2befbddd 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java
@@ -34,12 +34,16 @@ public class LicenseServiceClusterTests extends AbstractLicensesIntegrationTestC
         return nodeSettingsBuilder(nodeOrdinal).build();
     }
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     private Settings.Builder nodeSettingsBuilder(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
             .put("node.data", true)
-            .put("resource.reload.interval.high", "500ms") // for license mode file watcher
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true);
+            .put("resource.reload.interval.high", "500ms"); // for license mode file watcher
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java
index 55b14a4d792..12dd8ff9f3e 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java
@@ -28,13 +28,17 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
 @ESIntegTestCase.ClusterScope(scope = SUITE)
 public class StartBasicLicenseTests extends AbstractLicensesIntegrationTestCase {
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
             .put("node.data", true)
-            .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "basic")
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true).build();
+            .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "basic").build();
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java
index b7a09d24b13..2e411f46269 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java
@@ -27,13 +27,17 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
 @ESIntegTestCase.ClusterScope(scope = SUITE)
 public class StartTrialLicenseTests extends AbstractLicensesIntegrationTestCase {
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
             .put("node.data", true)
-            .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "basic")
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true).build();
+            .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "basic").build();
     }
 
     @Override
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java
index 7a554a51ea4..9f3d1c779f8 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java
@@ -24,6 +24,7 @@ import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.license.LicenseService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.MockHttpTransport;
 import org.elasticsearch.test.discovery.TestZenDiscovery;
 import org.elasticsearch.xpack.core.XPackSettings;
 import org.elasticsearch.xpack.ml.LocalStateMachineLearning;
@@ -111,7 +112,7 @@ public abstract class BaseMlIntegTestCase extends ESIntegTestCase {
 
     @Override
     protected Collection<Class<? extends Plugin>> getMockPlugins() {
-        return Arrays.asList(TestZenDiscovery.TestPlugin.class, TestSeedPlugin.class);
+        return Arrays.asList(TestZenDiscovery.TestPlugin.class, TestSeedPlugin.class, MockHttpTransport.TestPlugin.class);
     }
 
     @Before
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTestCase.java
index ac44af04483..ed13a3ab127 100644
--- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTestCase.java
+++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTestCase.java
@@ -46,7 +46,6 @@ public abstract class LocalExporterIntegTestCase extends MonitoringIntegTestCase
                 .put("xpack.monitoring.exporters." + exporterName + ".enabled", false)
                 .put("xpack.monitoring.exporters." + exporterName + ".cluster_alerts.management.enabled", false)
                 .put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false)
-                .put(NetworkModule.HTTP_ENABLED.getKey(), false)
                 .build();
     }
 
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java
index cc6da232479..6ce1e381580 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java
@@ -19,10 +19,9 @@ final class TokenSSLBootstrapCheck implements BootstrapCheck {
 
     @Override
     public BootstrapCheckResult check(BootstrapContext context) {
-        final Boolean httpEnabled = NetworkModule.HTTP_ENABLED.get(context.settings);
         final Boolean httpsEnabled = XPackSettings.HTTP_SSL_ENABLED.get(context.settings);
         final Boolean tokenServiceEnabled = XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.get(context.settings);
-        if (httpEnabled && httpsEnabled == false && tokenServiceEnabled) {
+        if (httpsEnabled == false && tokenServiceEnabled) {
             final String message = String.format(
                     Locale.ROOT,
                     "HTTPS is required in order to use the token service; "
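With the `http.enabled` branch gone, `TokenSSLBootstrapCheck` reduces to two inputs: the check now fails exactly when the token service is on and HTTP SSL is off. A sketch mirroring the surviving test cases (same-package access to the package-private check class is assumed):

[source,java]
----
Settings settings = Settings.builder()
    .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), false)
    .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true)
    .build();
// token service without HTTPS -> the node refuses to bootstrap
assert new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure();
----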
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java
index ce7e58972b8..681735a6a7c 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java
@@ -32,11 +32,15 @@ import static org.hamcrest.Matchers.equalTo;
 
 public class BulkUpdateTests extends SecurityIntegTestCase {
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     public Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put(XPackSettings.DLS_FLS_ENABLED.getKey(), randomBoolean())
             .build();
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java
index fcab6f0d732..7c4f6e364d9 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java
@@ -174,11 +174,8 @@ public class ClearRealmsCacheTests extends SecurityIntegTestCase {
     }
 
     @Override
-    public Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
-            .build();
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
     }
 
     @Override
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java
index eadbe3738b6..3c3eddfc14c 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java
@@ -68,11 +68,8 @@ public class ClearRolesCacheTests extends NativeRealmIntegTestCase {
     }
 
     @Override
-    public Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
-            .build();
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
     }
 
     public void testModifyingViaApiClearsCache() throws Exception {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java
index 19d61ed77c5..0baa1e91675 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java
@@ -56,10 +56,14 @@ public class ClusterPrivilegeTests extends AbstractPrivilegeTestCase {
         repositoryLocation = null;
     }
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings() {
         return Settings.builder().put(super.nodeSettings())
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("path.repo", repositoryLocation)
             .build();
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java
index b1428040080..8efbddcf57f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java
@@ -120,10 +120,8 @@ public class IndexPrivilegeTests extends AbstractPrivilegeTestCase {
             "index_a_role:u13\n";
 
     @Override
-    protected Settings nodeSettings() {
-        return Settings.builder().put(super.nodeSettings())
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
-            .build();
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
     }
 
     @Override
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java
index 351cf91bf94..2c70bc1ff81 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java
@@ -104,10 +104,8 @@ public class LicensingTests extends SecurityIntegTestCase {
     }
 
     @Override
-    public Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
-            .build();
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
     }
 
     @Override
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
index 2727353b36f..5f082eae62f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java
@@ -56,11 +56,8 @@ public abstract class NativeRealmIntegTestCase extends SecurityIntegTestCase {
     }
 
     @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
-            .build();
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
     }
 
     @Override
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityPluginTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityPluginTests.java
index 3f744370329..295ee42ff78 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityPluginTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityPluginTests.java
@@ -25,11 +25,8 @@ import static org.hamcrest.Matchers.is;
 public class SecurityPluginTests extends SecurityIntegTestCase {
 
     @Override
-    public Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put("http.enabled", true) //This test requires HTTP
-            .build();
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
     }
 
     public void testThatPluginIsLoaded() throws IOException {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java
index c66b882ac0a..1e9875ae536 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java
@@ -19,11 +19,6 @@ public class TokenSSLBootsrapCheckTests extends ESTestCase {
 
         assertFalse(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
 
-        settings = Settings.builder()
-            .put(NetworkModule.HTTP_ENABLED.getKey(), false)
-            .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build();
-        assertFalse(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
-
         settings = Settings.builder().put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build();
         assertFalse(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
 
@@ -35,13 +30,5 @@
             .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), false)
             .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build();
         assertTrue(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
-
-        settings = Settings.builder()
-            .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), false)
-            .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true)
-            .put(NetworkModule.HTTP_ENABLED.getKey(), false).build();
-        assertFalse(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
-
-        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
     }
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java
index e64d9bb7e44..8b845fd3393 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java
@@ -49,11 +49,15 @@ public class AuditTrailTests extends SecurityIntegTestCase {
     private static final String ROLE_CAN_RUN_AS = "can_run_as";
     private static final String ROLES = ROLE_CAN_RUN_AS + ":\n" + "  run_as: [ '" + EXECUTE_USER + "' ]\n";
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     public Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("xpack.security.audit.enabled", true)
             .put("xpack.security.audit.outputs", "index")
             .putList("xpack.security.audit.index.events.include", "access_denied", "authentication_failed", "run_as_denied")
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java
index ce67b84134f..56d34abb9d3 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java
@@ -54,11 +54,8 @@ public class RunAsIntegTests extends SecurityIntegTestCase {
     }
 
     @Override
-    public Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
-            .build();
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
     }
 
     @Override
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java
index ebe6b6abf18..839b272d115 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java
@@ -39,12 +39,16 @@ public class ESNativeMigrateToolTests extends NativeRealmIntegTestCase {
         useSSL = randomBoolean();
     }
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     public Settings nodeSettings(int nodeOrdinal) {
         logger.info("--> use SSL? {}", useSSL);
         Settings s = Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("xpack.security.http.ssl.enabled", useSSL)
             .build();
         return s;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java
index e64a06d435f..73825a3b6fc 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java
@@ -51,13 +51,17 @@ import static org.hamcrest.Matchers.is;
  */
 public class PkiAuthenticationTests extends SecuritySingleNodeTestCase {
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings() {
         SSLClientAuth sslClientAuth = randomBoolean() ? SSLClientAuth.REQUIRED : SSLClientAuth.OPTIONAL;
         Settings.Builder builder = Settings.builder()
             .put(super.nodeSettings())
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("xpack.security.http.ssl.enabled", true)
             .put("xpack.security.http.ssl.client_authentication", sslClientAuth)
             .put("xpack.security.authc.realms.file.type", FileRealmSettings.TYPE)
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java
index 720ab17aedb..47263cd3952 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java
@@ -41,12 +41,15 @@ public class PkiOptionalClientAuthTests extends SecuritySingleNodeTestCase {
     }
 
     @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     protected Settings nodeSettings() {
         String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100);
 
         Settings.Builder builder = Settings.builder()
             .put(super.nodeSettings())
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("xpack.security.http.ssl.enabled", true)
             .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.OPTIONAL)
             .put("xpack.security.authc.realms.file.type", "file")
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java
index a35af3c749f..fea442287ae 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java
@@ -35,11 +35,14 @@ public class RestAuthenticateActionTests extends SecurityIntegTestCase {
         anonymousEnabled = randomBoolean();
     }
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
-        Settings.Builder builder = Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true);
+        Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal));
 
         if (anonymousEnabled) {
             builder.put(AnonymousUser.USERNAME_SETTING.getKey(), "anon")
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java
index 0964bc5a45d..8af8c5da81a 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java
@@ -16,6 +16,8 @@ import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.node.MockNode;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.node.NodeValidationException;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.MockHttpTransport;
 import org.elasticsearch.test.SecurityIntegTestCase;
 import org.elasticsearch.test.SecuritySettingsSource;
 import org.elasticsearch.test.SecuritySettingsSourceField;
@@ -40,6 +42,7 @@ import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.concurrent.CountDownLatch;
 
 import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForStore;
@@ -114,12 +117,13 @@ public class ServerTransportFilterIntegrationTests extends SecurityIntegTestCase
             .put("xpack.security.audit.enabled", false)
             .put(XPackSettings.WATCHER_ENABLED.getKey(), false)
             .put("path.home", home)
-            .put(NetworkModule.HTTP_ENABLED.getKey(), false)
             .put(Node.NODE_MASTER_SETTING.getKey(), false)
             .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false);
             //.put("xpack.ml.autodetect_process", false);
+        Collection<Class<? extends Plugin>> mockPlugins = Arrays.asList(
+            LocalStateSecurity.class, TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class);
         addSSLSettingsForStore(nodeSettings, "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", "testnode");
-        try (Node node = new MockNode(nodeSettings.build(), Arrays.asList(LocalStateSecurity.class, TestZenDiscovery.TestPlugin.class))) {
+        try (Node node = new MockNode(nodeSettings.build(), mockPlugins)) {
             node.start();
             ensureStableCluster(cluster().size() + 1);
         }
@@ -150,14 +154,15 @@
             .put("xpack.security.enabled", true)
             .put("xpack.security.audit.enabled", false)
             .put(XPackSettings.WATCHER_ENABLED.getKey(), false)
-            .put(NetworkModule.HTTP_ENABLED.getKey(), false)
             .put("discovery.initial_state_timeout", "0s")
             .put("path.home", home)
             .put(Node.NODE_MASTER_SETTING.getKey(), false)
             .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false);
             //.put("xpack.ml.autodetect_process", false);
+        Collection<Class<? extends Plugin>> mockPlugins = Arrays.asList(
+            LocalStateSecurity.class, TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class);
         addSSLSettingsForStore(nodeSettings, "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", "testnode");
-        try (Node node = new MockNode(nodeSettings.build(), Arrays.asList(LocalStateSecurity.class, TestZenDiscovery.TestPlugin.class))) {
+        try (Node node = new MockNode(nodeSettings.build(), mockPlugins)) {
             node.start();
             TransportService instance = node.injector().getInstance(TransportService.class);
             try (Transport.Connection connection = instance.openConnection(new DiscoveryNode("theNode", transportAddress, Version.CURRENT),
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java
index ef1eeace73b..3a199aec5c8 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java
@@ -37,11 +37,15 @@ public class IpFilteringIntegrationTests extends SecurityIntegTestCase {
         randomClientPort = randomIntBetween(49000, 65500); // ephemeral port
     }
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100);
         return Settings.builder().put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("transport.profiles.client.port", randomClientPortRange)
             // make sure this is "localhost", no matter if ipv4 or ipv6, but be consistent
             .put("transport.profiles.client.bind_host", "localhost")
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java
index 9c3572b7676..b50fc1ce428 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java
@@ -34,12 +34,16 @@ public class IpFilteringUpdateTests extends SecurityIntegTestCase {
         randomClientPort = randomIntBetween(49000, 65500);
     }
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return httpEnabled == false;
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100);
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), httpEnabled)
             .put("xpack.security.transport.filter.deny", "127.0.0.200")
             .put("transport.profiles.client.port", randomClientPortRange)
             .build();
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java
index 37f13806c23..c46bac7e6ef 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java
@@ -47,10 +47,15 @@ import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.instanceOf;
 
 public class SslIntegrationTests extends SecurityIntegTestCase {
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder().put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put("xpack.security.http.ssl.enabled", true).build();
     }
 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java
index 738cf68763b..b47c3b3e10f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java
@@ -22,11 +22,15 @@ import static org.hamcrest.Matchers.nullValue;
 public class AnonymousUserIntegTests extends SecurityIntegTestCase {
     private boolean authorizationExceptionsEnabled = randomBoolean();
 
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
     @Override
     public Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal))
-            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
             .put(AnonymousUser.ROLES_SETTING.getKey(), "anonymous")
             .put(AuthorizationService.ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING.getKey(), authorizationExceptionsEnabled)
             .build();
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java index 0c885840a17..062a3766811 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java @@ -45,6 +45,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class SSLClientAuthTests extends SecurityIntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() @@ -54,7 +60,6 @@ public class SSLClientAuthTests extends SecurityIntegTestCase { .put("xpack.security.http.ssl.enabled", true) .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.REQUIRED) .put("transport.profiles.default.xpack.security.ssl.client_authentication", SSLClientAuth.NONE) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .build(); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java index d4d9ab4c3e8..a97f66763a9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -55,12 +55,16 @@ public class SqlLicenseIT extends AbstractLicensesIntegrationTestCase { return plugins; } + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings(int nodeOrdinal) { // Enable http so we can test JDBC licensing because it only exists on the REST layer.
return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) .build(); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java index 151bf4af189..8dfc5dc25ae 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java @@ -49,8 +49,8 @@ public class WebhookIntegrationTests extends AbstractWatcherIntegrationTestCase private MockWebServer webServer = new MockWebServer(); @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put("http.enabled", true).build(); + protected boolean addMockHttpTransport() { + return false; // enable http } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java index 3dbc9de70d5..3a590fbfe28 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java @@ -36,11 +36,15 @@ import static org.hamcrest.Matchers.containsString; public class ChainIntegrationTests extends AbstractWatcherIntegrationTestCase { + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.HTTP_ENABLED.getKey(), true) .build(); } From 3e9fe3c9cd53bc74ad4d6e1abf34c917af6283ab Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 2 May 2018 12:08:02 -0700 Subject: [PATCH 20/30] [DOCS] Reorganizes authentication details in Stack Overview (#30280) --- .../docs/en/security/authentication.asciidoc | 350 ------------------ .../active-directory-realm.asciidoc | 15 +- .../authentication/built-in-users.asciidoc | 156 ++++++++ .../configuring-native-realm.asciidoc | 2 +- .../authentication/file-realm.asciidoc | 4 +- .../authentication/internal-users.asciidoc | 12 + .../authentication/ldap-realm.asciidoc | 8 +- .../security/authentication/overview.asciidoc | 50 +++ .../authentication/pki-realm.asciidoc | 2 +- .../security/authentication/realms.asciidoc | 123 ++++++ .../authentication/saml-guide.asciidoc | 40 +- .../authentication/saml-realm.asciidoc | 13 +- .../en/security/how-security-works.asciidoc | 53 +-- x-pack/docs/en/security/index.asciidoc | 2 +- 14 files changed, 383 insertions(+), 447 deletions(-) delete mode 100644 x-pack/docs/en/security/authentication.asciidoc create mode 100644 x-pack/docs/en/security/authentication/built-in-users.asciidoc create mode 100644 x-pack/docs/en/security/authentication/internal-users.asciidoc create mode 100644 x-pack/docs/en/security/authentication/overview.asciidoc create mode 100644 x-pack/docs/en/security/authentication/realms.asciidoc diff --git a/x-pack/docs/en/security/authentication.asciidoc b/x-pack/docs/en/security/authentication.asciidoc deleted file mode 100644 index 
8838a0fefbf..00000000000 --- a/x-pack/docs/en/security/authentication.asciidoc +++ /dev/null @@ -1,350 +0,0 @@ -[[setting-up-authentication]] -== Setting Up User Authentication - -Authentication identifies an individual. To gain access to restricted resources, -a user must prove their identity, via passwords, credentials, or some other -means (typically referred to as authentication tokens). - -You can use the native support for managing and authenticating users, or -integrate with external user management systems such as LDAP and Active -Directory. For information about managing native users, -see <>. - -[float] -[[built-in-users]] -=== Built-in Users - -{security} provides built-in user credentials to help you get up and running. -These users have a fixed set of privileges and cannot be authenticated until their -passwords have been set. The `elastic` user can be used to -<>. - -`elastic`:: A built-in _superuser_. See <>. -`kibana`:: The user Kibana uses to connect and communicate with Elasticsearch. -`logstash_system`:: The user Logstash uses when storing monitoring information in Elasticsearch. -`beats_system`:: The user the Beats use when storing monitoring information in Elasticsearch. - - -[float] -[[built-in-user-explanation]] -==== How the Built-in Users Work -These built-in users are stored within a special `.security` index managed by -{security}. -This means that, if the password is changed, or a user is disabled, then that -change is automatically reflected on each node in the cluster. It also means -that if your `.security` index is deleted, or restored from a snapshot, then -any changes you have applied will be lost. - -Although they share the same API, the built-in users are separate and distinct -from users managed by the <>. Disabling the native -realm will not have any effect on the built-in users. The built-in users can -be disabled individually, using the -{ref}/security-api-users.html[user management API]. - -[float] -[[bootstrap-elastic-passwords]] -==== The Elastic Bootstrap Password - -When you install {es}, if the `elastic` user does not already have a password, -it uses a default bootstrap password. The bootstrap password is a transient -password that enables you to run the tools that set all the built-in user passwords. - -By default, the bootstrap password is derived from a randomized `keystore.seed` -setting, which is added to the keystore during installation. You do not need -to know or change this bootstrap password. If you have defined a -`bootstrap.password` setting in the keystore, however, that value is used instead. -For more information about interacting with the keystore, see -{ref}/secure-settings.html[Secure Settings]. - -NOTE: After you <>, -in particular for the `elastic` user, there is no further use for the bootstrap -password. - -[float] -[[set-built-in-user-passwords]] -==== Setting Built-in User Passwords - -You must set the passwords for all built-in users. - -The +elasticsearch-setup-passwords+ tool is the simplest method to set the -built-in users' passwords for the first time. It uses the `elastic` user's -bootstrap password to run user management API requests. 
For example, you can run -the command in an "interactive" mode, which prompts you to enter new passwords -for the `elastic`, `kibana`, `logstash_system`, and `beats_system` users: - -[source,shell] --------------------------------------------------- -bin/elasticsearch-setup-passwords interactive --------------------------------------------------- - -For more information about the command options, see -{ref}/setup-passwords.html[elasticsearch-setup-passwords]. - -IMPORTANT: After you set a password for the `elastic` user, the bootstrap -password is no longer valid; you cannot run the `elasticsearch-setup-passwords` -command a second time. - -Alternatively, you can set the initial passwords for the built-in users by using -the *Management > Users* page in {kib} or the -{ref}/security-api-change-password.html[Change Password API]. These methods are -more complex. You must supply the `elastic` user and its bootstrap password to -log into {kib} or run the API. This requirement means that you cannot use the -default bootstrap password that is derived from the `keystore.seed` setting. -Instead, you must explicitly set a `bootstrap.password` setting in the keystore -before you start {es}. For example, the following command prompts you to enter a -new bootstrap password: - -[source,shell] ----------------------------------------------------- -bin/elasticsearch-keystore add "bootstrap.password" ----------------------------------------------------- - -You can then start {es} and {kib} and use the `elastic` user and bootstrap -password to log into {kib} and change the passwords. Alternatively, you can -submit Change Password API requests for each built-in user. These methods are -better suited for changing your passwords after the initial setup is complete, -since at that point the bootstrap password is no longer required. - -[float] -[[add-built-in-user-passwords]] -==== Adding Built-in User Passwords To {kib}, Logstash, and Beats - -After the `kibana` user password is set, you need to update the {kib} server -with the new password by setting `elasticsearch.password` in the `kibana.yml` -configuration file: - -[source,yaml] ------------------------------------------------ -elasticsearch.password: kibanapassword ------------------------------------------------ - -The `logstash_system` user is used internally within Logstash when -monitoring is enabled for Logstash. - -To enable this feature in Logstash, you need to update the Logstash -configuration with the new password by setting `xpack.monitoring.elasticsearch.password` in -the `logstash.yml` configuration file: - -[source,yaml] ----------------------------------------------------------- -xpack.monitoring.elasticsearch.password: logstashpassword ----------------------------------------------------------- - -If you have upgraded from an older version of elasticsearch/x-pack, -the `logstash_system` user may have defaulted to _disabled_ for security reasons. -Once the password has been changed, you can enable the user via the following API call: - -[source,js] ---------------------------------------------------------------------- -PUT _xpack/security/user/logstash_system/_enable ---------------------------------------------------------------------- -// CONSOLE - -The `beats_system` user is used internally within Beats when monitoring is -enabled for Beats. - -To enable this feature in Beats, you need to update the configuration for each -of your beats to reference the correct username and password. 
For example: - -[source,yaml] ----------------------------------------------------------- -xpack.monitoring.elasticsearch.username: beats_system -xpack.monitoring.elasticsearch.password: beatspassword ----------------------------------------------------------- - -If you have upgraded from an older version of {es}, then you may not have set a -password for the `beats_system` user. If this is the case, then you should use -the *Management > Users* page in {kib} or the -{ref}/security-api-change-password.html[Change Password API] to set a password -for this user. - -[float] -[[disabling-default-password]] -==== Disabling Default Password Functionality -[IMPORTANT] -============================================================================= -This setting is deprecated. The elastic user no longer has a default password. -The password must be set before the user can be used. -See <>. -============================================================================= - -[float] -[[internal-users]] -=== Internal Users - -{security} has three _internal_ users (`_system`, `_xpack`, and `_xpack_security`) -that are responsible for the operations that take place inside an {es} cluster. - -These users are only used by requests that originate from within the cluster. -For this reason, they cannot be used to authenticate against the API and there -is no password to manage or reset. - -From time-to-time you may find a reference to one of these users inside your -logs, including <>. - -[[how-authc-works]] -=== How Authentication Works - -Authentication in {security} is handled by one or more authentication services -called _realms_. A _realm_ is used to resolve and authenticate users based on -authentication tokens. {security} provides the following built-in realms: - -_native_:: -An internal realm where users are stored in a dedicated Elasticsearch index. -This realm supports an authentication token in the form of username and password, -and is available by default when no realms are explicitly configured. See -<>. - -_ldap_:: -A realm that uses an external LDAP server to authenticate the -users. This realm supports an authentication token in the form of username and -password, and requires explicit configuration in order to be used. See -<>. - -_active_directory_:: -A realm that uses an external Active Directory Server to authenticate the -users. With this realm, users are authenticated by usernames and passwords. -See <>. - -_pki_:: -A realm that authenticates users using Public Key Infrastructure (PKI). This -realm works in conjunction with SSL/TLS and identifies the users through the -Distinguished Name (DN) of the client's X.509 certificates. See <>. - -_file_:: -An internal realm where users are defined in files stored on each node in the -Elasticsearch cluster. This realm supports an authentication token in the form -of username and password, and is always available. See <>. - -_saml_:: -A realm that facilitates authentication using the SAML 2.0 Web SSO protocol. -This realm is designed to support authentication through {kib}, and is non -intended for use in the REST API. See <>. - - - -{security} also supports custom realms. If you need to integrate with another -authentication system, you can build a custom realm plugin. For more information, -see <>. - -Realms live within a _realm chain_. It is essentially a prioritized list of -configured realms (typically of various types). The order of the list determines -the order in which the realms will be consulted. 
You should make sure each -configured realm has a distinct `order` setting. In the event that two or more -realms have the same `order`, they will be processed in `name` order. -During the authentication process, {security} will consult and try to -authenticate the request one realm at a time. -Once one of the realms successfully authenticates the request, the authentication -is considered to be successful and the authenticated user will be associated -with the request (which will then proceed to the authorization phase). If a realm -cannot authenticate the request, the next in line realm in the chain will be -consulted. If all realms in the chain could not authenticate the request, the -authentication is then considered to be unsuccessful and an authentication error -will be returned (as HTTP status code `401`). - -NOTE: Some systems (e.g. Active Directory) have a temporary lock-out period after - several successive failed login attempts. If the same username exists in - multiple realms, unintentional account lockouts are possible. For more - information, please see <>. - -The default realm chain contains the `native` and `file` realms. To explicitly, -configure a realm chain, you specify the chain in `elasticsearch.yml`. When you -configure a realm chain, only the realms you specify are used for authentication. -To use the `native` and `file` realms, you must include them in the chain. - -The following snippet configures a realm chain that includes the `file` and -`native` realms, as well as two LDAP realms and an Active Directory realm. - -[source,yaml] ----------------------------------------- -xpack.security.authc: - realms: - - file: - type: file - order: 0 - - native: - type: native - order: 1 - - ldap1: - type: ldap - order: 2 - enabled: false - url: 'url_to_ldap1' - ... - - ldap2: - type: ldap - order: 3 - url: 'url_to_ldap2' - ... - - ad1: - type: active_directory - order: 4 - url: 'url_to_ad' ----------------------------------------- - -As can be seen above, each realm has a unique name that identifies it and each -realm type dictates its own set of required and optional settings. That said, -there are three settings that are common to all realms: - -[cols=",^,",options="header"] -|========= -| Setting | Required | Description - -| `type` | true | Identifies the type of the realm. The realm type - determines what other settings the realms should be - configured with. The type can be one of: `native`, - `ldap`, `active_directory`, `pki`, `file`, or in case - of a custom realm, the type name that identifies it. - -| `order` | false | A numeric value representing the priority/index of - the realm within the realm chain. This will determine - the order by which the realms will be consulted - during authentication, with lower order being consulted - first. - -| `enabled` | false | When set to `false` the realm will be disabled and - will not be added to the realm chain. This is useful - for debugging purposes as it enables you to remove - a realm from the chain without deleting and losing - its configuration. -|========= - -Realm types can roughly be classified in two categories: - -Internal:: Realms that are internal to Elasticsearch and don't require any - communication with external parties. They are fully managed by - {security}. There can only be a maximum of one configured realm - per internal realm type. {security} provides two internal realm - types: `native` and `file`. 
- -External:: Realms that require interaction with parties/components external to - Elasticsearch, typically, with enterprise grade identity management - systems. Unlike internal realms, there can be as many external realms - as one would like - each with its own unique name and configuration. - {security} provides three external realm types: `ldap`, - `active_directory` and `pki`. - -include::authentication/anonymous-access.asciidoc[] - -include::authentication/native-realm.asciidoc[] - -include::authentication/ldap-realm.asciidoc[] - -include::authentication/active-directory-realm.asciidoc[] - -include::authentication/pki-realm.asciidoc[] - -include::authentication/file-realm.asciidoc[] - -include::authentication/saml-realm.asciidoc[] - -include::authentication/custom-realm.asciidoc[] - -include::authentication/user-cache.asciidoc[] - -include::authentication/saml-guide.asciidoc[] diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc index f0f209d3fa3..2069176172e 100644 --- a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc @@ -1,5 +1,5 @@ [[active-directory-realm]] -=== Active Directory User Authentication +=== Active Directory user authentication You can configure {security} to communicate with Active Directory to authenticate users. To integrate with Active Directory, you configure an `active_directory` @@ -33,7 +33,7 @@ retrieves the user's group memberships from the `tokenGroups` attribute on the user's entry in Active Directory. [[ad-load-balancing]] -==== Load Balancing and Failover +==== Load balancing and failover The `load_balance.type` setting can be used at the realm level to configure how {security} should interact with multiple Active Directory servers. Two modes of operation are supported: failover and load balancing. @@ -41,18 +41,17 @@ operation are supported: failover and load balancing. See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings]. [[ad-settings]] -==== Active Directory Realm Settings +==== Active Directory realm settings See {ref}/security-settings.html#ref-ad-settings[Active Directory Realm Settings]. [[mapping-roles-ad]] -==== Mapping Active Directory Users and Groups to Roles +==== Mapping Active Directory users and groups to roles See {ref}/configuring-ad-realm.html[Configuring an Active Directory realm]. [[ad-user-metadata]] -==== User Metadata in Active Directory Realms - +==== User metadata in Active Directory realms When a user is authenticated via an Active Directory realm, the following properties are populated in the user's _metadata_: @@ -72,7 +71,7 @@ Additional metadata can be extracted from the Active Directory server by configu the `metadata` setting on the Active Directory realm. [[active-directory-ssl]] -==== Setting up SSL Between Elasticsearch and Active Directory +==== Setting up SSL between Elasticsearch and Active Directory See -{ref}/configuring-tls.html#tls-active-directory[Encrypting communications between {es} and Active Directory]. \ No newline at end of file +{ref}/configuring-tls.html#tls-active-directory[Encrypting communications between {es} and Active Directory]. 
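+
+To complement the role-mapping pointer above, the following is a minimal
+illustrative sketch that maps an Active Directory group to a role with the
+role mapping API. The mapping name, role, and group DN below are placeholders,
+not values from a real directory:
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/ad-admins
+{
+  "roles": [ "superuser" ],
+  "enabled": true,
+  "rules": {
+    "field": { "groups": "cn=admins,dc=example,dc=com" }
+  }
+}
+--------------------------------------------------
+// CONSOLE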
diff --git a/x-pack/docs/en/security/authentication/built-in-users.asciidoc b/x-pack/docs/en/security/authentication/built-in-users.asciidoc new file mode 100644 index 00000000000..2400643755a --- /dev/null +++ b/x-pack/docs/en/security/authentication/built-in-users.asciidoc @@ -0,0 +1,156 @@ +[[built-in-users]] +=== Built-in users + +{security} provides built-in user credentials to help you get up and running. +These users have a fixed set of privileges and cannot be authenticated until their +passwords have been set. The `elastic` user can be used to +<>. + +`elastic`:: A built-in _superuser_. See <>. +`kibana`:: The user Kibana uses to connect and communicate with Elasticsearch. +`logstash_system`:: The user Logstash uses when storing monitoring information in Elasticsearch. +`beats_system`:: The user the Beats use when storing monitoring information in Elasticsearch. + + +[float] +[[built-in-user-explanation]] +==== How the built-in users work +These built-in users are stored within a special `.security` index managed by +{security}. +This means that, if the password is changed, or a user is disabled, then that +change is automatically reflected on each node in the cluster. It also means +that if your `.security` index is deleted, or restored from a snapshot, then +any changes you have applied will be lost. + +Although they share the same API, the built-in users are separate and distinct +from users managed by the <>. Disabling the native +realm will not have any effect on the built-in users. The built-in users can +be disabled individually, using the +{ref}/security-api-users.html[user management API]. + +[float] +[[bootstrap-elastic-passwords]] +==== The Elastic bootstrap password + +When you install {es}, if the `elastic` user does not already have a password, +it uses a default bootstrap password. The bootstrap password is a transient +password that enables you to run the tools that set all the built-in user passwords. + +By default, the bootstrap password is derived from a randomized `keystore.seed` +setting, which is added to the keystore during installation. You do not need +to know or change this bootstrap password. If you have defined a +`bootstrap.password` setting in the keystore, however, that value is used instead. +For more information about interacting with the keystore, see +{ref}/secure-settings.html[Secure Settings]. + +NOTE: After you <>, +in particular for the `elastic` user, there is no further use for the bootstrap +password. + +[float] +[[set-built-in-user-passwords]] +==== Setting built-in user passwords + +You must set the passwords for all built-in users. + +The +elasticsearch-setup-passwords+ tool is the simplest method to set the +built-in users' passwords for the first time. It uses the `elastic` user's +bootstrap password to run user management API requests. For example, you can run +the command in an "interactive" mode, which prompts you to enter new passwords +for the `elastic`, `kibana`, `logstash_system`, and `beats_system` users: + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-setup-passwords interactive +-------------------------------------------------- + +For more information about the command options, see +{ref}/setup-passwords.html[elasticsearch-setup-passwords]. + +IMPORTANT: After you set a password for the `elastic` user, the bootstrap +password is no longer valid; you cannot run the `elasticsearch-setup-passwords` +command a second time. 
+ +Alternatively, you can set the initial passwords for the built-in users by using +the *Management > Users* page in {kib} or the +{ref}/security-api-change-password.html[Change Password API]. These methods are +more complex. You must supply the `elastic` user and its bootstrap password to +log into {kib} or run the API. This requirement means that you cannot use the +default bootstrap password that is derived from the `keystore.seed` setting. +Instead, you must explicitly set a `bootstrap.password` setting in the keystore +before you start {es}. For example, the following command prompts you to enter a +new bootstrap password: + +[source,shell] +---------------------------------------------------- +bin/elasticsearch-keystore add "bootstrap.password" +---------------------------------------------------- + +You can then start {es} and {kib} and use the `elastic` user and bootstrap +password to log into {kib} and change the passwords. Alternatively, you can +submit Change Password API requests for each built-in user. These methods are +better suited for changing your passwords after the initial setup is complete, +since at that point the bootstrap password is no longer required. + +[float] +[[add-built-in-user-passwords]] +==== Adding built-in user passwords to {kib}, Logstash, and Beats + +After the `kibana` user password is set, you need to update the {kib} server +with the new password by setting `elasticsearch.password` in the `kibana.yml` +configuration file: + +[source,yaml] +----------------------------------------------- +elasticsearch.password: kibanapassword +----------------------------------------------- + +The `logstash_system` user is used internally within Logstash when +monitoring is enabled for Logstash. + +To enable this feature in Logstash, you need to update the Logstash +configuration with the new password by setting `xpack.monitoring.elasticsearch.password` in +the `logstash.yml` configuration file: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.password: logstashpassword +---------------------------------------------------------- + +If you have upgraded from an older version of {es}/{xpack}, +the `logstash_system` user may have defaulted to _disabled_ for security reasons. +Once the password has been changed, you can enable the user via the following API call: + +[source,js] +--------------------------------------------------------------------- +PUT _xpack/security/user/logstash_system/_enable +--------------------------------------------------------------------- +// CONSOLE + +The `beats_system` user is used internally within Beats when monitoring is +enabled for Beats. + +To enable this feature in Beats, you need to update the configuration for each +of your beats to reference the correct username and password. For example: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.username: beats_system +xpack.monitoring.elasticsearch.password: beatspassword +---------------------------------------------------------- + +If you have upgraded from an older version of {es}, then you may not have set a +password for the `beats_system` user. If this is the case, then you should use +the *Management > Users* page in {kib} or the +{ref}/security-api-change-password.html[Change Password API] to set a password +for this user.
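+
+For example, the following illustrative request uses the
+{ref}/security-api-change-password.html[Change Password API] to set the
+password for the `beats_system` user; the password value shown here is a
+placeholder:
+
+[source,js]
+---------------------------------------------------------------------
+POST _xpack/security/user/beats_system/_password
+{
+  "password" : "beatspassword"
+}
+---------------------------------------------------------------------
+// CONSOLE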
+ +[float] +[[disabling-default-password]] +==== Disabling default password functionality +[IMPORTANT] +============================================================================= +This setting is deprecated. The `elastic` user no longer has a default password. +The password must be set before the user can be used. +See <>. +============================================================================= diff --git a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc index 3c4f44fdfc9..3cda29c2c71 100644 --- a/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-native-realm.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[configuring-native-realm]] -=== Configuring a Native Realm +=== Configuring a native realm The easiest way to manage and authenticate users is with the internal `native` realm. diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc index 937537ac1a1..cf6f5cacd1c 100644 --- a/x-pack/docs/en/security/authentication/file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/file-realm.asciidoc @@ -1,5 +1,5 @@ [[file-realm]] -=== File-based User Authentication +=== File-based user authentication You can manage and authenticate users with the built-in `file` realm. With the `file` realm, users are defined in local files on each node in the cluster. @@ -23,4 +23,4 @@ command-line tool. This tool enables you to add and remove users, assign user roles, and manage user passwords. For more information, see -{ref}/configuring-file-realm.html[Configuring a file realm]. \ No newline at end of file +{ref}/configuring-file-realm.html[Configuring a file realm]. diff --git a/x-pack/docs/en/security/authentication/internal-users.asciidoc b/x-pack/docs/en/security/authentication/internal-users.asciidoc new file mode 100644 index 00000000000..53468363dc8 --- /dev/null +++ b/x-pack/docs/en/security/authentication/internal-users.asciidoc @@ -0,0 +1,12 @@ +[[internal-users]] +=== Internal users + +{security} has three _internal_ users (`_system`, `_xpack`, and `_xpack_security`) +that are responsible for the operations that take place inside an {es} cluster. + +These users are only used by requests that originate from within the cluster. +For this reason, they cannot be used to authenticate against the API and there +is no password to manage or reset. + +From time to time you may find a reference to one of these users inside your +logs, including <>. diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc index 4e280c313d8..8180b109226 100644 --- a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc @@ -20,12 +20,12 @@ The `ldap` realm supports two modes of operation, a user search mode and a mode with specific templates for user DNs. [[ldap-user-search]] -===== User search mode and user DN templates mode +==== User search mode and user DN templates mode See {ref}/configuring-ldap-realm.html[Configuring an LDAP Realm]. [[ldap-load-balancing]] -===== Load balancing and failover +==== Load balancing and failover The `load_balance.type` setting can be used at the realm level to configure how {security} should interact with multiple LDAP servers.
{security} supports both failover and load balancing modes of operation. @@ -33,7 +33,7 @@ failover and load balancing modes of operation. See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings]. [[ldap-settings]] -===== LDAP realm settings +==== LDAP realm settings See {ref}/security-settings.html#ref-ldap-settings[LDAP Realm Settings]. @@ -82,4 +82,4 @@ with the <> or in [[ldap-ssl]] ==== Setting up SSL Between Elasticsearch and LDAP -See {ref}/tls-ldap.html[Encrypting Communications Between {es} and LDAP]. \ No newline at end of file +See {ref}/tls-ldap.html[Encrypting Communications Between {es} and LDAP]. diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc new file mode 100644 index 00000000000..ada5453c7a7 --- /dev/null +++ b/x-pack/docs/en/security/authentication/overview.asciidoc @@ -0,0 +1,50 @@ +[[setting-up-authentication]] +== User authentication + +Authentication identifies an individual. To gain access to restricted resources, +a user must prove their identity, via passwords, credentials, or some other +means (typically referred to as authentication tokens). + +The {stack} authenticates users by identifying the users behind the requests +that hit the cluster and verifying that they are who they claim to be. The +authentication process is handled by one or more authentication services called +<>. + +You can use the native support for managing and authenticating users, or +integrate with external user management systems such as LDAP and Active +Directory. + +{security} provides built-in realms such as `native`, `ldap`, `active_directory`, +`pki`, `file`, and `saml`. If none of the built-in realms meet your needs, you +can also build your own custom realm and plug it into the {stack}. + +When {security} is enabled, depending on the realms you've configured, you must +attach your user credentials to the requests sent to {es}. For example, when +using realms that support usernames and passwords you can simply attach a +{wikipedia}/Basic_access_authentication[basic auth] header to the requests. + +include::built-in-users.asciidoc[] + +include::internal-users.asciidoc[] + +include::realms.asciidoc[] + +include::active-directory-realm.asciidoc[] + +include::file-realm.asciidoc[] + +include::ldap-realm.asciidoc[] + +include::native-realm.asciidoc[] + +include::pki-realm.asciidoc[] + +include::saml-realm.asciidoc[] + +include::custom-realm.asciidoc[] + +include::anonymous-access.asciidoc[] + +include::user-cache.asciidoc[] + +include::saml-guide.asciidoc[] diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc index 47f9670d840..4fc91717f93 100644 --- a/x-pack/docs/en/security/authentication/pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/pki-realm.asciidoc @@ -15,6 +15,6 @@ the desired network layers (transport or http), and map the Distinguished Names See {ref}/configuring-pki-realm.html[Configuring a PKI realm]. [[pki-settings]] -==== PKI Realm Settings +==== PKI realm settings See {ref}/security-settings.html#ref-pki-settings[PKI realm settings].
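+
+To check which user a request is authenticated as (for example, to confirm
+that a PKI realm is picking up the client certificate), a minimal
+illustrative call is the authenticate API, which returns information about
+the user behind the current request:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/security/_authenticate
+--------------------------------------------------
+// CONSOLE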
diff --git a/x-pack/docs/en/security/authentication/realms.asciidoc b/x-pack/docs/en/security/authentication/realms.asciidoc new file mode 100644 index 00000000000..7bd48c5c8f0 --- /dev/null +++ b/x-pack/docs/en/security/authentication/realms.asciidoc @@ -0,0 +1,123 @@ +[[realms]] +=== Realms + +Authentication in {security} is handled by one or more authentication services +called _realms_. A _realm_ is used to resolve and authenticate users based on +authentication tokens. {security} provides the following built-in realms: + +_native_:: +An internal realm where users are stored in a dedicated {es} index. +This realm supports an authentication token in the form of username and password, +and is available by default when no realms are explicitly configured. The users +are managed via the {ref}/security-api-users.html[User Management API]. See +<>. + +_ldap_:: +A realm that uses an external LDAP server to authenticate the +users. This realm supports an authentication token in the form of username and +password, and requires explicit configuration in order to be used. See +<>. + +_active_directory_:: +A realm that uses an external Active Directory Server to authenticate the +users. With this realm, users are authenticated by usernames and passwords. +See <>. + +_pki_:: +A realm that authenticates users using Public Key Infrastructure (PKI). This +realm works in conjunction with SSL/TLS and identifies the users through the +Distinguished Name (DN) of the client's X.509 certificates. See <>. + +_file_:: +An internal realm where users are defined in files stored on each node in the +{es} cluster. This realm supports an authentication token in the form +of username and password and is always available. See <>. + +_saml_:: +A realm that facilitates authentication using the SAML 2.0 Web SSO protocol. +This realm is designed to support authentication through {kib} and is not +intended for use in the REST API. See <>. + +{security} also supports custom realms. If you need to integrate with another +authentication system, you can build a custom realm plugin. For more information, +see <>. + +Realms live within a _realm chain_. It is essentially a prioritized list of +configured realms (typically of various types). The order of the list determines +the order in which the realms will be consulted. You should make sure each +configured realm has a distinct `order` setting. In the event that two or more +realms have the same `order`, they will be processed in `name` order. +During the authentication process, {security} will consult and try to +authenticate the request one realm at a time. +Once one of the realms successfully authenticates the request, the authentication +is considered to be successful and the authenticated user will be associated +with the request (which will then proceed to the authorization phase). If a realm +cannot authenticate the request, the next realm in the chain is +consulted. If none of the realms in the chain can authenticate the request, the +authentication is considered to be unsuccessful and an authentication error +will be returned (as HTTP status code `401`). + +NOTE: Some systems (e.g. Active Directory) have a temporary lock-out period after + several successive failed login attempts. If the same username exists in + multiple realms, unintentional account lockouts are possible. For more + information, please see <>. + +The default realm chain contains the `native` and `file` realms.
To explicitly +configure a realm chain, you specify the chain in `elasticsearch.yml`. When you +configure a realm chain, only the realms you specify are used for authentication. +To use the `native` and `file` realms, you must include them in the chain. + +The following snippet configures a realm chain that includes the `file` and +`native` realms, as well as two LDAP realms and an Active Directory realm. + +[source,yaml] +---------------------------------------- +xpack.security.authc: + realms: + + file: + type: file + order: 0 + + native: + type: native + order: 1 + + ldap1: + type: ldap + order: 2 + enabled: false + url: 'url_to_ldap1' + ... + + ldap2: + type: ldap + order: 3 + url: 'url_to_ldap2' + ... + + ad1: + type: active_directory + order: 4 + url: 'url_to_ad' +---------------------------------------- + +As can be seen above, each realm has a unique name that identifies it and each +realm type dictates its own set of required and optional settings. That said, +there are +{ref}/security-settings.html#ref-realm-settings[settings that are common to all realms]. + +Realm types can roughly be classified into two categories: + +Internal:: Realms that are internal to {es} and don't require any + communication with external parties. They are fully managed by + {security}. There can only be a maximum of one configured realm + per internal realm type. {security} provides two internal realm + types: `native` and `file`. + +External:: Realms that require interaction with parties/components external to + {es}, typically, with enterprise grade identity management + systems. Unlike internal realms, there can be as many external realms + as one would like - each with its own unique name and configuration. + {security} provides the following external realm types: `ldap`, + `active_directory`, `saml`, and `pki`. diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 70d8180cedb..d1f7961fecb 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -1,6 +1,6 @@ [[saml-guide]] -== Configuring SAML Single-Sign-On on the Elastic Stack +== Configuring SAML single-sign-on on the Elastic Stack The Elastic Stack supports SAML single-sign-on (SSO) into {kib}, using {es} as a backend service. In SAML terminology, the Elastic Stack is operating as a @@ -21,7 +21,7 @@ the primary (or sole) authentication method for users of that {kib} instance. Once you enable SAML authentication in {kib} it will affect all users who try to login. The <> section provides more detail about how this works. -=== The Identity Provider +=== The identity provider The Elastic Stack supports the SAML 2.0 _Web Browser SSO_ and the SAML 2.0 _Single Logout_ profiles and can integrate with any Identity Provider (IdP) @@ -69,7 +69,7 @@ For `` messages, the message itself must be signed, and the signature should be provided as a URL parameter, as required by the HTTP-Redirect binding. -=== Configure {es} for SAML Authentication +=== Configure {es} for SAML authentication There are five configuration steps to enable SAML authentication in {es}: @@ -88,7 +88,7 @@ authentication. For more information, see {ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. -==== Enable the Token Service +==== Enable the token service The {es} SAML implementation makes use of the {es} Token Service.
This service is automatically enabled if you configure TLS on the HTTP interface, and can be @@ -100,7 +100,7 @@ xpack.security.authc.token.enabled: true ------------------------------------------------------------ [[saml-create-realm]] -==== Create a SAML Realm +==== Create a SAML realm SAML authentication is enabled by configuring a SAML realm within the authentication chain for {es}. @@ -137,7 +137,7 @@ The configuration values used in the example above are: xpack.security.authc.realms.saml:: This defines a new authentication realm named "saml1". - See <> for more explanation of realms. + See <> for more explanation of realms. type:: The `type` must be `saml` order:: @@ -184,7 +184,7 @@ attribute.principal:: See <>. attribute.groups:: See <>. [[saml-attribute-mapping]] -==== Attribute Mapping +==== Attribute mapping When a user connects to {kib} through your Identity Provider, the Identity Provider will supply a SAML Assertion about the user. The assertion will contain @@ -237,7 +237,7 @@ The recommended steps for configuring these SAML attributes are as follows: in your IdP. In the example above, we have configured the `principal` and `groups` attributes. -===== Special Attribute Names +===== Special attribute names In general, {es} expects that the configured value for an attribute will be a URI such as `urn:oid:0.9.2342.19200300.100.1.1`, however there are some @@ -288,7 +288,7 @@ xpack.security.authc.realms.saml1: ------------------------------------------------------------ [[saml-user-properties]] -===== {es} User Properties +===== {es} user properties The {es} SAML realm can be configured to map SAML `attributes` to the following properties on the authenticated user: @@ -308,7 +308,7 @@ name:: _(Optional)_ The user's full name. mail:: _(Optional)_ The user's email address. dn:: _(Optional)_ The user's X.500 _Distinguished Name_. -===== Extracting partial values from SAML Attributes +===== Extracting partial values from SAML attributes There are some occasions where the IdP's attribute may contain more information than you wish to use within {es}. A common example of this is one where the @@ -354,7 +354,7 @@ you make sure your regular expressions are as precise as possible so that you do not inadvertently open an avenue for user impersonation attacks. [[saml-logout]] -==== SAML Logout +==== SAML logout The SAML protocol supports the concept of Single Logout (SLO). The level of support for SLO varies between Identity Providers. @@ -367,7 +367,7 @@ By default the Elastic Stack will support SAML SLO if the following are true: - You configure `sp.logout` - The setting `idp.use_single_logout` is not `false` -===== IdP SLO Service +===== IdP SLO service One of the values that {es} reads from the IdP's SAML metadata is the ``. In order for Single Logout to work with the Elastic @@ -401,7 +401,7 @@ will ignore the SLO service that your IdP provides. In this case, when a user logs out of {kib} it will invalidate their {es} session (security token), but will not perform any logout at the IdP. -===== Using {kib} without Single Logout +===== Using {kib} without single logout If your IdP does not support Single Logout, or you choose not to use it, then {kib} will perform a "local logout" only. 
@@ -428,7 +428,7 @@ The possible solutions to this problem are: [[saml-enc-sign]] -==== Encryption and Signing +==== Encryption and signing The Elastic Stack supports generating signed SAML messages (for authentication and/or logout), verifying signed SAML messages from the IdP (for both @@ -453,7 +453,7 @@ configured for the Elastic Stack Service Provider. Encryption certificates are rarely needed, but the Elastic Stack supports them for cases where IdPs or local policies mandate their use. -===== Generating certificates and keys. +===== Generating certificates and keys {es} supports certificates and keys in either PEM, PKCS#12 or JKS format. Some Identity Providers are more restrictive in the formats they support, and @@ -486,7 +486,7 @@ The generated zip archive will contain 3 files: Encryption certificates can be generated with the same process. -===== Configuring {es} for Signing +===== Configuring {es} for signing By default, {security} will sign _all_ outgoing SAML messages if a signing key has been configured. @@ -527,7 +527,7 @@ A list of message types to sign. A message type is identified by the _local name_ of the XML element used for the message. Supported values are: `AuthnRequest`, `LogoutRequest` and `LogoutResponse`. -===== Configuring {es} for Encrypted Messages +===== Configuring {es} for encrypted messages {security} supports a single key for message decryption. If a key is configured, then {security} will attempt to use it to decrypt @@ -583,7 +583,7 @@ The {ref}/saml-metadata.html[documentation for the elasticsearch-saml-metadata u describes how to run it, and the available command line options. [[saml-role-mapping]] -=== Configuring Role Mappings +=== Configuring role mappings When a user authenticates using SAML, they are identified to the Elastic Stack, but this does not automatically grant them access to perform any actions or @@ -649,7 +649,7 @@ PUT /_xpack/security/role_mapping/saml-finance // TEST [[saml-user-metadata]] -=== User Metadata +=== User metadata By default users who authenticate via SAML will have some additional metadata fields. @@ -737,7 +737,7 @@ for `http` or `443` for `https`). These values must be aligned with the URLs used in the {es} configuration for `sp.acs` and `sp.logout`. -==== Supporting SAML and Basic authentication in {kib} +==== Supporting SAML and basic authentication in {kib} The SAML support in {kib} is designed on the expectation that it will be the primary (or sole) authentication method for users of that {kib} instance. diff --git a/x-pack/docs/en/security/authentication/saml-realm.asciidoc b/x-pack/docs/en/security/authentication/saml-realm.asciidoc index bbf7d597b30..c05f82d341b 100644 --- a/x-pack/docs/en/security/authentication/saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-realm.asciidoc @@ -1,5 +1,5 @@ [[saml-realm]] -=== SAML Authentication +=== SAML authentication {security} supports user authentication using SAML Single Sign On. {security} provides this support using the Web Browser SSO profile of the SAML 2.0 protocol. @@ -21,23 +21,20 @@ Elastic Stack, there is a step-by-step guide to The remainder of this document will describe {es} specific configuration options for SAML realms. - [[saml-settings]] -==== SAML Realm Settings +==== SAML realm settings See {ref}/security-settings.html#ref-saml-settings[SAML Realm Settings]. - -===== SAML Realm Signing Settings +==== SAML realm signing settings See {ref}/security-settings.html#ref-saml-signing-settings[SAML Realm Signing Settings]. 
- -===== SAML Realm Encryption Settings +==== SAML realm encryption settings See {ref}/security-settings.html#ref-saml-encryption-settings[SAML Realm Encryption Settings]. -===== SAML Realm SSL Settings +==== SAML realm SSL settings See {ref}/security-settings.html#ref-saml-ssl-settings[SAML Realm SSL Settings]. diff --git a/x-pack/docs/en/security/how-security-works.asciidoc b/x-pack/docs/en/security/how-security-works.asciidoc index 8cd7befc642..ae402dfe05e 100644 --- a/x-pack/docs/en/security/how-security-works.asciidoc +++ b/x-pack/docs/en/security/how-security-works.asciidoc @@ -9,62 +9,11 @@ layers. {security} provides the means to secure the Elastic cluster on several levels: - * User authentication + * <> * Authorization and Role Based Access Control (a.k.a RBAC) * Node/Client Authentication and Channel Encryption * Auditing - -[float] -=== User Authentication - -User authentication is the process of identifying the users behind the requests -that hit the cluster and verifying that indeed they are who they claim to be. The -authentication process is handled by one or more authentication services called -_realms_. {security} provides the following built-in realms: - -|====== -| `native` | | | An internal realm where users are stored in a dedicated - Elasticsearch index. With this realm, users are - authenticated by usernames and passwords. The users - are managed via the - {ref}/security-api-users.html[User Management API]. - -| `ldap` | | | A realm that uses an external LDAP server to authenticate - the users. With this realm, users are authenticated by - usernames and passwords. - -| `active_directory` | | | A realm that uses an external Active Directory Server - to authenticate the users. With this realm, users - are authenticated by usernames and passwords. - -| `pki` | | | A realm that authenticates users using Public Key - Infrastructure (PKI). This realm works in conjunction - with SSL/TLS and identifies the users through the - Distinguished Name (DN) of the client's X.509 - certificates. - -| `file` | | | An internal realm where users are defined in files - stored on each node in the Elasticsearch cluster. - With this realm, users are authenticated by usernames - and passwords. The users are managed via dedicated - tools that are provided by {xpack} on installation. - -| `saml` | | | A realm that uses SAML 2.0 Web SSO. This realm is - designed to be used with {kib}. -|====== - -If none of the built-in realms meets your needs, you can also build your own -custom realm and plug it into {xpack}. - -When {security} is enabled, depending on the realms you've configured, you will -need to attach your user credentials to the requests sent to Elasticsearch. For -example, when using realms that support usernames and passwords you can simply -attach {wikipedia}/Basic_access_authentication[basic auth] header to the requests. 
- -For more information on user authentication see <> - - [float] === Authorization diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc index 6804a5d3a98..188353d01a3 100644 --- a/x-pack/docs/en/security/index.asciidoc +++ b/x-pack/docs/en/security/index.asciidoc @@ -98,7 +98,7 @@ include::getting-started.asciidoc[] include::how-security-works.asciidoc[] -include::authentication.asciidoc[] +include::authentication/overview.asciidoc[] include::authorization.asciidoc[] From cca1a2a7cf55230a21f1b777009dfa90c428bb85 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Wed, 2 May 2018 22:19:43 +0300 Subject: [PATCH 21/30] Make licensing FIPS-140 compliant (#30251) Necessary changes so that the licensing functionality can be used in a JVM in FIPS 140 approved mode. * Uses adequate salt length in encryption * Changes key derivation to PBKDF2WithHmacSHA512 from a custom approach with SHA512 and manual key stretching * Removes redundant manual padding Other relevant changes: * Uses the SHA512 hash instead of the encrypted key bytes as the key fingerprint to be included in the license specification * Removes the explicit verification check of the encryption key as this is implicitly checked in signature verification. --- .../license/licensor/LicenseSigner.java | 28 ++- .../src/test/resources/private.key | Bin 1232 -> 1232 bytes .../src/test/resources/public.key | Bin 304 -> 294 bytes x-pack/plugin/core/snapshot.key | Bin 304 -> 294 bytes .../org/elasticsearch/license/CryptUtils.java | 181 +++++++++--------- .../org/elasticsearch/license/License.java | 3 +- .../elasticsearch/license/LicenseService.java | 6 +- .../elasticsearch/license/LicenseUtils.java | 9 + .../license/LicenseVerifier.java | 17 +- .../license/SelfGeneratedLicense.java | 25 ++- .../StartupSelfGeneratedLicenseTask.java | 28 ++- .../license/LicenseServiceClusterTests.java | 21 ++ .../license/SelfGeneratedLicenseTests.java | 4 +- .../org/elasticsearch/license/TestUtils.java | 20 +- .../core/src/test/resources/private.key | Bin 1232 -> 1232 bytes .../plugin/core/src/test/resources/public.key | Bin 304 -> 294 bytes .../test/license/20_put_license.yml | 2 +- 17 files changed, 213 insertions(+), 131 deletions(-) diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java index 1b28878e888..a2b8dc9908f 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java @@ -8,6 +8,7 @@ package org.elasticsearch.license.licensor; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -20,7 +21,10 @@ import java.nio.ByteBuffer; import java.nio.file.Files; import java.nio.file.Path; import java.security.InvalidKeyException; +import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.PublicKey; import java.security.SecureRandom; import java.security.Signature; import java.security.SignatureException; @@ -35,9 +39,7 @@ import java.util.Map; public class
LicenseSigner { private static final int MAGIC_LENGTH = 13; - private final Path publicKeyPath; - private final Path privateKeyPath; public LicenseSigner(final Path privateKeyPath, final Path publicKeyPath) { @@ -59,9 +61,11 @@ public class LicenseSigner { Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"); licenseSpec.toXContent(contentBuilder, new ToXContent.MapParams(licenseSpecViewMode)); final byte[] signedContent; + final boolean preV4 = licenseSpec.version() < License.VERSION_CRYPTO_ALGORITHMS; try { final Signature rsa = Signature.getInstance("SHA512withRSA"); - rsa.initSign(CryptUtils.readEncryptedPrivateKey(Files.readAllBytes(privateKeyPath))); + PrivateKey decryptedPrivateKey = CryptUtils.readEncryptedPrivateKey(Files.readAllBytes(privateKeyPath)); + rsa.initSign(decryptedPrivateKey); final BytesRefIterator iterator = BytesReference.bytes(contentBuilder).iterator(); BytesRef ref; while((ref = iterator.next()) != null) { @@ -77,15 +81,17 @@ public class LicenseSigner { final byte[] magic = new byte[MAGIC_LENGTH]; SecureRandom random = new SecureRandom(); random.nextBytes(magic); - final byte[] hash = Base64.getEncoder().encode(Files.readAllBytes(publicKeyPath)); - assert hash != null; - byte[] bytes = new byte[4 + 4 + MAGIC_LENGTH + 4 + hash.length + 4 + signedContent.length]; + final byte[] publicKeyBytes = Files.readAllBytes(publicKeyPath); + PublicKey publicKey = CryptUtils.readPublicKey(publicKeyBytes); + final byte[] pubKeyFingerprint = preV4 ? Base64.getEncoder().encode(CryptUtils.writeEncryptedPublicKey(publicKey)) : + getPublicKeyFingerprint(publicKeyBytes); + byte[] bytes = new byte[4 + 4 + MAGIC_LENGTH + 4 + pubKeyFingerprint.length + 4 + signedContent.length]; ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); byteBuffer.putInt(licenseSpec.version()) .putInt(magic.length) .put(magic) - .putInt(hash.length) - .put(hash) + .putInt(pubKeyFingerprint.length) + .put(pubKeyFingerprint) .putInt(signedContent.length) .put(signedContent); @@ -93,4 +99,10 @@ public class LicenseSigner { .fromLicenseSpec(licenseSpec, Base64.getEncoder().encodeToString(bytes)) .build(); } + + private byte[] getPublicKeyFingerprint(byte[] keyBytes) { + MessageDigest sha256 = MessageDigests.sha256(); + sha256.update(keyBytes); + return sha256.digest(); + } } diff --git a/x-pack/license-tools/src/test/resources/private.key b/x-pack/license-tools/src/test/resources/private.key index 1f545803d875598d976b206250d5e95667c794ef..d0a658c3f07bf66a440c2085264e7df1cfb24d5a 100644 GIT binary patch literal 1232 zcmV;>1TXt!CXCA;xn6gI33Sx;gr<}{3~`2Di&z>_^uze-UP~880u>SHBYftf_|OGd zKL~W)R^OoSkiUKFxd zg;AUXjQUkMIRj*|Q4Y#n$Et3#`WO1|%~boac<@X=J}R1h?D-o}O|U}uH)fWVXi4n3 zMLC0^^);i>eW~B5tyaF*}3}T?qe%fEgUVNz(*0csSvD+DWT5QnV=GP{RnmL?IQvhV^sfXx= z4ug@Ucv9Kv|J#`x$%Y-b(l}U7RYQo%mX9$M+!`fbQo=ibfwM!rX;zT@Nsvwip?*)s zeX#ngV-Y39T*we7XtjK>9K;+Z8;Vs}4BI=pYJ27IUnB83!p<)by=RCjnSZw7;QUX-^fq7 z2Y|=wC%c>W5n6v~gF*@7{XCZtP|tPD3d!LcPv>h$e>7@hat*Xs0(R#{TzA{qC(|Qf z=_!83Rt%zq)=K4}rD2%uW{e3VIf8^$X3B!;xosvD3MV(Q7Q>v!h}s%m2vR315?~ z#_r25G{7;)$V;z3iW<$Vhw{9_5%iV0hM7Mz<7DPCx8;CqotW5XVk9>Q(Vc$EQJ?d; zYHYjhm~pmFxLw`X1#0#z;F--#7IMS^EwwpT+HBf}GwRC8G@JY7bx!c-W&arLDgdaKrzD#nKkfzD%T8P>4PHt^t?n%TP4!8P6u8n2SK{uW;txI51X#~UG@vq3kfh- u=U`2%yZJhw|6{o@4>6LZmM)1TXv1%Bc_wbLO>E3}&#D<@-rDJ$(p047kR zkI3mLE>J{X5~_|GE5>Bxd81`fV@~0j8z81fwu78=uPV1?-b_fL$snD`NE}Zsv(l}S z+Dr52K=W~W#&Y(zE)IK9$=-71EtPe#X0XhttX&@1ia@?lvh&>PD=A#>rfdl2Ru)*H zKaKRTp&qI~gmO-)fq;gzrbngk1r$cUKd~25@6;rjPJf6eLtCHTaDU8Rbt#KGKlZ%g 
zPd`h6iZ4kWgcXo9P=@b~YoN9WQmtytp^dd%)MXVsW%n~=T9#TzSWvr*YCR25X&mo6 zJoJz>&ju@jBz@-J;se7XMiU5&x@g8>FTI*r^O6Xv2(VF_R@Vk3wQBmIE>&XqH^t=M z5_c~f_`fm)R~9ZqEv|Ma3$nQ}LvHLD`h~L2VKXBZwZnP`c6%Qwnwq8Z0!hd2+^*Gk z6R7QdYCwK?kavUU0Vd*Mq5K{M!Vk zzSLrHwA_oZsoml1+m@@Wyh8$ZA^E2_FpxTTRz7~A$x?V+XFW0!aa4^L&A*^Evlz{gN8ae8h9=J=b zC8F19FN`xNpxA!48b`dGRK3GbQMOho1>byaB*psVh{^^yqVL4J0~$j7viqxma>pYOhkH~PwpJT1j6^yHDMWGO{vw8PyI zIrYD&$F3e@0!346r3BY7JkLDD+7E5EE6oiV(gQ62K@eQRE&#pRq81Tut!V+1TCM~srn6-g`!g_`uozhhJEnN6u9$5|& zLC>$@xw*^CD*fb$ngUWU)yZ%TqX*wM!HU2(O2RRFnw&?^e*xOZWZ?n`No9lb?BLpV=@K}hkB uJ-}qCO)pTE(AWQ24*9x_mu179!iEY^J(*;5;=+=*kZC+fDbTdAaQBA0ondAf&n5h4F(A+hDe6@4FLfG1potr0S^E$f&mHwf&l>lq`lcIsk6j2LTWnY z&Vy*<@g!8mW^?2RawcJR8$@Rx7zP}_aA{rE_%h*d&Qd(+L(pjjdeVA&wh2Wj&Cd*Z zsAyWOeYm&fsNS~>RGo+$v4hB&zx<^`yrvNn4$>3*Y5|jIkB_^*jI^bNe!(9hh|dR5 zd^En1l{tj+Ib)Y`HPe`4vLOsv-FsH1v7%VOr9CWGt&)IbARFa7rR>qO^Jl6#Xhfy$ zV5yh@0~!y9>f2a$=(U>gIOVJb!)|=HX}*mq$FiCrGx9WH4_WAD0CT{Z09Qh{Yjsa= sK(Y<>67jV1{NHszF%CC1e=iYPvTcIF(gc-Bv!3;}#x#Bf0s{d60pXE@5dZ)H literal 304 zcmV-00nh&Zj>mD`1#arZ%pYe0ikx>%eT^BphgE4yij^?pV%9R)x^ebh;_QdCuh!4Q zW6Uc_((oG8AST4JhGxlr-^T)p35dE~WSJg)5Pxr{Pw2i$WUj~xH9;uswX|N?9rjnL zM2)ISwd)TEefcdmYt+G`S`=?3?m4#RvU5l z8S}dpYjk&Ql8jx?OdR#2dLn!Y6H$vDY@hN`@N?om`8<6Sk6Xbq)*aK}TGk$XHH)u} z%E~t-*cAfm8$=KaS>=&0ondAf&n5h4F(A+hDe6@4FLfG1potr0S^E$f&mHwf&l>lq`lcIsk6j2LTWnY z&Vy*<@g!8mW^?2RawcJR8$@Rx7zP}_aA{rE_%h*d&Qd(+L(pjjdeVA&wh2Wj&Cd*Z zsAyWOeYm&fsNS~>RGo+$v4hB&zx<^`yrvNn4$>3*Y5|jIkB_^*jI^bNe!(9hh|dR5 zd^En1l{tj+Ib)Y`HPe`4vLOsv-FsH1v7%VOr9CWGt&)IbARFa7rR>qO^Jl6#Xhfy$ zV5yh@0~!y9>f2a$=(U>gIOVJb!)|=HX}*mq$FiCrGx9WH4_WAD0CT{Z09Qh{Yjsa= sK(Y<>67jV1{NHszF%CC1e=iYPvTcIF(gc-Bv!3;}#x#Bf0s{d60pXE@5dZ)H literal 304 zcmV-00nh&Zj>mD`1#arZ%pYe0ikx>%eT^BphgE4yij^?pV%9R)x^ebh;_QdCuh!4Q zW6Uc_((oG8AST4JhGxlr-^T)p35dE~WSJg)5Pxr{Pw2i$WUj~xH9;uswX|N?9rjnL zM2)ISwd)TEefcdmYt+G`S`=?3?m4#RvU5l z8S}dpYjk&Ql8jx?OdR#2dLn!Y6H$vDY@hN`@N?om`8<6Sk6Xbq)*aK}TGk$XHH)u} z%E~t-*cAfm8$=KaS>=&passPhrase */ - public static PrivateKey readEncryptedPrivateKey(byte[] fileContents, char[] passPhrase) { - PKCS8EncodedKeySpec privateKeySpec = new PKCS8EncodedKeySpec(decrypt(fileContents, passPhrase)); + public static PrivateKey readEncryptedPrivateKey(byte[] fileContents, char[] passPhrase, boolean preV4) { + byte[] keyBytes = preV4 ? 
decryptV3Format(fileContents) : decrypt(fileContents, passPhrase); + PKCS8EncodedKeySpec privateKeySpec = new PKCS8EncodedKeySpec(keyBytes); try { - return KeyFactory.getInstance(keyAlgorithm).generatePrivate(privateKeySpec); + return KeyFactory.getInstance(KEY_ALGORITHM).generatePrivate(privateKeySpec); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw new IllegalStateException(e); } } /** - * Read encrypted public key file content with provided passPhrase + * Read public key file content */ - public static PublicKey readEncryptedPublicKey(byte[] fileContents, char[] passPhrase) { - X509EncodedKeySpec publicKeySpec = new X509EncodedKeySpec(decrypt(fileContents, passPhrase)); + public static PublicKey readPublicKey(byte[] fileContents) { + X509EncodedKeySpec publicKeySpec = new X509EncodedKeySpec(fileContents); try { - return KeyFactory.getInstance(CryptUtils.keyAlgorithm).generatePublic(publicKeySpec); + return KeyFactory.getInstance(CryptUtils.KEY_ALGORITHM).generatePublic(publicKeySpec); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw new IllegalStateException(e); } @@ -112,9 +88,9 @@ public class CryptUtils { /** * Returns encrypted public key file content with provided passPhrase */ - public static byte[] writeEncryptedPublicKey(PublicKey publicKey, char[] passPhrase) { + public static byte[] writeEncryptedPublicKey(PublicKey publicKey) { X509EncodedKeySpec encodedKeySpec = new X509EncodedKeySpec(publicKey.getEncoded()); - return encrypt(encodedKeySpec.getEncoded(), passPhrase); + return encrypt(encodedKeySpec.getEncoded(), DEFAULT_PASS_PHRASE); } /** @@ -128,33 +104,25 @@ public class CryptUtils { /** * Encrypts provided data with DEFAULT_PASS_PHRASE */ - public static byte[] encrypt(byte[] data) { - try { - return encrypt(data, hashPassPhrase(DEFAULT_PASS_PHRASE)); - } catch (NoSuchAlgorithmException e) { - throw new IllegalStateException(e); - } + static byte[] encrypt(byte[] data) { + return encrypt(data, DEFAULT_PASS_PHRASE); } /** * Decrypts provided encryptedData with DEFAULT_PASS_PHRASE */ - public static byte[] decrypt(byte[] encryptedData) { - try { - return decrypt(encryptedData, hashPassPhrase(DEFAULT_PASS_PHRASE)); - } catch (NoSuchAlgorithmException e) { - throw new IllegalStateException(e); - } + static byte[] decrypt(byte[] encryptedData) { + return decrypt(encryptedData, DEFAULT_PASS_PHRASE); } /** * Encrypts provided data with passPhrase */ - public static byte[] encrypt(byte[] data, char[] passPhrase) { + private static byte[] encrypt(byte[] data, char[] passPhrase) { try { - final Cipher encryptionCipher = getEncryptionCipher(getSecretKey(passPhrase)); - return encryptionCipher.doFinal(pad(data, minimumPadding)); - } catch (InvalidKeySpecException | IllegalBlockSizeException | BadPaddingException e) { + final Cipher encryptionCipher = getEncryptionCipher(deriveSecretKey(passPhrase)); + return encryptionCipher.doFinal(data); + } catch (IllegalBlockSizeException | BadPaddingException e) { throw new IllegalStateException(e); } } @@ -164,29 +132,60 @@ public class CryptUtils { */ private static byte[] decrypt(byte[] encryptedData, char[] passPhrase) { try { - final Cipher cipher = getDecryptionCipher(getSecretKey(passPhrase)); - return unPad(cipher.doFinal(encryptedData)); - } catch (IllegalBlockSizeException | BadPaddingException | InvalidKeySpecException e) { + final Cipher cipher = getDecryptionCipher(deriveSecretKey(passPhrase)); + return cipher.doFinal(encryptedData); + } catch (IllegalBlockSizeException | 
BadPaddingException e) { throw new IllegalStateException(e); } - } - private static SecretKey getSecretKey(char[] passPhrase) throws InvalidKeySpecException { + static byte[] encryptV3Format(byte[] data) { try { - PBEKeySpec keySpec = new PBEKeySpec(passPhrase, salt, iterationCount, aesKeyLength); + SecretKey encryptionKey = getV3Key(); + final Cipher encryptionCipher = getEncryptionCipher(encryptionKey); + return encryptionCipher.doFinal(pad(data, 20)); + } catch (GeneralSecurityException e) { + throw new IllegalStateException(e); + } + } - byte[] shortKey = SecretKeyFactory.getInstance("PBEWithSHA1AndDESede"). - generateSecret(keySpec).getEncoded(); + static byte[] decryptV3Format(byte[] data) { + try { + SecretKey decryptionKey = getV3Key(); + final Cipher decryptionCipher = getDecryptionCipher(decryptionKey); + return unPad(decryptionCipher.doFinal(data)); + } catch (GeneralSecurityException e) { + throw new IllegalStateException(e); + } + } - byte[] intermediaryKey = new byte[aesKeyLength / 8]; - for (int i = 0, j = 0; i < aesKeyLength / 8; i++) { - intermediaryKey[i] = shortKey[j]; - if (++j == shortKey.length) - j = 0; - } + private static SecretKey getV3Key() throws NoSuchAlgorithmException, InvalidKeySpecException { + final byte[] salt = { + (byte) 0xA9, (byte) 0xA2, (byte) 0xB5, (byte) 0xDE, + (byte) 0x2A, (byte) 0x8A, (byte) 0x9A, (byte) 0xE6 + }; + final byte[] passBytes = "elasticsearch-license".getBytes(StandardCharsets.UTF_8); + final byte[] digest = MessageDigest.getInstance("SHA-512").digest(passBytes); + final char[] hashedPassphrase = Base64.getEncoder().encodeToString(digest).toCharArray(); + PBEKeySpec keySpec = new PBEKeySpec(hashedPassphrase, salt, 1024, 128); + byte[] shortKey = SecretKeyFactory.getInstance("PBEWithSHA1AndDESede"). + generateSecret(keySpec).getEncoded(); + byte[] intermediaryKey = new byte[16]; + for (int i = 0, j = 0; i < 16; i++) { + intermediaryKey[i] = shortKey[j]; + if (++j == shortKey.length) + j = 0; + } + return new SecretKeySpec(intermediaryKey, "AES"); + } - return new SecretKeySpec(intermediaryKey, "AES"); + private static SecretKey deriveSecretKey(char[] passPhrase) { + try { + PBEKeySpec keySpec = new PBEKeySpec(passPhrase, SALT, KDF_ITERATION_COUNT, ENCRYPTION_KEY_LENGTH); + + SecretKey secretKey = SecretKeyFactory.getInstance(KDF_ALGORITHM). 
+ generateSecret(keySpec); + return new SecretKeySpec(secretKey.getEncoded(), CIPHER_ALGORITHM); } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { throw new IllegalStateException(e); } @@ -202,8 +201,8 @@ public class CryptUtils { private static Cipher getCipher(int mode, SecretKey secretKey) { try { - Cipher cipher = Cipher.getInstance(secretKey.getAlgorithm()); - cipher.init(mode, secretKey, random); + Cipher cipher = Cipher.getInstance(CIPHER_ALGORITHM); + cipher.init(mode, secretKey, RANDOM); return cipher; } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException e) { throw new IllegalStateException(e); @@ -228,7 +227,7 @@ public class CryptUtils { // fill the rest with random bytes byte[] fill = new byte[padded - 1]; - random.nextBytes(fill); + RANDOM.nextBytes(fill); System.arraycopy(fill, 0, out, i, padded - 1); out[length] = (byte) (padded + 1); @@ -246,10 +245,4 @@ public class CryptUtils { return out; } - - private static char[] hashPassPhrase(String passPhrase) throws NoSuchAlgorithmException { - final byte[] passBytes = passPhrase.getBytes(StandardCharsets.UTF_8); - final byte[] digest = MessageDigest.getInstance(passHashAlgorithm).digest(passBytes); - return Base64.getEncoder().encodeToString(digest).toCharArray(); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index df94a9132a0..144eec96858 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -38,7 +38,8 @@ public class License implements ToXContentObject { public static final int VERSION_START = 1; public static final int VERSION_NO_FEATURE_TYPE = 2; public static final int VERSION_START_DATE = 3; - public static final int VERSION_CURRENT = VERSION_START_DATE; + public static final int VERSION_CRYPTO_ALGORITHMS = 4; + public static final int VERSION_CURRENT = VERSION_CRYPTO_ALGORITHMS; /** * XContent param name to deserialize license(s) with diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 14e72142715..fa0c239aab1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -402,9 +402,9 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste boolean noLicense = noLicenseInPrevMetadata && noLicenseInCurrentMetadata; // auto-generate license if no licenses ever existed or if the current license is basic and - // needs extended. this will trigger a subsequent cluster changed event - if (currentClusterState.getNodes().isLocalNodeElectedMaster() - && (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense))) { + // needs extended or if the license signature needs to be updated. 
this will trigger a subsequent cluster changed event + if (currentClusterState.getNodes().isLocalNodeElectedMaster() && + (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense) || LicenseUtils.signatureNeedsUpdate(currentLicense))) { registerOrUpdateSelfGeneratedLicense(); } } else if (logger.isDebugEnabled()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java index c5ab35f862c..8fcdc05bcf9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -37,4 +37,13 @@ public class LicenseUtils { public static boolean licenseNeedsExtended(License license) { return "basic".equals(license.type()) && license.expiryDate() != LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS; } + + /** + * Checks if the signature of a self generated license with older version needs to be + * recreated with the new key + */ + public static boolean signatureNeedsUpdate(License license) { + return ("basic".equals(license.type()) || "trial".equals(license.type())) && + (license.version() < License.VERSION_CRYPTO_ALGORITHMS); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java index c670f070ad7..a879dc9ed18 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java @@ -37,9 +37,9 @@ public class LicenseVerifier { * @param license to verify * @return true if valid, false otherwise */ - public static boolean verifyLicense(final License license, byte[] encryptedPublicKeyData) { + public static boolean verifyLicense(final License license, byte[] publicKeyData) { byte[] signedContent = null; - byte[] signatureHash = null; + byte[] publicKeyFingerprint = null; try { byte[] signatureBytes = Base64.getDecoder().decode(license.signature()); ByteBuffer byteBuffer = ByteBuffer.wrap(signatureBytes); @@ -48,32 +48,27 @@ public class LicenseVerifier { byte[] magic = new byte[magicLen]; byteBuffer.get(magic); int hashLen = byteBuffer.getInt(); - signatureHash = new byte[hashLen]; - byteBuffer.get(signatureHash); + publicKeyFingerprint = new byte[hashLen]; + byteBuffer.get(publicKeyFingerprint); int signedContentLen = byteBuffer.getInt(); signedContent = new byte[signedContentLen]; byteBuffer.get(signedContent); XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); license.toXContent(contentBuilder, new ToXContent.MapParams(Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"))); Signature rsa = Signature.getInstance("SHA512withRSA"); - rsa.initVerify(CryptUtils.readEncryptedPublicKey(encryptedPublicKeyData)); + rsa.initVerify(CryptUtils.readPublicKey(publicKeyData)); BytesRefIterator iterator = BytesReference.bytes(contentBuilder).iterator(); BytesRef ref; while((ref = iterator.next()) != null) { rsa.update(ref.bytes, ref.offset, ref.length); } - return rsa.verify(signedContent) - && Arrays.equals(Base64.getEncoder().encode(encryptedPublicKeyData), signatureHash); + return rsa.verify(signedContent); } catch (IOException | NoSuchAlgorithmException | SignatureException | InvalidKeyException e) { throw new IllegalStateException(e); } finally { - Arrays.fill(encryptedPublicKeyData, (byte) 0); if 
(signedContent != null) { Arrays.fill(signedContent, (byte) 0); } - if (signatureHash != null) { - Arrays.fill(signatureHash, (byte) 0); - } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java index 7ec6b0b95eb..0bc49d517cd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java @@ -19,25 +19,36 @@ import java.nio.ByteBuffer; import java.util.Base64; import java.util.Collections; -import static org.elasticsearch.license.CryptUtils.decrypt; +import static org.elasticsearch.license.CryptUtils.encryptV3Format; import static org.elasticsearch.license.CryptUtils.encrypt; +import static org.elasticsearch.license.CryptUtils.decryptV3Format; +import static org.elasticsearch.license.CryptUtils.decrypt; class SelfGeneratedLicense { public static License create(License.Builder specBuilder) { + return create(specBuilder, License.VERSION_CURRENT); + } + + public static License create(License.Builder specBuilder, int version) { License spec = specBuilder .issuer("elasticsearch") - .version(License.VERSION_CURRENT) + .version(version) .build(); final String signature; try { XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); spec.toXContent(contentBuilder, new ToXContent.MapParams(Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"))); - byte[] encrypt = encrypt(BytesReference.toBytes(BytesReference.bytes(contentBuilder))); + byte[] encrypt; + if (version < License.VERSION_CRYPTO_ALGORITHMS) { + encrypt = encryptV3Format(BytesReference.toBytes(BytesReference.bytes(contentBuilder))); + } else { + encrypt = encrypt(BytesReference.toBytes(BytesReference.bytes(contentBuilder))); + } byte[] bytes = new byte[4 + 4 + encrypt.length]; ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); - // always generate license version -VERSION_CURRENT - byteBuffer.putInt(-License.VERSION_CURRENT) + // Set -version in signature + byteBuffer.putInt(-version) .putInt(encrypt.length) .put(encrypt); signature = Base64.getEncoder().encodeToString(bytes); @@ -56,9 +67,11 @@ class SelfGeneratedLicense { byte[] content = new byte[contentLen]; byteBuffer.get(content); final License expectedLicense; + // Version in signature is -version, so check for -(-version) < 4 + byte[] decryptedContent = (-version < License.VERSION_CRYPTO_ALGORITHMS) ? 
decryptV3Format(content) : decrypt(content); // EMPTY is safe here because we don't call namedObject try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, decrypt(content))) { + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, decryptedContent)) { parser.nextToken(); expectedLicense = License.builder().fromLicenseSpec(License.fromXContent(parser), license.signature()).version(-version).build(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java index ef654513c80..77695f64538 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java @@ -58,15 +58,41 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask { throw new IllegalArgumentException("Illegal self generated license type [" + type + "]. Must be trial or basic."); } - return updateWithLicense(currentState, type); } else if (LicenseUtils.licenseNeedsExtended(currentLicensesMetaData.getLicense())) { return extendBasic(currentState, currentLicensesMetaData); + } else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetaData.getLicense())) { + return updateLicenseSignature(currentState, currentLicensesMetaData); } else { return currentState; } } + private ClusterState updateLicenseSignature(ClusterState currentState, LicensesMetaData currentLicenseMetaData) { + License license = currentLicenseMetaData.getLicense(); + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + String type = license.type(); + long issueDate = license.issueDate(); + long expiryDate; + if ("basic".equals(type)) { + expiryDate = LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS; + } else { + expiryDate = issueDate + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis(); + } + License.Builder specBuilder = License.builder() + .uid(license.uid()) + .issuedTo(license.issuedTo()) + .maxNodes(selfGeneratedLicenseMaxNodes) + .issueDate(issueDate) + .type(type) + .expiryDate(expiryDate); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + Version trialVersion = currentLicenseMetaData.getMostRecentTrialVersion(); + LicensesMetaData newLicenseMetadata = new LicensesMetaData(selfGeneratedLicense, trialVersion); + mdBuilder.putCustom(LicensesMetaData.TYPE, newLicenseMetadata); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } + @Override public void onFailure(String source, @Nullable Exception e) { logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java index 6bf2befbddd..a1a709a9d8f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java @@ -153,6 +153,27 @@ public class LicenseServiceClusterTests extends AbstractLicensesIntegrationTestC assertLicenseActive(false); } + public void testClusterRestartWithOldSignature() throws Exception { + 
wipeAllLicenses(); + internalCluster().startNode(); + ensureGreen(); + assertLicenseActive(true); + putLicense(TestUtils.generateSignedLicenseOldSignature()); + LicensingClient licensingClient = new LicensingClient(client()); + assertThat(licensingClient.prepareGetLicense().get().license().version(), equalTo(License.VERSION_START_DATE)); + logger.info("--> restart node"); + internalCluster().fullRestart(); // restart so that license is updated + ensureYellow(); + logger.info("--> await node for enabled"); + assertLicenseActive(true); + licensingClient = new LicensingClient(client()); + assertThat(licensingClient.prepareGetLicense().get().license().version(), equalTo(License.VERSION_CURRENT)); //license updated + internalCluster().fullRestart(); // restart once more and verify updated license is active + ensureYellow(); + logger.info("--> await node for enabled"); + assertLicenseActive(true); + } + private void assertOperationMode(License.OperationMode operationMode) throws InterruptedException { boolean success = awaitBusy(() -> { for (XPackLicenseState licenseState : internalCluster().getDataNodeInstances(XPackLicenseState.class)) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java index bf480797814..aa27dbdcb49 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java @@ -19,7 +19,7 @@ import java.util.Base64; import java.util.Collections; import java.util.UUID; -import static org.elasticsearch.license.CryptUtils.encrypt; +import static org.elasticsearch.license.CryptUtils.encryptV3Format; import static org.hamcrest.Matchers.equalTo; @@ -98,7 +98,7 @@ public class SelfGeneratedLicenseTests extends ESTestCase { try { XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); spec.toXContent(contentBuilder, new ToXContent.MapParams(Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"))); - byte[] encrypt = encrypt(BytesReference.toBytes(BytesReference.bytes(contentBuilder))); + byte[] encrypt = encryptV3Format(BytesReference.toBytes(BytesReference.bytes(contentBuilder))); byte[] bytes = new byte[4 + 4 + encrypt.length]; ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); byteBuffer.putInt(-spec.version()) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java index e8fa6f32a9b..d236dacaa4d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java @@ -209,12 +209,11 @@ public class TestUtils { this.maxNodes = maxNodes; } } - - public static Path getTestPriKeyPath() throws Exception { + private static Path getTestPriKeyPath() throws Exception { return getResourcePath("/private.key"); } - public static Path getTestPubKeyPath() throws Exception { + private static Path getTestPubKeyPath() throws Exception { return getResourcePath("/public.key"); } @@ -244,6 +243,19 @@ public class TestUtils { return generateSignedLicense(type, randomIntBetween(License.VERSION_START, License.VERSION_CURRENT), issueDate, expiryDuration); } + public static License generateSignedLicenseOldSignature() { + long issueDate = System.currentTimeMillis(); + License.Builder specBuilder = 
License.builder() + .uid(UUID.randomUUID().toString()) + .version(License.VERSION_START_DATE) + .issuedTo("customer") + .maxNodes(5) + .type("trial") + .issueDate(issueDate) + .expiryDate(issueDate + TimeValue.timeValueHours(24).getMillis()); + return SelfGeneratedLicense.create(specBuilder, License.VERSION_START_DATE); + } + /** * This method which chooses the license type randomly if the type is null. However, it will not randomly * choose trial or basic types as those types can only be self-generated. @@ -269,7 +281,7 @@ public class TestUtils { builder.subscriptionType((type != null) ? type : randomFrom("dev", "gold", "platinum", "silver")); builder.feature(randomAlphaOfLength(10)); } - LicenseSigner signer = new LicenseSigner(getTestPriKeyPath(), getTestPubKeyPath()); + final LicenseSigner signer = new LicenseSigner(getTestPriKeyPath(), getTestPubKeyPath()); return signer.sign(builder.build()); } diff --git a/x-pack/plugin/core/src/test/resources/private.key b/x-pack/plugin/core/src/test/resources/private.key index 1f545803d875598d976b206250d5e95667c794ef..d0a658c3f07bf66a440c2085264e7df1cfb24d5a 100644 GIT binary patch literal 1232 zcmV;>1TXt!CXCA;xn6gI33Sx;gr<}{3~`2Di&z>_^uze-UP~880u>SHBYftf_|OGd zKL~W)R^OoSkiUKFxd zg;AUXjQUkMIRj*|Q4Y#n$Et3#`WO1|%~boac<@X=J}R1h?D-o}O|U}uH)fWVXi4n3 zMLC0^^);i>eW~B5tyaF*}3}T?qe%fEgUVNz(*0csSvD+DWT5QnV=GP{RnmL?IQvhV^sfXx= z4ug@Ucv9Kv|J#`x$%Y-b(l}U7RYQo%mX9$M+!`fbQo=ibfwM!rX;zT@Nsvwip?*)s zeX#ngV-Y39T*we7XtjK>9K;+Z8;Vs}4BI=pYJ27IUnB83!p<)by=RCjnSZw7;QUX-^fq7 z2Y|=wC%c>W5n6v~gF*@7{XCZtP|tPD3d!LcPv>h$e>7@hat*Xs0(R#{TzA{qC(|Qf z=_!83Rt%zq)=K4}rD2%uW{e3VIf8^$X3B!;xosvD3MV(Q7Q>v!h}s%m2vR315?~ z#_r25G{7;)$V;z3iW<$Vhw{9_5%iV0hM7Mz<7DPCx8;CqotW5XVk9>Q(Vc$EQJ?d; zYHYjhm~pmFxLw`X1#0#z;F--#7IMS^EwwpT+HBf}GwRC8G@JY7bx!c-W&arLDgdaKrzD#nKkfzD%T8P>4PHt^t?n%TP4!8P6u8n2SK{uW;txI51X#~UG@vq3kfh- u=U`2%yZJhw|6{o@4>6LZmM)1TXv1%Bc_wbLO>E3}&#D<@-rDJ$(p047kR zkI3mLE>J{X5~_|GE5>Bxd81`fV@~0j8z81fwu78=uPV1?-b_fL$snD`NE}Zsv(l}S z+Dr52K=W~W#&Y(zE)IK9$=-71EtPe#X0XhttX&@1ia@?lvh&>PD=A#>rfdl2Ru)*H zKaKRTp&qI~gmO-)fq;gzrbngk1r$cUKd~25@6;rjPJf6eLtCHTaDU8Rbt#KGKlZ%g zPd`h6iZ4kWgcXo9P=@b~YoN9WQmtytp^dd%)MXVsW%n~=T9#TzSWvr*YCR25X&mo6 zJoJz>&ju@jBz@-J;se7XMiU5&x@g8>FTI*r^O6Xv2(VF_R@Vk3wQBmIE>&XqH^t=M z5_c~f_`fm)R~9ZqEv|Ma3$nQ}LvHLD`h~L2VKXBZwZnP`c6%Qwnwq8Z0!hd2+^*Gk z6R7QdYCwK?kavUU0Vd*Mq5K{M!Vk zzSLrHwA_oZsoml1+m@@Wyh8$ZA^E2_FpxTTRz7~A$x?V+XFW0!aa4^L&A*^Evlz{gN8ae8h9=J=b zC8F19FN`xNpxA!48b`dGRK3GbQMOho1>byaB*psVh{^^yqVL4J0~$j7viqxma>pYOhkH~PwpJT1j6^yHDMWGO{vw8PyI zIrYD&$F3e@0!346r3BY7JkLDD+7E5EE6oiV(gQ62K@eQRE&#pRq81Tut!V+1TCM~srn6-g`!g_`uozhhJEnN6u9$5|& zLC>$@xw*^CD*fb$ngUWU)yZ%TqX*wM!HU2(O2RRFnw&?^e*xOZWZ?n`No9lb?BLpV=@K}hkB uJ-}qCO)pTE(AWQ24*9x_mu179!iEY^J(*;5;=+=*kZC+fDbTdAaQBA0ondAf&n5h4F(A+hDe6@4FLfG1potr0S^E$f&mHwf&l>lq`lcIsk6j2LTWnY z&Vy*<@g!8mW^?2RawcJR8$@Rx7zP}_aA{rE_%h*d&Qd(+L(pjjdeVA&wh2Wj&Cd*Z zsAyWOeYm&fsNS~>RGo+$v4hB&zx<^`yrvNn4$>3*Y5|jIkB_^*jI^bNe!(9hh|dR5 zd^En1l{tj+Ib)Y`HPe`4vLOsv-FsH1v7%VOr9CWGt&)IbARFa7rR>qO^Jl6#Xhfy$ zV5yh@0~!y9>f2a$=(U>gIOVJb!)|=HX}*mq$FiCrGx9WH4_WAD0CT{Z09Qh{Yjsa= sK(Y<>67jV1{NHszF%CC1e=iYPvTcIF(gc-Bv!3;}#x#Bf0s{d60pXE@5dZ)H literal 304 zcmV-00nh&Zj>mD`1#arZ%pYe0ikx>%eT^BphgE4yij^?pV%9R)x^ebh;_QdCuh!4Q zW6Uc_((oG8AST4JhGxlr-^T)p35dE~WSJg)5Pxr{Pw2i$WUj~xH9;uswX|N?9rjnL zM2)ISwd)TEefcdmYt+G`S`=?3?m4#RvU5l z8S}dpYjk&Ql8jx?OdR#2dLn!Y6H$vDY@hN`@N?om`8<6Sk6Xbq)*aK}TGk$XHH)u} z%E~t-*cAfm8$=KaS>=& Date: Wed, 2 May 2018 12:22:55 -0700 Subject: [PATCH 22/30] Fix NPE when CumulativeSum agg encounters null/empty bucket (#29641) Fix NPE when CumulativeSum agg 
encounters null/empty bucket If the cusum agg encounters a null value, it's because the value is missing (like the first value from a derivative agg), the path is not valid, or the bucket in the path was empty. Previously cusum would just explode on the null, but this changes it so we only increment the sum if the value is non-null and finite. This is safe because even if the cusum encounters all null or empty buckets, the cumulative sum is still zero (like how the sum agg returns zero even if all the docs were missing values) I went ahead and tweaked AggregatorTestCase to allow testing pipelines, so that I could delete the IT test and reimplement it as AggTests. Closes #27544 --- docs/CHANGELOG.asciidoc | 3 + .../CumulativeSumPipelineAggregator.java | 16 +- .../CumulativeSumAggregatorTests.java | 316 ++++++++++++++++++ .../pipeline/CumulativeSumIT.java | 167 --------- .../aggregations/AggregatorTestCase.java | 8 + 5 files changed, 338 insertions(+), 172 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java delete mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumIT.java diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index f80c135f882..0a04cc950e3 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -70,6 +70,8 @@ Fixed prerelease version of elasticsearch in the `deb` package to sort before GA Fail snapshot operations early when creating or deleting a snapshot on a repository that has been written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140]) +Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) + //[float] //=== Regressions @@ -100,6 +102,7 @@ multi-argument versions. 
({pull}29623[#29623]) Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) +Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) //[float] //=== Regressions diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregator.java index 8a1b70fdd14..e1441132452 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/cumulativesum/CumulativeSumPipelineAggregator.java @@ -79,11 +79,17 @@ public class CumulativeSumPipelineAggregator extends PipelineAggregator { double sum = 0; for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) { Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], GapPolicy.INSERT_ZEROS); - sum += thisBucketValue; - List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); - aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList(), metaData())); + + // Only increment the sum if it's a finite value, otherwise "increment by zero" is correct + if (thisBucketValue != null && thisBucketValue.isInfinite() == false && thisBucketValue.isNaN() == false) { + sum += thisBucketValue; + } + + List aggs = StreamSupport + .stream(bucket.getAggregations().spliterator(), false) + .map((p) -> (InternalAggregation) p) + .collect(Collectors.toList()); + aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList<>(), metaData())); Bucket newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs)); newBuckets.add(newBucket); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java new file mode 100644 index 00000000000..fa46921a941 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -0,0 +1,316 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.pipeline; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.IsNull.notNullValue; + +public class CumulativeSumAggregatorTests extends AggregatorTestCase { + + private static final String HISTO_FIELD = "histo"; + private static final String VALUE_FIELD = "value_field"; + + private static final List datasetTimes = Arrays.asList( + "2017-01-01T01:07:45", + "2017-01-02T03:43:34", + "2017-01-03T04:11:00", + "2017-01-04T05:11:31", + "2017-01-05T08:24:05", + "2017-01-06T13:09:32", + "2017-01-07T13:47:43", + "2017-01-08T16:14:34", + "2017-01-09T17:09:50", + "2017-01-10T22:55:46"); + + private static final List datasetValues = Arrays.asList(1,2,3,4,5,6,7,8,9,10); + + public void testSimple() throws IOException { + Query query = new MatchAllDocsQuery(); + + DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); + aggBuilder.dateHistogramInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.subAggregation(new AvgAggregationBuilder("the_avg").field(VALUE_FIELD)); + aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "the_avg")); + + executeTestCase(query, aggBuilder, histogram -> { + assertEquals(10, ((Histogram)histogram).getBuckets().size()); + List buckets = ((Histogram)histogram).getBuckets(); + double sum = 0.0; + for (Histogram.Bucket bucket : buckets) { + sum += ((InternalAvg) (bucket.getAggregations().get("the_avg"))).value(); + assertThat(((InternalSimpleValue) (bucket.getAggregations().get("cusum"))).value(), equalTo(sum)); + 
} + }); + } + + /** + * First value from a derivative is null, so this makes sure the cusum can handle that + */ + public void testDerivative() throws IOException { + Query query = new MatchAllDocsQuery(); + + DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); + aggBuilder.dateHistogramInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.subAggregation(new AvgAggregationBuilder("the_avg").field(VALUE_FIELD)); + aggBuilder.subAggregation(new DerivativePipelineAggregationBuilder("the_deriv", "the_avg")); + aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "the_deriv")); + + executeTestCase(query, aggBuilder, histogram -> { + assertEquals(10, ((Histogram)histogram).getBuckets().size()); + List buckets = ((Histogram)histogram).getBuckets(); + double sum = 0.0; + for (int i = 0; i < buckets.size(); i++) { + if (i == 0) { + assertThat(((InternalSimpleValue)(buckets.get(i).getAggregations().get("cusum"))).value(), equalTo(0.0)); + } else { + sum += 1.0; + assertThat(((InternalSimpleValue)(buckets.get(i).getAggregations().get("cusum"))).value(), equalTo(sum)); + } + } + }); + } + + public void testDocCount() throws IOException { + Query query = new MatchAllDocsQuery(); + + int numDocs = randomIntBetween(6, 20); + int interval = randomIntBetween(2, 5); + + int minRandomValue = 0; + int maxRandomValue = 20; + + int numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + long[] valueCounts = new long[numValueBuckets]; + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("histo") + .field(VALUE_FIELD) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue); + aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "_count")); + + executeTestCase(query, aggBuilder, histogram -> { + List buckets = ((Histogram)histogram).getBuckets(); + + assertThat(buckets.size(), equalTo(numValueBuckets)); + + double sum = 0; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + sum += bucket.getDocCount(); + InternalSimpleValue cumulativeSumValue = bucket.getAggregations().get("cusum"); + assertThat(cumulativeSumValue, notNullValue()); + assertThat(cumulativeSumValue.getName(), equalTo("cusum")); + assertThat(cumulativeSumValue.value(), equalTo(sum)); + } + }, indexWriter -> { + Document document = new Document(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + document.add(new NumericDocValuesField(VALUE_FIELD, fieldValue)); + final int bucket = (fieldValue / interval); + valueCounts[bucket]++; + + indexWriter.addDocument(document); + document.clear(); + } + }); + } + + public void testMetric() throws IOException { + Query query = new MatchAllDocsQuery(); + + int numDocs = randomIntBetween(6, 20); + int interval = randomIntBetween(2, 5); + + int minRandomValue = 0; + int maxRandomValue = 20; + + int numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + long[] valueCounts = new long[numValueBuckets]; + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("histo") + .field(VALUE_FIELD) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue); + aggBuilder.subAggregation(new SumAggregationBuilder("sum").field(VALUE_FIELD)); + 
aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "sum")); + + executeTestCase(query, aggBuilder, histogram -> { + List buckets = ((Histogram)histogram).getBuckets(); + + assertThat(buckets.size(), equalTo(numValueBuckets)); + + double bucketSum = 0; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + bucketSum += sum.value(); + + InternalSimpleValue sumBucketValue = bucket.getAggregations().get("cusum"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("cusum")); + assertThat(sumBucketValue.value(), equalTo(bucketSum)); + } + }, indexWriter -> { + Document document = new Document(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + document.add(new NumericDocValuesField(VALUE_FIELD, fieldValue)); + final int bucket = (fieldValue / interval); + valueCounts[bucket]++; + + indexWriter.addDocument(document); + document.clear(); + } + }); + } + + public void testNoBuckets() throws IOException { + int numDocs = randomIntBetween(6, 20); + int interval = randomIntBetween(2, 5); + + int minRandomValue = 0; + int maxRandomValue = 20; + + int numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + long[] valueCounts = new long[numValueBuckets]; + + Query query = new MatchNoDocsQuery(); + + HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("histo") + .field(VALUE_FIELD) + .interval(interval); + aggBuilder.subAggregation(new SumAggregationBuilder("sum").field(VALUE_FIELD)); + aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "sum")); + + executeTestCase(query, aggBuilder, histogram -> { + List buckets = ((Histogram)histogram).getBuckets(); + + assertThat(buckets.size(), equalTo(0)); + + }, indexWriter -> { + Document document = new Document(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + document.add(new NumericDocValuesField(VALUE_FIELD, fieldValue)); + final int bucket = (fieldValue / interval); + valueCounts[bucket]++; + + indexWriter.addDocument(document); + document.clear(); + } + }); + } + + @SuppressWarnings("unchecked") + private void executeTestCase(Query query, AggregationBuilder aggBuilder, Consumer verify) throws IOException { + executeTestCase(query, aggBuilder, verify, indexWriter -> { + Document document = new Document(); + int counter = 0; + for (String date : datasetTimes) { + if (frequently()) { + indexWriter.commit(); + } + + long instant = asLong(date); + document.add(new SortedNumericDocValuesField(HISTO_FIELD, instant)); + document.add(new NumericDocValuesField(VALUE_FIELD, datasetValues.get(counter))); + indexWriter.addDocument(document); + document.clear(); + counter += 1; + } + }); + } + + @SuppressWarnings("unchecked") + private void executeTestCase(Query query, AggregationBuilder aggBuilder, Consumer verify, + CheckedConsumer setup) throws IOException { + + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + setup.accept(indexWriter); + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + 
DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name"); + DateFieldMapper.DateFieldType fieldType = builder.fieldType(); + fieldType.setHasDocValues(true); + fieldType.setName(HISTO_FIELD); + + MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + valueFieldType.setHasDocValues(true); + valueFieldType.setName("value_field"); + + InternalAggregation histogram; + histogram = searchAndReduce(indexSearcher, query, aggBuilder, new MappedFieldType[]{fieldType, valueFieldType}); + verify.accept(histogram); + } + } + } + + private static long asLong(String dateTime) { + return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(dateTime).getMillis(); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumIT.java deleted file mode 100644 index 6a748bd3c84..00000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumIT.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.aggregations.pipeline; - -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.metrics.sum.Sum; -import org.elasticsearch.test.ESIntegTestCase; - -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.cumulativeSum; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.core.IsNull.notNullValue; - -@ESIntegTestCase.SuiteScopeTestCase -public class CumulativeSumIT extends ESIntegTestCase { - - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - - static int numDocs; - static int interval; - static int minRandomValue; - static int maxRandomValue; - static int numValueBuckets; - static long[] valueCounts; - - @Override - public void setupSuiteScopeCluster() throws Exception { - createIndex("idx"); - createIndex("idx_unmapped"); - - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); - - minRandomValue = 0; - maxRandomValue = 20; - - numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; - valueCounts = new long[numValueBuckets]; - - List builders = new ArrayList<>(); - - for (int i = 0; i < numDocs; i++) { - int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add(client().prepareIndex("idx", "type").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) - .endObject())); - final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); - valueCounts[bucket]++; - } - - assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); - for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); - } - indexRandom(true, builders); - ensureSearchable(); - } - - public void testDocCount() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(cumulativeSum("cumulative_sum", "_count"))).execute().actionGet(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - double sum = 0; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - sum += bucket.getDocCount(); - InternalSimpleValue cumulativeSumValue = bucket.getAggregations().get("cumulative_sum"); - assertThat(cumulativeSumValue, notNullValue()); - assertThat(cumulativeSumValue.getName(), equalTo("cumulative_sum")); - assertThat(cumulativeSumValue.value(), equalTo(sum)); - } - - } - - public void testMetric() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation(cumulativeSum("cumulative_sum", "sum"))).execute().actionGet(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - for (int i = 0; i < buckets.size(); ++i) { - Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - bucketSum += sum.value(); - - InternalSimpleValue sumBucketValue = bucket.getAggregations().get("cumulative_sum"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("cumulative_sum")); - assertThat(sumBucketValue.value(), equalTo(bucketSum)); - } - } - - public void testNoBuckets() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(rangeQuery(SINGLE_VALUED_FIELD_NAME).lt(minRandomValue)) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation(cumulativeSum("cumulative_sum", "sum"))).execute().actionGet(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), 
equalTo(0)); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 010eb1d7cdc..73ac501ec1d 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -61,6 +61,8 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.mock.orig.Mockito; +import org.elasticsearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase; import org.elasticsearch.search.fetch.subphase.FetchSourceSubPhase; @@ -79,6 +81,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import static org.elasticsearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; import static org.mockito.Matchers.anyObject; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doAnswer; @@ -369,6 +372,11 @@ public abstract class AggregatorTestCase extends ESTestCase { @SuppressWarnings("unchecked") A internalAgg = (A) aggs.get(0).doReduce(aggs, context); + if (internalAgg.pipelineAggregators().size() > 0) { + for (PipelineAggregator pipelineAggregator : internalAgg.pipelineAggregators()) { + internalAgg = (A) pipelineAggregator.reduce(internalAgg, context); + } + } InternalAggregationTestCase.assertMultiBucketConsumer(internalAgg, reduceBucketConsumer); return internalAgg; } From e697299103d3818ddcdff47779c9fdc9cef204c9 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 2 May 2018 13:09:25 -0700 Subject: [PATCH 23/30] [DOCS] Fixes broken links to bootstrap user (#30349) --- x-pack/docs/en/commands/setup-passwords.asciidoc | 2 +- x-pack/docs/en/security/configuring-es.asciidoc | 2 +- .../securing-communications/configuring-tls-docker.asciidoc | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/docs/en/commands/setup-passwords.asciidoc b/x-pack/docs/en/commands/setup-passwords.asciidoc index 42f3c824496..b323dc8e5c1 100644 --- a/x-pack/docs/en/commands/setup-passwords.asciidoc +++ b/x-pack/docs/en/commands/setup-passwords.asciidoc @@ -20,7 +20,7 @@ bin/elasticsearch-setup-passwords auto|interactive This command is intended for use only during the initial configuration of {xpack}. It uses the -{xpack-ref}/setting-up-authentication.html#bootstrap-elastic-passwords[`elastic` bootstrap password] +{stack-ov}/built-in-users.html#bootstrap-elastic-passwords[`elastic` bootstrap password] to run user management API requests. After you set a password for the `elastic` user, the bootstrap password is no longer active and you cannot use this command. 
Instead, you can change passwords by using the *Management > Users* UI in {kib} diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index fa3a6da801f..de3895d34b0 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -47,7 +47,7 @@ information, see + -- {security} provides -{xpack-ref}/setting-up-authentication.html#built-in-users[built-in users] to +{stack-ov}/built-in-users.html[built-in users] to help you get up and running. The +elasticsearch-setup-passwords+ command is the simplest method to set the built-in users' passwords for the first time. diff --git a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc index 05d6574b56f..affac534b6f 100644 --- a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc +++ b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc @@ -142,7 +142,8 @@ services: volumes: {"esdata_01": {"driver": "local"}, "esdata_02": {"driver": "local"}} ---- -<1> Bootstrap `elastic` with the password defined in `.env`. See {xpack-ref}/setting-up-authentication.html#bootstrap-elastic-passwords[the Elastic Bootstrap Password]. +<1> Bootstrap `elastic` with the password defined in `.env`. See +{stack-ov}/built-in-users.html#bootstrap-elastic-passwords[the Elastic Bootstrap Password]. <2> Disable verification of authenticity for inter-node communication. Allows creating self-signed certificates without having to pin specific internal IP addresses. endif::[] From 3aec8c772585254695e7caf4f0f2fff083ad7f0e Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 2 May 2018 14:33:34 -0700 Subject: [PATCH 24/30] Packaging: Set elasticsearch user to have non-existent homedir (#29007) This commit adds setting the homedir for the elasticsearch user to the adduser command in the packaging preinstall script. While the elasticsearch user is a system user, it is sometimes convenient to have an existing homedir (even if it is not writeable). For example, running cron as the elasticsearch user will try to change dir to the homedir. closes #14453 --- distribution/packages/src/common/scripts/preinst | 6 ++++-- qa/vagrant/src/test/resources/packaging/utils/packages.bash | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/distribution/packages/src/common/scripts/preinst b/distribution/packages/src/common/scripts/preinst index a9e5295cbc5..2aec2172ad8 100644 --- a/distribution/packages/src/common/scripts/preinst +++ b/distribution/packages/src/common/scripts/preinst @@ -27,6 +27,7 @@ case "$1" in adduser --quiet \ --system \ --no-create-home \ + --home /nonexistent \ --ingroup elasticsearch \ --disabled-password \ --shell /bin/false \ @@ -50,8 +51,9 @@ case "$1" in # Create elasticsearch user if not existing if ! id elasticsearch > /dev/null 2>&1 ; then echo -n "Creating elasticsearch user..."
- useradd -r \ - -M \ + useradd --system \ + --no-create-home \ + --home-dir /nonexistent \ --gid elasticsearch \ --shell /sbin/nologin \ --comment "elasticsearch user" \ diff --git a/qa/vagrant/src/test/resources/packaging/utils/packages.bash b/qa/vagrant/src/test/resources/packaging/utils/packages.bash index a214cd6940f..57f1ebd1c61 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/packages.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/packages.bash @@ -88,6 +88,8 @@ verify_package_installation() { id elasticsearch getent group elasticsearch + # homedir is set in /etc/passwd but to a non-existent directory + assert_file_not_exist $(getent passwd elasticsearch | cut -d: -f6) assert_file "$ESHOME" d root root 755 assert_file "$ESHOME/bin" d root root 755 From 226b45e50944eed90e8cf7ed6bb356f459a00063 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Wed, 2 May 2018 15:51:59 -0600 Subject: [PATCH 25/30] Update versions for start_trial after backport (#30218) This commit is a follow-up to #30135. It updates the stream compatibility versions in the start_trial requests and responses to reflect the fact that this work has been backported to 6.3. --- .../org/elasticsearch/license/PostStartTrialRequest.java | 3 +-- .../org/elasticsearch/license/PostStartTrialResponse.java | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java index c6293646c09..cf94312b6a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java @@ -55,8 +55,7 @@ public class PostStartTrialRequest extends MasterNodeRequest acknowledgeMessages = new HashMap<>(size); @@ -98,8 +97,7 @@ class PostStartTrialResponse extends ActionResponse { @Override public void writeTo(StreamOutput out) throws IOException { - // TODO: Change to 6.3 after backport - Version version = Version.V_7_0_0_alpha1; + Version version = Version.V_6_3_0; if (out.getVersion().onOrAfter(version)) { out.writeEnum(status); out.writeOptionalString(acknowledgeMessage); From 427d6912eafb1fe28d65b8bca89a2c6eab984f87 Mon Sep 17 00:00:00 2001 From: lcawley Date: Wed, 2 May 2018 15:30:49 -0700 Subject: [PATCH 26/30] [DOCS] Fixes link to TLS LDAP info --- x-pack/docs/en/security/authentication/ldap-realm.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc index 8180b109226..205c18429bc 100644 --- a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc @@ -82,4 +82,4 @@ with the <> or in [[ldap-ssl]] ==== Setting up SSL Between Elasticsearch and LDAP -See {ref}/tls-ldap.html[Encrypting Communications Between {es} and LDAP]. +See {ref}/configuring-tls.html#tls-ldap[Encrypting Communications Between {es} and LDAP].
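The start_trial patch above illustrates Elasticsearch's wire-compatibility convention: a field that only exists from a given version onwards must be gated on the same version constant in both the read and the write path, otherwise an older node would mis-parse the stream sent by a newer one. A minimal sketch of that pattern follows; it is illustrative only and not part of any patch in this series: the class `ExampleResponse` and its single field are hypothetical, and only the `Version.V_6_3_0` gate mirrors what `PostStartTrialRequest` and `PostStartTrialResponse` actually do.

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Hypothetical response type sketching the version gate adjusted in the patch above.
class ExampleResponse extends ActionResponse {

    private String acknowledgeMessage; // only present on the wire from 6.3 onwards

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        // Only read the field if the sending node is new enough to have written it.
        if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
            acknowledgeMessage = in.readOptionalString();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        // Only write the field if the receiving node is new enough to read it.
        if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
            out.writeOptionalString(acknowledgeMessage);
        }
    }
}

Both sides must agree on the identical version constant, which is why the backport bumps the request and the response in lockstep.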
From b3516786ecc1276f8509a45b4d0e8cdebb82be4e Mon Sep 17 00:00:00 2001 From: lcawley Date: Wed, 2 May 2018 15:52:14 -0700 Subject: [PATCH 27/30] [DOCS] Removed X-Pack Breaking Changes --- x-pack/docs/en/index.asciidoc | 2 -- .../en/release-notes/xpack-breaking.asciidoc | 36 ------------------- 2 files changed, 38 deletions(-) delete mode 100644 x-pack/docs/en/release-notes/xpack-breaking.asciidoc diff --git a/x-pack/docs/en/index.asciidoc b/x-pack/docs/en/index.asciidoc index d19737c05ef..3133053c5bd 100644 --- a/x-pack/docs/en/index.asciidoc +++ b/x-pack/docs/en/index.asciidoc @@ -5,8 +5,6 @@ include::setup/setup-xes.asciidoc[] include::{es-repo-dir}/index-shared2.asciidoc[] -include::release-notes/xpack-breaking.asciidoc[] - include::{es-repo-dir}/index-shared3.asciidoc[] include::sql/index.asciidoc[] diff --git a/x-pack/docs/en/release-notes/xpack-breaking.asciidoc b/x-pack/docs/en/release-notes/xpack-breaking.asciidoc deleted file mode 100644 index 1c106ec5a28..00000000000 --- a/x-pack/docs/en/release-notes/xpack-breaking.asciidoc +++ /dev/null @@ -1,36 +0,0 @@ -[role="xpack"] -[[breaking-changes-xes]] -= {xpack} Breaking Changes - -[partintro] --- -This section summarizes the changes that you need to be aware of when migrating -your application from one version of {xpack} to another. - -* <> - -See also: - -* <> -* {kibana-ref}/breaking-changes.html[{kib} Breaking Changes] -* {logstash-ref}/breaking-changes.html[Logstash Breaking Changes] - --- - -[role="xpack"] -[[breaking-7.0.0-xes]] -== {xpack} Breaking changes in 7.0.0 - - -Machine Learning:: -* The `max_running_jobs` node property is removed in this release. Use the -`xpack.ml.max_open_jobs` setting instead. For more information, <>. - -Security:: -* The fields returned as part of the mappings section by get index, get -mappings, get field mappings and field capabilities API are now only the ones -that the user is authorized to access in case field level security is enabled. - -See also: - -* <> From 824e648662d4decd3a448591d4c3467ea3dd24f9 Mon Sep 17 00:00:00 2001 From: Andy Bristol Date: Wed, 2 May 2018 18:09:44 -0700 Subject: [PATCH 28/30] [test] add debug logging for packaging test --- .../src/test/resources/packaging/tests/setup_passwords.bash | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/setup_passwords.bash b/x-pack/qa/vagrant/src/test/resources/packaging/tests/setup_passwords.bash index 14bd8be6826..c13a2951136 100644 --- a/x-pack/qa/vagrant/src/test/resources/packaging/tests/setup_passwords.bash +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/setup_passwords.bash @@ -55,8 +55,9 @@ echo 'y' | $ESHOME/bin/elasticsearch-setup-passwords auto SETUP_AUTO echo "$output" > /tmp/setup-passwords-output [ "$status" -eq 0 ] || { - echo "Expected x-pack elasticsearch-setup-passwords tool exit code to be zero" + echo "Expected x-pack elasticsearch-setup-passwords tool exit code to be zero but got $status" cat /tmp/setup-passwords-output + debug_collect_logs false } From 2c38d12e236a3279daa344bc8b52f0e8f49ad4b5 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Thu, 3 May 2018 09:47:12 +0200 Subject: [PATCH 29/30] Watcher: Make start/stop cycle more predictable and synchronous (#30118) The current implementation starts/stops watcher using an executor. This can result in out of order operations.
This commit reduces those executor calls to an absolute minimum in order to be able to do state changes within the cluster state listener method, which runs in sequence. When a state change occurs that forces the watcher service to pause (like no watcher index, no master node, no local shards), the service is now in a paused state. Pausing is a super lightweight operation, which marks the ExecutionService as paused and waits for the currently executing watches to finish in the background via an executor. The same applies to stopping: the potentially long-running operation is outsourced to an executor, as waiting for executed watches is decoupled from the current state. The only other long-running operation is starting, where watches need to be loaded. This is also done via an executor, but has an additional protection by checking the cluster state version it was started with. If another cluster state version was trying to load the watches, then this loading will not take effect. This PR also cleans up some unused states, like a simple boolean in the HistoryStore/TriggeredWatchStore marking it as started or stopped, as this can now be caught in the execution service. Another advantage of this approach is that now only triggered watches are not executed, while watches that are run via the Execute Watch API will still be executed regardless of whether watcher is stopped or not. Lastly, the TickerScheduleTriggerEngine thread now only starts on data nodes. --- docs/CHANGELOG.asciidoc | 5 + .../xpack/test/rest/XPackRestIT.java | 55 +- .../elasticsearch/xpack/watcher/Watcher.java | 4 +- .../watcher/WatcherIndexingListener.java | 4 +- .../watcher/WatcherLifeCycleService.java | 257 ++----- .../xpack/watcher/WatcherService.java | 269 ++++--- .../watcher/execution/ExecutionService.java | 138 ++-- .../execution/TriggeredWatchStore.java | 62 +- .../xpack/watcher/history/HistoryStore.java | 46 +- .../stats/TransportWatcherStatsAction.java | 11 +- .../engine/TickerScheduleTriggerEngine.java | 55 +- .../watcher/WatcherLifeCycleServiceTests.java | 684 +++++++----------- .../xpack/watcher/WatcherServiceTests.java | 23 +- .../execution/ExecutionServiceTests.java | 5 +- .../execution/TriggeredWatchStoreTests.java | 82 +-- .../watcher/history/HistoryStoreTests.java | 28 +- .../test/integration/BootStrapTests.java | 12 +- .../test/integration/WatchAckTests.java | 4 - .../TransportWatcherStatsActionTests.java | 16 +- .../SmokeTestWatcherTestSuiteIT.java | 88 ++- 20 files changed, 810 insertions(+), 1038 deletions(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 0a04cc950e3..7c14d41724a 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -96,6 +96,11 @@ option. ({pull}30140[#29658]) Added new "Request" object flavored request methods. Prefer these instead of the multi-argument versions. ({pull}29623[#29623]) +The cluster state listener to decide if watcher should be +stopped/started/paused now runs far less code in an executor but is more +synchronous and predictable. Also the trigger engine thread is only started on +data nodes. And the Execute Watch API can be triggered regardless of whether watcher is +started or stopped.
({pull}30118[#30118]) [float] === Bug Fixes diff --git a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java index dcca2677f2c..99a6e29e334 100644 --- a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java +++ b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.test.rest; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.http.HttpStatus; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.Response; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -17,6 +18,7 @@ import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -83,7 +85,6 @@ public class XPackRestIT extends ESClientYamlSuiteTestCase { templates.addAll(Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, AnomalyDetectorsIndex.jobStateIndexName(), AnomalyDetectorsIndex.jobResultsIndexPrefix())); - templates.addAll(Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES)); for (String template : templates) { awaitCallApi("indices.exists_template", singletonMap("name", template), emptyList(), @@ -97,19 +98,49 @@ public class XPackRestIT extends ESClientYamlSuiteTestCase { // ensure watcher is started, so that a test can stop watcher and everything still works fine if (isWatcherTest()) { assertBusy(() -> { - try { - ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); - String state = (String) response.evaluate("stats.0.watcher_state"); - if ("started".equals(state) == false) { - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); - } - // assertion required to exit the assertBusy lambda - assertThat(state, is("started")); - } catch (IOException e) { - throw new AssertionError(e); + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + ClientYamlTestResponse startResponse = + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); + assertThat(isAcknowledged, is(true)); + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state to start again"); + case "starting": + throw new AssertionError("waiting until starting state reached started state"); + case "started": + // all good here, we are done + break; + default: + throw new AssertionError("unknown state[" + state + "]"); } }); + + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + awaitCallApi("indices.exists_template", 
singletonMap("name", template), emptyList(), + response -> true, + () -> "Exception when waiting for [" + template + "] template to be created"); + } + + boolean existsWatcherIndex = adminClient().performRequest("HEAD", ".watches").getStatusLine().getStatusCode() == 200; + if (existsWatcherIndex == false) { + return; + } + Response response = adminClient().performRequest("GET", ".watches/_search", Collections.singletonMap("size", "1000")); + ObjectPath objectPathResponse = ObjectPath.createFromResponse(response); + int totalHits = objectPathResponse.evaluate("hits.total"); + if (totalHits > 0) { + List> hits = objectPathResponse.evaluate("hits.hits"); + for (Map hit : hits) { + String id = (String) hit.get("_id"); + assertOK(adminClient().performRequest("DELETE", "_xpack/watcher/watch/" + id)); + } + } } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 57fcff76715..6c4ac1994ff 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -351,7 +351,7 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin { final WatchParser watchParser = new WatchParser(settings, triggerService, registry, inputRegistry, cryptoService, getClock()); final ExecutionService executionService = new ExecutionService(settings, historyStore, triggeredWatchStore, watchExecutor, - getClock(), watchParser, clusterService, client); + getClock(), watchParser, clusterService, client, threadPool.generic()); final Consumer> triggerEngineListener = getTriggerEngineListener(executionService); triggerService.register(triggerEngineListener); @@ -360,7 +360,7 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin { watchParser, client); final WatcherLifeCycleService watcherLifeCycleService = - new WatcherLifeCycleService(settings, threadPool, clusterService, watcherService); + new WatcherLifeCycleService(settings, clusterService, watcherService); listener = new WatcherIndexingListener(settings, watchParser, getClock(), triggerService); clusterService.addListener(listener); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java index 37836ca94f8..8e0fbcb7cb4 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java @@ -123,10 +123,10 @@ final class WatcherIndexingListener extends AbstractComponent implements Indexin boolean shouldBeTriggered = shardAllocationConfiguration.shouldBeTriggered(watch.id()); if (shouldBeTriggered) { if (watch.status().state().isActive()) { - logger.debug("adding watch [{}] to trigger", watch.id()); + logger.debug("adding watch [{}] to trigger service", watch.id()); triggerService.add(watch); } else { - logger.debug("removing watch [{}] to trigger", watch.id()); + logger.debug("removing watch [{}] to trigger service", watch.id()); triggerService.remove(watch.id()); } } else { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 
bec496068e3..eef9e019b7a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -21,29 +21,19 @@ import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.upgrade.UpgradeField; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.WatcherState; -import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import org.elasticsearch.xpack.core.watcher.watch.Watch; -import org.elasticsearch.xpack.watcher.support.WatcherIndexTemplateRegistry; import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; import java.util.Collections; import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; import java.util.stream.Collectors; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; public class WatcherLifeCycleService extends AbstractComponent implements ClusterStateListener { @@ -54,30 +44,14 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste public static final Setting SETTING_REQUIRE_MANUAL_START = Setting.boolSetting("xpack.watcher.require_manual_start", false, Property.NodeScope); - private static final String LIFECYCLE_THREADPOOL_NAME = "watcher-lifecycle"; - - private final WatcherService watcherService; - private final ExecutorService executor; - private AtomicReference> previousAllocationIds = new AtomicReference<>(Collections.emptyList()); - private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. + private final AtomicReference state = new AtomicReference<>(WatcherState.STARTED); + private final AtomicReference> previousAllocationIds = new AtomicReference<>(Collections.emptyList()); private final boolean requireManualStart; + private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. 
+ private volatile WatcherService watcherService; - WatcherLifeCycleService(Settings settings, ThreadPool threadPool, ClusterService clusterService, - WatcherService watcherService) { - // use a single thread executor so that lifecycle changes are handled in the order they - // are submitted in - this(settings, clusterService, watcherService, EsExecutors.newFixed( - LIFECYCLE_THREADPOOL_NAME, - 1, - 1000, - daemonThreadFactory(settings, LIFECYCLE_THREADPOOL_NAME), - threadPool.getThreadContext())); - } - - WatcherLifeCycleService(Settings settings, ClusterService clusterService, - WatcherService watcherService, ExecutorService executorService) { + WatcherLifeCycleService(Settings settings, ClusterService clusterService, WatcherService watcherService) { super(settings); - this.executor = executorService; this.watcherService = watcherService; this.requireManualStart = SETTING_REQUIRE_MANUAL_START.get(settings); clusterService.addListener(this); @@ -91,58 +65,12 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste }); } - public synchronized void stop(String reason) { - watcherService.stop(reason); - } - synchronized void shutDown() { + this.state.set(WatcherState.STOPPING); shutDown = true; - stop("shutdown initiated"); - stopExecutor(); - } - - void stopExecutor() { - ThreadPool.terminate(executor, 10L, TimeUnit.SECONDS); - } - - private synchronized void start(ClusterState state) { - if (shutDown) { - return; - } - final WatcherState watcherState = watcherService.state(); - if (watcherState != WatcherState.STOPPED) { - logger.debug("not starting watcher. watcher can only start if its current state is [{}], but its current state now is [{}]", - WatcherState.STOPPED, watcherState); - return; - } - - // If we start from a cluster state update we need to check if previously we stopped manually - // otherwise Watcher would start upon the next cluster state update while the user instructed Watcher to not run - WatcherMetaData watcherMetaData = state.getMetaData().custom(WatcherMetaData.TYPE); - if (watcherMetaData != null && watcherMetaData.manuallyStopped()) { - logger.debug("not starting watcher. watcher was stopped manually and therefore cannot be auto-started"); - return; - } - - // ensure that templates are existing before starting watcher - // the watcher index template registry is independent from watcher being started or stopped - if (WatcherIndexTemplateRegistry.validate(state) == false) { - logger.debug("not starting watcher, watcher templates are missing in the cluster state"); - return; - } - - if (watcherService.validate(state)) { - logger.trace("starting... (based on cluster state version [{}])", state.getVersion()); - try { - // we need to populate the allocation ids before the next cluster state listener comes in - checkAndSetAllocationIds(state, false); - watcherService.start(state); - } catch (Exception e) { - logger.warn("failed to start watcher. please wait for the cluster to become ready or try to start Watcher manually", e); - } - } else { - logger.debug("not starting watcher. 
because the cluster isn't ready yet to run watcher"); - } + clearAllocationIds(); + watcherService.shutDown(); + this.state.set(WatcherState.STOPPED); } /** @@ -169,43 +97,83 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste } if (Strings.isNullOrEmpty(event.state().nodes().getMasterNodeId())) { - clearAllocationIds(); - executor.execute(() -> this.stop("no master node")); + pauseExecution("no master node"); return; } if (event.state().getBlocks().hasGlobalBlock(ClusterBlockLevel.WRITE)) { - clearAllocationIds(); - executor.execute(() -> this.stop("write level cluster block")); + pauseExecution("write level cluster block"); return; } - if (isWatcherStoppedManually(event.state())) { - clearAllocationIds(); - executor.execute(() -> this.stop("watcher manually marked to shutdown by cluster state update")); - } else { - final WatcherState watcherState = watcherService.state(); - if (watcherState == WatcherState.STARTED && event.state().nodes().getLocalNode().isDataNode()) { - checkAndSetAllocationIds(event.state(), true); - } else if (watcherState != WatcherState.STARTED && watcherState != WatcherState.STARTING) { - IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData()); - IndexMetaData triggeredWatchesIndexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, - event.state().metaData()); - boolean isIndexInternalFormatWatchIndex = watcherIndexMetaData == null || - UpgradeField.checkInternalIndexFormat(watcherIndexMetaData); - boolean isIndexInternalFormatTriggeredWatchIndex = triggeredWatchesIndexMetaData == null || - UpgradeField.checkInternalIndexFormat(triggeredWatchesIndexMetaData); - if (isIndexInternalFormatTriggeredWatchIndex && isIndexInternalFormatWatchIndex) { - checkAndSetAllocationIds(event.state(), false); - executor.execute(() -> start(event.state())); - } else { - logger.warn("not starting watcher, upgrade API run required: .watches[{}], .triggered_watches[{}]", - isIndexInternalFormatWatchIndex, isIndexInternalFormatTriggeredWatchIndex); + boolean isWatcherStoppedManually = isWatcherStoppedManually(event.state()); + // if this is not a data node, we need to start it ourselves possibly + if (event.state().nodes().getLocalNode().isDataNode() == false && + isWatcherStoppedManually == false && this.state.get() == WatcherState.STOPPED) { + watcherService.start(event.state()); + this.state.set(WatcherState.STARTED); + return; + } + + if (isWatcherStoppedManually) { + if (this.state.get() == WatcherState.STARTED) { + clearAllocationIds(); + watcherService.stop("watcher manually marked to shutdown by cluster state update"); + this.state.set(WatcherState.STOPPED); + } + return; + } + + DiscoveryNode localNode = event.state().nodes().getLocalNode(); + RoutingNode routingNode = event.state().getRoutingNodes().node(localNode.getId()); + if (routingNode == null) { + pauseExecution("routing node in cluster state undefined. network issue?"); + return; + } + + IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData()); + if (watcherIndexMetaData == null) { + pauseExecution("no watcher index found"); + return; + } + + String watchIndex = watcherIndexMetaData.getIndex().getName(); + List localShards = routingNode.shardsWithState(watchIndex, RELOCATING, STARTED); + // no local shards, empty out watcher and dont waste resources! 
+ if (localShards.isEmpty()) { + pauseExecution("no local watcher shards found"); + return; + } + + List currentAllocationIds = localShards.stream() + .map(ShardRouting::allocationId) + .map(AllocationId::getId) + .sorted() + .collect(Collectors.toList()); + + if (previousAllocationIds.get().equals(currentAllocationIds) == false) { + if (watcherService.validate(event.state())) { + previousAllocationIds.set(Collections.unmodifiableList(currentAllocationIds)); + if (state.get() == WatcherState.STARTED) { + watcherService.reload(event.state(), "new local watcher shard allocation ids"); + } else if (state.get() == WatcherState.STOPPED) { + watcherService.start(event.state()); + this.state.set(WatcherState.STARTED); } + } else { + clearAllocationIds(); + this.state.set(WatcherState.STOPPED); } } } + private void pauseExecution(String reason) { + if (clearAllocationIds()) { + watcherService.pauseExecution(reason); + } + this.state.set(WatcherState.STARTED); + } + /** * check if watcher has been stopped manually via the stop API */ @@ -215,60 +183,6 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste } /** - * check and optionally set the current allocation ids - * - * @param state the current cluster state - * @param callWatcherService should the watcher service be called for starting/stopping/reloading or should this be treated as a - * dryrun so that the caller is responsible for this - */ - private void checkAndSetAllocationIds(ClusterState state, boolean callWatcherService) { - IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, state.metaData()); - if (watcherIndexMetaData == null) { - if (clearAllocationIds() && callWatcherService) { - executor.execute(wrapWatcherService(() -> watcherService.pauseExecution("no watcher index found"), - e -> logger.error("error pausing watch execution", e))); - } - return; - } - - DiscoveryNode localNode = state.nodes().getLocalNode(); - RoutingNode routingNode = state.getRoutingNodes().node(localNode.getId()); - // this can happen if the node does not hold any data - if (routingNode == null) { - if (clearAllocationIds() && callWatcherService) { - executor.execute(wrapWatcherService( - () -> watcherService.pauseExecution("no routing node for local node found, network issue?"), - e -> logger.error("error pausing watch execution", e))); - } - return; - } - - String watchIndex = watcherIndexMetaData.getIndex().getName(); - List localShards = routingNode.shardsWithState(watchIndex, RELOCATING, STARTED); - // no local shards, empty out watcher and dont waste resources! 
- if (localShards.isEmpty()) { - if (clearAllocationIds() && callWatcherService) { - executor.execute(wrapWatcherService(() -> watcherService.pauseExecution("no local watcher shards found"), - e -> logger.error("error pausing watch execution", e))); - } - return; - } - - List currentAllocationIds = localShards.stream() - .map(ShardRouting::allocationId) - .map(AllocationId::getId) - .collect(Collectors.toList()); - Collections.sort(currentAllocationIds); - - if (previousAllocationIds.get().equals(currentAllocationIds) == false) { - previousAllocationIds.set(Collections.unmodifiableList(currentAllocationIds)); - if (callWatcherService) { - executor.execute(wrapWatcherService(() -> watcherService.reload(state, "new local watcher shard allocation ids"), - e -> logger.error("error reloading watcher", e))); - } - } - } - /** * clear out current allocation ids if not already happened * @return true, if existing allocation ids were cleaned out, false otherwise @@ -283,26 +197,7 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste return previousAllocationIds.get(); } - /** - * Wraps an abstract runnable to easier supply onFailure and doRun methods via lambdas - * This ensures that the uncaught exception handler in the executing threadpool does not get called - * - * @param run The code to be executed in the runnable - * @param exceptionConsumer The exception handling code to be executed, if the runnable fails - * @return The AbstractRunnable instance to pass to the executor - */ - private static AbstractRunnable wrapWatcherService(Runnable run, Consumer exceptionConsumer) { - - return new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - exceptionConsumer.accept(e); - } - - @Override - protected void doRun() throws Exception { - run.run(); - } - }; + public WatcherState getState() { + return state.get(); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index 56c56baae89..d280a150e8d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.watcher; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; @@ -26,16 +25,22 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.xpack.core.watcher.WatcherState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.upgrade.UpgradeField; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import 
org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.execution.ExecutionService; import org.elasticsearch.xpack.watcher.execution.TriggeredWatch; import org.elasticsearch.xpack.watcher.execution.TriggeredWatchStore; +import org.elasticsearch.xpack.watcher.history.HistoryStore; +import org.elasticsearch.xpack.watcher.support.WatcherIndexTemplateRegistry; import org.elasticsearch.xpack.watcher.trigger.TriggerService; import org.elasticsearch.xpack.watcher.watch.WatchParser; import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; @@ -48,19 +53,24 @@ import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import java.util.stream.Collectors; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalState; import static org.elasticsearch.xpack.core.watcher.watch.Watch.INDEX; - public class WatcherService extends AbstractComponent { + private static final String LIFECYCLE_THREADPOOL_NAME = "watcher-lifecycle"; + private final TriggerService triggerService; private final TriggeredWatchStore triggeredWatchStore; private final ExecutionService executionService; @@ -68,12 +78,12 @@ public class WatcherService extends AbstractComponent { private final int scrollSize; private final WatchParser parser; private final Client client; - // package-private for testing - final AtomicReference state = new AtomicReference<>(WatcherState.STOPPED); private final TimeValue defaultSearchTimeout; + private final AtomicLong processedClusterStateVersion = new AtomicLong(0); + private final ExecutorService executor; - public WatcherService(Settings settings, TriggerService triggerService, TriggeredWatchStore triggeredWatchStore, - ExecutionService executionService, WatchParser parser, Client client) { + WatcherService(Settings settings, TriggerService triggerService, TriggeredWatchStore triggeredWatchStore, + ExecutionService executionService, WatchParser parser, Client client, ExecutorService executor) { super(settings); this.triggerService = triggerService; this.triggeredWatchStore = triggeredWatchStore; @@ -83,108 +93,137 @@ public class WatcherService extends AbstractComponent { this.defaultSearchTimeout = settings.getAsTime("xpack.watcher.internal.ops.search.default_timeout", TimeValue.timeValueSeconds(30)); this.parser = parser; this.client = client; + this.executor = executor; + } + + WatcherService(Settings settings, TriggerService triggerService, TriggeredWatchStore triggeredWatchStore, + ExecutionService executionService, WatchParser parser, Client client) { + this(settings, triggerService, triggeredWatchStore, executionService, parser, client, + EsExecutors.newFixed(LIFECYCLE_THREADPOOL_NAME, 1, 1000, daemonThreadFactory(settings, LIFECYCLE_THREADPOOL_NAME), + client.threadPool().getThreadContext())); } /** - * Ensure that watcher can be started, by checking if all indices are marked as up and ready in the 
cluster state + * Ensure that watcher can be reloaded, by checking if all indices are marked as up and ready in the cluster state * @param state The current cluster state * @return true if everything is good to go, so that the service can be started */ public boolean validate(ClusterState state) { - boolean executionServiceValid = executionService.validate(state); - if (executionServiceValid) { - try { - IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, state.metaData()); - // no watch index yet means we are good to go - if (indexMetaData == null) { - return true; - } else { - if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { - logger.debug("watch index [{}] is marked as closed, watcher cannot be started", indexMetaData.getIndex().getName()); - return false; - } else { - return state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive(); - } - } - } catch (IllegalStateException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("error getting index meta data [{}]: ", Watch.INDEX), e); + // the template check only makes sense for non-existing indices, we could refine this + boolean hasValidWatcherTemplates = WatcherIndexTemplateRegistry.validate(state); + if (hasValidWatcherTemplates == false) { + logger.debug("missing watcher index templates, not starting watcher service"); + return false; + } + + IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, state.metaData()); + IndexMetaData triggeredWatchesIndexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, + state.metaData()); + boolean isIndexInternalFormatWatchIndex = watcherIndexMetaData == null || + UpgradeField.checkInternalIndexFormat(watcherIndexMetaData); + boolean isIndexInternalFormatTriggeredWatchIndex = triggeredWatchesIndexMetaData == null || + UpgradeField.checkInternalIndexFormat(triggeredWatchesIndexMetaData); + if (isIndexInternalFormatTriggeredWatchIndex == false || isIndexInternalFormatWatchIndex == false) { + logger.warn("not starting watcher, upgrade API run required: .watches[{}], .triggered_watches[{}]", + isIndexInternalFormatWatchIndex, isIndexInternalFormatTriggeredWatchIndex); + return false; + } + + try { + boolean storesValid = TriggeredWatchStore.validate(state) && HistoryStore.validate(state); + if (storesValid == false) { return false; } - } - return false; - } - - public void start(ClusterState clusterState) throws Exception { - // starting already triggered, exit early - WatcherState currentState = state.get(); - if (currentState == WatcherState.STARTING || currentState == WatcherState.STARTED) { - throw new IllegalStateException("watcher is already in state ["+ currentState +"]"); - } - - if (state.compareAndSet(WatcherState.STOPPED, WatcherState.STARTING)) { - try { - logger.debug("starting watch service..."); - - executionService.start(); - Collection watches = loadWatches(clusterState); - triggerService.start(watches); - - Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(watches, clusterState); - executionService.executeTriggeredWatches(triggeredWatches); - - state.set(WatcherState.STARTED); - logger.debug("watch service has started"); - } catch (Exception e) { - state.set(WatcherState.STOPPED); - throw e; - } - } else { - logger.debug("could not transition state from stopped to starting, current state [{}]", state.get()); + return watcherIndexMetaData == null || (watcherIndexMetaData.getState() == IndexMetaData.State.OPEN && + 
state.routingTable().index(watcherIndexMetaData.getIndex()).allPrimaryShardsActive()); + } catch (IllegalStateException e) { + logger.debug("error validating to start watcher", e); + return false; } } /** - * Stops the watcher service and it's subservices. Should only be called, when watcher is stopped manually + * Stops the watcher service and marks its services as paused */ public void stop(String reason) { - WatcherState currentState = state.get(); - if (currentState == WatcherState.STOPPING || currentState == WatcherState.STOPPED) { - logger.trace("watcher is already in state [{}] not stopping", currentState); - } else { - try { - if (state.compareAndSet(WatcherState.STARTED, WatcherState.STOPPING)) { - logger.info("stopping watch service, reason [{}]", reason); - triggerService.stop(); - executionService.stop(); - state.set(WatcherState.STOPPED); - logger.debug("watch service has stopped"); - } else { - logger.debug("could not transition state from started to stopping, current state [{}]", state.get()); - } - } catch (Exception e) { - state.set(WatcherState.STOPPED); - logger.error("Error stopping watcher", e); - } - } + logger.info("stopping watch service, reason [{}]", reason); + executionService.pause(); + triggerService.pauseExecution(); } /** - * Reload the watcher service, does not switch the state from stopped to started, just keep going - * @param clusterState cluster state, which is needed to find out about local shards + * shuts down the trigger service as well to make sure there are no lingering threads + * also no need to check anything, as this is final, we can just go to status STOPPED */ - public void reload(ClusterState clusterState, String reason) { + void shutDown() { + logger.info("stopping watch service, reason [shutdown initiated]"); + executionService.pause(); + triggerService.stop(); + stopExecutor(); + logger.debug("watch service has stopped"); + } + + void stopExecutor() { + ThreadPool.terminate(executor, 10L, TimeUnit.SECONDS); + } + /** + * Reload the watcher service, does not switch the state from stopped to started, just keep going + * @param state cluster state, which is needed to find out about local shards + */ + void reload(ClusterState state, String reason) { + // this method contains the only async code block, being called by the cluster state listener + // the reason for this is that loading the watches is done in a sync manner and thus cannot be done on the cluster state listener + // thread + // + // this method itself is called by the cluster state listener, so will never be called in parallel + // setting the cluster state version allows us to know if the async method has been overtaken by another async method + // this is unlikely, but can happen if the thread pool schedules two of those runnables at the same time + // by checking the cluster state version before and after loading the watches we can potentially just exit without applying the + // changes + processedClusterStateVersion.set(state.getVersion()); pauseExecution(reason); + triggerService.pauseExecution(); - // load watches - Collection watches = loadWatches(clusterState); - watches.forEach(triggerService::add); + executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), + e -> logger.error("error reloading watcher", e))); - // then load triggered watches, which might have been in the queue that we just cleared, - // maybe we dont need to execute those anymore however, i.e.
due to shard shuffling - // then someone else will - Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(watches, clusterState); - executionService.executeTriggeredWatches(triggeredWatches); + public void start(ClusterState state) { + processedClusterStateVersion.set(state.getVersion()); + executor.execute(wrapWatcherService(() -> reloadInner(state, "starting", true), + e -> logger.error("error starting watcher", e))); + } + + /** + * reload the watches and start scheduling them + */ + private synchronized void reloadInner(ClusterState state, String reason, boolean loadTriggeredWatches) { + // exit early if another thread has come in between + if (processedClusterStateVersion.get() != state.getVersion()) { + logger.debug("watch service has not been reloaded for state [{}], another reload for state [{}] in progress", + state.getVersion(), processedClusterStateVersion.get()); + return; + } + + Collection watches = loadWatches(state); + Collection triggeredWatches = Collections.emptyList(); + if (loadTriggeredWatches) { + triggeredWatches = triggeredWatchStore.findTriggeredWatches(watches, state); + } + + // if we had another state coming in the meantime, we will not start the trigger engines with these watches, but wait + // until the others are loaded + if (processedClusterStateVersion.get() == state.getVersion()) { + executionService.unPause(); + triggerService.start(watches); + if (triggeredWatches.isEmpty() == false) { + executionService.executeTriggeredWatches(triggeredWatches); + } + logger.debug("watch service has been reloaded, reason [{}]", reason); + } else { + logger.debug("watch service has not been reloaded for state [{}], another reload for state [{}] in progress", + state.getVersion(), processedClusterStateVersion.get()); + } } /** @@ -192,8 +231,7 @@ public class WatcherService extends AbstractComponent { * manual watch execution, i.e.
via the execute watch API */ public void pauseExecution(String reason) { - int cancelledTaskCount = executionService.pauseExecution(); - triggerService.pauseExecution(); + int cancelledTaskCount = executionService.pause(); logger.info("paused watch execution, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); } @@ -212,7 +250,7 @@ public class WatcherService extends AbstractComponent { List watches = new ArrayList<>(); try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { RefreshResponse refreshResponse = client.admin().indices().refresh(new RefreshRequest(INDEX)) - .actionGet(TimeValue.timeValueSeconds(5)); + .actionGet(TimeValue.timeValueSeconds(5)); if (refreshResponse.getSuccessfulShards() < indexMetaData.getNumberOfShards()) { throw illegalState("not all required shards have been refreshed"); } @@ -230,12 +268,12 @@ public class WatcherService extends AbstractComponent { List watchIndexShardRoutings = clusterState.getRoutingTable().allShards(watchIndexName); SearchRequest searchRequest = new SearchRequest(INDEX) - .scroll(scrollTimeout) - .preference(Preference.ONLY_LOCAL.toString()) - .source(new SearchSourceBuilder() - .size(scrollSize) - .sort(SortBuilders.fieldSort("_doc")) - .version(true)); + .scroll(scrollTimeout) + .preference(Preference.ONLY_LOCAL.toString()) + .source(new SearchSourceBuilder() + .size(scrollSize) + .sort(SortBuilders.fieldSort("_doc")) + .version(true)); response = client.search(searchRequest).actionGet(defaultSearchTimeout); if (response.getTotalShards() != response.getSuccessfulShards()) { @@ -249,11 +287,11 @@ public class WatcherService extends AbstractComponent { Map> sortedShards = new HashMap<>(localShards.size()); for (ShardRouting localShardRouting : localShards) { List sortedAllocationIds = watchIndexShardRoutings.stream() - .filter(sr -> localShardRouting.getId() == sr.getId()) - .map(ShardRouting::allocationId).filter(Objects::nonNull) - .map(AllocationId::getId).filter(Objects::nonNull) - .sorted() - .collect(Collectors.toList()); + .filter(sr -> localShardRouting.getId() == sr.getId()) + .map(ShardRouting::allocationId).filter(Objects::nonNull) + .map(AllocationId::getId).filter(Objects::nonNull) + .sorted() + .collect(Collectors.toList()); sortedShards.put(localShardRouting.getId(), sortedAllocationIds); } @@ -262,8 +300,8 @@ public class WatcherService extends AbstractComponent { for (SearchHit hit : response.getHits()) { // find out if this hit should be processed locally Optional correspondingShardOptional = localShards.stream() - .filter(sr -> sr.shardId().equals(hit.getShard().getShardId())) - .findFirst(); + .filter(sr -> sr.shardId().equals(hit.getShard().getShardId())) + .findFirst(); if (correspondingShardOptional.isPresent() == false) { continue; } @@ -284,7 +322,8 @@ public class WatcherService extends AbstractComponent { watches.add(watch); } } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("couldn't load watch [{}], ignoring it...", id), e); + logger.error((org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("couldn't load watch [{}], ignoring it...", id), e); } } SearchScrollRequest request = new SearchScrollRequest(response.getScrollId()); @@ -320,7 +359,25 @@ public class WatcherService extends AbstractComponent { return shardIndex == index; } - public WatcherState state() { - return state.get(); + /** + * Wraps an abstract runnable to easier supply onFailure and doRun methods via lambdas + * 
This ensures that the uncaught exception handler in the executing threadpool does not get called + * + * @param run The code to be executed in the runnable + * @param exceptionConsumer The exception handling code to be executed, if the runnable fails + * @return The AbstractRunnable instance to pass to the executor + */ + private static AbstractRunnable wrapWatcherService(Runnable run, Consumer exceptionConsumer) { + return new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + exceptionConsumer.accept(e); + } + + @Override + protected void doRun() throws Exception { + run.run(); + } + }; } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java index 29ef6e03f6d..6901adb0a69 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.execution; +import com.google.common.collect.Iterables; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; @@ -17,7 +18,6 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.MapBuilder; @@ -64,8 +64,10 @@ import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; @@ -75,27 +77,30 @@ public class ExecutionService extends AbstractComponent { public static final Setting DEFAULT_THROTTLE_PERIOD_SETTING = Setting.positiveTimeSetting("xpack.watcher.execution.default_throttle_period", - TimeValue.timeValueSeconds(5), Setting.Property.NodeScope); + TimeValue.timeValueSeconds(5), Setting.Property.NodeScope); private final MeanMetric totalExecutionsTime = new MeanMetric(); private final Map actionByTypeExecutionTime = new HashMap<>(); - private final HistoryStore historyStore; - private final TriggeredWatchStore triggeredWatchStore; - private final WatchExecutor executor; - private final Clock clock; private final TimeValue defaultThrottlePeriod; private final TimeValue maxStopTimeout; + private final TimeValue indexDefaultTimeout; + + private final HistoryStore historyStore; + private final TriggeredWatchStore triggeredWatchStore; + private final Clock clock; private final WatchParser parser; private final ClusterService clusterService; private final Client client; - private final TimeValue indexDefaultTimeout; + private final WatchExecutor executor; + private final ExecutorService genericExecutor; - private volatile CurrentExecutions currentExecutions; - private final AtomicBoolean started = new AtomicBoolean(false); + private AtomicReference currentExecutions = 
new AtomicReference<>(); + private final AtomicBoolean paused = new AtomicBoolean(false); public ExecutionService(Settings settings, HistoryStore historyStore, TriggeredWatchStore triggeredWatchStore, WatchExecutor executor, - Clock clock, WatchParser parser, ClusterService clusterService, Client client) { + Clock clock, WatchParser parser, ClusterService clusterService, Client client, + ExecutorService genericExecutor) { super(settings); this.historyStore = historyStore; this.triggeredWatchStore = triggeredWatchStore; @@ -106,52 +111,21 @@ public class ExecutionService extends AbstractComponent { this.parser = parser; this.clusterService = clusterService; this.client = client; + this.genericExecutor = genericExecutor; this.indexDefaultTimeout = settings.getAsTime("xpack.watcher.internal.ops.index.default_timeout", TimeValue.timeValueSeconds(30)); + this.currentExecutions.set(new CurrentExecutions()); } - public synchronized void start() throws Exception { - if (started.get()) { - return; - } - - assert executor.queue().isEmpty() : "queue should be empty, but contains " + executor.queue().size() + " elements."; - if (started.compareAndSet(false, true)) { - try { - logger.debug("starting execution service"); - historyStore.start(); - triggeredWatchStore.start(); - currentExecutions = new CurrentExecutions(); - logger.debug("started execution service"); - } catch (Exception e) { - started.set(false); - throw e; - } - } - } - - public boolean validate(ClusterState state) { - return triggeredWatchStore.validate(state) && HistoryStore.validate(state); - } - - public synchronized void stop() { - if (started.compareAndSet(true, false)) { - logger.debug("stopping execution service"); - // We could also rely on the shutdown in #updateSettings call, but - // this is a forceful shutdown that also interrupts the worker threads in the thread pool - int cancelledTaskCount = executor.queue().drainTo(new ArrayList<>()); - - this.clearExecutions(); - triggeredWatchStore.stop(); - historyStore.stop(); - logger.debug("stopped execution service, cancelled [{}] queued tasks", cancelledTaskCount); - } + public void unPause() { + paused.set(false); } /** * Pause the execution of the watcher executor * @return the number of tasks that have been removed */ - public synchronized int pauseExecution() { + public int pause() { + paused.set(true); int cancelledTaskCount = executor.queue().drainTo(new ArrayList<>()); this.clearExecutions(); return cancelledTaskCount; @@ -171,12 +145,12 @@ public class ExecutionService extends AbstractComponent { // for testing only CurrentExecutions getCurrentExecutions() { - return currentExecutions; + return currentExecutions.get(); } public List currentExecutions() { List currentExecutions = new ArrayList<>(); - for (WatchExecution watchExecution : this.currentExecutions) { + for (WatchExecution watchExecution : this.currentExecutions.get()) { currentExecutions.add(watchExecution.createSnapshot()); } // Lets show the longest running watch first: @@ -203,26 +177,28 @@ public class ExecutionService extends AbstractComponent { } void processEventsAsync(Iterable events) throws Exception { - if (!started.get()) { - throw new IllegalStateException("not started"); + if (paused.get()) { + logger.debug("watcher execution service paused, not processing [{}] events", Iterables.size(events)); + return; } Tuple, List> watchesAndContext = createTriggeredWatchesAndContext(events); List triggeredWatches = watchesAndContext.v1(); triggeredWatchStore.putAll(triggeredWatches, ActionListener.wrap( - 
response -> executeTriggeredWatches(response, watchesAndContext), - e -> { - Throwable cause = ExceptionsHelper.unwrapCause(e); - if (cause instanceof EsRejectedExecutionException) { - logger.debug("failed to store watch records due to filled up watcher threadpool"); - } else { - logger.warn("failed to store watch records", e); - } - })); + response -> executeTriggeredWatches(response, watchesAndContext), + e -> { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof EsRejectedExecutionException) { + logger.debug("failed to store watch records due to filled up watcher threadpool"); + } else { + logger.warn("failed to store watch records", e); + } + })); } void processEventsSync(Iterable events) throws IOException { - if (!started.get()) { - throw new IllegalStateException("not started"); + if (paused.get()) { + logger.debug("watcher execution service paused, not processing [{}] events", Iterables.size(events)); + return; } Tuple, List> watchesAndContext = createTriggeredWatchesAndContext(events); List triggeredWatches = watchesAndContext.v1(); @@ -279,7 +255,7 @@ public class ExecutionService extends AbstractComponent { WatchRecord record = null; final String watchId = ctx.id().watchId(); try { - boolean executionAlreadyExists = currentExecutions.put(watchId, new WatchExecution(ctx, Thread.currentThread())); + boolean executionAlreadyExists = currentExecutions.get().put(watchId, new WatchExecution(ctx, Thread.currentThread())); if (executionAlreadyExists) { logger.trace("not executing watch [{}] because it is already queued", watchId); record = ctx.abortBeforeExecution(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED, "Watch is already queued in thread pool"); @@ -336,7 +312,7 @@ public class ExecutionService extends AbstractComponent { logger.error((Supplier) () -> new ParameterizedMessage("failed to delete triggered watch [{}]", ctx.id()), e); } } - currentExecutions.remove(watchId); + currentExecutions.get().remove(watchId); logger.debug("finished [{}]/[{}]", watchId, ctx.id()); } return record; @@ -353,14 +329,14 @@ public class ExecutionService extends AbstractComponent { // so we just need to update the watch itself // we do not want to update the status.state field, as it might have been deactivated inbetween Map parameters = MapBuilder.newMapBuilder() - .put(Watch.INCLUDE_STATUS_KEY, "true") - .put(WatchStatus.INCLUDE_STATE, "false") - .immutableMap(); + .put(Watch.INCLUDE_STATUS_KEY, "true") + .put(WatchStatus.INCLUDE_STATE, "false") + .immutableMap(); ToXContent.MapParams params = new ToXContent.MapParams(parameters); XContentBuilder source = JsonXContent.contentBuilder(). - startObject() - .field(WatchField.STATUS.getPreferredName(), watch.status(), params) - .endObject(); + startObject() + .field(WatchField.STATUS.getPreferredName(), watch.status(), params) + .endObject(); UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, watch.id()); updateRequest.doc(source); @@ -400,7 +376,6 @@ public class ExecutionService extends AbstractComponent { The execution of an watch is split into two phases: 1. the trigger part which just makes sure to store the associated watch record in the history 2. the actual processing of the watch - The reason this split is that we don't want to lose the fact watch was triggered. 
This way, even if the thread pool that executes the watches is completely busy, we don't lose the fact that the watch was triggered (it'll have its history record) @@ -419,16 +394,16 @@ public class ExecutionService extends AbstractComponent { } } catch (Exception exc) { logger.error((Supplier<?>) () -> - new ParameterizedMessage("Error storing watch history record for watch [{}] after thread pool rejection", - triggeredWatch.id()), exc); + new ParameterizedMessage("Error storing watch history record for watch [{}] after thread pool rejection", + triggeredWatch.id()), exc); } try { triggeredWatchStore.delete(triggeredWatch.id()); } catch (Exception exc) { logger.error((Supplier<?>) () -> - new ParameterizedMessage("Error deleting triggered watch store record for watch [{}] after thread pool " + - "rejection", triggeredWatch.id()), exc); + new ParameterizedMessage("Error deleting triggered watch store record for watch [{}] after thread pool " + + "rejection", triggeredWatch.id()), exc); } }; } @@ -494,15 +469,15 @@ public class ExecutionService extends AbstractComponent { GetResponse response = getWatch(triggeredWatch.id().watchId()); if (response.isExists() == false) { String message = "unable to find watch for record [" + triggeredWatch.id().watchId() + "]/[" + triggeredWatch.id() + - "], perhaps it has been deleted, ignoring..."; + "], perhaps it has been deleted, ignoring..."; WatchRecord record = new WatchRecord.MessageWatchRecord(triggeredWatch.id(), triggeredWatch.triggerEvent(), - ExecutionState.NOT_EXECUTED_WATCH_MISSING, message, clusterService.localNode().getId()); + ExecutionState.NOT_EXECUTED_WATCH_MISSING, message, clusterService.localNode().getId()); historyStore.forcePut(record); triggeredWatchStore.delete(triggeredWatch.id()); } else { DateTime now = new DateTime(clock.millis(), UTC); TriggeredExecutionContext ctx = new TriggeredExecutionContext(triggeredWatch.id().watchId(), now, - triggeredWatch.triggerEvent(), defaultThrottlePeriod, true); + triggeredWatch.triggerEvent(), defaultThrottlePeriod, true); executeAsync(ctx, triggeredWatch); counter++; } @@ -541,9 +516,10 @@ public class ExecutionService extends AbstractComponent { * This clears out the current executions and sets new empty current executions * This is needed because, when this method is called, watcher keeps running, so sealing executions would be a bad idea */ - public synchronized void clearExecutions() { - currentExecutions.sealAndAwaitEmpty(maxStopTimeout); - currentExecutions = new CurrentExecutions(); + private void clearExecutions() { + final CurrentExecutions currentExecutionsBeforeSetting = currentExecutions.getAndSet(new CurrentExecutions()); + // clear old executions in background, no need to wait + genericExecutor.execute(() -> currentExecutionsBeforeSetting.sealAndAwaitEmpty(maxStopTimeout)); } // the watch execution task takes another runnable as parameter diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java index 35bc805fc59..e0164b5bdbd 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.watcher.execution; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier;
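// Illustrative sketch, not part of this patch: the reworked ExecutionService above
// swaps the old started/stopped lifecycle for a `paused` AtomicBoolean plus an
// AtomicReference that is exchanged with getAndSet(), so clearing executions never
// blocks the calling thread. The same idea in isolation, with hypothetical names
// (PausableService, Executions):
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

class Executions {
    void sealAndAwaitEmpty() { /* refuse new entries, wait for in-flight ones to drain */ }
}

class PausableService {
    private final AtomicBoolean paused = new AtomicBoolean(false);
    private final AtomicReference<Executions> current = new AtomicReference<>(new Executions());
    private final ExecutorService genericExecutor = Executors.newSingleThreadExecutor();

    void process(Runnable work) {
        if (paused.get()) {
            return; // drop new work while paused, mirroring processEventsAsync/Sync above
        }
        work.run();
    }

    void pause() {
        paused.set(true);
        clearExecutions();
    }

    void unPause() {
        paused.set(false);
    }

    private void clearExecutions() {
        // Swap in a fresh container so new executions are tracked immediately,
        // then seal and drain the old one in the background instead of blocking.
        Executions old = current.getAndSet(new Executions());
        genericExecutor.execute(old::sealAndAwaitEmpty);
    }
}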
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -24,7 +22,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -46,13 +43,11 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; -import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalState; public class TriggeredWatchStore extends AbstractComponent { @@ -61,7 +56,6 @@ public class TriggeredWatchStore extends AbstractComponent { private final TimeValue scrollTimeout; private final TriggeredWatch.Parser triggeredWatchParser; - private final AtomicBoolean started = new AtomicBoolean(false); private final TimeValue defaultBulkTimeout; private final TimeValue defaultSearchTimeout; @@ -73,36 +67,12 @@ public class TriggeredWatchStore extends AbstractComponent { this.defaultBulkTimeout = settings.getAsTime("xpack.watcher.internal.ops.bulk.default_timeout", TimeValue.timeValueSeconds(120)); this.defaultSearchTimeout = settings.getAsTime("xpack.watcher.internal.ops.search.default_timeout", TimeValue.timeValueSeconds(30)); this.triggeredWatchParser = triggeredWatchParser; - this.started.set(true); } - public void start() { - started.set(true); - } - - public boolean validate(ClusterState state) { - try { - IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, state.metaData()); - if (indexMetaData == null) { - return true; - } else { - if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { - logger.debug("triggered watch index [{}] is marked as closed, watcher cannot be started", - indexMetaData.getIndex().getName()); - return false; - } else { - return state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive(); - } - } - } catch (IllegalStateException e) { - logger.trace((Supplier<?>) () -> new ParameterizedMessage("error getting index meta data [{}]: ", - TriggeredWatchStoreField.INDEX_NAME), e); - return false; - } - } - - public void stop() { - started.set(false); + public static boolean validate(ClusterState state) { + IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, state.metaData()); + return indexMetaData == null || (indexMetaData.getState() == IndexMetaData.State.OPEN && + state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive()); } public void putAll(final List<TriggeredWatch> triggeredWatches, final ActionListener<BulkResponse> listener) throws IOException { @@ -111,9 +81,8 @@ return; } - ensureStarted(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, createBulkRequest(triggeredWatches, - 
TriggeredWatchStoreField.DOC_TYPE), listener, client::bulk); } public BulkResponse putAll(final List<TriggeredWatch> triggeredWatches) throws IOException { @@ -144,7 +113,6 @@ } public void delete(Wid wid) { - ensureStarted(); DeleteRequest request = new DeleteRequest(TriggeredWatchStoreField.INDEX_NAME, TriggeredWatchStoreField.DOC_TYPE, wid.value()); try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { client.delete(request); // FIXME shouldn't we wait before saying the delete was successful @@ -152,12 +120,6 @@ logger.trace("successfully deleted triggered watch with id [{}]", wid); } - private void ensureStarted() { - if (!started.get()) { - throw illegalState("unable to persist triggered watches, the store is not ready"); - } - } - /** * Checks if any of the loaded watches has been put into the triggered watches index for immediate execution * @@ -180,7 +142,7 @@ try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { client.admin().indices().refresh(new RefreshRequest(TriggeredWatchStoreField.INDEX_NAME)) - .actionGet(TimeValue.timeValueSeconds(5)); + .actionGet(TimeValue.timeValueSeconds(5)); } catch (IndexNotFoundException e) { return Collections.emptyList(); } @@ -189,12 +151,12 @@ Collection<TriggeredWatch> triggeredWatches = new ArrayList<>(ids.size()); SearchRequest searchRequest = new SearchRequest(TriggeredWatchStoreField.INDEX_NAME) - .scroll(scrollTimeout) - .preference(Preference.LOCAL.toString()) - .source(new SearchSourceBuilder() - .size(scrollSize) - .sort(SortBuilders.fieldSort("_doc")) - .version(true)); + .scroll(scrollTimeout) + .preference(Preference.LOCAL.toString()) + .source(new SearchSourceBuilder() + .size(scrollSize) + .sort(SortBuilders.fieldSort("_doc")) + .version(true)); SearchResponse response = null; try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java index d226917c574..64e909a2f73 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java @@ -29,7 +29,6 @@ import java.io.IOException; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -38,7 +37,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.core.watcher.support.Exceptions.ioException; -public class HistoryStore extends AbstractComponent { +public class HistoryStore extends AbstractComponent implements AutoCloseable { public static final String DOC_TYPE = "doc"; @@ -47,24 +46,17 @@ private final
ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); private final Lock putUpdateLock = readWriteLock.readLock(); private final Lock stopLock = readWriteLock.writeLock(); - private final AtomicBoolean started = new AtomicBoolean(false); public HistoryStore(Settings settings, Client client) { super(settings); this.client = client; } - public void start() { - started.set(true); - } - - public void stop() { - stopLock.lock(); //This will block while put or update actions are underway - try { - started.set(false); - } finally { - stopLock.unlock(); - } + @Override + public void close() { + // This will block while put or update actions are underway + stopLock.lock(); + stopLock.unlock(); } /** @@ -72,9 +64,6 @@ public class HistoryStore extends AbstractComponent { * If the specified watchRecord already was stored this call will fail with a version conflict. */ public void put(WatchRecord watchRecord) throws Exception { - if (!started.get()) { - throw new IllegalStateException("unable to persist watch record history store is not ready"); - } String index = HistoryStoreField.getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime()); putUpdateLock.lock(); try (XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -82,8 +71,8 @@ public class HistoryStore extends AbstractComponent { watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) - .source(builder) - .opType(IndexRequest.OpType.CREATE); + .source(builder) + .opType(IndexRequest.OpType.CREATE); client.index(request).actionGet(30, TimeUnit.SECONDS); logger.debug("indexed watch history record [{}]", watchRecord.id().value()); } catch (IOException ioe) { @@ -98,9 +87,6 @@ public class HistoryStore extends AbstractComponent { * Any existing watchRecord will be overwritten. 
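// Illustrative sketch, not part of this patch: HistoryStore's close() above takes
// and immediately releases the write half of a ReentrantReadWriteLock while every
// put/forcePut holds the read half, so closing simply blocks until all in-flight
// writes have finished. The trick in isolation (DrainingStore is a hypothetical name):
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class DrainingStore implements AutoCloseable {
    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
    private final Lock putLock = readWriteLock.readLock();    // shared: many puts may run at once
    private final Lock closeLock = readWriteLock.writeLock(); // exclusive: waits for all puts

    void put(Runnable write) {
        putLock.lock();
        try {
            write.run();
        } finally {
            putLock.unlock();
        }
    }

    @Override
    public void close() {
        // Blocks until every put() has released the read lock; there is nothing
        // else to do afterwards, so the lock is released right away.
        closeLock.lock();
        closeLock.unlock();
    }
}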
*/ public void forcePut(WatchRecord watchRecord) { - if (!started.get()) { - throw new IllegalStateException("unable to persist watch record history store is not ready"); - } String index = HistoryStoreField.getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime()); putUpdateLock.lock(); try { @@ -109,17 +95,17 @@ public class HistoryStore extends AbstractComponent { watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) - .source(builder) - .opType(IndexRequest.OpType.CREATE); + .source(builder) + .opType(IndexRequest.OpType.CREATE); client.index(request).get(30, TimeUnit.SECONDS); logger.debug("indexed watch history record [{}]", watchRecord.id().value()); } catch (VersionConflictEngineException vcee) { watchRecord = new WatchRecord.MessageWatchRecord(watchRecord, ExecutionState.EXECUTED_MULTIPLE_TIMES, - "watch record [{ " + watchRecord.id() + " }] has been stored before, previous state [" + watchRecord.state() + "]"); + "watch record [{ " + watchRecord.id() + " }] has been stored before, previous state [" + watchRecord.state() + "]"); try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder(); ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) - .source(xContentBuilder.value(watchRecord)); + .source(xContentBuilder.value(watchRecord)); client.index(request).get(30, TimeUnit.SECONDS); } logger.debug("overwrote watch history record [{}]", watchRecord.id().value()); @@ -142,11 +128,7 @@ public class HistoryStore extends AbstractComponent { public static boolean validate(ClusterState state) { String currentIndex = HistoryStoreField.getHistoryIndexNameForTime(DateTime.now(DateTimeZone.UTC)); IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(currentIndex, state.metaData()); - if (indexMetaData == null) { - return true; - } else { - return indexMetaData.getState() == IndexMetaData.State.OPEN && - state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive(); - } + return indexMetaData == null || (indexMetaData.getState() == IndexMetaData.State.OPEN && + state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive()); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java index d7f8962756b..474057031e7 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.watcher.common.stats.Counters; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsRequest; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; +import org.elasticsearch.xpack.watcher.WatcherLifeCycleService; import org.elasticsearch.xpack.watcher.WatcherService; import org.elasticsearch.xpack.watcher.execution.ExecutionService; import org.elasticsearch.xpack.watcher.trigger.TriggerService; @@ -32,19 +33,19 @@ import 
java.util.List; public class TransportWatcherStatsAction extends TransportNodesAction<WatcherStatsRequest, WatcherStatsResponse, WatcherStatsRequest.Node, WatcherStatsResponse.Node> { - private final WatcherService watcherService; private final ExecutionService executionService; private final TriggerService triggerService; + private final WatcherLifeCycleService lifeCycleService; @Inject public TransportWatcherStatsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, WatcherService watcherService, + IndexNameExpressionResolver indexNameExpressionResolver, WatcherLifeCycleService lifeCycleService, ExecutionService executionService, TriggerService triggerService) { super(settings, WatcherStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, WatcherStatsRequest::new, WatcherStatsRequest.Node::new, ThreadPool.Names.MANAGEMENT, WatcherStatsResponse.Node.class); - this.watcherService = watcherService; + this.lifeCycleService = lifeCycleService; this.executionService = executionService; this.triggerService = triggerService; } @@ -68,7 +69,7 @@ public class TransportWatcherStatsAction extends TransportNodesAction TICKER_INTERVAL_SETTING = - positiveTimeSetting("xpack.watcher.trigger.schedule.ticker.tick_interval", TimeValue.timeValueMillis(500), Property.NodeScope); + positiveTimeSetting("xpack.watcher.trigger.schedule.ticker.tick_interval", TimeValue.timeValueMillis(500), Property.NodeScope); private final TimeValue tickInterval; private volatile Map<String, ActiveSchedule> schedules; @@ -42,26 +43,31 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { super(settings, scheduleRegistry, clock); this.tickInterval = TICKER_INTERVAL_SETTING.get(settings); this.schedules = new ConcurrentHashMap<>(); + this.ticker = new Ticker(Node.NODE_DATA_SETTING.get(settings)); } @Override - public void start(Collection<Watch> jobs) { - long starTime = clock.millis(); + public synchronized void start(Collection<Watch> jobs) { + long startTime = clock.millis(); Map<String, ActiveSchedule> schedules = new ConcurrentHashMap<>(); for (Watch job : jobs) { if (job.trigger() instanceof ScheduleTrigger) { ScheduleTrigger trigger = (ScheduleTrigger) job.trigger(); - schedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), starTime)); + schedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); } } - this.schedules = schedules; - this.ticker = new Ticker(); + this.schedules.putAll(schedules); } @Override public void stop() { + schedules.clear(); ticker.close(); - pauseExecution(); + } + + @Override + public synchronized void pauseExecution() { + schedules.clear(); } @Override @@ -71,11 +77,6 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), clock.millis())); } - @Override - public void pauseExecution() { - schedules.clear(); - } - @Override public int getJobCount() { return schedules.size(); } @@ -93,9 +94,9 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { long scheduledTime = schedule.check(triggeredTime); if (scheduledTime > 0) { logger.debug("triggered job [{}] at [{}] (scheduled time was [{}])", schedule.name, - new DateTime(triggeredTime, UTC), new DateTime(scheduledTime, UTC)); + new DateTime(triggeredTime, UTC), new DateTime(scheduledTime, UTC)); events.add(new ScheduleTriggerEvent(schedule.name, new DateTime(triggeredTime, UTC), - new DateTime(scheduledTime, UTC))); + new
DateTime(scheduledTime, UTC))); if (events.size() >= 1000) { notifyListeners(events); events.clear(); @@ -145,11 +146,15 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { private volatile boolean active = true; private final CountDownLatch closeLatch = new CountDownLatch(1); + private boolean isDataNode; - Ticker() { + Ticker(boolean isDataNode) { super("ticker-schedule-trigger-engine"); + this.isDataNode = isDataNode; setDaemon(true); - start(); + if (isDataNode) { + start(); + } } @Override @@ -167,15 +172,17 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { } public void close() { - logger.trace("stopping ticker thread"); - active = false; - try { - closeLatch.await(); - } catch (InterruptedException e) { - logger.warn("caught an interrupted exception when waiting while closing ticker thread", e); - Thread.currentThread().interrupt(); + if (isDataNode) { + logger.trace("stopping ticker thread"); + active = false; + try { + closeLatch.await(); + } catch (InterruptedException e) { + logger.warn("caught an interrupted exception when waiting while closing ticker thread", e); + Thread.currentThread().interrupt(); + } + logger.trace("ticker thread stopped"); } - logger.trace("ticker thread stopped"); } } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 86375c0ea48..316cb722f2f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.watcher; -import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -25,16 +24,13 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.WatcherState; -import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.junit.Before; import org.mockito.stubbing.Answer; @@ -52,6 +48,7 @@ import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateR import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME; import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyObject; import static org.mockito.Matchers.anyString; @@ -59,8 +56,11 
@@ import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; public class WatcherLifeCycleServiceTests extends ESTestCase { @@ -70,8 +70,6 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { @Before public void prepareServices() { - ThreadPool threadPool = mock(ThreadPool.class); - when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); ClusterService clusterService = mock(ClusterService.class); Answer answer = invocationOnMock -> { AckedClusterStateUpdateTask updateTask = (AckedClusterStateUpdateTask) invocationOnMock.getArguments()[1]; @@ -80,276 +78,233 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { }; doAnswer(answer).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class)); watcherService = mock(WatcherService.class); - lifeCycleService = new WatcherLifeCycleService(Settings.EMPTY, clusterService, watcherService, - EsExecutors.newDirectExecutorService()) { - @Override - void stopExecutor() { - // direct executor cannot be terminated - } - }; + lifeCycleService = new WatcherLifeCycleService(Settings.EMPTY, clusterService, watcherService); } - public void testStartAndStopCausedByClusterState() throws Exception { + public void testNoRestartWithoutAllocationIdsConfigured() { IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(new Index("anything", "foo")).build(); ClusterState previousClusterState = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) - .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) - .build(); + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .build(); IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(new Index(Watch.INDEX, "foo")).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .build()) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) - .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) - .build(); + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .build(); - when(watcherService.state()).thenReturn(WatcherState.STOPPED); 
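// Illustrative sketch, not part of this patch: the rewritten tests below lean on
// Mockito's reset() together with verifyZeroInteractions()/verifyNoMoreInteractions()
// to check, phase by phase, that the lifecycle service leaves the watcher service
// alone. Reduced to its essentials (the Service interface here is hypothetical):
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;

class MockPhases {
    interface Service {
        void reload(String reason);
    }

    void demo() {
        Service service = mock(Service.class);

        service.reload("first phase");
        verify(service, times(1)).reload("first phase"); // recorded interaction

        reset(service);                  // wipe recorded interactions between phases
        verifyZeroInteractions(service); // passes: nothing has happened since the reset
    }
}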
when(watcherService.validate(clusterState)).thenReturn(true); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, previousClusterState)); - verify(watcherService, times(1)).start(clusterState); - verify(watcherService, never()).stop(anyString()); + verifyZeroInteractions(watcherService); - // Trying to start a second time, but that should have no affect. - when(watcherService.state()).thenReturn(WatcherState.STARTED); + // Trying to start a second time, but that should have no effect. lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, previousClusterState)); - verify(watcherService, times(1)).start(clusterState); - verify(watcherService, never()).stop(anyString()); + verifyZeroInteractions(watcherService); } - public void testStartWithStateNotRecoveredBlock() throws Exception { + public void testStartWithStateNotRecoveredBlock() { DiscoveryNodes.Builder nodes = new DiscoveryNodes.Builder().masterNodeId("id1").localNodeId("id1"); ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) - .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) - .nodes(nodes).build(); - when(watcherService.state()).thenReturn(WatcherState.STOPPED); + .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) + .nodes(nodes).build(); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); - verify(watcherService, never()).start(any(ClusterState.class)); + verifyZeroInteractions(watcherService); } - public void testShutdown() throws Exception { + public void testShutdown() { IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(new Index(Watch.INDEX, "foo")).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) - .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .build()) - .build(); + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .build(); when(watcherService.validate(clusterState)).thenReturn(true); - when(watcherService.state()).thenReturn(WatcherState.STOPPED); - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState)); - verify(watcherService, times(1)).start(any(ClusterState.class)); - verify(watcherService, never()).stop(anyString()); - - when(watcherService.state()).thenReturn(WatcherState.STARTED); lifeCycleService.shutDown(); - verify(watcherService, times(1)).start(any(ClusterState.class)); - verify(watcherService, times(1)).stop(eq("shutdown initiated")); + verify(watcherService, never()).stop(anyString()); + verify(watcherService, times(1)).shutDown(); - 
when(watcherService.state()).thenReturn(WatcherState.STOPPED); + reset(watcherService); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); - verify(watcherService, times(1)).start(any(ClusterState.class)); - verify(watcherService, times(1)).stop(eq("shutdown initiated")); + verifyZeroInteractions(watcherService); } - public void testManualStartStop() throws Exception { - IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(new Index(Watch.INDEX, "foo")).build(); + public void testManualStartStop() { + Index index = new Index(Watch.INDEX, "uuid"); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addShard( + TestShardRouting.newShardRouting(Watch.INDEX, 0, "node_1", true, ShardRoutingState.STARTED)); + IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX).settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, required + .numberOfShards(1).numberOfReplicas(0); + MetaData.Builder metaDataBuilder = MetaData.builder() + .put(indexMetaDataBuilder) + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())); + if (randomBoolean()) { + metaDataBuilder.putCustom(WatcherMetaData.TYPE, new WatcherMetaData(false)); + } + MetaData metaData = metaDataBuilder.build(); + IndexRoutingTable indexRoutingTable = indexRoutingTableBuilder.build(); ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) - .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .build()) - .build(); + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .metaData(metaData) + .build(); when(watcherService.validate(clusterState)).thenReturn(true); - when(watcherService.state()).thenReturn(WatcherState.STOPPED); - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState)); - verify(watcherService, times(1)).start(any(ClusterState.class)); - verify(watcherService, never()).stop(anyString()); + // mark watcher manually as stopped + ClusterState stoppedClusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .metaData(MetaData.builder(metaData).putCustom(WatcherMetaData.TYPE, new WatcherMetaData(true)).build()) + .build(); - when(watcherService.state()).thenReturn(WatcherState.STARTED); - String reason = randomAlphaOfLength(10); - lifeCycleService.stop(reason); - verify(watcherService, times(1)).start(any(ClusterState.class)); - verify(watcherService, times(1)).stop(eq(reason)); + lifeCycleService.clusterChanged(new 
ClusterChangedEvent("foo", stoppedClusterState, clusterState)); + verify(watcherService, times(1)).stop(eq("watcher manually marked to shutdown by cluster state update")); - // Starting via cluster state update, we shouldn't start because we have been stopped manually. - when(watcherService.state()).thenReturn(WatcherState.STOPPED); - lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); - verify(watcherService, times(2)).start(any(ClusterState.class)); - verify(watcherService, times(1)).stop(eq(reason)); + // Starting via cluster state update, as the watcher metadata block is removed/set to true + reset(watcherService); + when(watcherService.validate(clusterState)).thenReturn(true); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, stoppedClusterState)); + verify(watcherService, times(1)).start(eq(clusterState)); // no change, keep going - clusterState = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .build()) - .build(); - when(watcherService.state()).thenReturn(WatcherState.STARTED); + reset(watcherService); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); - verify(watcherService, times(2)).start(any(ClusterState.class)); - verify(watcherService, times(1)).stop(eq(reason)); - - ClusterState previousClusterState = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .build()) - .build(); - when(watcherService.validate(clusterState)).thenReturn(true); - when(watcherService.state()).thenReturn(WatcherState.STOPPED); - lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, previousClusterState)); - verify(watcherService, times(3)).start(any(ClusterState.class)); - verify(watcherService, times(1)).stop(eq(reason)); + verifyZeroInteractions(watcherService); } - public void testManualStartStopClusterStateNotValid() throws Exception { - DiscoveryNodes.Builder nodes = new DiscoveryNodes.Builder().masterNodeId("id1").localNodeId("id1"); - ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(nodes).build(); - when(watcherService.state()).thenReturn(WatcherState.STOPPED); - when(watcherService.validate(clusterState)).thenReturn(false); - - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState)); - - verify(watcherService, never()).start(any(ClusterState.class)); - verify(watcherService, never()).stop(anyString()); - } - - public void testManualStartStopWatcherNotStopped() throws Exception { - DiscoveryNodes.Builder nodes = new DiscoveryNodes.Builder().masterNodeId("id1").localNodeId("id1"); - ClusterState clusterState = ClusterState.builder(new 
ClusterName("my-cluster")) - .nodes(nodes).build(); - when(watcherService.state()).thenReturn(WatcherState.STOPPING); - - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState)); - verify(watcherService, never()).validate(any(ClusterState.class)); - verify(watcherService, never()).start(any(ClusterState.class)); - verify(watcherService, never()).stop(anyString()); - } - - public void testNoLocalShards() throws Exception { + public void testNoLocalShards() { Index watchIndex = new Index(Watch.INDEX, "foo"); ShardId shardId = new ShardId(watchIndex, 0); DiscoveryNodes nodes = new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") - .add(newNode("node_1")).add(newNode("node_2")) - .build(); + .add(newNode("node_1")).add(newNode("node_2")) + .build(); IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) - .settings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - ).build(); + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6) + ).build(); IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(randomBoolean() ? - TestShardRouting.newShardRouting(shardId, "node_1", true, STARTED) : - TestShardRouting.newShardRouting(shardId, "node_1", "node_2", true, RELOCATING)) - .build(); + .addShard(randomBoolean() ? + TestShardRouting.newShardRouting(shardId, "node_1", true, STARTED) : + TestShardRouting.newShardRouting(shardId, "node_1", "node_2", true, RELOCATING)) + .build(); ClusterState clusterStateWithLocalShards = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(nodes) - .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) - .metaData(MetaData.builder().put(indexMetaData, false)) - .build(); + .nodes(nodes) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); // shard moved over to node 2 IndexRoutingTable watchRoutingTableNode2 = IndexRoutingTable.builder(watchIndex) - .addShard(randomBoolean() ? - TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED) : - TestShardRouting.newShardRouting(shardId, "node_2", "node_1", true, RELOCATING)) - .build(); + .addShard(randomBoolean() ? 
+ TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED) : + TestShardRouting.newShardRouting(shardId, "node_2", "node_1", true, RELOCATING)) + .build(); ClusterState clusterStateWithoutLocalShards = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(nodes) - .routingTable(RoutingTable.builder().add(watchRoutingTableNode2).build()) - .metaData(MetaData.builder().put(indexMetaData, false)) - .build(); - - when(watcherService.state()).thenReturn(WatcherState.STARTED); + .nodes(nodes) + .routingTable(RoutingTable.builder().add(watchRoutingTableNode2).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); // set current allocation ids + when(watcherService.validate(eq(clusterStateWithLocalShards))).thenReturn(true); + when(watcherService.validate(eq(clusterStateWithoutLocalShards))).thenReturn(false); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithLocalShards, clusterStateWithoutLocalShards)); - verify(watcherService, times(0)).pauseExecution(eq("no local watcher shards found")); + verify(watcherService, times(1)).reload(eq(clusterStateWithLocalShards), eq("new local watcher shard allocation ids")); + verify(watcherService, times(1)).validate(eq(clusterStateWithLocalShards)); + verifyNoMoreInteractions(watcherService); - // no more local hards, lets pause execution + // no more local shards, let's pause execution + reset(watcherService); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutLocalShards, clusterStateWithLocalShards)); verify(watcherService, times(1)).pauseExecution(eq("no local watcher shards found")); + verifyNoMoreInteractions(watcherService); // no further invocations should happen if the cluster state does not change in regard to local shards + reset(watcherService); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutLocalShards, clusterStateWithoutLocalShards)); - verify(watcherService, times(1)).pauseExecution(eq("no local watcher shards found")); + verifyZeroInteractions(watcherService); } - public void testReplicaWasAddedOrRemoved() throws Exception { + public void testReplicaWasAddedOrRemoved() { Index watchIndex = new Index(Watch.INDEX, "foo"); ShardId shardId = new ShardId(watchIndex, 0); ShardId secondShardId = new ShardId(watchIndex, 1); DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") - .add(newNode("node_1")) - .add(newNode("node_2")) - .build(); + .add(newNode("node_1")) + .add(newNode("node_2")) + .build(); IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) - .build(); + .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) + .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .build(); IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) - .settings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - ).build(); + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6) + ).build();
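// Illustrative sketch, not part of this patch: the assertions above encode the rule
// that watcher reloads only when the set of local watcher shard allocation ids
// changes, pauses when it becomes empty, and does nothing when consecutive cluster
// states agree. A reduced model of that decision (hypothetical names, not the real
// WatcherLifeCycleService logic):
import java.util.Collections;
import java.util.List;

class AllocationIdTracker {
    private List<String> previousIds = Collections.emptyList();

    /** Returns "pause", "reload" or "noop" depending on how the local allocation changed. */
    String onClusterState(List<String> currentIds) {
        if (currentIds.equals(previousIds)) {
            return "noop"; // same local shards as before: no interaction expected
        }
        previousIds = currentIds;
        return currentIds.isEmpty() ? "pause" : "reload";
    }
}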
ClusterState stateWithPrimaryShard = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(discoveryNodes) - .routingTable(RoutingTable.builder().add(previousWatchRoutingTable).build()) - .metaData(MetaData.builder().put(indexMetaData, false)) - .build(); + .nodes(discoveryNodes) + .routingTable(RoutingTable.builder().add(previousWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED)) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) - .build(); + .addShard(TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED)) + .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) + .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .build(); ClusterState stateWithReplicaAdded = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(discoveryNodes) - .routingTable(RoutingTable.builder().add(currentWatchRoutingTable).build()) - .metaData(MetaData.builder().put(indexMetaData, false)) - .build(); + .nodes(discoveryNodes) + .routingTable(RoutingTable.builder().add(currentWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); // randomize between addition or removal of a replica boolean replicaAdded = randomBoolean(); - ClusterChangedEvent event; - ClusterState usedClusterState; + ClusterChangedEvent firstEvent; + ClusterChangedEvent secondEvent; if (replicaAdded) { - event = new ClusterChangedEvent("any", stateWithReplicaAdded, stateWithPrimaryShard); - usedClusterState = stateWithReplicaAdded; + firstEvent = new ClusterChangedEvent("any", stateWithPrimaryShard, stateWithReplicaAdded); + secondEvent = new ClusterChangedEvent("any", stateWithReplicaAdded, stateWithPrimaryShard); } else { - event = new ClusterChangedEvent("any", stateWithPrimaryShard, stateWithReplicaAdded); - usedClusterState = stateWithPrimaryShard; + firstEvent = new ClusterChangedEvent("any", stateWithReplicaAdded, stateWithPrimaryShard); + secondEvent = new ClusterChangedEvent("any", stateWithPrimaryShard, stateWithReplicaAdded); } - when(watcherService.state()).thenReturn(WatcherState.STARTED); - lifeCycleService.clusterChanged(event); - verify(watcherService).reload(eq(usedClusterState), anyString()); + when(watcherService.validate(eq(firstEvent.state()))).thenReturn(true); + lifeCycleService.clusterChanged(firstEvent); + verify(watcherService).reload(eq(firstEvent.state()), anyString()); + + reset(watcherService); + when(watcherService.validate(eq(secondEvent.state()))).thenReturn(true); + lifeCycleService.clusterChanged(secondEvent); + verify(watcherService).reload(eq(secondEvent.state()), anyString()); } // make sure that cluster state changes can be processed on nodes that do not hold data @@ -360,43 +315,42 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index).addShard(shardRouting); DiscoveryNode node1 = new DiscoveryNode("node_1", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), - new HashSet<>(asList(randomFrom(DiscoveryNode.Role.INGEST, DiscoveryNode.Role.MASTER))), Version.CURRENT); + new HashSet<>(asList(randomFrom(DiscoveryNode.Role.INGEST, DiscoveryNode.Role.MASTER))), 
Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node_2", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), - new HashSet<>(asList(DiscoveryNode.Role.DATA)), Version.CURRENT); + new HashSet<>(asList(DiscoveryNode.Role.DATA)), Version.CURRENT); DiscoveryNode node3 = new DiscoveryNode("node_3", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), - new HashSet<>(asList(DiscoveryNode.Role.DATA)), Version.CURRENT); + new HashSet<>(asList(DiscoveryNode.Role.DATA)), Version.CURRENT); IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX) - .settings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - ); + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ); ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) - .metaData(MetaData.builder().put(indexMetaDataBuilder)) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2).add(node3)) - .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) - .build(); + .metaData(MetaData.builder().put(indexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2).add(node3)) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .build(); IndexMetaData.Builder newIndexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX) - .settings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - ); + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ); ShardRouting replicaShardRouting = TestShardRouting.newShardRouting(shardId, "node3", false, STARTED); IndexRoutingTable.Builder newRoutingTable = IndexRoutingTable.builder(index).addShard(shardRouting).addShard(replicaShardRouting); ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) - .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2).add(node3)) - .routingTable(RoutingTable.builder().add(newRoutingTable).build()) - .build(); + .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2).add(node3)) + .routingTable(RoutingTable.builder().add(newRoutingTable).build()) + .build(); - when(watcherService.state()).thenReturn(WatcherState.STARTED); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", currentState, previousState)); verify(watcherService, times(0)).pauseExecution(anyObject()); verify(watcherService, times(0)).reload(any(), any()); @@ -406,76 +360,48 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { Index watchIndex = new Index(Watch.INDEX, "foo"); ShardId shardId = new ShardId(watchIndex, 0); IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(shardId, "node_1", true, STARTED)).build(); + 
.addShard(TestShardRouting.newShardRouting(shardId, "node_1", true, STARTED)).build(); DiscoveryNodes nodes = new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1")).build(); IndexMetaData.Builder newIndexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX) - .settings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - ); + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6) + ); ClusterState clusterStateWithWatcherIndex = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(nodes) - .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) - .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) - .build(); + .nodes(nodes) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) + .build(); ClusterState clusterStateWithoutWatcherIndex = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(nodes) - .build(); + .nodes(nodes) + .build(); - when(watcherService.state()).thenReturn(WatcherState.STARTED); + when(watcherService.validate(eq(clusterStateWithWatcherIndex))).thenReturn(true); + when(watcherService.validate(eq(clusterStateWithoutWatcherIndex))).thenReturn(false); // first add the shard allocation ids, by going from empty cs to CS with watcher index lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithWatcherIndex, clusterStateWithoutWatcherIndex)); + verify(watcherService).reload(eq(clusterStateWithWatcherIndex), anyString()); // now remove watches index, and ensure that pausing is only called once, no matter how often called (i.e. 
each CS update) lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutWatcherIndex, clusterStateWithWatcherIndex)); verify(watcherService, times(1)).pauseExecution(anyObject()); + reset(watcherService); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutWatcherIndex, clusterStateWithWatcherIndex)); - verify(watcherService, times(1)).pauseExecution(anyObject()); - } - - public void testWatcherDoesNotStartWithOldIndexFormat() throws Exception { - String index = randomFrom(Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME); - Index watchIndex = new Index(index, "foo"); - ShardId shardId = new ShardId(watchIndex, 0); - IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(shardId, "node_1", true, STARTED)).build(); - DiscoveryNodes nodes = new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1")).build(); - - Settings.Builder indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); - // no matter if not set or set to one, watcher should not start - if (randomBoolean()) { - indexSettings.put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 1); - } - IndexMetaData.Builder newIndexMetaDataBuilder = IndexMetaData.builder(index).settings(indexSettings); - - ClusterState clusterStateWithWatcherIndex = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(nodes) - .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) - .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) - .build(); - - ClusterState emptyClusterState = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).build(); - - when(watcherService.state()).thenReturn(WatcherState.STOPPED); - when(watcherService.validate(eq(clusterStateWithWatcherIndex))).thenReturn(true); - lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithWatcherIndex, emptyClusterState)); - verify(watcherService, never()).start(any(ClusterState.class)); + verifyZeroInteractions(watcherService); } public void testWatcherServiceDoesNotStartIfIndexTemplatesAreMissing() throws Exception { DiscoveryNodes nodes = new DiscoveryNodes.Builder() - .masterNodeId("node_1").localNodeId("node_1") - .add(newNode("node_1")) - .build(); + .masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1")) + .build(); MetaData.Builder metaDataBuilder = MetaData.builder(); boolean isHistoryTemplateAdded = randomBoolean(); @@ -495,191 +421,103 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { } ClusterState state = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).metaData(metaDataBuilder).build(); when(watcherService.validate(eq(state))).thenReturn(true); - when(watcherService.state()).thenReturn(WatcherState.STOPPED); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", state, state)); verify(watcherService, times(0)).start(any(ClusterState.class)); } public void testWatcherStopsWhenMasterNodeIsMissing() { + startWatcher(); + DiscoveryNodes nodes = new DiscoveryNodes.Builder() - .localNodeId("node_1") - .add(newNode("node_1")) - .build(); + .localNodeId("node_1") + .add(newNode("node_1")) + .build(); ClusterState state = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).build(); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", state, state)); - 
verify(watcherService, times(1)).stop(eq("no master node")); + verify(watcherService, times(1)).pauseExecution(eq("no master node")); } public void testWatcherStopsOnClusterLevelBlock() { + startWatcher(); + DiscoveryNodes nodes = new DiscoveryNodes.Builder() - .localNodeId("node_1") - .masterNodeId("node_1") - .add(newNode("node_1")) - .build(); + .localNodeId("node_1") + .masterNodeId("node_1") + .add(newNode("node_1")) + .build(); ClusterBlocks clusterBlocks = ClusterBlocks.builder().addGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_WRITES).build(); ClusterState state = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).blocks(clusterBlocks).build(); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", state, state)); - verify(watcherService, times(1)).stop(eq("write level cluster block")); + verify(watcherService, times(1)).pauseExecution(eq("write level cluster block")); } - public void testStateIsSetImmediately() throws Exception { - Index index = new Index(Watch.INDEX, "foo"); - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); - indexRoutingTableBuilder.addShard( - TestShardRouting.newShardRouting(Watch.INDEX, 0, "node_1", true, ShardRoutingState.STARTED)); - IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX).settings(settings(Version.CURRENT) - .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, required - .numberOfShards(1).numberOfReplicas(0); + public void testMasterOnlyNodeCanStart() { + List<DiscoveryNode.Role> roles = Collections.singletonList(randomFrom(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.INGEST)); ClusterState state = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") - .add(newNode("node_1"))) - .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(indexMetaDataBuilder) - .build()) - .build(); + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(new DiscoveryNode("node_1", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(roles), Version.CURRENT))).build(); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("test", state, state)); + assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); + } + + public void testDataNodeWithoutDataCanStart() { + MetaData metaData = MetaData.builder().put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build(); + ClusterState state = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .metaData(metaData) + .build(); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("test", state, state)); + assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); + } + + private ClusterState startWatcher() { Index index = new Index(Watch.INDEX, "uuid"); IndexRoutingTable.Builder
indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addShard( + TestShardRouting.newShardRouting(Watch.INDEX, 0, "node_1", true, ShardRoutingState.STARTED)); + IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX).settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, required + .numberOfShards(1).numberOfReplicas(0); + MetaData metaData = MetaData.builder().put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())).put(indexMetaDataBuilder) + .build(); + ClusterState state = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()) + .metaData(metaData) + .build(); + ClusterState emptyState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1"))) + .metaData(metaData) + .build(); + when(watcherService.validate(state)).thenReturn(true); - when(watcherService.state()).thenReturn(WatcherState.STOPPED); - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, state)); - verify(watcherService, times(1)).start(eq(state)); + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, emptyState)); + assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); + verify(watcherService, times(1)).reload(eq(state), anyString()); assertThat(lifeCycleService.allocationIds(), hasSize(1)); - // now do any cluster state upgrade, see that reload gets triggers, but should not - when(watcherService.state()).thenReturn(WatcherState.STARTED); - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, state)); - verify(watcherService, never()).pauseExecution(anyString()); - - verify(watcherService, never()).reload(eq(state), anyString()); - assertThat(lifeCycleService.allocationIds(), hasSize(1)); - } - - public void testWatcherServiceExceptionsAreCaught() { - Index index = new Index(Watch.INDEX, "foo"); - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); - indexRoutingTableBuilder.addShard( - TestShardRouting.newShardRouting(Watch.INDEX, 0, "node_1", true, ShardRoutingState.STARTED)); - IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX).settings(settings(Version.CURRENT) - .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, required - .numberOfShards(1).numberOfReplicas(0).build(); - - // special setup for one of the following cluster states - DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class); - DiscoveryNode localNode = mock(DiscoveryNode.class); - when(discoveryNodes.getMasterNodeId()).thenReturn("node_1"); - when(discoveryNodes.getLocalNode()).thenReturn(localNode); - when(localNode.isDataNode()).thenReturn(true); - when(localNode.getId()).thenReturn("does_not_exist"); - - ClusterState clusterState = randomFrom( - // cluster state with no watcher index - ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) - .metaData(MetaData.builder() 
- .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .build()) - .build(), - // cluster state with no routing node - ClusterState.builder(new ClusterName("my-cluster")) - .nodes(discoveryNodes) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .build()) - .build(), - - // cluster state with no local shards - ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(indexMetaData, true) - .build()) - .build() - ); - - ClusterState stateWithWatcherShards = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") - .add(newNode("node_1"))) - .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(indexMetaData, true) - .build()) - .build(); - - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", stateWithWatcherShards, stateWithWatcherShards)); - - when(watcherService.validate(anyObject())).thenReturn(true); - when(watcherService.state()).thenReturn(WatcherState.STARTED); - doAnswer(invocation -> { - throw new ElasticsearchSecurityException("breakme"); - }).when(watcherService).pauseExecution(anyString()); - - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, stateWithWatcherShards)); - verify(watcherService, times(1)).pauseExecution(anyString()); - } - - public void testWatcherServiceExceptionsAreCaughtOnReload() { - Index index = new Index(Watch.INDEX, "foo"); - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); - indexRoutingTableBuilder.addShard( - TestShardRouting.newShardRouting(Watch.INDEX, 0, "node_1", true, ShardRoutingState.STARTED)); - IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX).settings(settings(Version.CURRENT) - .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, required - .numberOfShards(1).numberOfReplicas(0).build(); - - // cluster state with different local shards (another shard id) - ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))).routingTable( - RoutingTable.builder().add(IndexRoutingTable.builder(index) - .addShard(TestShardRouting.newShardRouting(Watch.INDEX, 1, "node_1", true, 
ShardRoutingState.STARTED)) - .build()).build()).metaData( - MetaData.builder().put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(indexMetaData, true).build()).build(); - - ClusterState stateWithWatcherShards = ClusterState.builder(new ClusterName("my-cluster")) - .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") - .add(newNode("node_1"))) - .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()) - .metaData(MetaData.builder() - .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) - .put(indexMetaData, true) - .build()) - .build(); - - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", stateWithWatcherShards, stateWithWatcherShards)); - - when(watcherService.validate(anyObject())).thenReturn(true); - when(watcherService.state()).thenReturn(WatcherState.STARTED); - doAnswer(invocation -> { - throw new ElasticsearchSecurityException("breakme"); - }).when(watcherService).reload(eq(clusterState), anyString()); - - lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, stateWithWatcherShards)); - verify(watcherService, times(1)).reload(eq(clusterState), anyString()); + // reset the mock, the user has to mock everything themselves again + reset(watcherService); + return state; } private List randomIndexPatterns() { return IntStream.range(0, between(1, 10)) - .mapToObj(n -> randomAlphaOfLengthBetween(1, 100)) - .collect(Collectors.toList()); + .mapToObj(n -> randomAlphaOfLengthBetween(1, 100)) + .collect(Collectors.toList()); } private static DiscoveryNode newNode(String nodeName) { @@ -688,6 +526,6 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { private static DiscoveryNode newNode(String nodeName, Version version) { return new DiscoveryNode(nodeName, ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), - new HashSet<>(asList(DiscoveryNode.Role.values())), version); + new HashSet<>(asList(DiscoveryNode.Role.values())), version); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 0622ab48227..92726fb94cd 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; @@ -55,12 +56,12 @@ import org.mockito.ArgumentCaptor; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.concurrent.ExecutorService; import static java.util.Arrays.asList; 
import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyObject; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -68,15 +69,20 @@ import static org.mockito.Mockito.when; public class WatcherServiceTests extends ESTestCase { - public void testValidateStartWithClosedIndex() throws Exception { + private final ExecutorService executorService = EsExecutors.newDirectExecutorService(); + + public void testValidateStartWithClosedIndex() { TriggerService triggerService = mock(TriggerService.class); TriggeredWatchStore triggeredWatchStore = mock(TriggeredWatchStore.class); ExecutionService executionService = mock(ExecutionService.class); - when(executionService.validate(anyObject())).thenReturn(true); WatchParser parser = mock(WatchParser.class); WatcherService service = new WatcherService(Settings.EMPTY, triggerService, triggeredWatchStore, - executionService, parser, mock(Client.class)); + executionService, parser, mock(Client.class), executorService) { + @Override + void stopExecutor() { + } + }; ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); MetaData.Builder metaDataBuilder = MetaData.builder(); @@ -97,14 +103,17 @@ public class WatcherServiceTests extends ESTestCase { TriggerService triggerService = mock(TriggerService.class); TriggeredWatchStore triggeredWatchStore = mock(TriggeredWatchStore.class); ExecutionService executionService = mock(ExecutionService.class); - when(executionService.validate(anyObject())).thenReturn(true); WatchParser parser = mock(WatchParser.class); Client client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); WatcherService service = new WatcherService(settings, triggerService, triggeredWatchStore, - executionService, parser, client); + executionService, parser, client, executorService) { + @Override + void stopExecutor() { + } + }; // cluster state setup, with one node, one shard @@ -199,4 +208,4 @@ public class WatcherServiceTests extends ESTestCase { return new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), new HashSet<>(asList(DiscoveryNode.Role.values())), Version.CURRENT); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index 9684c55692f..73f0e820720 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.DeprecationHandler; @@ -147,9 +148,7 @@ public class ExecutionServiceTests extends ESTestCase { 
when(clusterService.localNode()).thenReturn(discoveryNode); executionService = new ExecutionService(Settings.EMPTY, historyStore, triggeredWatchStore, executor, clock, parser, - clusterService, client); - - executionService.start(); + clusterService, client, EsExecutors.newDirectExecutorService()); } public void testExecute() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index 07ba254dea9..f38f4ad6a86 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -85,9 +85,9 @@ import static org.mockito.Mockito.when; public class TriggeredWatchStoreTests extends ESTestCase { private Settings indexSettings = settings(Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .build(); + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); private Client client; private TriggeredWatch.Parser parser; @@ -101,10 +101,9 @@ public class TriggeredWatchStoreTests extends ESTestCase { when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); parser = mock(TriggeredWatch.Parser.class); triggeredWatchStore = new TriggeredWatchStore(Settings.EMPTY, client, parser); - triggeredWatchStore.start(); } - public void testFindTriggeredWatchesEmptyCollection() throws Exception { + public void testFindTriggeredWatchesEmptyCollection() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("name")); Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(Collections.emptyList(), csBuilder.build()); assertThat(triggeredWatches, hasSize(0)); @@ -112,10 +111,10 @@ public class TriggeredWatchStoreTests extends ESTestCase { public void testValidateNoIndex() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("name")); - assertThat(triggeredWatchStore.validate(csBuilder.build()), is(true)); + assertThat(TriggeredWatchStore.validate(csBuilder.build()), is(true)); } - public void testValidateNoActivePrimaryShards() throws Exception { + public void testValidateNoActivePrimaryShards() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("name")); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); @@ -124,11 +123,11 @@ public class TriggeredWatchStoreTests extends ESTestCase { int numShards = 2 + randomInt(2); int numStartedShards = 1; Settings settings = settings(Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .build(); + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); metaDataBuilder.put(IndexMetaData.builder(TriggeredWatchStoreField.INDEX_NAME).settings(settings) - .numberOfShards(numShards).numberOfReplicas(1)); + .numberOfShards(numShards).numberOfReplicas(1)); final Index index = metaDataBuilder.get(TriggeredWatchStoreField.INDEX_NAME).getIndex(); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); for (int i = 0; i < numShards; i++) { @@ -143,9 +142,9 @@ public class TriggeredWatchStoreTests extends ESTestCase { } 
ShardId shardId = new ShardId(index, 0); indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId, currentNodeId, null, true, state, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""))) - .build()); + .addShard(TestShardRouting.newShardRouting(shardId, currentNodeId, null, true, state, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""))) + .build()); indexRoutingTableBuilder.addReplica(); } routingTableBuilder.add(indexRoutingTableBuilder.build()); @@ -154,10 +153,10 @@ public class TriggeredWatchStoreTests extends ESTestCase { csBuilder.routingTable(routingTableBuilder.build()); ClusterState cs = csBuilder.build(); - assertThat(triggeredWatchStore.validate(cs), is(false)); + assertThat(TriggeredWatchStore.validate(cs), is(false)); } - public void testFindTriggeredWatchesGoodCase() throws Exception { + public void testFindTriggeredWatchesGoodCase() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); @@ -167,8 +166,8 @@ public class TriggeredWatchStoreTests extends ESTestCase { IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); ShardId shardId = new ShardId(index, 0); indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId, "_node_id", null, true, ShardRoutingState.STARTED)) - .build()); + .addShard(TestShardRouting.newShardRouting(shardId, "_node_id", null, true, ShardRoutingState.STARTED)) + .build()); indexRoutingTableBuilder.addReplica(); routingTableBuilder.add(indexRoutingTableBuilder.build()); csBuilder.metaData(metaDataBuilder); @@ -206,7 +205,7 @@ public class TriggeredWatchStoreTests extends ESTestCase { hit.sourceRef(source); hits = new SearchHits(new SearchHit[]{hit}, 1, 1.0f); SearchResponse searchResponse2 = new SearchResponse( - new InternalSearchResponse(hits, null, null, null, false, null, 1), "_scrollId1", 1, 1, 0, 1, null, null); + new InternalSearchResponse(hits, null, null, null, false, null, 1), "_scrollId1", 1, 1, 0, 1, null, null); SearchResponse searchResponse3 = new SearchResponse(InternalSearchResponse.empty(), "_scrollId2", 1, 1, 0, 1, null, null); doAnswer(invocation -> { @@ -229,7 +228,7 @@ public class TriggeredWatchStoreTests extends ESTestCase { when(client.clearScroll(any())).thenReturn(clearScrollResponseFuture); clearScrollResponseFuture.onResponse(new ClearScrollResponse(true, 1)); - assertThat(triggeredWatchStore.validate(cs), is(true)); + assertThat(TriggeredWatchStore.validate(cs), is(true)); DateTime now = DateTime.now(UTC); ScheduleTriggerEvent triggerEvent = new ScheduleTriggerEvent(now, now); @@ -260,85 +259,86 @@ public class TriggeredWatchStoreTests extends ESTestCase { // the elasticsearch migration helper is doing reindex using aliases, so we have to // make sure that the watch store supports a single alias pointing to the watch index - public void testLoadStoreAsAlias() throws Exception { + public void testLoadStoreAsAlias() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); MetaData.Builder metaDataBuilder = MetaData.builder(); metaDataBuilder.put(IndexMetaData.builder("triggered-watches-alias").settings(indexSettings) - .putAlias(new AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); + .putAlias(new 
AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); final Index index = metaDataBuilder.get("triggered-watches-alias").getIndex(); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); ShardId shardId = new ShardId(index, 0); indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId, "_node_id", null, true, ShardRoutingState.STARTED)) - .build()); + .addShard(TestShardRouting.newShardRouting(shardId, "_node_id", null, true, ShardRoutingState.STARTED)) + .build()); indexRoutingTableBuilder.addReplica(); routingTableBuilder.add(indexRoutingTableBuilder.build()); csBuilder.metaData(metaDataBuilder); csBuilder.routingTable(routingTableBuilder.build()); ClusterState cs = csBuilder.build(); - assertThat(triggeredWatchStore.validate(cs), is(true)); + assertThat(TriggeredWatchStore.validate(cs), is(true)); } // the elasticsearch migration helper is doing reindex using aliases, so we have to // make sure that the watch store supports only a single index in an alias - public void testLoadingFailsWithTwoAliases() throws Exception { + public void testLoadingFailsWithTwoAliases() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); MetaData.Builder metaDataBuilder = MetaData.builder(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); metaDataBuilder.put(IndexMetaData.builder("triggered-watches-alias").settings(indexSettings) - .putAlias(new AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); + .putAlias(new AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); metaDataBuilder.put(IndexMetaData.builder("whatever").settings(indexSettings) - .putAlias(new AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); + .putAlias(new AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); final Index index = metaDataBuilder.get("triggered-watches-alias").getIndex(); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(index, 0)) - .addShard(TestShardRouting.newShardRouting("triggered-watches-alias", 0, "_node_id", null, true, ShardRoutingState.STARTED)) - .build()); + .addShard(TestShardRouting.newShardRouting("triggered-watches-alias", 0, "_node_id", null, true, ShardRoutingState.STARTED)) + .build()); indexRoutingTableBuilder.addReplica(); final Index otherIndex = metaDataBuilder.get("whatever").getIndex(); IndexRoutingTable.Builder otherIndexRoutingTableBuilder = IndexRoutingTable.builder(otherIndex); otherIndexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(index, 0)) - .addShard(TestShardRouting.newShardRouting("whatever", 0, "_node_id", null, true, ShardRoutingState.STARTED)) - .build()); + .addShard(TestShardRouting.newShardRouting("whatever", 0, "_node_id", null, true, ShardRoutingState.STARTED)) + .build()); csBuilder.metaData(metaDataBuilder); csBuilder.routingTable(routingTableBuilder.build()); ClusterState cs = csBuilder.build(); - assertThat(triggeredWatchStore.validate(cs), is(false)); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> TriggeredWatchStore.validate(cs)); + assertThat(e.getMessage(), is("Alias [.triggered_watches] points to more than one index")); } // this is a special condition that could lead to an NPE in earlier versions - public void 
testTriggeredWatchesIndexIsClosed() throws Exception { + public void testTriggeredWatchesIndexIsClosed() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); MetaData.Builder metaDataBuilder = MetaData.builder(); metaDataBuilder.put(IndexMetaData.builder(TriggeredWatchStoreField.INDEX_NAME) - .settings(indexSettings) - .state(IndexMetaData.State.CLOSE)); + .settings(indexSettings) + .state(IndexMetaData.State.CLOSE)); csBuilder.metaData(metaDataBuilder); - assertThat(triggeredWatchStore.validate(csBuilder.build()), is(false)); + assertThat(TriggeredWatchStore.validate(csBuilder.build()), is(false)); } - public void testTriggeredWatchesIndexDoesNotExistOnStartup() throws Exception { + public void testTriggeredWatchesIndexDoesNotExistOnStartup() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); ClusterState cs = csBuilder.build(); - assertThat(triggeredWatchStore.validate(cs), is(true)); + assertThat(TriggeredWatchStore.validate(cs), is(true)); Watch watch = mock(Watch.class); triggeredWatchStore.findTriggeredWatches(Collections.singletonList(watch), cs); verifyZeroInteractions(client); } - public void testIndexNotFoundButInMetaData() throws Exception { + public void testIndexNotFoundButInMetaData() { ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); MetaData.Builder metaDataBuilder = MetaData.builder() - .put(IndexMetaData.builder(TriggeredWatchStoreField.INDEX_NAME).settings(indexSettings)); + .put(IndexMetaData.builder(TriggeredWatchStoreField.INDEX_NAME).settings(indexSettings)); csBuilder.metaData(metaDataBuilder); ClusterState cs = csBuilder.build(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java index f2a0f4c311a..8f1cce93055 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java @@ -42,7 +42,6 @@ import static org.elasticsearch.xpack.core.watcher.history.HistoryStoreField.get import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.core.IsEqual.equalTo; import static org.joda.time.DateTimeZone.UTC; @@ -64,7 +63,6 @@ public class HistoryStoreTests extends ESTestCase { when(client.threadPool()).thenReturn(threadPool); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); historyStore = new HistoryStore(Settings.EMPTY, client); - historyStore.start(); } public void testPut() throws Exception { @@ -80,7 +78,7 @@ public class HistoryStoreTests extends ESTestCase { IndexRequest request = (IndexRequest) invocation.getArguments()[0]; PlainActionFuture indexFuture = PlainActionFuture.newFuture(); if (request.id().equals(wid.value()) && request.type().equals(HistoryStore.DOC_TYPE) && request.opType() == OpType.CREATE - && request.index().equals(index)) { + && request.index().equals(index)) { indexFuture.onResponse(indexResponse); } else { indexFuture.onFailure(new ElasticsearchException("test issue")); @@ -92,32 +90,16 @@ public class HistoryStoreTests extends ESTestCase { 
verify(client).index(any()); } - public void testPutStopped() throws Exception { - Wid wid = new Wid("_name", new DateTime(0, UTC)); - ScheduleTriggerEvent event = new ScheduleTriggerEvent(wid.watchId(), new DateTime(0, UTC), new DateTime(0, UTC)); - WatchRecord watchRecord = new WatchRecord.MessageWatchRecord(wid, event, ExecutionState.EXECUTED, null, randomAlphaOfLength(10)); - - historyStore.stop(); - try { - historyStore.put(watchRecord); - fail("Expected IllegalStateException"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), is("unable to persist watch record history store is not ready")); - } finally { - historyStore.start(); - } - } - public void testIndexNameGeneration() { String indexTemplateVersion = INDEX_TEMPLATE_VERSION; assertThat(getHistoryIndexNameForTime(new DateTime(0, UTC)), - equalTo(".watcher-history-"+ indexTemplateVersion +"-1970.01.01")); + equalTo(".watcher-history-"+ indexTemplateVersion +"-1970.01.01")); assertThat(getHistoryIndexNameForTime(new DateTime(100000000000L, UTC)), - equalTo(".watcher-history-" + indexTemplateVersion + "-1973.03.03")); + equalTo(".watcher-history-" + indexTemplateVersion + "-1973.03.03")); assertThat(getHistoryIndexNameForTime(new DateTime(1416582852000L, UTC)), - equalTo(".watcher-history-" + indexTemplateVersion + "-2014.11.21")); + equalTo(".watcher-history-" + indexTemplateVersion + "-2014.11.21")); assertThat(getHistoryIndexNameForTime(new DateTime(2833165811000L, UTC)), - equalTo(".watcher-history-" + indexTemplateVersion + "-2059.10.12")); + equalTo(".watcher-history-" + indexTemplateVersion + "-2059.10.12")); } public void testStoreWithHideSecrets() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java index e47727f5d10..65c5d773046 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; import org.hamcrest.Matchers; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import org.junit.Before; import java.util.Arrays; import java.util.List; @@ -62,11 +61,6 @@ public class BootStrapTests extends AbstractWatcherIntegrationTestCase { return false; } - @Before - public void deleteAllWatchHistoryIndices() { - assertAcked(client().admin().indices().prepareDelete(HistoryStoreField.INDEX_PREFIX + "*")); - } - public void testLoadMalformedWatchRecord() throws Exception { client().prepareIndex(Watch.INDEX, Watch.DOC_TYPE, "_id") .setSource(jsonBuilder().startObject() @@ -141,8 +135,10 @@ public class BootStrapTests extends AbstractWatcherIntegrationTestCase { stopWatcher(); startWatcher(); - WatcherStatsResponse response = watcherClient().prepareWatcherStats().get(); - assertThat(response.getWatchesCount(), equalTo(1L)); + assertBusy(() -> { + WatcherStatsResponse response = watcherClient().prepareWatcherStats().get(); + assertThat(response.getWatchesCount(), equalTo(1L)); + }); } @AwaitsFix(bugUrl = "Supposedly fixed; https://github.com/elastic/x-pack-elasticsearch/issues/1915") diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java index 8cb4ac1d07b..d98d6a44daf 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java @@ -88,9 +88,7 @@ public class WatchAckTests extends AbstractWatcherIntegrationTestCase { assertThat(a1CountAfterAck, greaterThan(0L)); assertThat(a2CountAfterAck, greaterThan(0L)); - logger.info("###3"); timeWarp().trigger("_id", 4, TimeValue.timeValueSeconds(5)); - logger.info("###4"); flush(); refresh(); @@ -107,9 +105,7 @@ public class WatchAckTests extends AbstractWatcherIntegrationTestCase { assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); refresh(); - logger.info("###5"); timeWarp().trigger("_id", 4, TimeValue.timeValueSeconds(5)); - logger.info("###6"); GetWatchResponse getWatchResponse = watcherClient().prepareGetWatch("_id").get(); assertThat(getWatchResponse.isFound(), is(true)); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsActionTests.java index 94b356286da..4eb1d709c3b 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsActionTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.common.stats.Counters; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsRequest; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; -import org.elasticsearch.xpack.watcher.WatcherService; +import org.elasticsearch.xpack.watcher.WatcherLifeCycleService; import org.elasticsearch.xpack.watcher.execution.ExecutionService; import org.elasticsearch.xpack.watcher.trigger.TriggerService; import org.junit.Before; @@ -59,8 +59,8 @@ public class TransportWatcherStatsActionTests extends ESTestCase { when(clusterState.getMetaData()).thenReturn(MetaData.EMPTY_META_DATA); when(clusterService.state()).thenReturn(clusterState); - WatcherService watcherService = mock(WatcherService.class); - when(watcherService.state()).thenReturn(WatcherState.STARTED); + WatcherLifeCycleService watcherLifeCycleService = mock(WatcherLifeCycleService.class); + when(watcherLifeCycleService.getState()).thenReturn(WatcherState.STARTED); ExecutionService executionService = mock(ExecutionService.class); when(executionService.executionThreadPoolQueueSize()).thenReturn(100L); @@ -80,9 +80,9 @@ public class TransportWatcherStatsActionTests extends ESTestCase { secondTriggerServiceStats.inc("foo.bar.baz", 1024); when(triggerService.stats()).thenReturn(firstTriggerServiceStats, secondTriggerServiceStats); - action = new TransportWatcherStatsAction(Settings.EMPTY, transportService, - clusterService, threadPool, new ActionFilters(Collections.emptySet()), - new IndexNameExpressionResolver(Settings.EMPTY), watcherService, executionService, triggerService); + action = new TransportWatcherStatsAction(Settings.EMPTY, transportService, clusterService, threadPool, new + ActionFilters(Collections.emptySet()), new 
IndexNameExpressionResolver(Settings.EMPTY), watcherLifeCycleService, + executionService, triggerService); } public void testWatcherStats() throws Exception { @@ -92,7 +92,7 @@ WatcherStatsResponse.Node nodeResponse2 = action.nodeOperation(new WatcherStatsRequest.Node(request, "nodeId2")); WatcherStatsResponse response = action.newResponse(request, - Arrays.asList(nodeResponse1, nodeResponse2), Collections.emptyList()); + Arrays.asList(nodeResponse1, nodeResponse2), Collections.emptyList()); assertThat(response.getWatchesCount(), is(40L)); try (XContentBuilder builder = jsonBuilder()) { @@ -107,4 +107,4 @@ assertThat(objectPath.evaluate("stats.1.stats.whatever"), is(1)); } } -} \ No newline at end of file +} diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index 6581de8fa26..35a70a0aaeb 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -20,10 +20,10 @@ import org.junit.After; import org.junit.Before; import java.io.IOException; -import java.util.Collections; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; +import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -38,28 +38,64 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { @Before public void startWatcher() throws Exception { + // delete the watcher history to not clutter it with entries from other tests + assertOK(adminClient().performRequest("DELETE", ".watcher-history-*")); + assertBusy(() -> { - adminClient().performRequest("POST", "_xpack/watcher/_start"); + Response response = adminClient().performRequest("GET", "_xpack/watcher/stats"); + String state = ObjectPath.createFromResponse(response).evaluate("stats.0.watcher_state"); - for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { - assertOK(adminClient().performRequest("HEAD", "_template/" + template)); + switch (state) { + case "stopped": + Response startResponse = adminClient().performRequest("POST", "/_xpack/watcher/_start"); + boolean isAcknowledged = ObjectPath.createFromResponse(startResponse).evaluate("acknowledged"); + assertThat(isAcknowledged, is(true)); + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state to start again"); + case "starting": + throw new AssertionError("waiting until starting state reached started state"); + case "started": + // all good here, we are done + break; + default: + throw new AssertionError("unknown state[" + state + "]"); } - - Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); - ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); - String state = objectPath.evaluate("stats.0.watcher_state"); - assertThat(state, is("started")); }); + + assertBusy(() -> { + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + Response templateExistsResponse = 
adminClient().performRequest("HEAD", "_template/" + template, emptyMap()); + assertThat(templateExistsResponse.getStatusLine().getStatusCode(), is(200)); + } + }); + + // TODO why does the test fail without this? relaoding isseu with the new approach? Make sure to write a unit test! + assertOK(adminClient().performRequest("PUT", ".watches")); } @After public void stopWatcher() throws Exception { assertBusy(() -> { - adminClient().performRequest("POST", "_xpack/watcher/_stop", Collections.emptyMap()); - Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); - ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); - String state = objectPath.evaluate("stats.0.watcher_state"); - assertThat(state, is("stopped")); + Response response = adminClient().performRequest("GET", "_xpack/watcher/stats", emptyMap()); + String state = ObjectPath.createFromResponse(response).evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + // all good here, we are done + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state"); + case "starting": + throw new AssertionError("waiting until starting state reached started state to stop"); + case "started": + Response stopResponse = adminClient().performRequest("POST", "/_xpack/watcher/_stop", emptyMap()); + boolean isAcknowledged = ObjectPath.createFromResponse(stopResponse).evaluate("acknowledged"); + assertThat(isAcknowledged, is(true)); + break; + default: + throw new AssertionError("unknown state[" + state + "]"); + } }); } @@ -99,18 +135,18 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); // input builder.startObject("input").startObject("http").startObject("request").field("host", host).field("port", port) - .field("path", "/_cluster/health") - .field("scheme", "http") - .startObject("auth").startObject("basic") - .field("username", TEST_ADMIN_USERNAME).field("password", TEST_ADMIN_PASSWORD) - .endObject().endObject() - .endObject().endObject().endObject(); + .field("path", "/_cluster/health") + .field("scheme", "http") + .startObject("auth").startObject("basic") + .field("username", TEST_ADMIN_USERNAME).field("password", TEST_ADMIN_PASSWORD) + .endObject().endObject() + .endObject().endObject().endObject(); // condition builder.startObject("condition").startObject("compare").startObject("ctx.payload.number_of_data_nodes").field("lt", 10) - .endObject().endObject().endObject(); + .endObject().endObject().endObject(); // actions builder.startObject("actions").startObject("log").startObject("logging").field("text", "executed").endObject().endObject() - .endObject(); + .endObject(); builder.endObject(); @@ -132,7 +168,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { private void indexWatch(String watchId, XContentBuilder builder) throws Exception { StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(), entity); + Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, emptyMap(), entity); assertOK(response); Map responseMap = entityAsMap(response); assertThat(responseMap, hasEntry("_id", watchId)); @@ -155,14 +191,14 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { builder.startObject(); 
builder.startObject("query").startObject("bool").startArray("must"); builder.startObject().startObject("term").startObject("watch_id").field("value", watchId).endObject().endObject() - .endObject(); + .endObject(); builder.endArray().endObject().endObject(); builder.startArray("sort").startObject().startObject("trigger_event.triggered_time").field("order", "desc").endObject() - .endObject().endArray(); + .endObject().endArray(); builder.endObject(); StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); - Response response = client().performRequest("POST", ".watcher-history-*/_search", Collections.emptyMap(), entity); + Response response = client().performRequest("POST", ".watcher-history-*/_search", emptyMap(), entity); ObjectPath objectPath = ObjectPath.createFromResponse(response); int totalHits = objectPath.evaluate("hits.total"); assertThat(totalHits, is(greaterThanOrEqualTo(1))); From 09b918545ddbf172aa05c600531fb75ea440ea0e Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 3 May 2018 08:48:49 +0100 Subject: [PATCH 30/30] Adds Eclipse config for xpack licence headers (#30299) Uses a filter on the copy task for the eclipse settings files to replace the token @@LICENSE_HEADER_TEXT@@ with the correct licence header from the new buildSrc/src/main/resources/license-headers directory --- build.gradle | 10 ++++++++++ .../eclipse.settings/org.eclipse.jdt.ui.prefs | 2 +- .../license-headers/elastic-license-header.txt | 5 +++++ .../license-headers/oss-license-header.txt | 18 ++++++++++++++++++ 4 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 buildSrc/src/main/resources/license-headers/elastic-license-header.txt create mode 100644 buildSrc/src/main/resources/license-headers/oss-license-header.txt diff --git a/build.gradle b/build.gradle index c538c0cb898..395e1f600c9 100644 --- a/build.gradle +++ b/build.gradle @@ -19,6 +19,7 @@ import org.apache.tools.ant.taskdefs.condition.Os +import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version @@ -406,10 +407,19 @@ allprojects { } } } + + File licenseHeaderFile; + if (eclipse.project.name.startsWith(':x-pack')) { + licenseHeaderFile = new File(project.rootDir, 'buildSrc/src/main/resources/license-headers/elastic-license-header.txt') + } else { + licenseHeaderFile = new File(project.rootDir, 'buildSrc/src/main/resources/license-headers/oss-license-header.txt') + } + String licenseHeader = licenseHeaderFile.getText('UTF-8').replace('\n', '\\\\n') task copyEclipseSettings(type: Copy) { // TODO: "package this up" for external builds from new File(project.rootDir, 'buildSrc/src/main/resources/eclipse.settings') into '.settings' + filter{ it.replaceAll('@@LICENSE_HEADER_TEXT@@', licenseHeader)} } // otherwise .settings is not nuked entirely task wipeEclipseSettings(type: Delete) { diff --git a/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.ui.prefs b/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.ui.prefs index 391a8715868..827a41bf634 100644 --- a/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.ui.prefs +++ b/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.ui.prefs @@ -3,4 +3,4 @@ formatter_settings_version=12 # Intellij IDEA import order org.eclipse.jdt.ui.importorder=;com;org;java;javax;\#; # License header -org.eclipse.jdt.ui.text.custom_code_templates= +org.eclipse.jdt.ui.text.custom_code_templates= diff 
--git a/buildSrc/src/main/resources/license-headers/elastic-license-header.txt b/buildSrc/src/main/resources/license-headers/elastic-license-header.txt new file mode 100644 index 00000000000..30371a1ef54 --- /dev/null +++ b/buildSrc/src/main/resources/license-headers/elastic-license-header.txt @@ -0,0 +1,5 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ \ No newline at end of file diff --git a/buildSrc/src/main/resources/license-headers/oss-license-header.txt b/buildSrc/src/main/resources/license-headers/oss-license-header.txt new file mode 100644 index 00000000000..d85645a12ef --- /dev/null +++ b/buildSrc/src/main/resources/license-headers/oss-license-header.txt @@ -0,0 +1,18 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ \ No newline at end of file
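
Note on the copy-task filtering used in the last patch: Gradle's `filter` on a `Copy` task runs line by line over each copied file, which is why the licence header is collapsed onto a single line before substitution. Below is a minimal, self-contained sketch of the same pattern, not the exact Elasticsearch build logic; the paths `license-header.txt` and `eclipse.settings` are hypothetical placeholders.

    // build.gradle (Groovy) -- a sketch of token replacement during a Copy task
    task copyEclipseSettings(type: Copy) {
        // Read the header once and escape newlines. The four backslashes become
        // two after Groovy string escaping, and replaceAll() collapses those two
        // to one, so the copied .prefs file ends up with a literal "\n" escape
        // that Eclipse decodes back into a newline.
        String licenseHeader = file('license-header.txt')   // hypothetical path
                .getText('UTF-8')
                .replace('\n', '\\\\n')
        from 'eclipse.settings'                             // hypothetical source dir
        into '.settings'
        // filter is invoked once per line of every file that gets copied
        filter { String line ->
            line.replaceAll('@@LICENSE_HEADER_TEXT@@', licenseHeader)
        }
    }

Escaping the header up front keeps the filter itself a plain single-line substitution: any `@@LICENSE_HEADER_TEXT@@` token in the copied settings files expands to the project-appropriate header without the filter ever having to emit multiple lines.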