From 731833cfc666f0a37276944086f8eb41e012da64 Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Wed, 25 Nov 2015 14:42:33 +0100
Subject: [PATCH 01/40] Fixes #14489

Do not load fields from _source when using the `fields` option.
Non-stored (non-existing) fields are ignored by the fields visitor when using the `fields` option.

Fixes #10783

Support the * wildcard to retrieve stored fields when using the `fields` option.
Supported pattern styles are "xxx*", "*xxx", "*xxx*" and "xxx*yyy".
---
 .../index/fieldvisitor/AllFieldsVisitor.java  |  37 ------
 .../fieldvisitor/CustomFieldsVisitor.java     |  25 +++-
 .../search/fetch/FetchPhase.java              | 120 +++++------------
 .../template/SimpleIndexTemplateIT.java       |   3 +-
 .../metrics/AbstractGeoTestCase.java          |   6 +-
 .../aggregations/metrics/TopHitsIT.java       |   5 +-
 docs/reference/migration/migrate_3_0.asciidoc |   4 +
 .../messy/tests/SearchFieldsTests.java        |  40 +++++-
 .../test/search/10_source_filtering.yaml      |   4 +-
 9 files changed, 95 insertions(+), 149 deletions(-)
 delete mode 100644 core/src/main/java/org/elasticsearch/index/fieldvisitor/AllFieldsVisitor.java

diff --git a/core/src/main/java/org/elasticsearch/index/fieldvisitor/AllFieldsVisitor.java b/core/src/main/java/org/elasticsearch/index/fieldvisitor/AllFieldsVisitor.java
deleted file mode 100644
index beb7de2c756..00000000000
--- a/core/src/main/java/org/elasticsearch/index/fieldvisitor/AllFieldsVisitor.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.index.fieldvisitor;
-
-import org.apache.lucene.index.FieldInfo;
-
-import java.io.IOException;
-
-/**
- */
-public class AllFieldsVisitor extends FieldsVisitor {
-
-    public AllFieldsVisitor() {
-        super(true);
-    }
-
-    @Override
-    public Status needsField(FieldInfo fieldInfo) throws IOException {
-        return Status.YES;
-    }
-}
diff --git a/core/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java b/core/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java
index 922a27c70ab..bd1fd69eb74 100644
--- a/core/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java
+++ b/core/src/main/java/org/elasticsearch/index/fieldvisitor/CustomFieldsVisitor.java
@@ -19,22 +19,32 @@
 package org.elasticsearch.index.fieldvisitor;
 
 import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.common.regex.Regex;
 
 import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
 import java.util.Set;
 
 /**
- * A field visitor that allows to load a selection of the stored fields.
+ * A field visitor that loads a selection of the stored fields by exact name or by pattern.
+ * Supported pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy".
  * The Uid field is always loaded.
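+ * Pattern matching is delegated to {@link Regex#simpleMatch(String, String)}.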
* The class is optimized for source loading as it is a common use case. */ public class CustomFieldsVisitor extends FieldsVisitor { private final Set fields; + private final List patterns; - public CustomFieldsVisitor(Set fields, boolean loadSource) { + public CustomFieldsVisitor(Set fields, List patterns, boolean loadSource) { super(loadSource); this.fields = fields; + this.patterns = patterns; + } + + public CustomFieldsVisitor(Set fields, boolean loadSource) { + this(fields, Collections.emptyList(), loadSource); } @Override @@ -42,7 +52,14 @@ public class CustomFieldsVisitor extends FieldsVisitor { if (super.needsField(fieldInfo) == Status.YES) { return Status.YES; } - - return fields.contains(fieldInfo.name) ? Status.YES : Status.NO; + if (fields.contains(fieldInfo.name)) { + return Status.YES; + } + for (String pattern : patterns) { + if (Regex.simpleMatch(pattern, fieldInfo.name)) { + return Status.YES; + } + } + return Status.NO; } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 46f97adfd6d..227141e4ddf 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -30,12 +30,12 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.text.StringAndBytesText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.fieldvisitor.AllFieldsVisitor; import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; import org.elasticsearch.index.fieldvisitor.FieldsVisitor; import org.elasticsearch.index.mapper.DocumentMapper; @@ -55,13 +55,7 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.xcontent.XContentFactory.contentBuilder; @@ -98,9 +92,7 @@ public class FetchPhase implements SearchPhase { public void execute(SearchContext context) { FieldsVisitor fieldsVisitor; Set fieldNames = null; - List extractFieldNames = null; - - boolean loadAllStored = false; + List fieldNamePatterns = null; if (!context.hasFieldNames()) { // no fields specified, default to return source if no explicit indication if (!context.hasScriptFields() && !context.hasFetchSourceContext()) { @@ -111,10 +103,6 @@ public class FetchPhase implements SearchPhase { fieldsVisitor = new FieldsVisitor(context.sourceRequested()); } else { for (String fieldName : context.fieldNames()) { - if (fieldName.equals("*")) { - loadAllStored = true; - continue; - } if (fieldName.equals(SourceFieldMapper.NAME)) { if (context.hasFetchSourceContext()) { context.fetchSourceContext().fetchSource(true); @@ -123,32 +111,28 @@ public class FetchPhase implements SearchPhase { } continue; } - MappedFieldType fieldType = 
context.smartNameFieldType(fieldName); - if (fieldType == null) { - // Only fail if we know it is a object field, missing paths / fields shouldn't fail. - if (context.getObjectMapper(fieldName) != null) { - throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field"); + if (Regex.isSimpleMatchPattern(fieldName)) { + if (fieldNamePatterns == null) { + fieldNamePatterns = new ArrayList<>(); + } + fieldNamePatterns.add(fieldName); + } else { + MappedFieldType fieldType = context.smartNameFieldType(fieldName); + if (fieldType == null) { + // Only fail if we know it is a object field, missing paths / fields shouldn't fail. + if (context.getObjectMapper(fieldName) != null) { + throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field"); + } } - } else if (fieldType.stored()) { if (fieldNames == null) { fieldNames = new HashSet<>(); } - fieldNames.add(fieldType.names().indexName()); - } else { - if (extractFieldNames == null) { - extractFieldNames = new ArrayList<>(); - } - extractFieldNames.add(fieldName); + fieldNames.add(fieldName); } } - if (loadAllStored) { - fieldsVisitor = new AllFieldsVisitor(); // load everything, including _source - } else if (fieldNames != null) { - boolean loadSource = extractFieldNames != null || context.sourceRequested(); - fieldsVisitor = new CustomFieldsVisitor(fieldNames, loadSource); - } else { - fieldsVisitor = new FieldsVisitor(extractFieldNames != null || context.sourceRequested()); - } + boolean loadSource = context.sourceRequested(); + fieldsVisitor = new CustomFieldsVisitor(fieldNames == null ? Collections.emptySet() : fieldNames, + fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns, loadSource); } InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()]; @@ -163,9 +147,9 @@ public class FetchPhase implements SearchPhase { try { int rootDocId = findRootDocumentIfNested(context, subReaderContext, subDocId); if (rootDocId != -1) { - searchHit = createNestedSearchHit(context, docId, subDocId, rootDocId, extractFieldNames, loadAllStored, fieldNames, subReaderContext); + searchHit = createNestedSearchHit(context, docId, subDocId, rootDocId, fieldNames, fieldNamePatterns, subReaderContext); } else { - searchHit = createSearchHit(context, fieldsVisitor, docId, subDocId, extractFieldNames, subReaderContext); + searchHit = createSearchHit(context, fieldsVisitor, docId, subDocId, subReaderContext); } } catch (IOException e) { throw ExceptionsHelper.convertToElastic(e); @@ -199,7 +183,7 @@ public class FetchPhase implements SearchPhase { return -1; } - private InternalSearchHit createSearchHit(SearchContext context, FieldsVisitor fieldsVisitor, int docId, int subDocId, List extractFieldNames, LeafReaderContext subReaderContext) { + private InternalSearchHit createSearchHit(SearchContext context, FieldsVisitor fieldsVisitor, int docId, int subDocId, LeafReaderContext subReaderContext) { loadStoredFields(context, subReaderContext, fieldsVisitor, subDocId); fieldsVisitor.postProcess(context.mapperService()); @@ -219,45 +203,24 @@ public class FetchPhase implements SearchPhase { typeText = documentMapper.typeText(); } InternalSearchHit searchHit = new InternalSearchHit(docId, fieldsVisitor.uid().id(), typeText, searchFields); - - // go over and extract fields that are not mapped / stored + // Set _source if requested. 
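+        // Note: unlike the previous implementation, fields requested via the `fields`
+        // option are no longer extracted from _source at this point; hits expose only
+        // the stored fields collected by the fields visitor above.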
SourceLookup sourceLookup = context.lookup().source(); sourceLookup.setSegmentAndDocument(subReaderContext, subDocId); if (fieldsVisitor.source() != null) { sourceLookup.setSource(fieldsVisitor.source()); } - if (extractFieldNames != null) { - for (String extractFieldName : extractFieldNames) { - List values = context.lookup().source().extractRawValues(extractFieldName); - if (!values.isEmpty()) { - if (searchHit.fieldsOrNull() == null) { - searchHit.fields(new HashMap(2)); - } - - SearchHitField hitField = searchHit.fields().get(extractFieldName); - if (hitField == null) { - hitField = new InternalSearchHitField(extractFieldName, new ArrayList<>(2)); - searchHit.fields().put(extractFieldName, hitField); - } - for (Object value : values) { - hitField.values().add(value); - } - } - } - } - return searchHit; } - private InternalSearchHit createNestedSearchHit(SearchContext context, int nestedTopDocId, int nestedSubDocId, int rootSubDocId, List extractFieldNames, boolean loadAllStored, Set fieldNames, LeafReaderContext subReaderContext) throws IOException { + private InternalSearchHit createNestedSearchHit(SearchContext context, int nestedTopDocId, int nestedSubDocId, int rootSubDocId, Set fieldNames, List fieldNamePatterns, LeafReaderContext subReaderContext) throws IOException { // Also if highlighting is requested on nested documents we need to fetch the _source from the root document, // otherwise highlighting will attempt to fetch the _source from the nested doc, which will fail, // because the entire _source is only stored with the root document. - final FieldsVisitor rootFieldsVisitor = new FieldsVisitor(context.sourceRequested() || extractFieldNames != null || context.highlight() != null); + final FieldsVisitor rootFieldsVisitor = new FieldsVisitor(context.sourceRequested() || context.highlight() != null); loadStoredFields(context, subReaderContext, rootFieldsVisitor, rootSubDocId); rootFieldsVisitor.postProcess(context.mapperService()); - Map searchFields = getSearchFields(context, nestedSubDocId, loadAllStored, fieldNames, subReaderContext); + Map searchFields = getSearchFields(context, nestedSubDocId, fieldNames, fieldNamePatterns, subReaderContext); DocumentMapper documentMapper = context.mapperService().documentMapper(rootFieldsVisitor.uid().type()); SourceLookup sourceLookup = context.lookup().source(); sourceLookup.setSegmentAndDocument(subReaderContext, nestedSubDocId); @@ -299,39 +262,14 @@ public class FetchPhase implements SearchPhase { } InternalSearchHit searchHit = new InternalSearchHit(nestedTopDocId, rootFieldsVisitor.uid().id(), documentMapper.typeText(), nestedIdentity, searchFields); - if (extractFieldNames != null) { - for (String extractFieldName : extractFieldNames) { - List values = context.lookup().source().extractRawValues(extractFieldName); - if (!values.isEmpty()) { - if (searchHit.fieldsOrNull() == null) { - searchHit.fields(new HashMap(2)); - } - - SearchHitField hitField = searchHit.fields().get(extractFieldName); - if (hitField == null) { - hitField = new InternalSearchHitField(extractFieldName, new ArrayList<>(2)); - searchHit.fields().put(extractFieldName, hitField); - } - for (Object value : values) { - hitField.values().add(value); - } - } - } - } - return searchHit; } - private Map getSearchFields(SearchContext context, int nestedSubDocId, boolean loadAllStored, Set fieldNames, LeafReaderContext subReaderContext) { + private Map getSearchFields(SearchContext context, int nestedSubDocId, Set fieldNames, List fieldNamePatterns, LeafReaderContext 
subReaderContext) { Map searchFields = null; if (context.hasFieldNames() && !context.fieldNames().isEmpty()) { - FieldsVisitor nestedFieldsVisitor = null; - if (loadAllStored) { - nestedFieldsVisitor = new AllFieldsVisitor(); - } else if (fieldNames != null) { - nestedFieldsVisitor = new CustomFieldsVisitor(fieldNames, false); - } - + FieldsVisitor nestedFieldsVisitor = new CustomFieldsVisitor(fieldNames == null ? Collections.emptySet() : fieldNames, + fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns, false); if (nestedFieldsVisitor != null) { loadStoredFields(context, subReaderContext, nestedFieldsVisitor, nestedSubDocId); nestedFieldsVisitor.postProcess(context.mapperService()); diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index b9da71d75aa..ca2025ced1b 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -118,7 +118,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { assertHitCount(searchResponse, 1); assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1")); - assertThat(searchResponse.getHits().getAt(0).field("field2").value().toString(), equalTo("value 2")); // this will still be loaded because of the source feature + // field2 is not stored. + assertThat(searchResponse.getHits().getAt(0).field("field2"), nullValue()); client().prepareIndex("text_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java index f2acc7c83a8..390e0cf5473 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -154,7 +154,7 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase { .endObject())); } assertAcked(prepareCreate(HIGH_CARD_IDX_NAME).setSettings(Settings.builder().put("number_of_shards", 2)) - .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=string,index=not_analyzed")); + .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long,store=true", "tag", "type=string,index=not_analyzed")); for (int i = 0; i < 2000; i++) { singleVal = singleValues[i % numUniqueGeoPoints]; @@ -196,8 +196,8 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase { SearchHitField hitField = searchHit.field(NUMBER_FIELD_NAME); assertThat("Hit " + i + " has wrong number of values", hitField.getValues().size(), equalTo(1)); - Integer value = hitField.getValue(); - assertThat("Hit " + i + " has wrong value", value, equalTo(i)); + Long value = hitField.getValue(); + assertThat("Hit " + i + " has wrong value", value.intValue(), equalTo(i)); } assertThat(totalHits, equalTo(2000l)); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index f9c8c53bed3..65e71fe9c05 100644 --- 
a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -532,8 +532,8 @@ public class TopHitsIT extends ESIntegTestCase { topHits("hits").setSize(1) .highlighter(new HighlightBuilder().field("text")) .setExplain(true) - .addFieldDataField("field1") .addField("text") + .addFieldDataField("field1") .addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap())) .setFetchSource("text", null) .setVersion(true) @@ -569,8 +569,7 @@ public class TopHitsIT extends ESIntegTestCase { SearchHitField field = hit.field("field1"); assertThat(field.getValue().toString(), equalTo("5")); - field = hit.field("text"); - assertThat(field.getValue().toString(), equalTo("some text to entertain")); + assertThat(hit.getSource().get("text").toString(), equalTo("some text to entertain")); field = hit.field("script"); assertThat(field.getValue().toString(), equalTo("5")); diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index 08bcb380985..822d8864f71 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -468,3 +468,7 @@ response is output by default. Finally, the API for org.elasticsearch.monitor.os.OsStats has changed. The `getLoadAverage` method has been removed. The value for this can now be obtained from `OsStats.Cpu#getLoadAverage`. Additionally, the recent CPU usage can be obtained from `OsStats.Cpu#getPercent`. + +=== Fields option +Only stored fields are retrievable with this option. +The fields option won't be able to load non stored fields from _source anymore. diff --git a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java b/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java index 632f93d870f..8153d207b7c 100644 --- a/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java +++ b/plugins/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java @@ -108,12 +108,12 @@ public class SearchFieldsTests extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1")); - // field2 is not stored, check that it gets extracted from source + // field2 is not stored, check that it is not extracted from source. 
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field2").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); - assertThat(searchResponse.getHits().getAt(0).fields().get("field2").value().toString(), equalTo("value2")); + assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(0)); + assertThat(searchResponse.getHits().getAt(0).fields().get("field2"), nullValue()); searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field3").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l)); @@ -121,6 +121,34 @@ public class SearchFieldsTests extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*3").execute().actionGet(); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l)); + assertThat(searchResponse.getHits().hits().length, equalTo(1)); + assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); + assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); + + + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*3").addField("field1").addField("field2").execute().actionGet(); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l)); + assertThat(searchResponse.getHits().hits().length, equalTo(1)); + assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2)); + assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); + assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1")); + + + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field*").execute().actionGet(); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l)); + assertThat(searchResponse.getHits().hits().length, equalTo(1)); + assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2)); + assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); + assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1")); + + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("f*3").execute().actionGet(); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l)); + assertThat(searchResponse.getHits().hits().length, equalTo(1)); + assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); + assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); @@ -439,8 +467,7 @@ public class SearchFieldsTests extends ESIntegTestCase { .get(); assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); - assertThat(searchResponse.getHits().getAt(0).field("field1").isMetadataField(), equalTo(false)); - 
assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value")); + assertThat(searchResponse.getHits().getAt(0).field("field1"), nullValue()); assertThat(searchResponse.getHits().getAt(0).field("_routing").isMetadataField(), equalTo(true)); assertThat(searchResponse.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1")); } @@ -647,8 +674,7 @@ public class SearchFieldsTests extends ESIntegTestCase { Map fields = response.getHits().getAt(0).getFields(); - assertThat(fields.get("field1").isMetadataField(), equalTo(false)); - assertThat(fields.get("field1").getValue().toString(), equalTo("value")); + assertThat(fields.get("field1"), nullValue()); assertThat(fields.get("_routing").isMetadataField(), equalTo(true)); assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); assertThat(fields.get("_timestamp").isMetadataField(), equalTo(true)); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml index b49d659cec3..e0ac2aea2df 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml @@ -79,7 +79,6 @@ body: fields: [ include.field2 ] query: { match_all: {} } - - match: { hits.hits.0.fields: { include.field2 : [v2] }} - is_false: hits.hits.0._source - do: @@ -87,7 +86,7 @@ body: fields: [ include.field2, _source ] query: { match_all: {} } - - match: { hits.hits.0.fields: { include.field2 : [v2] }} + - match: { hits.hits.0._source.include.field2: v2 } - is_true: hits.hits.0._source @@ -95,4 +94,3 @@ search: fielddata_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } - From a0fe93fa6798e6b919767d0222d59b91d1864c17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 30 Nov 2015 11:07:09 +0100 Subject: [PATCH 02/40] Tests: Correction in AbstractShapeBuilderTestCase Removed check that two shapes that are different according to equals() have different hashCode since that is not required by the contract of hashCode. 
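For context: the Object contract only requires that equal objects report equal
hash codes; distinct objects are free to collide. A minimal sketch with a
hypothetical Point class (not part of this change):

    final class Point {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }

        @Override
        public boolean equals(Object o) {
            return o instanceof Point && ((Point) o).x == x && ((Point) o).y == y;
        }

        // Coarse but contract-compliant: equal points always agree, while distinct
        // points such as (1, 2) and (2, 1) may share a hash code, which is why
        // mutate(shape) may legally produce the same hashCode as the original.
        @Override
        public int hashCode() { return 31 * (x + y); }
    }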
---
 .../common/geo/builders/AbstractShapeBuilderTestCase.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java
index d1f24bfb7d9..f15a731e86e 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java
@@ -111,7 +111,6 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte
         assertThat("same shape's hashcode returns different values if called multiple times", firstShape.hashCode(), equalTo(firstShape.hashCode()));
         assertThat("different shapes should not be equal", mutate(firstShape), not(equalTo(firstShape)));
-        assertThat("different shapes should have different hashcode", mutate(firstShape).hashCode(), not(equalTo(firstShape.hashCode())));
 
         SB secondShape = copyShape(firstShape);
         assertTrue("shape is not equal to self", secondShape.equals(secondShape));

From a4e22b44e49ac01d9011d50442c5c929597204a2 Mon Sep 17 00:00:00 2001
From: David Pilato
Date: Mon, 30 Nov 2015 11:47:17 +0100
Subject: [PATCH 03/40] add java-api doc about shading / embedding

Two new sections added:

 * Dealing with JAR dependency conflicts
 * Embedding jar with dependencies

Closes #15071.
---
 docs/java-api/index.asciidoc | 56 ++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)

diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc
index 37f56df0f9f..f976ebc2426 100644
--- a/docs/java-api/index.asciidoc
+++ b/docs/java-api/index.asciidoc
@@ -34,6 +34,62 @@ For example, you can define the latest version in your `pom.xml` file:
 --------------------------------------------------
 
+== Dealing with JAR dependency conflicts
+
+If you want to use Elasticsearch in your Java application, you may have to deal with version conflicts with third party
+dependencies like Guava and Joda. For instance, perhaps Elasticsearch uses Joda 2.8, while your code uses Joda 2.1.
+
+You have two choices:
+
+* The simplest solution is to upgrade. Newer module versions are likely to have fixed old bugs.
+The further behind you fall, the harder it will be to upgrade later. Of course, it is possible that you are using a
+third party dependency that in turn depends on an outdated version of a package, which prevents you from upgrading.
+
+* The second option is to relocate the troublesome dependencies and to shade them either with your own application
+or with Elasticsearch and any plugins needed by the Elasticsearch client.
+
+The https://www.elastic.co/blog/to-shade-or-not-to-shade["To shade or not to shade" blog post] describes
+all the steps for doing so.
+
+== Embedding jar with dependencies
+
+If you want to create a single jar containing your application and all dependencies, you should not
+use `maven-assembly-plugin` for that because it cannot deal with the `META-INF/services` structure, which is
+required by Lucene jars.
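+
+For background, Lucene jars register their codec and format implementations in
+`META-INF/services` descriptor files that are read at runtime. A rough sketch of
+that lookup (assuming Lucene is on the classpath):
+
+[source,java]
+--------------------------------------------------
+import java.util.ServiceLoader;
+
+import org.apache.lucene.codecs.Codec;
+
+public class CodecListing {
+    public static void main(String[] args) {
+        // Every jar contributes entries to META-INF/services/org.apache.lucene.codecs.Codec;
+        // a merged jar must concatenate these files, otherwise codecs silently vanish.
+        for (Codec codec : ServiceLoader.load(Codec.class)) {
+            System.out.println(codec.getName());
+        }
+    }
+}
+--------------------------------------------------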
+
+Instead, you can use `maven-shade-plugin` and configure it as follows:
+
+[source,xml]
+--------------------------------------------------
+<plugin>
+    <groupId>org.apache.maven.plugins</groupId>
+    <artifactId>maven-shade-plugin</artifactId>
+    <version>2.4.1</version>
+    <executions>
+        <execution>
+            <phase>package</phase>
+            <goals><goal>shade</goal></goals>
+        </execution>
+    </executions>
+</plugin>
+--------------------------------------------------
+
+Note that if you have a `main` class you want to automatically call when running `java -jar yourjar.jar`, just add
+it to the `transformers`:
+
+[source,xml]
+--------------------------------------------------
+<transformers>
+    <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+        <mainClass>org.elasticsearch.demo.Generate</mainClass>
+    </transformer>
+</transformers>
+--------------------------------------------------
+
+
 == Deploying in JBoss EAP6 module
 
 Elasticsearch and Lucene classes need to be in the same JBoss module.

From 18e969e161e6c18a80658ca5fe1a2b1f8a9a187c Mon Sep 17 00:00:00 2001
From: Jochen Schalanda
Date: Mon, 30 Nov 2015 14:36:52 +0100
Subject: [PATCH 04/40] Add simple EditorConfig

The EditorConfig file applies the formatting rules described in CONTRIBUTING.md.
---
 .editorconfig | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 .editorconfig

diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000000..9d4bfbf55d3
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,10 @@
+# EditorConfig: http://editorconfig.org/
+
+root = true
+
+[*.java]
+charset = utf-8
+indent_style = space
+indent_size = 4
+trim_trailing_whitespace = true
+insert_final_newline = true

From f0f89e708d332cd15779ae85b111b08c08694f3d Mon Sep 17 00:00:00 2001
From: Boaz Leskes
Date: Fri, 23 Oct 2015 21:11:15 +0200
Subject: [PATCH 05/40] Split cluster state update tasks into roles

This commit splits cluster state update tasks into roles. Those roles are:

 - task info
 - task configuration
 - task executor
 - task listener

All tasks that have the same executor will be executed in batches. This removes
the need for the local batching previously implemented in MetaDataMappingService.
Additionally, this commit reintroduces batching on mapping update calls.
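As a sketch of the batching contract (the interface names below are the ones this
commit introduces; RefreshTask and applyRefresh stand in for real task types):

    import java.util.List;

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.ClusterStateTaskExecutor;

    // Hypothetical executor: every queued task that names this executor instance is
    // handed over in a single execute(...) call, producing one cluster state update
    // (and one change event) for the whole batch.
    class BatchedRefreshExecutor implements ClusterStateTaskExecutor<RefreshTask> {
        @Override
        public Result execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
            ClusterState newState = currentState;
            for (RefreshTask task : tasks) {
                newState = applyRefresh(newState, task); // hypothetical helper
            }
            return new Result(newState, tasks.size());
        }

        private ClusterState applyRefresh(ClusterState state, RefreshTask task) {
            return state; // placeholder for the real mapping-refresh logic
        }
    }

Submission goes through the new five-argument
submitStateUpdateTask(source, task, config, executor, listener); tasks submitted
with the same executor instance are coalesced into one batch.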
Relates #13627 --- .../health/TransportClusterHealthAction.java | 2 +- .../TransportClusterRerouteAction.java | 2 +- .../TransportClusterUpdateSettingsAction.java | 6 +- .../AckedClusterStateTaskListener.java | 54 ++ .../cluster/AckedClusterStateUpdateTask.java | 8 +- .../elasticsearch/cluster/ClusterService.java | 11 +- .../cluster/ClusterStateTaskConfig.java | 64 +++ .../cluster/ClusterStateTaskExecutor.java | 53 ++ .../cluster/ClusterStateTaskListener.java | 48 ++ .../cluster/ClusterStateUpdateTask.java | 49 +- .../action/shard/ShardStateAction.java | 10 +- .../metadata/MetaDataCreateIndexService.java | 12 +- .../metadata/MetaDataDeleteIndexService.java | 3 +- .../metadata/MetaDataIndexAliasesService.java | 2 +- .../metadata/MetaDataIndexStateService.java | 4 +- .../MetaDataIndexTemplateService.java | 5 +- .../metadata/MetaDataMappingService.java | 531 ++++++++---------- .../MetaDataUpdateSettingsService.java | 19 +- .../cluster/routing/RoutingService.java | 2 +- .../service/InternalClusterService.java | 476 +++++++++------- .../discovery/zen/NodeJoinController.java | 10 +- .../discovery/zen/ZenDiscovery.java | 16 +- .../cluster/ClusterServiceIT.java | 30 +- .../DiscoveryWithServiceDisruptionsIT.java | 41 +- .../store/IndicesStoreIntegrationIT.java | 16 +- .../AbstractSnapshotIntegTestCase.java | 9 +- .../test/cluster/NoopClusterService.java | 5 +- .../test/cluster/TestClusterService.java | 53 +- .../BlockClusterStateProcessing.java | 2 +- .../SlowClusterStateProcessing.java | 2 +- 30 files changed, 856 insertions(+), 689 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java create mode 100644 core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java create mode 100644 core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java create mode 100644 core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 0f0b2680f61..f1cc59ba760 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -74,7 +74,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) { if (request.waitForEvents() != null) { final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis(); - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) { @Override public ClusterState execute(ClusterState currentState) { return currentState; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index f916c37aec2..d7ec84fb7a5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -68,7 +68,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction listener) { - clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask(request, listener) { + clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) { private volatile ClusterState clusterStateToSend; private volatile RoutingExplanations explanations; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index b0934781dcd..73d14a2bb11 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -91,7 +91,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct final Settings.Builder transientUpdates = Settings.settingsBuilder(); final Settings.Builder persistentUpdates = Settings.settingsBuilder(); - clusterService.submitStateUpdateTask("cluster_update_settings", Priority.IMMEDIATE, new AckedClusterStateUpdateTask(request, listener) { + clusterService.submitStateUpdateTask("cluster_update_settings", + new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) { private volatile boolean changed = false; @@ -132,7 +133,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct // in the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible // to the components until the ClusterStateListener instances have been invoked, but are visible after // the first update task has been completed. - clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask(request, listener) { + clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override public boolean mustAck(DiscoveryNode discoveryNode) { diff --git a/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java b/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java new file mode 100644 index 00000000000..cdd9b2204ff --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; + +public interface AckedClusterStateTaskListener extends ClusterStateTaskListener { + + /** + * Called to determine which nodes the acknowledgement is expected from + * + * @param discoveryNode a node + * @return true if the node is expected to send ack back, false otherwise + */ + boolean mustAck(DiscoveryNode discoveryNode); + + /** + * Called once all the nodes have acknowledged the cluster state update request. Must be + * very lightweight execution, since it gets executed on the cluster service thread. + * + * @param t optional error that might have been thrown + */ + void onAllNodesAcked(@Nullable Throwable t); + + /** + * Called once the acknowledgement timeout defined by + * {@link AckedClusterStateUpdateTask#ackTimeout()} has expired + */ + void onAckTimeout(); + + /** + * Acknowledgement timeout, maximum time interval to wait for acknowledgements + */ + TimeValue ackTimeout(); + +} diff --git a/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java index 21c6cd5032a..b833f6e1879 100644 --- a/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java +++ b/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java @@ -22,18 +22,24 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ack.AckedRequest; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.unit.TimeValue; /** * An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when * all the nodes have acknowledged a cluster state update request */ -public abstract class AckedClusterStateUpdateTask extends ClusterStateUpdateTask { +public abstract class AckedClusterStateUpdateTask extends ClusterStateUpdateTask implements AckedClusterStateTaskListener { private final ActionListener listener; private final AckedRequest request; protected AckedClusterStateUpdateTask(AckedRequest request, ActionListener listener) { + this(Priority.NORMAL, request, listener); + } + + protected AckedClusterStateUpdateTask(Priority priority, AckedRequest request, ActionListener listener) { + super(priority); this.listener = listener; this.request = request; } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/ClusterService.java index 8a3a287bac8..d3985bd2e78 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterService.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.unit.TimeValue; @@ -101,12 +100,16 @@ public interface ClusterService extends LifecycleComponent { void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener); /** - * Submits a task that will update the cluster state. + * Submits a task that will update the cluster state, using the given config. 
result will communicated + * to the given listener */ - void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask); + void submitStateUpdateTask(final String source, final T task, + final ClusterStateTaskConfig config, + final ClusterStateTaskExecutor executor, + final ClusterStateTaskListener listener); /** - * Submits a task that will update the cluster state (the task has a default priority of {@link Priority#NORMAL}). + * Submits a task that will update the cluster state; */ void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java new file mode 100644 index 00000000000..662095798af --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.unit.TimeValue; + +public interface ClusterStateTaskConfig { + + /** + * If the cluster state update task wasn't processed by the provided timeout, call + * {@link ClusterStateTaskListener#onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default). + */ + @Nullable + TimeValue timeout(); + + Priority priority(); + + static ClusterStateTaskConfig build(Priority priority) { + return new Basic(priority, null); + } + + static ClusterStateTaskConfig build(Priority priority, TimeValue timeout) { + return new Basic(priority, timeout); + } + + + class Basic implements ClusterStateTaskConfig { + final TimeValue timeout; + final Priority priority; + + public Basic(Priority priority, TimeValue timeout) { + this.timeout = timeout; + this.priority = priority; + } + + @Override + public TimeValue timeout() { + return timeout; + } + + @Override + public Priority priority() { + return priority; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java new file mode 100644 index 00000000000..861b924c52e --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster; + +import java.util.Arrays; +import java.util.List; + +public interface ClusterStateTaskExecutor { + /** + * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state + * should be changed. + */ + Result execute(ClusterState currentState, List tasks) throws Exception; + + /** + * indicates whether this task should only run if current node is master + */ + default boolean runOnlyOnMaster() { + return true; + } + + class Result { + final public ClusterState resultingState; + final public List failures; + + public Result(ClusterState resultingState, int numberOfTasks) { + this.resultingState = resultingState; + failures = Arrays.asList(new Throwable[numberOfTasks]); + } + + public Result(ClusterState resultingState, List failures) { + this.resultingState = resultingState; + this.failures = failures; + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java new file mode 100644 index 00000000000..16945d91971 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster; + +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; + +import java.util.List; + +public interface ClusterStateTaskListener { + + /** + * A callback called when execute fails. + */ + void onFailure(String source, Throwable t); + + /** + * called when the task was rejected because the local node is no longer master + */ + default void onNoLongerMaster(String source) { + onFailure(source, new EsRejectedExecutionException("no longer master. source: [" + source + "]")); + } + + /** + * Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} have been processed + * properly by all listeners. 
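+     * The default implementation is a no-op.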
+ */ + default void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + } + + ; + +} diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index 7fef94d5c17..17c4635c7de 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -20,13 +20,31 @@ package org.elasticsearch.cluster; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; + +import java.util.List; /** * A task that can update the cluster state. */ -abstract public class ClusterStateUpdateTask { +abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { + + final private Priority priority; + + public ClusterStateUpdateTask() { + this(Priority.NORMAL); + } + + public ClusterStateUpdateTask(Priority priority) { + this.priority = priority; + } + + @Override + final public Result execute(ClusterState currentState, List tasks) throws Exception { + ClusterState result = execute(currentState); + return new Result(result, tasks.size()); + } /** * Update the cluster state based on the current state. Return the *same instance* if no state @@ -39,28 +57,6 @@ abstract public class ClusterStateUpdateTask { */ abstract public void onFailure(String source, Throwable t); - - /** - * indicates whether this task should only run if current node is master - */ - public boolean runOnlyOnMaster() { - return true; - } - - /** - * called when the task was rejected because the local node is no longer master - */ - public void onNoLongerMaster(String source) { - onFailure(source, new NotMasterException("no longer master. source: [" + source + "]")); - } - - /** - * Called when the result of the {@link #execute(ClusterState)} have been processed - * properly by all listeners. - */ - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - } - /** * If the cluster state update task wasn't processed by the provided timeout, call * {@link #onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default). 
@@ -70,5 +66,8 @@ abstract public class ClusterStateUpdateTask { return null; } - + @Override + public Priority priority() { + return priority; + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 83897baa50d..1b43a33627b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -144,7 +144,8 @@ public class ShardStateAction extends AbstractComponent { private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) { logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); failedShardQueue.add(shardRoutingEntry); - clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", Priority.HIGH, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", + new ClusterStateUpdateTask(Priority.HIGH) { @Override public ClusterState execute(ClusterState currentState) { @@ -198,8 +199,13 @@ public class ShardStateAction extends AbstractComponent { // process started events as fast as possible, to make shards available startedShardsQueue.add(shardRoutingEntry); - clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", Priority.URGENT, + clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", new ClusterStateUpdateTask() { + @Override + public Priority priority() { + return Priority.URGENT; + } + @Override public ClusterState execute(ClusterState currentState) { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 5b83870cc3c..d3ba811a6e5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -170,12 +170,12 @@ public class MetaDataCreateIndexService extends AbstractComponent { updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX); request.settings(updatedSettingsBuilder.build()); - clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", Priority.URGENT, new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { - return new ClusterStateUpdateResponse(acknowledged); - } + clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { + @Override + protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { + return new ClusterStateUpdateResponse(acknowledged); + } @Override public ClusterState execute(ClusterState currentState) throws Exception { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java 
b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index fc87bae6507..54c014fb4ed 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -39,7 +39,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; import java.util.Collection; -import java.util.Locale; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -71,7 +70,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { Collection indices = Arrays.asList(request.indices); final DeleteIndexListener listener = new DeleteIndexListener(userListener); - clusterService.submitStateUpdateTask("delete-index " + indices, Priority.URGENT, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) { @Override public TimeValue timeout() { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 80ff68e6cf9..b13f9711bef 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -62,7 +62,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { } public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener listener) { - clusterService.submitStateUpdateTask("index-aliases", Priority.URGENT, new AckedClusterStateUpdateTask(request, listener) { + clusterService.submitStateUpdateTask("index-aliases", new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { return new ClusterStateUpdateResponse(acknowledged); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index b1c9f0749b0..1fa1b702f66 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -76,7 +76,7 @@ public class MetaDataIndexStateService extends AbstractComponent { } final String indicesAsString = Arrays.toString(request.indices()); - clusterService.submitStateUpdateTask("close-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask(request, listener) { + clusterService.submitStateUpdateTask("close-indices " + indicesAsString, new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { return new ClusterStateUpdateResponse(acknowledged); @@ -140,7 +140,7 @@ public class MetaDataIndexStateService extends AbstractComponent { } final String indicesAsString = Arrays.toString(request.indices()); - clusterService.submitStateUpdateTask("open-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask(request, listener) { + clusterService.submitStateUpdateTask("open-indices " + indicesAsString, new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected 
ClusterStateUpdateResponse newResponse(boolean acknowledged) { return new ClusterStateUpdateResponse(acknowledged); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 13823e8ebdd..3d7d19b27b9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -56,7 +56,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent { } public void removeTemplates(final RemoveRequest request, final RemoveListener listener) { - clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", Priority.URGENT, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", new ClusterStateUpdateTask(Priority.URGENT) { @Override public TimeValue timeout() { @@ -143,7 +143,8 @@ public class MetaDataIndexTemplateService extends AbstractComponent { } final IndexTemplateMetaData template = templateBuilder.build(); - clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", + new ClusterStateUpdateTask(Priority.URGENT) { @Override public TimeValue timeout() { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 725f06d9fae..215dde061db 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -22,17 +22,17 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; @@ -44,6 +44,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.percolator.PercolatorService; +import java.io.IOException; import java.util.*; /** * Service responsible for submitting mapping changes @@ -53,13 +54,11 @@ public class MetaDataMappingService extends AbstractComponent { private final ClusterService clusterService; private 
final IndicesService indicesService; - // the mutex protect all the refreshOrUpdate variables! - private final Object refreshOrUpdateMutex = new Object(); - private final List refreshOrUpdateQueue = new ArrayList<>(); - private long refreshOrUpdateInsertOrder; - private long refreshOrUpdateProcessedInsertOrder; + final ClusterStateTaskExecutor refreshExectuor = new RefreshTaskExecutor(); + final ClusterStateTaskExecutor putMappingExecutor = new PutMappingExecutor(); private final NodeServicesProvider nodeServicesProvider; + @Inject public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) { super(settings); @@ -68,37 +67,23 @@ public class MetaDataMappingService extends AbstractComponent { this.nodeServicesProvider = nodeServicesProvider; } - static class MappingTask { + static class RefreshTask { final String index; final String indexUUID; - - MappingTask(String index, final String indexUUID) { - this.index = index; - this.indexUUID = indexUUID; - } - } - - static class RefreshTask extends MappingTask { final String[] types; RefreshTask(String index, final String indexUUID, String[] types) { - super(index, indexUUID); + this.index = index; + this.indexUUID = indexUUID; this.types = types; } } - static class UpdateTask extends MappingTask { - final String type; - final CompressedXContent mappingSource; - final String nodeId; // null fr unknown - final ActionListener listener; - - UpdateTask(String index, String indexUUID, String type, CompressedXContent mappingSource, String nodeId, ActionListener listener) { - super(index, indexUUID); - this.type = type; - this.mappingSource = mappingSource; - this.nodeId = nodeId; - this.listener = listener; + class RefreshTaskExecutor implements ClusterStateTaskExecutor { + @Override + public Result execute(ClusterState currentState, List tasks) throws Exception { + ClusterState newClusterState = executeRefresh(currentState, tasks); + return new Result(newClusterState, tasks.size()); } } @@ -107,39 +92,19 @@ public class MetaDataMappingService extends AbstractComponent { * as possible so we won't create the same index all the time for example for the updates on the same mapping * and generate a single cluster change event out of all of those. 
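The RefreshTask/RefreshTaskExecutor pair above is the heart of this change: callers enqueue small task objects, and the cluster service later hands the executor the whole pending batch, so many queued refreshes collapse into a single cluster state transition. A minimal, self-contained sketch of that shape; all types here are simplified stand-ins, not the real Elasticsearch classes:

    import java.util.ArrayList;
    import java.util.List;

    // Stand-in types; the real ClusterStateTaskExecutor/ClusterState live in
    // org.elasticsearch.cluster and carry far more state than this.
    final class BatchingSketch {
        // immutable state value with a version counter, like ClusterState
        record State(long version) {}

        // the executor sees the whole pending batch in one call
        interface TaskExecutor<T> {
            State execute(State current, List<T> tasks);
        }

        record RefreshTask(String index, String[] types) {}

        public static void main(String[] args) {
            TaskExecutor<RefreshTask> refreshExecutor = (current, tasks) -> {
                // one pass over the batch -> one new state, one version bump
                System.out.println("processing " + tasks.size() + " refresh tasks");
                return new State(current.version() + 1);
            };

            List<RefreshTask> batch = new ArrayList<>();
            batch.add(new RefreshTask("idx1", new String[] {"type1"}));
            batch.add(new RefreshTask("idx2", new String[] {"type2"}));

            // two queued tasks, a single state transition
            State next = refreshExecutor.execute(new State(1), batch);
            System.out.println("new state version: " + next.version());
        }
    }

The payoff is that N queued tasks cost one state recomputation and one publish instead of N, which is exactly what the removed insert-order/mutex bookkeeping was approximating by hand.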
*/ - Tuple> executeRefreshOrUpdate(final ClusterState currentState, final long insertionOrder) throws Exception { - final List allTasks = new ArrayList<>(); - - synchronized (refreshOrUpdateMutex) { - if (refreshOrUpdateQueue.isEmpty()) { - return Tuple.tuple(currentState, allTasks); - } - - // we already processed this task in a bulk manner in a previous cluster event, simply ignore - // it so we will let other tasks get in and processed ones, we will handle the queued ones - // later on in a subsequent cluster state event - if (insertionOrder < refreshOrUpdateProcessedInsertOrder) { - return Tuple.tuple(currentState, allTasks); - } - - allTasks.addAll(refreshOrUpdateQueue); - refreshOrUpdateQueue.clear(); - - refreshOrUpdateProcessedInsertOrder = refreshOrUpdateInsertOrder; - } - + ClusterState executeRefresh(final ClusterState currentState, final List allTasks) throws Exception { if (allTasks.isEmpty()) { - return Tuple.tuple(currentState, allTasks); + return currentState; } // break down to tasks per index, so we can optimize the on demand index service creation // to only happen for the duration of a single index processing of its respective events - Map> tasksPerIndex = new HashMap<>(); - for (MappingTask task : allTasks) { + Map> tasksPerIndex = new HashMap<>(); + for (RefreshTask task : allTasks) { if (task.index == null) { logger.debug("ignoring a mapping task of type [{}] with a null index.", task); } - List indexTasks = tasksPerIndex.get(task.index); + List indexTasks = tasksPerIndex.get(task.index); if (indexTasks == null) { indexTasks = new ArrayList<>(); tasksPerIndex.put(task.index, indexTasks); @@ -150,7 +115,7 @@ public class MetaDataMappingService extends AbstractComponent { boolean dirty = false; MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); - for (Map.Entry> entry : tasksPerIndex.entrySet()) { + for (Map.Entry> entry : tasksPerIndex.entrySet()) { String index = entry.getKey(); IndexMetaData indexMetaData = mdBuilder.get(index); if (indexMetaData == null) { @@ -160,9 +125,9 @@ public class MetaDataMappingService extends AbstractComponent { } // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep // the latest (based on order) update mapping one per node - List allIndexTasks = entry.getValue(); - List tasks = new ArrayList<>(); - for (MappingTask task : allIndexTasks) { + List allIndexTasks = entry.getValue(); + List tasks = new ArrayList<>(); + for (RefreshTask task : allIndexTasks) { if (!indexMetaData.isSameUUID(task.indexUUID)) { logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task); continue; @@ -178,12 +143,8 @@ public class MetaDataMappingService extends AbstractComponent { indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST); removeIndex = true; Set typesToIntroduce = new HashSet<>(); - for (MappingTask task : tasks) { - if (task instanceof UpdateTask) { - typesToIntroduce.add(((UpdateTask) task).type); - } else if (task instanceof RefreshTask) { - Collections.addAll(typesToIntroduce, ((RefreshTask) task).types); - } + for (RefreshTask task : tasks) { + Collections.addAll(typesToIntroduce, task.types); } for (String type : typesToIntroduce) { // only add the current relevant mapping (if exists) @@ -209,80 +170,42 @@ public class MetaDataMappingService extends AbstractComponent { } if (!dirty) { - return Tuple.tuple(currentState, allTasks); + return currentState; } - return 
Tuple.tuple(ClusterState.builder(currentState).metaData(mdBuilder).build(), allTasks); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); } - private boolean processIndexMappingTasks(List tasks, IndexService indexService, IndexMetaData.Builder builder) { + private boolean processIndexMappingTasks(List tasks, IndexService indexService, IndexMetaData.Builder builder) { boolean dirty = false; String index = indexService.index().name(); // keep track of what we already refreshed, no need to refresh it again... Set processedRefreshes = new HashSet<>(); - for (MappingTask task : tasks) { - if (task instanceof RefreshTask) { - RefreshTask refreshTask = (RefreshTask) task; - try { - List updatedTypes = new ArrayList<>(); - for (String type : refreshTask.types) { - if (processedRefreshes.contains(type)) { - continue; - } - DocumentMapper mapper = indexService.mapperService().documentMapper(type); - if (mapper == null) { - continue; - } - if (!mapper.mappingSource().equals(builder.mapping(type).source())) { - updatedTypes.add(type); - builder.putMapping(new MappingMetaData(mapper)); - } - processedRefreshes.add(type); - } - - if (updatedTypes.isEmpty()) { + for (RefreshTask refreshTask : tasks) { + try { + List updatedTypes = new ArrayList<>(); + for (String type : refreshTask.types) { + if (processedRefreshes.contains(type)) { continue; } - - logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes); - dirty = true; - } catch (Throwable t) { - logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types); - } - } else if (task instanceof UpdateTask) { - UpdateTask updateTask = (UpdateTask) task; - try { - String type = updateTask.type; - CompressedXContent mappingSource = updateTask.mappingSource; - - MappingMetaData mappingMetaData = builder.mapping(type); - if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) { - logger.debug("[{}] update_mapping [{}] ignoring mapping update task as its source is equal to ours", index, updateTask.type); + DocumentMapper mapper = indexService.mapperService().documentMapper(type); + if (mapper == null) { continue; } - - DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false, true); + if (!mapper.mappingSource().equals(builder.mapping(type).source())) { + updatedTypes.add(type); + builder.putMapping(new MappingMetaData(mapper)); + } processedRefreshes.add(type); - - // if we end up with the same mapping as the original once, ignore - if (mappingMetaData != null && mappingMetaData.source().equals(updatedMapper.mappingSource())) { - logger.debug("[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have", index, updateTask.type); - continue; - } - - // build the updated mapping source - if (logger.isDebugEnabled()) { - logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource()); - } else if (logger.isInfoEnabled()) { - logger.info("[{}] update_mapping [{}] (dynamic)", index, type); - } - - builder.putMapping(new MappingMetaData(updatedMapper)); - dirty = true; - } catch (Throwable t) { - logger.warn("[{}] failed to update-mapping in cluster state, type [{}]", index, updateTask.type); } - } else { - logger.warn("illegal state, got wrong mapping task type [{}]", task); + + if (updatedTypes.isEmpty()) { + continue; + } + + logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes); 
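executeRefresh buckets the flat batch by index so that the expensive on-demand IndexService creation happens once per index rather than once per task. A sketch of that grouping step under simplified stand-in types; note that the patch itself only logs on a null index and still adds the task, whereas this sketch skips it:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Stand-in for the per-index bucketing in executeRefresh.
    final class GroupingSketch {
        record RefreshTask(String index, String indexUUID, String[] types) {}

        static Map<String, List<RefreshTask>> groupByIndex(List<RefreshTask> batch) {
            Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();
            for (RefreshTask task : batch) {
                if (task.index() == null) {
                    // the patch logs and keeps going; skipping is the safer variant
                    continue;
                }
                tasksPerIndex.computeIfAbsent(task.index(), k -> new ArrayList<>()).add(task);
            }
            return tasksPerIndex;
        }
    }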
+ dirty = true; + } catch (Throwable t) { + logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types); } } return dirty; @@ -292,197 +215,203 @@ public class MetaDataMappingService extends AbstractComponent { * Refreshes mappings if they are not the same between original and parsed version */ public void refreshMapping(final String index, final String indexUUID, final String... types) { - final long insertOrder; - synchronized (refreshOrUpdateMutex) { - insertOrder = ++refreshOrUpdateInsertOrder; - refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types)); - } - clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() { - private volatile List allTasks; + final RefreshTask refreshTask = new RefreshTask(index, indexUUID, types); + clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", + refreshTask, + ClusterStateTaskConfig.build(Priority.HIGH), + refreshExectuor, + (source, t) -> logger.warn("failure during [{}]", t, source) + ); + } - @Override - public void onFailure(String source, Throwable t) { - logger.warn("failure during [{}]", t, source); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - Tuple> tuple = executeRefreshOrUpdate(currentState, insertOrder); - this.allTasks = tuple.v2(); - return tuple.v1(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (allTasks == null) { - return; + class PutMappingExecutor implements ClusterStateTaskExecutor { + @Override + public Result execute(ClusterState currentState, List tasks) throws Exception { + List indicesToClose = new ArrayList<>(); + ArrayList failures = new ArrayList<>(tasks.size()); + try { + // precreate incoming indices; + for (PutMappingClusterStateUpdateRequest request : tasks) { + // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up + for (String index : request.indices()) { + if (currentState.metaData().hasIndex(index)) { + // if we don't have the index, we will throw exceptions later; + if (indicesService.hasIndex(index) == false) { + final IndexMetaData indexMetaData = currentState.metaData().index(index); + IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST); + indicesToClose.add(indexMetaData.getIndex()); + // make sure to add custom default mapping if exists + if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) { + indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes()); + } + // only add the current relevant mapping (if exists) + if (indexMetaData.getMappings().containsKey(request.type())) { + indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes()); + } + } + } + } } - for (Object task : allTasks) { - if (task instanceof UpdateTask) { - UpdateTask uTask = (UpdateTask) task; - ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true); - uTask.listener.onResponse(response); + for (PutMappingClusterStateUpdateRequest request : tasks) { + try { + currentState = applyRequest(currentState, request); + failures.add(null); + } catch (Throwable t) { + 
failures.add(t); + } + } + + return new Result(currentState, failures); + } finally { + for (String index : indicesToClose) { + indicesService.removeIndex(index, "created for mapping processing"); + } + } + } + + private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { + Map newMappers = new HashMap<>(); + Map existingMappers = new HashMap<>(); + for (String index : request.indices()) { + IndexService indexService = indicesService.indexServiceSafe(index); + // try and parse it (no need to add it here) so we can bail early in case of parsing exception + DocumentMapper newMapper; + DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); + if (MapperService.DEFAULT_MAPPING.equals(request.type())) { + // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default + newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false); + } else { + newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null); + if (existingMapper != null) { + // first, simulate + MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); + // if we have conflicts, throw an exception + if (mergeResult.hasConflicts()) { + throw new MergeMappingException(mergeResult.buildConflicts()); + } + } else { + // TODO: can we find a better place for this validation? + // The reason this validation is here is that the mapper service doesn't learn about + // new types all at once, which can create a false error. + + // For example in MapperService we can't distinguish between a create index api call + // and a put mapping api call, so we don't know which type did exist before. + // Also the order of the mappings may be backwards.
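Note the per-task bookkeeping above: applyRequest is invoked once per request in the batch, a null entry in failures marks success, and a Throwable entry fails only that task's listener, so one bad mapping request cannot sink the whole batch. A compact sketch of the pattern, with stand-in types; the patch catches Throwable where this sketch catches RuntimeException:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.BiFunction;

    // Stand-in types; mirrors the failures list built by PutMappingExecutor.
    final class PerTaskResultSketch {
        record State(long version) {}
        record Result(State resultingState, List<Throwable> failures) {}

        static <T> Result executeBatch(State current, List<T> tasks,
                                       BiFunction<State, T, State> applyOne) {
            List<Throwable> failures = new ArrayList<>(tasks.size());
            for (T task : tasks) {
                try {
                    current = applyOne.apply(current, task);
                    failures.add(null);   // null marks success for this slot
                } catch (RuntimeException t) {
                    failures.add(t);      // only this task's listener fails
                }
            }
            // failures.size() == tasks.size(), matching the assert in the patch
            return new Result(current, failures);
        }
    }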
+ if (newMapper.parentFieldMapper().active()) { + IndexMetaData indexMetaData = currentState.metaData().index(index); + for (ObjectCursor mapping : indexMetaData.getMappings().values()) { + if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) { + throw new IllegalArgumentException("can't add a _parent field that points to an already existing type"); + } + } + } + } + } + newMappers.put(index, newMapper); + if (existingMapper != null) { + existingMappers.put(index, existingMapper); + } + } + + String mappingType = request.type(); + if (mappingType == null) { + mappingType = newMappers.values().iterator().next().type(); + } else if (!mappingType.equals(newMappers.values().iterator().next().type())) { + throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); + } + if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { + throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); + } + final Map mappings = new HashMap<>(); + for (Map.Entry entry : newMappers.entrySet()) { + String index = entry.getKey(); + // do the actual merge here on the master, and update the mapping source + DocumentMapper newMapper = entry.getValue(); + IndexService indexService = indicesService.indexService(index); + if (indexService == null) { + continue; + } + + CompressedXContent existingSource = null; + if (existingMappers.containsKey(entry.getKey())) { + existingSource = existingMappers.get(entry.getKey()).mappingSource(); + } + DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes()); + CompressedXContent updatedSource = mergedMapper.mappingSource(); + + if (existingSource != null) { + if (existingSource.equals(updatedSource)) { + // same source, no changes, ignore it + } else { + // use the merged mapping source + mappings.put(index, new MappingMetaData(mergedMapper)); + if (logger.isDebugEnabled()) { + logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); + } else if (logger.isInfoEnabled()) { + logger.info("[{}] update_mapping [{}]", index, mergedMapper.type()); + } + + } + } else { + mappings.put(index, new MappingMetaData(mergedMapper)); + if (logger.isDebugEnabled()) { + logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource); + } else if (logger.isInfoEnabled()) { + logger.info("[{}] create_mapping [{}]", index, newMapper.type()); } } } - }); + if (mappings.isEmpty()) { + // no changes, return + return currentState; + } + MetaData.Builder builder = MetaData.builder(currentState.metaData()); + for (String indexName : request.indices()) { + IndexMetaData indexMetaData = currentState.metaData().index(indexName); + if (indexMetaData == null) { + throw new IndexNotFoundException(indexName); + } + MappingMetaData mappingMd = mappings.get(indexName); + if (mappingMd != null) { + builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd)); + } + } + + return ClusterState.builder(currentState).metaData(builder).build(); + } } public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener listener) { + clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", + request, + ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()), + putMappingExecutor, + new 
AckedClusterStateTaskListener() { - clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask(request, listener) { - - @Override - protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { - return new ClusterStateUpdateResponse(acknowledged); - } - - @Override - public ClusterState execute(final ClusterState currentState) throws Exception { - List indicesToClose = new ArrayList<>(); - try { - for (String index : request.indices()) { - if (!currentState.metaData().hasIndex(index)) { - throw new IndexNotFoundException(index); - } + @Override + public void onFailure(String source, Throwable t) { + listener.onFailure(t); } - // pre create indices here and add mappings to them so we can merge the mappings here if needed - for (String index : request.indices()) { - if (indicesService.hasIndex(index)) { - continue; - } - final IndexMetaData indexMetaData = currentState.metaData().index(index); - IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST); - indicesToClose.add(indexMetaData.getIndex()); - // make sure to add custom default mapping if exists - if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) { - indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes()); - } - // only add the current relevant mapping (if exists) - if (indexMetaData.getMappings().containsKey(request.type())) { - indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes()); - } + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return true; } - Map newMappers = new HashMap<>(); - Map existingMappers = new HashMap<>(); - for (String index : request.indices()) { - IndexService indexService = indicesService.indexServiceSafe(index); - // try and parse it (no need to add it here) so we can bail early in case of parsing exception - DocumentMapper newMapper; - DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); - if (MapperService.DEFAULT_MAPPING.equals(request.type())) { - // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default - newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false); - } else { - newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null); - if (existingMapper != null) { - // first, simulate - MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); - // if we have conflicts, throw an exception - if (mergeResult.hasConflicts()) { - throw new MergeMappingException(mergeResult.buildConflicts()); - } - } else { - // TODO: can we find a better place for this validation? - // The reason this validation is here is that the mapper service doesn't learn about - // new types all at once , which can create a false error. - - // For example in MapperService we can't distinguish between a create index api call - // and a put mapping api call, so we don't which type did exist before. - // Also the order of the mappings may be backwards. 
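The merge above runs with simulate=true first: conflicts are collected without mutating the mapper service, and only a conflict-free simulation is followed by the real merge further down. Roughly, under simplified stand-in types:

    import java.util.List;

    // Stand-in types for the simulate-then-commit merge.
    final class SimulateMergeSketch {
        record MergeResult(List<String> conflicts) {
            boolean hasConflicts() {
                return !conflicts.isEmpty();
            }
        }

        interface Mapper {
            // simulate == true collects conflicts without applying anything
            MergeResult merge(Mapper incoming, boolean simulate);
        }

        static void applyMappingUpdate(Mapper existing, Mapper incoming) {
            MergeResult simulation = existing.merge(incoming, true); // dry run
            if (simulation.hasConflicts()) {
                throw new IllegalArgumentException("mapping conflicts: " + simulation.conflicts());
            }
            existing.merge(incoming, false); // conflict-free, apply for real
        }
    }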
- if (newMapper.parentFieldMapper().active()) { - IndexMetaData indexMetaData = currentState.metaData().index(index); - for (ObjectCursor mapping : indexMetaData.getMappings().values()) { - if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) { - throw new IllegalArgumentException("can't add a _parent field that points to an already existing type"); - } - } - } - } - } - - - newMappers.put(index, newMapper); - if (existingMapper != null) { - existingMappers.put(index, existingMapper); - } + @Override + public void onAllNodesAcked(@Nullable Throwable t) { + listener.onResponse(new ClusterStateUpdateResponse(true)); } - String mappingType = request.type(); - if (mappingType == null) { - mappingType = newMappers.values().iterator().next().type(); - } else if (!mappingType.equals(newMappers.values().iterator().next().type())) { - throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); - } - if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { - throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); + @Override + public void onAckTimeout() { + listener.onResponse(new ClusterStateUpdateResponse(false)); } - final Map mappings = new HashMap<>(); - for (Map.Entry entry : newMappers.entrySet()) { - String index = entry.getKey(); - // do the actual merge here on the master, and update the mapping source - DocumentMapper newMapper = entry.getValue(); - IndexService indexService = indicesService.indexService(index); - if (indexService == null) { - continue; - } - - CompressedXContent existingSource = null; - if (existingMappers.containsKey(entry.getKey())) { - existingSource = existingMappers.get(entry.getKey()).mappingSource(); - } - DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes()); - CompressedXContent updatedSource = mergedMapper.mappingSource(); - - if (existingSource != null) { - if (existingSource.equals(updatedSource)) { - // same source, no changes, ignore it - } else { - // use the merged mapping source - mappings.put(index, new MappingMetaData(mergedMapper)); - if (logger.isDebugEnabled()) { - logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); - } else if (logger.isInfoEnabled()) { - logger.info("[{}] update_mapping [{}]", index, mergedMapper.type()); - } - } - } else { - mappings.put(index, new MappingMetaData(mergedMapper)); - if (logger.isDebugEnabled()) { - logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource); - } else if (logger.isInfoEnabled()) { - logger.info("[{}] create_mapping [{}]", index, newMapper.type()); - } - } + @Override + public TimeValue ackTimeout() { + return request.ackTimeout(); } - - if (mappings.isEmpty()) { - // no changes, return - return currentState; - } - - MetaData.Builder builder = MetaData.builder(currentState.metaData()); - for (String indexName : request.indices()) { - IndexMetaData indexMetaData = currentState.metaData().index(indexName); - if (indexMetaData == null) { - throw new IndexNotFoundException(indexName); - } - MappingMetaData mappingMd = mappings.get(indexName); - if (mappingMd != null) { - builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd)); - } - } - - return ClusterState.builder(currentState).metaData(builder).build(); - } finally { - for (String 
index : indicesToClose) { - indicesService.removeIndex(index, "created for mapping processing"); - } - } - } - }); + }); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 1a928dd41ea..eaa1eefd25e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -24,11 +24,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; @@ -44,13 +40,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.settings.IndexDynamicSettings; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; +import java.util.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -219,7 +209,8 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements } final Settings openSettings = updatedSettingsBuilder.build(); - clusterService.submitStateUpdateTask("update-settings", Priority.URGENT, new AckedClusterStateUpdateTask(request, listener) { + clusterService.submitStateUpdateTask("update-settings", + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { @@ -334,7 +325,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener listener) { - clusterService.submitStateUpdateTask("update-index-compatibility-versions", Priority.URGENT, new AckedClusterStateUpdateTask(request, listener) { + clusterService.submitStateUpdateTask("update-index-compatibility-versions", new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 6f43e880e3f..5cd4366bea4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -147,7 +147,7 @@ public class RoutingService extends AbstractLifecycleComponent i return; } logger.trace("rerouting {}", reason); - clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", Priority.HIGH, new ClusterStateUpdateTask() { + 
clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) { @Override public ClusterState execute(ClusterState currentState) { rerouting.set(false); diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index c2300739a7d..ce936c83d47 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -20,16 +20,8 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.Version; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ClusterState.Builder; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.LocalNodeMasterListener; -import org.elasticsearch.cluster.TimeoutClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; @@ -41,6 +33,7 @@ import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; @@ -49,13 +42,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.CountDown; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; -import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; +import org.elasticsearch.common.util.concurrent.*; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryService; @@ -63,18 +50,10 @@ import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.Executor; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import static 
org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; @@ -111,6 +90,7 @@ public class InternalClusterService extends AbstractLifecycleComponent priorityClusterStateListeners = new CopyOnWriteArrayList<>(); private final Collection clusterStateListeners = new CopyOnWriteArrayList<>(); private final Collection lastClusterStateListeners = new CopyOnWriteArrayList<>(); + private final Map> updateTasksPerExecutor = new HashMap<>(); // TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API private final Collection postAppliedListeners = new CopyOnWriteArrayList<>(); private final Iterable preAppliedListeners = Iterables.concat(priorityClusterStateListeners, clusterStateListeners, lastClusterStateListeners); @@ -289,30 +269,47 @@ public class InternalClusterService extends AbstractLifecycleComponent void submitStateUpdateTask(final String source, final T task, + final ClusterStateTaskConfig config, + final ClusterStateTaskExecutor executor, + final ClusterStateTaskListener listener + ) { if (!lifecycle.started()) { return; } try { - final UpdateTask task = new UpdateTask(source, priority, updateTask); - if (updateTask.timeout() != null) { - updateTasksExecutor.execute(task, threadPool.scheduler(), updateTask.timeout(), new Runnable() { + final UpdateTask updateTask = new UpdateTask<>(source, task, config, executor, listener); + + synchronized (updateTasksPerExecutor) { + List pendingTasks = updateTasksPerExecutor.get(executor); + if (pendingTasks == null) { + pendingTasks = new ArrayList<>(); + updateTasksPerExecutor.put(executor, pendingTasks); + } + pendingTasks.add(updateTask); + } + + if (config.timeout() != null) { + updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), new Runnable() { @Override public void run() { threadPool.generic().execute(new Runnable() { @Override public void run() { - updateTask.onFailure(task.source(), new ProcessClusterEventTimeoutException(updateTask.timeout(), task.source())); + if (updateTask.processed.getAndSet(true) == false) { + listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); + } } }); } }); } else { - updateTasksExecutor.execute(task); + updateTasksExecutor.execute(updateTask); } } catch (EsRejectedExecutionException e) { // ignore cases where we are shutting down..., there is really nothing interesting @@ -379,188 +376,241 @@ public class InternalClusterService extends AbstractLifecycleComponent void runTasksForExecutor(ClusterStateTaskExecutor executor) { + final ArrayList> toExecute = new ArrayList<>(); + final ArrayList sources = new ArrayList<>(); + synchronized (updateTasksPerExecutor) { + List pending = updateTasksPerExecutor.remove(executor); + if (pending != null) { + for (Iterator iter = pending.iterator(); iter.hasNext(); ) { + UpdateTask task = iter.next(); + if (task.processed.getAndSet(true) == false) { + logger.trace("will process [{}]", task.source); + toExecute.add((UpdateTask) task); + sources.add(task.source); + } else { + logger.trace("skipping [{}], already processed", task.source); + } + } + } + } + if (toExecute.isEmpty()) { + return; + } + final String source = Strings.collectionToCommaDelimitedString(sources); + if (!lifecycle.started()) { + logger.debug("processing [{}]: ignoring, cluster_service not started", source); + return; + } + logger.debug("processing [{}]: execute", source); + ClusterState previousClusterState = clusterState; + if 
(!previousClusterState.nodes().localNodeMaster() && executor.runOnlyOnMaster()) { + logger.debug("failing [{}]: local node is no longer master", source); + toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source)); + return; + } + ClusterStateTaskExecutor.Result result; + long startTimeNS = System.nanoTime(); + try { + List inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); + result = executor.execute(previousClusterState, inputs); + } catch (Throwable e) { + TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); + if (logger.isTraceEnabled()) { + StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n"); + sb.append(previousClusterState.nodes().prettyPrint()); + sb.append(previousClusterState.routingTable().prettyPrint()); + sb.append(previousClusterState.getRoutingNodes().prettyPrint()); + logger.trace(sb.toString(), e); + } + warnAboutSlowTaskIfNeeded(executionTime, source); + result = new ClusterStateTaskExecutor.Result(previousClusterState, Collections.nCopies(toExecute.size(), e)); + } + assert result.failures.size() == toExecute.size(); - public final ClusterStateUpdateTask updateTask; - - UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) { - super(priority, source); - this.updateTask = updateTask; + ClusterState newClusterState = result.resultingState; + final ArrayList> proccessedListeners = new ArrayList<>(); + // fail all tasks that have failed and extract those that are waiting for results + for (int i = 0; i < toExecute.size(); i++) { + final UpdateTask task = toExecute.get(i); + final Throwable failure = result.failures.get(i); + if (failure == null) { + proccessedListeners.add(task); + } else { + task.listener.onFailure(task.source, failure); + } } - @Override - public void run() { - if (!lifecycle.started()) { - logger.debug("processing [{}]: ignoring, cluster_service not started", source); - return; - } - logger.debug("processing [{}]: execute", source); - ClusterState previousClusterState = clusterState; - if (!previousClusterState.nodes().localNodeMaster() && updateTask.runOnlyOnMaster()) { - logger.debug("failing [{}]: local node is no longer master", source); - updateTask.onNoLongerMaster(source); - return; - } - ClusterState newClusterState; - long startTimeNS = System.nanoTime(); - try { - newClusterState = updateTask.execute(previousClusterState); - } catch (Throwable e) { - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); - if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n"); - sb.append(previousClusterState.nodes().prettyPrint()); - sb.append(previousClusterState.routingTable().prettyPrint()); - sb.append(previousClusterState.getRoutingNodes().prettyPrint()); - logger.trace(sb.toString(), e); - } - warnAboutSlowTaskIfNeeded(executionTime, source); - updateTask.onFailure(source, e); - return; - } - - if (previousClusterState == newClusterState) { - if (updateTask instanceof AckedClusterStateUpdateTask) { + if (previousClusterState == newClusterState) { + for (UpdateTask 
task : proccessedListeners) { + if (task.listener instanceof AckedClusterStateTaskListener) { //no need to wait for ack if nothing changed, the update can be counted as acknowledged - ((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null); + ((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null); } - updateTask.clusterStateProcessed(source, previousClusterState, newClusterState); - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); - logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime); - warnAboutSlowTaskIfNeeded(executionTime, source); - return; + task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState); } + TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); + logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime); + warnAboutSlowTaskIfNeeded(executionTime, source); + return; + } - try { - Discovery.AckListener ackListener = new NoOpAckListener(); - if (newClusterState.nodes().localNodeMaster()) { - // only the master controls the version numbers - Builder builder = ClusterState.builder(newClusterState).incrementVersion(); - if (previousClusterState.routingTable() != newClusterState.routingTable()) { - builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build()); - } - if (previousClusterState.metaData() != newClusterState.metaData()) { - builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1)); - } - newClusterState = builder.build(); - - if (updateTask instanceof AckedClusterStateUpdateTask) { - final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask; - if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) { - ackedUpdateTask.onAckTimeout(); + try { + ArrayList ackListeners = new ArrayList<>(); + if (newClusterState.nodes().localNodeMaster()) { + // only the master controls the version numbers + Builder builder = ClusterState.builder(newClusterState).incrementVersion(); + if (previousClusterState.routingTable() != newClusterState.routingTable()) { + builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build()); + } + if (previousClusterState.metaData() != newClusterState.metaData()) { + builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1)); + } + newClusterState = builder.build(); + for (UpdateTask task : proccessedListeners) { + if (task.listener instanceof AckedClusterStateTaskListener) { + final AckedClusterStateTaskListener ackedListener = (AckedClusterStateTaskListener) task.listener; + if (ackedListener.ackTimeout() == null || ackedListener.ackTimeout().millis() == 0) { + ackedListener.onAckTimeout(); } else { try { - ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool); + ackListeners.add(new AckCountDownListener(ackedListener, newClusterState.version(), newClusterState.nodes(), threadPool)); } catch (EsRejectedExecutionException ex) { if (logger.isDebugEnabled()) { logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex); } //timeout straightaway, otherwise we could wait forever as the 
timeout thread has not started - ackedUpdateTask.onAckTimeout(); + ackedListener.onAckTimeout(); } } } } - - newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED); - - if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n"); - sb.append(newClusterState.prettyPrint()); - logger.trace(sb.toString()); - } else if (logger.isDebugEnabled()) { - logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source); - } - - ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState); - // new cluster state, notify all listeners - final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta(); - if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { - String summary = nodesDelta.shortSummary(); - if (summary.length() > 0) { - logger.info("{}, reason: {}", summary, source); - } - } - - // TODO, do this in parallel (and wait) - for (DiscoveryNode node : nodesDelta.addedNodes()) { - if (!nodeRequiresConnection(node)) { - continue; - } - try { - transportService.connectToNode(node); - } catch (Throwable e) { - // the fault detection will detect it as failed as well - logger.warn("failed to connect to node [" + node + "]", e); - } - } - - // if we are the master, publish the new state to all nodes - // we publish here before we send a notification to all the listeners, since if it fails - // we don't want to notify - if (newClusterState.nodes().localNodeMaster()) { - logger.debug("publishing cluster state version [{}]", newClusterState.version()); - try { - discoveryService.publish(clusterChangedEvent, ackListener); - } catch (Discovery.FailedToCommitClusterStateException t) { - logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version()); - updateTask.onFailure(source, t); - return; - } - } - - // update the current cluster state - clusterState = newClusterState; - logger.debug("set local cluster state to version {}", newClusterState.version()); - for (ClusterStateListener listener : preAppliedListeners) { - try { - listener.clusterChanged(clusterChangedEvent); - } catch (Exception ex) { - logger.warn("failed to notify ClusterStateListener", ex); - } - } - - for (DiscoveryNode node : nodesDelta.removedNodes()) { - try { - transportService.disconnectFromNode(node); - } catch (Throwable e) { - logger.warn("failed to disconnect to node [" + node + "]", e); - } - } - - newClusterState.status(ClusterState.ClusterStateStatus.APPLIED); - - for (ClusterStateListener listener : postAppliedListeners) { - try { - listener.clusterChanged(clusterChangedEvent); - } catch (Exception ex) { - logger.warn("failed to notify ClusterStateListener", ex); - } - } - - //manual ack only from the master at the end of the publish - if (newClusterState.nodes().localNodeMaster()) { - try { - ackListener.onNodeAck(newClusterState.nodes().localNode(), null); - } catch (Throwable t) { - logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode()); - } - } - - updateTask.clusterStateProcessed(source, previousClusterState, newClusterState); - - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); - logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID()); - 
warnAboutSlowTaskIfNeeded(executionTime, source); - } catch (Throwable t) { - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); - StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n"); - sb.append(newClusterState.nodes().prettyPrint()); - sb.append(newClusterState.routingTable().prettyPrint()); - sb.append(newClusterState.getRoutingNodes().prettyPrint()); - logger.warn(sb.toString(), t); - // TODO: do we want to call updateTask.onFailure here? } + final Discovery.AckListener ackListener = new DelegetingAckListener(ackListeners); + + newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED); + + if (logger.isTraceEnabled()) { + StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n"); + sb.append(newClusterState.prettyPrint()); + logger.trace(sb.toString()); + } else if (logger.isDebugEnabled()) { + logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source); + } + + ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState); + // new cluster state, notify all listeners + final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta(); + if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { + String summary = nodesDelta.shortSummary(); + if (summary.length() > 0) { + logger.info("{}, reason: {}", summary, source); + } + } + + // TODO, do this in parallel (and wait) + for (DiscoveryNode node : nodesDelta.addedNodes()) { + if (!nodeRequiresConnection(node)) { + continue; + } + try { + transportService.connectToNode(node); + } catch (Throwable e) { + // the fault detection will detect it as failed as well + logger.warn("failed to connect to node [" + node + "]", e); + } + } + + // if we are the master, publish the new state to all nodes + // we publish here before we send a notification to all the listeners, since if it fails + // we don't want to notify + if (newClusterState.nodes().localNodeMaster()) { + logger.debug("publishing cluster state version [{}]", newClusterState.version()); + try { + discoveryService.publish(clusterChangedEvent, ackListener); + } catch (Discovery.FailedToCommitClusterStateException t) { + logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version()); + proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t)); + return; + } + } + + // update the current cluster state + clusterState = newClusterState; + logger.debug("set local cluster state to version {}", newClusterState.version()); + for (ClusterStateListener listener : preAppliedListeners) { + try { + listener.clusterChanged(clusterChangedEvent); + } catch (Exception ex) { + logger.warn("failed to notify ClusterStateListener", ex); + } + } + + for (DiscoveryNode node : nodesDelta.removedNodes()) { + try { + transportService.disconnectFromNode(node); + } catch (Throwable e) { + logger.warn("failed to disconnect to node [" + node + "]", e); + } + } + + newClusterState.status(ClusterState.ClusterStateStatus.APPLIED); + + for (ClusterStateListener listener : postAppliedListeners) { + try { + listener.clusterChanged(clusterChangedEvent); + } catch (Exception ex) { + logger.warn("failed to notify 
ClusterStateListener", ex); + } + } + + //manual ack only from the master at the end of the publish + if (newClusterState.nodes().localNodeMaster()) { + try { + ackListener.onNodeAck(newClusterState.nodes().localNode(), null); + } catch (Throwable t) { + logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode()); + } + } + + for (UpdateTask task : proccessedListeners) { + task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState); + } + + TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); + logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID()); + warnAboutSlowTaskIfNeeded(executionTime, source); + } catch (Throwable t) { + TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); + StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n"); + sb.append(newClusterState.nodes().prettyPrint()); + sb.append(newClusterState.routingTable().prettyPrint()); + sb.append(newClusterState.getRoutingNodes().prettyPrint()); + logger.warn(sb.toString(), t); + // TODO: do we want to call updateTask.onFailure here? + } + + } + + class UpdateTask extends SourcePrioritizedRunnable { + + public final T task; + public final ClusterStateTaskConfig config; + public final ClusterStateTaskExecutor executor; + public final ClusterStateTaskListener listener; + public final AtomicBoolean processed = new AtomicBoolean(); + + UpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor executor, ClusterStateTaskListener listener) { + super(config.priority(), source); + this.task = task; + this.config = config; + this.executor = executor; + this.listener = listener; + } + + @Override + public void run() { + runTasksForExecutor(executor); } } @@ -729,13 +779,24 @@ public class InternalClusterService extends AbstractLifecycleComponent listeners; + + private DelegetingAckListener(List listeners) { + this.listeners = listeners; + } + @Override public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { + for (Discovery.AckListener listener : listeners) { + listener.onNodeAck(node, t); + } } @Override public void onTimeout() { + throw new UnsupportedOperationException("no timeout delegation"); } } @@ -743,20 +804,20 @@ public class InternalClusterService extends AbstractLifecycleComponent ackTimeoutCallback; private Throwable lastFailure; - AckCountDownListener(AckedClusterStateUpdateTask ackedUpdateTask, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) { - this.ackedUpdateTask = ackedUpdateTask; + AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) { + this.ackedTaskListener = ackedTaskListener; this.clusterStateVersion = clusterStateVersion; this.nodes = nodes; int countDown = 0; for (DiscoveryNode node : nodes) { - if (ackedUpdateTask.mustAck(node)) { + if (ackedTaskListener.mustAck(node)) { countDown++; } } @@ -764,7 +825,7 @@ public class InternalClusterService extends AbstractLifecycleComponent joinCallbacksToRespondTo = new ArrayList<>(); 
private boolean nodeAdded = false; + public ProcessJoinsTask(Priority priority) { + super(priority); + } + @Override public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder nodesBuilder; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index e5ec230fd66..03111d141ef 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -320,7 +320,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } catch (FailedToCommitClusterStateException t) { // cluster service logs a WARN message logger.debug("failed to publish cluster state version [{}] (not enough nodes acknowledged, min master nodes [{}])", clusterChangedEvent.state().version(), electMaster.minimumMasterNodes()); - clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) { return rejoin(currentState, "failed to publish to min_master_nodes"); @@ -498,7 +498,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen return; } if (localNodeMaster()) { - clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.id()); @@ -538,7 +538,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // nothing to do here... return; } - clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) { if (currentState.nodes().get(node.id()) == null) { @@ -587,7 +587,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // We only set the new value. If the master doesn't see enough nodes it will revoke it's mastership. 
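// Aside: every ZenDiscovery call site changed in this patch follows the same
// mechanical rewrite -- the Priority argument moves from submitStateUpdateTask
// into the ClusterStateUpdateTask constructor. A minimal sketch of the new
// form, with a hypothetical no-op task body (illustrative only, not taken from
// this patch):
clusterService.submitStateUpdateTask("example-source", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
    @Override
    public ClusterState execute(ClusterState currentState) {
        return currentState; // returning the same instance means "no change"
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // react to the failure; no cluster state change was applied
    }
});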
return; } - clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) { // check if we have enough master nodes, if not, we need to move into joining the cluster again @@ -627,7 +627,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen logger.info("master_left [{}], reason [{}]", cause, masterNode, reason); - clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public boolean runOnlyOnMaster() { @@ -694,7 +694,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } void processNextPendingClusterState(String reason) { - clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", Priority.URGENT, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new ClusterStateUpdateTask(Priority.URGENT) { @Override public boolean runOnlyOnMaster() { return false; @@ -1059,7 +1059,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen return; } logger.debug("got a ping from another master {}. resolving who should rejoin. current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get()); - clusterService.submitStateUpdateTask("ping from another master", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("ping from another master", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) throws Exception { @@ -1114,7 +1114,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen class RejoinClusterRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception { - clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public boolean runOnlyOnMaster() { diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 96a865f4da4..1fb6c06a73c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -43,23 +43,14 @@ import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.*; /** * @@ -711,8 +702,8 @@ public class ClusterServiceIT extends ESIntegTestCase { .build(); internalCluster().startNode(settings); ClusterService clusterService = internalCluster().getInstance(ClusterService.class); - BlockingTask block = new BlockingTask(); - clusterService.submitStateUpdateTask("test", Priority.IMMEDIATE, block); + BlockingTask block = new BlockingTask(Priority.IMMEDIATE); + clusterService.submitStateUpdateTask("test", block); int taskCount = randomIntBetween(5, 20); Priority[] priorities = Priority.values(); @@ -721,7 +712,7 @@ public class ClusterServiceIT extends ESIntegTestCase { CountDownLatch latch = new CountDownLatch(taskCount); for (int i = 0; i < taskCount; i++) { Priority priority = priorities[randomIntBetween(0, priorities.length - 1)]; - clusterService.submitStateUpdateTask("test", priority, new PrioritiezedTask(priority, latch, tasks)); + clusterService.submitStateUpdateTask("test", new PrioritiezedTask(priority, latch, tasks)); } block.release(); @@ -730,9 +721,9 @@ public class ClusterServiceIT extends ESIntegTestCase { Priority prevPriority = null; for (PrioritiezedTask task : tasks) { if (prevPriority == null) { - prevPriority = task.priority; + prevPriority = task.priority(); } else { - assertThat(task.priority.sameOrAfter(prevPriority), is(true)); + assertThat(task.priority().sameOrAfter(prevPriority), is(true)); } } } @@ -947,6 +938,10 @@ public class ClusterServiceIT extends ESIntegTestCase { private static class BlockingTask extends ClusterStateUpdateTask { private final CountDownLatch latch = new CountDownLatch(1); + public BlockingTask(Priority priority) { + super(priority); + } + @Override public ClusterState execute(ClusterState currentState) throws Exception { latch.await(); @@ -965,12 +960,11 @@ public class ClusterServiceIT extends ESIntegTestCase { private static class PrioritiezedTask extends ClusterStateUpdateTask { - private final Priority priority; private final CountDownLatch latch; private final List tasks; private PrioritiezedTask(Priority priority, CountDownLatch latch, List tasks) { - this.priority = priority; + super(priority); this.latch = latch; this.tasks = tasks; } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index f9dbda217d0..b14792a2c33 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -25,11 +25,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlock; import 
org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -59,16 +55,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; -import org.elasticsearch.test.disruption.BlockClusterStateProcessing; -import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; -import org.elasticsearch.test.disruption.LongGCDisruption; -import org.elasticsearch.test.disruption.NetworkDelaysPartition; -import org.elasticsearch.test.disruption.NetworkDisconnectPartition; -import org.elasticsearch.test.disruption.NetworkPartition; -import org.elasticsearch.test.disruption.NetworkUnresponsivePartition; -import org.elasticsearch.test.disruption.ServiceDisruptionScheme; -import org.elasticsearch.test.disruption.SingleNodeDisruption; -import org.elasticsearch.test.disruption.SlowClusterStateProcessing; +import org.elasticsearch.test.disruption.*; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportException; @@ -78,31 +65,15 @@ import org.elasticsearch.transport.TransportService; import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; +import java.util.*; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0) @ESIntegTestCase.SuppressLocalMode @@ -650,7 +621,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // but will be queued and once the old master node un-freezes it gets executed. // The old master node will send this update + the cluster state where he is flagged as master to the other // nodes that follow the new master. These nodes should ignore this update. 
- internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) throws Exception { return ClusterState.builder(currentState).build(); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index cb6a979d38d..fc4dd4f6487 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -27,13 +27,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; @@ -56,11 +50,7 @@ import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.*; import java.io.IOException; import java.nio.file.Files; @@ -407,7 +397,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable relocations when we do this, to make sure the shards are not relocated from node2 // due to rebalancing, and delete its content client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get(); - internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) throws Exception { IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder("test"); diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index ffddcfc1619..51ae038ca0d 
100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -20,12 +20,7 @@ package org.elasticsearch.snapshots; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.PendingClusterTask; @@ -208,7 +203,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { private void addBlock() { // We should block after this task - add blocking cluster state update task - clusterService.submitStateUpdateTask("test_block", passThroughPriority, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("test_block", new ClusterStateUpdateTask(passThroughPriority) { @Override public ClusterState execute(ClusterState currentState) throws Exception { while(System.currentTimeMillis() < stopWaitingAt) { diff --git a/test-framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java b/test-framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java index 834e7d540c4..cb3d643f555 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java +++ b/test-framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.service.PendingClusterTask; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.transport.DummyTransportAddress; @@ -115,12 +114,12 @@ public class NoopClusterService implements ClusterService { } @Override - public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) { + public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) { } @Override - public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) { + public void submitStateUpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor executor, ClusterStateTaskListener listener) { } diff --git a/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java b/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java index b13963961a0..3845a71c45e 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java +++ b/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.service.PendingClusterTask; import 
org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.logging.ESLogger; @@ -40,10 +39,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Queue; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; @@ -183,31 +179,34 @@ public class TestClusterService implements ClusterService { } @Override - synchronized public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) { - logger.debug("processing [{}]", source); - if (state().nodes().localNodeMaster() == false && updateTask.runOnlyOnMaster()) { - updateTask.onNoLongerMaster(source); - logger.debug("failed [{}], no longer master", source); - return; - } - ClusterState newState; - ClusterState previousClusterState = state; - try { - newState = updateTask.execute(previousClusterState); - } catch (Exception e) { - updateTask.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", e)); - return; - } - setStateAndNotifyListeners(newState); - if (updateTask instanceof ClusterStateUpdateTask) { - ((ClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newState); - } - logger.debug("finished [{}]", source); + public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) { + submitStateUpdateTask(source, null, updateTask, updateTask, updateTask); } @Override - public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) { - submitStateUpdateTask(source, Priority.NORMAL, updateTask); + synchronized public void submitStateUpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor executor, ClusterStateTaskListener listener) { + logger.debug("processing [{}]", source); + if (state().nodes().localNodeMaster() == false && executor.runOnlyOnMaster()) { + listener.onNoLongerMaster(source); + logger.debug("failed [{}], no longer master", source); + return; + } + ClusterStateTaskExecutor.Result result; + ClusterState previousClusterState = state; + try { + result = executor.execute(previousClusterState, Arrays.asList(task)); + } catch (Exception e) { + result = new ClusterStateTaskExecutor.Result(previousClusterState, Arrays.asList(e)); + } + if (result.failures.get(0) != null) { + listener.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", + result.failures.get(0))); + return; + } + setStateAndNotifyListeners(result.resultingState); + listener.clusterStateProcessed(source, previousClusterState, result.resultingState); + logger.debug("finished [{}]", source); + } @Override diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java b/test-framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java index 8154abfbd33..e318843e84f 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java +++ b/test-framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java @@ -58,7 +58,7 @@ public class 
BlockClusterStateProcessing extends SingleNodeDisruption { boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1)); assert success : "startDisrupting called without waiting on stopDistrupting to complete"; final CountDownLatch started = new CountDownLatch(1); - clusterService.submitStateUpdateTask("service_disruption_block", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public boolean runOnlyOnMaster() { diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java b/test-framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java index 3c56f8305c0..b9c663686b1 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java +++ b/test-framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java @@ -102,7 +102,7 @@ public class SlowClusterStateProcessing extends SingleNodeDisruption { return false; } final AtomicBoolean stopped = new AtomicBoolean(false); - clusterService.submitStateUpdateTask("service_disruption_delay", Priority.IMMEDIATE, new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("service_disruption_delay", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public boolean runOnlyOnMaster() { From eaba3d5cb303dfc42540f642840c16b59bedc487 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 24 Nov 2015 10:32:13 -0500 Subject: [PATCH 06/40] Add test for cluster state batch updates --- .../cluster/ClusterServiceIT.java | 129 +++++++++++++++++- 1 file changed, 123 insertions(+), 6 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 1fb6c06a73c..60e7fb29041 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -44,9 +44,12 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -708,18 +711,18 @@ public class ClusterServiceIT extends ESIntegTestCase { Priority[] priorities = Priority.values(); // will hold all the tasks in the order in which they were executed - List tasks = new ArrayList<>(taskCount); + List tasks = new ArrayList<>(taskCount); CountDownLatch latch = new CountDownLatch(taskCount); for (int i = 0; i < taskCount; i++) { Priority priority = priorities[randomIntBetween(0, priorities.length - 1)]; - clusterService.submitStateUpdateTask("test", new PrioritiezedTask(priority, latch, tasks)); + clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks)); } block.release(); latch.await(); Priority prevPriority = null; - for (PrioritiezedTask task : tasks) { + for (PrioritizedTask task : tasks) { if (prevPriority == null) { prevPriority = task.priority(); } else { @@ -728,6 +731,120 @@ public class 
ClusterServiceIT extends ESIntegTestCase { } } + public void testClusterStateBatchedUpdates() throws InterruptedException { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + internalCluster().startNode(settings); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + + AtomicInteger counter = new AtomicInteger(); + class Task { + private AtomicBoolean state = new AtomicBoolean(); + + public void execute() { + if (!state.compareAndSet(false, true)) { + throw new IllegalStateException(); + } else { + counter.incrementAndGet(); + } + } + } + + class TaskExecutor implements ClusterStateTaskExecutor { + private AtomicInteger counter = new AtomicInteger(); + + @Override + public Result execute(ClusterState currentState, List tasks) throws Exception { + tasks.forEach(task -> task.execute()); + counter.addAndGet(tasks.size()); + return new Result(currentState, tasks.size()); + } + + @Override + public boolean runOnlyOnMaster() { + return false; + } + } + int numberOfThreads = randomIntBetween(2, 256); + int tasksSubmittedPerThread = randomIntBetween(1, 1024); + + ConcurrentMap counters = new ConcurrentHashMap<>(); + CountDownLatch latch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); + ClusterStateTaskListener listener = new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Throwable t) { + assert false; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet(); + latch.countDown(); + } + }; + + int numberOfExecutors = Math.max(1, numberOfThreads / 4); + List executors = new ArrayList<>(); + for (int i = 0; i < numberOfExecutors; i++) { + executors.add(new TaskExecutor()); + } + + // randomly assign tasks to executors + List assignments = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + for (int j = 0; j < tasksSubmittedPerThread; j++) { + assignments.add(randomFrom(executors)); + } + } + + Map counts = new HashMap<>(); + for (TaskExecutor executor : assignments) { + counts.merge(executor, 1, (previous, one) -> previous + one); + } + + CountDownLatch startingGun = new CountDownLatch(1 + numberOfThreads); + List threads = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + final int index = i; + Thread thread = new Thread(() -> { + startingGun.countDown(); + for (int j = 0; j < tasksSubmittedPerThread; j++) { + ClusterStateTaskExecutor executor = assignments.get(index * tasksSubmittedPerThread + j); + clusterService.submitStateUpdateTask( + Thread.currentThread().getName(), + new Task(), + ClusterStateTaskConfig.build(Priority.NORMAL), + executor, + listener); + } + }); + threads.add(thread); + thread.start(); + } + + startingGun.countDown(); + for (Thread thread : threads) { + thread.join(); + } + + // wait until all the cluster state updates have been processed + latch.await(); + + // assert the number of executed tasks is correct + assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); + + // assert each executor executed the correct number of tasks + for (TaskExecutor executor : executors) { + assertEquals((int)counts.get(executor), executor.counter.get()); + } + + // assert the correct number of clusterStateProcessed events were triggered + for (Map.Entry entry : counters.entrySet()) { + assertEquals(entry.getValue().get(), tasksSubmittedPerThread); + } + } + @TestLogging("cluster:TRACE") 
// To ensure that we log cluster state events on TRACE level public void testClusterStateUpdateLogging() throws Exception { Settings settings = settingsBuilder() @@ -958,12 +1075,12 @@ public class ClusterServiceIT extends ESIntegTestCase { } - private static class PrioritiezedTask extends ClusterStateUpdateTask { + private static class PrioritizedTask extends ClusterStateUpdateTask { private final CountDownLatch latch; - private final List tasks; + private final List tasks; - private PrioritiezedTask(Priority priority, CountDownLatch latch, List tasks) { + private PrioritizedTask(Priority priority, CountDownLatch latch, List tasks) { super(priority); this.latch = latch; this.tasks = tasks; From 1fb6a1f669b42285eee27d1fcd8581145a52a1b6 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 24 Nov 2015 12:10:38 -0500 Subject: [PATCH 07/40] Simplify grouping of cluster state update tasks --- .../cluster/metadata/MetaDataMappingService.java | 7 +------ .../cluster/service/InternalClusterService.java | 7 +------ 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 215dde061db..44e38533257 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -104,12 +104,7 @@ public class MetaDataMappingService extends AbstractComponent { if (task.index == null) { logger.debug("ignoring a mapping task of type [{}] with a null index.", task); } - List indexTasks = tasksPerIndex.get(task.index); - if (indexTasks == null) { - indexTasks = new ArrayList<>(); - tasksPerIndex.put(task.index, indexTasks); - } - indexTasks.add(task); + tasksPerIndex.computeIfAbsent(task.index, k -> new ArrayList<>()).add(task); } boolean dirty = false; diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index ce936c83d47..3407a571661 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -286,12 +286,7 @@ public class InternalClusterService extends AbstractLifecycleComponent updateTask = new UpdateTask<>(source, task, config, executor, listener); synchronized (updateTasksPerExecutor) { - List pendingTasks = updateTasksPerExecutor.get(executor); - if (pendingTasks == null) { - pendingTasks = new ArrayList<>(); - updateTasksPerExecutor.put(executor, pendingTasks); - } - pendingTasks.add(updateTask); + updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>()).add(updateTask); } if (config.timeout() != null) { From c3f97e7642f6f5208656b35629af08cccaaaeb22 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 25 Nov 2015 12:04:09 -0500 Subject: [PATCH 08/40] Simplify InternalClusterService#submitStateUpdateTask with lambdas --- .../cluster/service/InternalClusterService.java | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index 3407a571661..c97c55d5587 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -290,19 +290,10 @@ public class InternalClusterService extends AbstractLifecycleComponent threadPool.generic().execute(() -> { + if (updateTask.processed.getAndSet(true) == false) { + listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); + }})); } else { updateTasksExecutor.execute(updateTask); } From 7caee2fa4d918d4c6978bcf986a158d03bf7db2a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 25 Nov 2015 12:54:48 -0500 Subject: [PATCH 09/40] Explicitly correspond cluster state tasks and execution results --- .../cluster/ClusterStateTaskExecutor.java | 67 ++++++++++++++++--- .../cluster/ClusterStateUpdateTask.java | 6 +- .../metadata/MetaDataMappingService.java | 14 ++-- .../service/InternalClusterService.java | 29 ++++---- .../cluster/ClusterServiceIT.java | 4 +- .../test/cluster/TestClusterService.java | 15 +++-- 6 files changed, 95 insertions(+), 40 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index 861b924c52e..ebb8e397b99 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -18,15 +18,17 @@ */ package org.elasticsearch.cluster; -import java.util.Arrays; import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.stream.Collectors; public interface ClusterStateTaskExecutor { /** * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state * should be changed. */ - Result execute(ClusterState currentState, List tasks) throws Exception; + Result execute(ClusterState currentState, List tasks) throws Exception; /** * indicates whether this task should only run if current node is master @@ -35,19 +37,66 @@ public interface ClusterStateTaskExecutor { return true; } - class Result { + /** + * Represents the result of a batched execution of cluster state update tasks + * @param the type of the cluster state update task + */ + class Result { final public ClusterState resultingState; - final public List failures; + final public Map executionResults; - public Result(ClusterState resultingState, int numberOfTasks) { - this.resultingState = resultingState; - failures = Arrays.asList(new Throwable[numberOfTasks]); + /** + * Construct an execution result instance for which every cluster state update task succeeded + * @param resultingState the resulting cluster state + * @param tasks the cluster state update tasks + */ + public Result(ClusterState resultingState, List tasks) { + this(resultingState, tasks.stream().collect(Collectors.toMap(task -> task, task -> ClusterStateTaskExecutionResult.success()))); } - public Result(ClusterState resultingState, List failures) { + /** + * Construct an execution result instance with a correspondence between the tasks and their execution result + * @param resultingState the resulting cluster state + * @param executionResults the correspondence between tasks and their outcome + */ + public Result(ClusterState resultingState, Map executionResults) { this.resultingState = resultingState; - this.failures = failures; + this.executionResults = executionResults; } } + final class ClusterStateTaskExecutionResult { + private final Throwable failure; + + private static final ClusterStateTaskExecutionResult 
SUCCESS = new ClusterStateTaskExecutionResult(null); + + public static ClusterStateTaskExecutionResult success() { + return SUCCESS; + } + + public static ClusterStateTaskExecutionResult failure(Throwable failure) { + return new ClusterStateTaskExecutionResult(failure); + } + + private ClusterStateTaskExecutionResult(Throwable failure) { + this.failure = failure; + } + + public boolean isSuccess() { + return failure == null; + } + + /** + * Handle the execution result with the provided consumers + * @param onSuccess handler to invoke on success + * @param onFailure handler to invoke on failure; the throwable passed through will not be null + */ + public void handle(Runnable onSuccess, Consumer onFailure) { + if (failure == null) { + onSuccess.run(); + } else { + onFailure.accept(failure); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index 17c4635c7de..ffcb9c0e75b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -28,7 +28,7 @@ import java.util.List; /** * A task that can update the cluster state. */ -abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { +abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { final private Priority priority; @@ -41,9 +41,9 @@ abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, } @Override - final public Result execute(ClusterState currentState, List tasks) throws Exception { + final public Result execute(ClusterState currentState, List tasks) throws Exception { ClusterState result = execute(currentState); - return new Result(result, tasks.size()); + return new Result<>(result, tasks); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 44e38533257..be401269917 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -81,9 +81,9 @@ public class MetaDataMappingService extends AbstractComponent { class RefreshTaskExecutor implements ClusterStateTaskExecutor { @Override - public Result execute(ClusterState currentState, List tasks) throws Exception { + public Result execute(ClusterState currentState, List tasks) throws Exception { ClusterState newClusterState = executeRefresh(currentState, tasks); - return new Result(newClusterState, tasks.size()); + return new Result<>(newClusterState, tasks); } } @@ -221,9 +221,9 @@ public class MetaDataMappingService extends AbstractComponent { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public Result execute(ClusterState currentState, List tasks) throws Exception { + public Result execute(ClusterState currentState, List tasks) throws Exception { List indicesToClose = new ArrayList<>(); - ArrayList failures = new ArrayList<>(tasks.size()); + Map executionResults = new HashMap<>(); try { // precreate incoming indices; for (PutMappingClusterStateUpdateRequest request : tasks) { @@ -250,13 +250,13 @@ public class MetaDataMappingService extends AbstractComponent { for (PutMappingClusterStateUpdateRequest 
request : tasks) { try { currentState = applyRequest(currentState, request); - failures.add(null); + executionResults.put(request, ClusterStateTaskExecutionResult.success()); } catch (Throwable t) { - failures.add(t); + executionResults.put(request, ClusterStateTaskExecutionResult.failure(t)); } } - return new Result(currentState, failures); + return new Result<>(currentState, executionResults); } finally { for (String index : indicesToClose) { indicesService.removeIndex(index, "created for mapping processing"); diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index c97c55d5587..ad4139fec21 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -269,7 +269,7 @@ public class InternalClusterService extends AbstractLifecycleComponent task.listener.onNoLongerMaster(task.source)); return; } - ClusterStateTaskExecutor.Result result; + ClusterStateTaskExecutor.Result result; long startTimeNS = System.nanoTime(); try { List inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); @@ -410,21 +410,26 @@ public class InternalClusterService extends AbstractLifecycleComponent executionResults = + toExecute + .stream() + .collect(Collectors.toMap( + updateTask -> updateTask.task, + updateTask -> ClusterStateTaskExecutor.ClusterStateTaskExecutionResult.failure(e) + )); + result = new ClusterStateTaskExecutor.Result<>(previousClusterState, executionResults); } - assert result.failures.size() == toExecute.size(); + + assert result.executionResults != null; ClusterState newClusterState = result.resultingState; final ArrayList> proccessedListeners = new ArrayList<>(); // fail all tasks that have failed and extract those that are waiting for results - for (int i = 0; i < toExecute.size(); i++) { - final UpdateTask task = toExecute.get(i); - final Throwable failure = result.failures.get(i); - if (failure == null) { - proccessedListeners.add(task); - } else { - task.listener.onFailure(task.source, failure); - } + for (UpdateTask updateTask : toExecute) { + assert result.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString(); + final ClusterStateTaskExecutor.ClusterStateTaskExecutionResult executionResult = + result.executionResults.get(updateTask.task); + executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex)); } if (previousClusterState == newClusterState) { diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 60e7fb29041..820e468a093 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -755,10 +755,10 @@ public class ClusterServiceIT extends ESIntegTestCase { private AtomicInteger counter = new AtomicInteger(); @Override - public Result execute(ClusterState currentState, List tasks) throws Exception { + public Result execute(ClusterState currentState, List tasks) throws Exception { tasks.forEach(task -> task.execute()); counter.addAndGet(tasks.size()); - return new Result(currentState, tasks.size()); + return new Result<>(currentState, tasks); } @Override diff --git 
a/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java b/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java index 3845a71c45e..e5d45cd0e58 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java +++ b/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -191,18 +191,19 @@ public class TestClusterService implements ClusterService { logger.debug("failed [{}], no longer master", source); return; } - ClusterStateTaskExecutor.Result result; + ClusterStateTaskExecutor.Result result; ClusterState previousClusterState = state; try { result = executor.execute(previousClusterState, Arrays.asList(task)); } catch (Exception e) { - result = new ClusterStateTaskExecutor.Result(previousClusterState, Arrays.asList(e)); - } - if (result.failures.get(0) != null) { - listener.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", - result.failures.get(0))); - return; + result = new ClusterStateTaskExecutor.Result<>(previousClusterState, Collections.singletonMap(task, ClusterStateTaskExecutor.ClusterStateTaskExecutionResult.failure(e))); } + + result.executionResults.get(task).handle( + () -> {}, + ex -> listener.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", ex)) + ); + setStateAndNotifyListeners(result.resultingState); listener.clusterStateProcessed(source, previousClusterState, result.resultingState); logger.debug("finished [{}]", source); From 9aa9447016755884836c33909ec720cb79324247 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 25 Nov 2015 14:31:17 -0500 Subject: [PATCH 10/40] Simplify loop in InternalClusterService#runTasksForExecutor --- .../cluster/service/InternalClusterService.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index ad4139fec21..a00b56f5775 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -368,11 +368,10 @@ public class InternalClusterService extends AbstractLifecycleComponent pending = updateTasksPerExecutor.remove(executor); if (pending != null) { - for (Iterator iter = pending.iterator(); iter.hasNext(); ) { - UpdateTask task = iter.next(); + for (UpdateTask task : pending) { if (task.processed.getAndSet(true) == false) { logger.trace("will process [{}]", task.source); - toExecute.add((UpdateTask) task); + toExecute.add(task); sources.add(task.source); } else { logger.trace("skipping [{}], already processed", task.source); From 72e18ec6817b9463ecb666b328dab014e06ee0a0 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 25 Nov 2015 17:38:34 -0500 Subject: [PATCH 11/40] Add builder to create cluster state executor results --- .../cluster/ClusterStateTaskExecutor.java | 70 +++++++++++++------ .../cluster/ClusterStateUpdateTask.java | 4 +- .../metadata/MetaDataMappingService.java | 16 ++--- .../service/InternalClusterService.java | 23 +++--- .../cluster/ClusterServiceIT.java | 4 +- .../test/cluster/TestClusterService.java | 12 ++-- 6 files changed, 76 insertions(+), 53 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java 
b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index ebb8e397b99..ab85d9540f0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -18,17 +18,17 @@ */ package org.elasticsearch.cluster; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.function.Consumer; -import java.util.stream.Collectors; public interface ClusterStateTaskExecutor { /** * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state * should be changed. */ - Result execute(ClusterState currentState, List tasks) throws Exception; + BatchResult execute(ClusterState currentState, List tasks) throws Exception; /** * indicates whether this task should only run if current node is master @@ -41,44 +41,74 @@ public interface ClusterStateTaskExecutor { * Represents the result of a batched execution of cluster state update tasks * @param the type of the cluster state update task */ - class Result { + class BatchResult { final public ClusterState resultingState; - final public Map executionResults; - - /** - * Construct an execution result instance for which every cluster state update task succeeded - * @param resultingState the resulting cluster state - * @param tasks the cluster state update tasks - */ - public Result(ClusterState resultingState, List tasks) { - this(resultingState, tasks.stream().collect(Collectors.toMap(task -> task, task -> ClusterStateTaskExecutionResult.success()))); - } + final public Map executionResults; /** * Construct an execution result instance with a correspondence between the tasks and their execution result * @param resultingState the resulting cluster state * @param executionResults the correspondence between tasks and their outcome */ - public Result(ClusterState resultingState, Map executionResults) { + BatchResult(ClusterState resultingState, Map executionResults) { this.resultingState = resultingState; this.executionResults = executionResults; } + + public static Builder builder() { + return new Builder<>(); + } + + public static class Builder { + private final Map executionResults = new IdentityHashMap<>(); + + public Builder success(T task) { + return result(task, TaskResult.success()); + } + + public Builder successes(Iterable tasks) { + for (T task : tasks) { + success(task); + } + return this; + } + + public Builder failure(T task, Throwable t) { + return result(task, TaskResult.failure(t)); + } + + public Builder failures(Iterable tasks, Throwable t) { + for (T task : tasks) { + failure(task, t); + } + return this; + } + + private Builder result(T task, TaskResult executionResult) { + executionResults.put(task, executionResult); + return this; + } + + public BatchResult build(ClusterState resultingState) { + return new BatchResult<>(resultingState, executionResults); + } + } } - final class ClusterStateTaskExecutionResult { + final class TaskResult { private final Throwable failure; - private static final ClusterStateTaskExecutionResult SUCCESS = new ClusterStateTaskExecutionResult(null); + private static final TaskResult SUCCESS = new TaskResult(null); - public static ClusterStateTaskExecutionResult success() { + public static TaskResult success() { return SUCCESS; } - public static ClusterStateTaskExecutionResult failure(Throwable failure) { - return new ClusterStateTaskExecutionResult(failure); + public static TaskResult failure(Throwable 
failure) { + return new TaskResult(failure); } - private ClusterStateTaskExecutionResult(Throwable failure) { + private TaskResult(Throwable failure) { this.failure = failure; } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index ffcb9c0e75b..3e2881134f8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -41,9 +41,9 @@ abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, } @Override - final public Result execute(ClusterState currentState, List tasks) throws Exception { + final public BatchResult execute(ClusterState currentState, List tasks) throws Exception { ClusterState result = execute(currentState); - return new Result<>(result, tasks); + return BatchResult.builder().successes(tasks).build(result); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index be401269917..fb6ed1f0753 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; @@ -81,9 +80,9 @@ public class MetaDataMappingService extends AbstractComponent { class RefreshTaskExecutor implements ClusterStateTaskExecutor { @Override - public Result execute(ClusterState currentState, List tasks) throws Exception { + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { ClusterState newClusterState = executeRefresh(currentState, tasks); - return new Result<>(newClusterState, tasks); + return BatchResult.builder().successes(tasks).build(newClusterState); } } @@ -221,9 +220,10 @@ public class MetaDataMappingService extends AbstractComponent { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public Result execute(ClusterState currentState, List tasks) throws Exception { + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { List indicesToClose = new ArrayList<>(); - Map executionResults = new HashMap<>(); + BatchResult.Builder builder = BatchResult.builder(); + Map executionResults = new HashMap<>(); try { // precreate incoming indices; for (PutMappingClusterStateUpdateRequest request : tasks) { @@ -250,13 +250,13 @@ public class MetaDataMappingService extends AbstractComponent { for (PutMappingClusterStateUpdateRequest request : tasks) { try { currentState = applyRequest(currentState, request); - executionResults.put(request, ClusterStateTaskExecutionResult.success()); + builder.success(request); } catch (Throwable t) { - executionResults.put(request, ClusterStateTaskExecutionResult.failure(t)); + builder.failure(request, t); } } - return new Result<>(currentState, executionResults); + return builder.build(currentState); } finally { for (String index : indicesToClose) { indicesService.removeIndex(index, 
"created for mapping processing"); diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index a00b56f5775..d4b15861846 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -394,11 +394,11 @@ public class InternalClusterService extends AbstractLifecycleComponent task.listener.onNoLongerMaster(task.source)); return; } - ClusterStateTaskExecutor.Result result; + ClusterStateTaskExecutor.BatchResult batchResult; long startTimeNS = System.nanoTime(); try { List inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); - result = executor.execute(previousClusterState, inputs); + batchResult = executor.execute(previousClusterState, inputs); } catch (Throwable e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); if (logger.isTraceEnabled()) { @@ -409,25 +409,18 @@ public class InternalClusterService extends AbstractLifecycleComponent executionResults = - toExecute - .stream() - .collect(Collectors.toMap( - updateTask -> updateTask.task, - updateTask -> ClusterStateTaskExecutor.ClusterStateTaskExecutionResult.failure(e) - )); - result = new ClusterStateTaskExecutor.Result<>(previousClusterState, executionResults); + batchResult = ClusterStateTaskExecutor.BatchResult.builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState); } - assert result.executionResults != null; + assert batchResult.executionResults != null; - ClusterState newClusterState = result.resultingState; + ClusterState newClusterState = batchResult.resultingState; final ArrayList> proccessedListeners = new ArrayList<>(); // fail all tasks that have failed and extract those that are waiting for results for (UpdateTask updateTask : toExecute) { - assert result.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString(); - final ClusterStateTaskExecutor.ClusterStateTaskExecutionResult executionResult = - result.executionResults.get(updateTask.task); + assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString(); + final ClusterStateTaskExecutor.TaskResult executionResult = + batchResult.executionResults.get(updateTask.task); executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 820e468a093..947ac475d61 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -755,10 +755,10 @@ public class ClusterServiceIT extends ESIntegTestCase { private AtomicInteger counter = new AtomicInteger(); @Override - public Result execute(ClusterState currentState, List tasks) throws Exception { + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { tasks.forEach(task -> task.execute()); counter.addAndGet(tasks.size()); - return new Result<>(currentState, tasks); + return BatchResult.builder().successes(tasks).build(currentState); } @Override diff --git 
a/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java b/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java index e5d45cd0e58..5dc8cce99c6 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java +++ b/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -191,21 +191,21 @@ public class TestClusterService implements ClusterService { logger.debug("failed [{}], no longer master", source); return; } - ClusterStateTaskExecutor.Result result; + ClusterStateTaskExecutor.BatchResult batchResult; ClusterState previousClusterState = state; try { - result = executor.execute(previousClusterState, Arrays.asList(task)); + batchResult = executor.execute(previousClusterState, Arrays.asList(task)); } catch (Exception e) { - result = new ClusterStateTaskExecutor.Result<>(previousClusterState, Collections.singletonMap(task, ClusterStateTaskExecutor.ClusterStateTaskExecutionResult.failure(e))); + batchResult = ClusterStateTaskExecutor.BatchResult.builder().failure(task, e).build(previousClusterState); } - result.executionResults.get(task).handle( + batchResult.executionResults.get(task).handle( () -> {}, ex -> listener.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", ex)) ); - setStateAndNotifyListeners(result.resultingState); - listener.clusterStateProcessed(source, previousClusterState, result.resultingState); + setStateAndNotifyListeners(batchResult.resultingState); + listener.clusterStateProcessed(source, previousClusterState, batchResult.resultingState); logger.debug("finished [{}]", source); } From ffb3e0a8452320e9a3b101f0e1f69db6fdcea127 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 25 Nov 2015 17:44:28 -0500 Subject: [PATCH 12/40] ClusterStateTaskListener#onNoLongerMaster now throws NotMasterException --- .../org/elasticsearch/cluster/ClusterStateTaskListener.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java index 16945d91971..612fde60d43 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java @@ -33,7 +33,7 @@ public interface ClusterStateTaskListener { * called when the task was rejected because the local node is no longer master */ default void onNoLongerMaster(String source) { - onFailure(source, new EsRejectedExecutionException("no longer master. source: [" + source + "]")); + onFailure(source, new NotMasterException("no longer master. 
source: [" + source + "]")); } /** From fba74c96153e603223ad4221b87d606366f1002b Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 25 Nov 2015 18:01:39 -0500 Subject: [PATCH 13/40] Add docs for cluster state update task batching --- .../elasticsearch/cluster/ClusterService.java | 25 +++++++++++-- .../cluster/ClusterStateTaskConfig.java | 36 ++++++++++++++++--- .../cluster/ClusterStateTaskListener.java | 5 --- 3 files changed, 54 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/ClusterService.java index d3985bd2e78..b682b0cc61d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterService.java @@ -100,8 +100,22 @@ public interface ClusterService extends LifecycleComponent { void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener); /** - * Submits a task that will update the cluster state, using the given config. result will communicated - * to the given listener + * Submits a cluster state update task; submitted updates will be + * batched across the same instance of executor. The exact batching + * semantics depend on the underlying implementation but a rough + * guideline is that if the update task is submitted while there + * are pending update tasks for the same executor, these update + * tasks will all be executed on the executor in a single batch + * + * @param source the source of the cluster state update task + * @param task the state needed for the cluster state update task + * @param config the cluster state update task configuration + * @param executor the cluster state update task executor; tasks + * that share the same executor will be executed + * batches on this executor + * @param listener callback after the cluster state update task + * completes + * @param the type of the cluster state update task state */ void submitStateUpdateTask(final String source, final T task, final ClusterStateTaskConfig config, @@ -109,7 +123,12 @@ public interface ClusterService extends LifecycleComponent { final ClusterStateTaskListener listener); /** - * Submits a task that will update the cluster state; + * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener)}, + * submitted updates will not be batched. + * + * @param source the source of the cluster state update task + * @param updateTask the full context for the cluster state update + * task */ void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java index 662095798af..2ef2438991e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java @@ -22,26 +22,54 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.unit.TimeValue; +/** + * Cluster state update task configuration for timeout and priority + */ public interface ClusterStateTaskConfig { - /** - * If the cluster state update task wasn't processed by the provided timeout, call - * {@link ClusterStateTaskListener#onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default). 
+ * The timeout for this cluster state update task configuration. If + * the cluster state update task isn't processed within this + * timeout, the associated {@link ClusterStateTaskListener#onFailure(String, Throwable)} + * is invoked. + * + * @return the timeout, or null if one is not set */ @Nullable TimeValue timeout(); + /** + * The {@link Priority} for this cluster state update task configuration. + * + * @return the priority + */ Priority priority(); + /** + * Build a cluster state update task configuration with the + * specified {@link Priority} and no timeout. + * + * @param priority the priority for the associated cluster state + * update task + * @return the resulting cluster state update task configuration + */ static ClusterStateTaskConfig build(Priority priority) { return new Basic(priority, null); } + /** + * Build a cluster state update task configuration with the + * specified {@link Priority} and timeout. + * + * @param priority the priority for the associated cluster state + * update task + * @param timeout the timeout for the associated cluster state + * update task + * @return the resulting cluster state update task configuration + */ static ClusterStateTaskConfig build(Priority priority, TimeValue timeout) { return new Basic(priority, timeout); } - class Basic implements ClusterStateTaskConfig { final TimeValue timeout; final Priority priority; diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java index 612fde60d43..3bf7887cd1c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.cluster; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; - import java.util.List; public interface ClusterStateTaskListener { @@ -42,7 +40,4 @@ public interface ClusterStateTaskListener { */ default void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { } - - ; - } From b6826bfc78bbae42e39e41189e29f6b4363c710b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 8 Oct 2015 15:16:47 -0400 Subject: [PATCH 14/40] [doc] Information on JVM fork count I spent 20 minutes reading gradle docs to figure out how to do this. No one else should have to do that. Also, some of the documentation was out of date. --- TESTING.asciidoc | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index b12111c94a4..da238c3437b 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -149,17 +149,23 @@ gradle test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix) === Load balancing and caches. -By default, the tests run sequentially on a single forked JVM. - -To run with more forked JVMs than the default use: ---------------------------- gradle test -Dtests.jvms=8 ---------------------------- -Don't count hypercores for CPU-intense tests and leave some slack -for JVM-internal threads (like the garbage collector). Make sure there is -enough RAM to handle child JVMs. +By default the tests run on up to 4 JVMs based on the number of cores. If you +want to explicitly specify the number of JVMs you can do so on the command +line: ---------------------------- gradle test -Dtests.jvms=8 ---------------------------- +Or in `~/.gradle/gradle.properties`: + +---------------------------- +systemProp.tests.jvms=8 +---------------------------- + +It's difficult to pick the "right" number here.
Hypercores don't count for CPU +intensive tests and you should leave some slack for JVM-internal threads like +the garbage collector. And you have to have enough RAM to handle each JVM. === Test compatibility. From d8a1a4bd4306af97be25773baa0181e718d65e4e Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Mon, 30 Nov 2015 14:58:00 +0100 Subject: [PATCH 15/40] fix toXContent() for mapper attachments field We must use simpleName() instead of name() because otherwise when the mapping is generated as a string the field name will be the full path with dots and that is illegal from es 2.0 on. closes https://github.com/elastic/elasticsearch-mapper-attachments/issues/169 --- .../mapper/attachments/AttachmentMapper.java | 2 +- .../SimpleAttachmentMapperTests.java | 32 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index a52f0768082..2e4c08992c5 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -624,7 +624,7 @@ public class AttachmentMapper extends FieldMapper { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(name()); + builder.startObject(simpleName()); builder.field("type", CONTENT_TYPE); if (indexCreatedBefore2x) { builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java index 934bf1b7157..0023fc44e24 100644 --- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java +++ b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/SimpleAttachmentMapperTests.java @@ -22,10 +22,14 @@ package org.elasticsearch.mapper.attachments; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext; +import org.junit.Test; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; @@ -107,4 +111,32 @@ public class SimpleAttachmentMapperTests extends AttachmentUnitTestCase { assertThat(doc.get(docMapper.mappers().getMapper("file.content").fieldType().names().indexName()), containsString("This document tests the ability of Apache Tika to extract content")); } + /** + * See issue https://github.com/elastic/elasticsearch-mapper-attachments/issues/169 + * Mapping should not contain field names with a dot.
+ */ + public void testMapperErrorWithDotTwoLevels169() throws Exception { + XContentBuilder mappingBuilder = jsonBuilder(); + mappingBuilder.startObject() + .startObject("mail") + .startObject("properties") + .startObject("attachments") + .startObject("properties") + .startObject("innerfield") + .field("type", "attachment") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + + byte[] mapping = mappingBuilder.bytes().toBytes(); + MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY); + DocumentMapper docMapper = mapperService.parse("mail", new CompressedXContent(mapping), true); + // this should not throw an exception + mapperService.parse("mail", new CompressedXContent(docMapper.mapping().toString()), true); + // the mapping may not contain a field name with a dot + assertFalse(docMapper.mapping().toString().contains(".")); + } + } From bed9bf19c68e2302b7e4a3614964779cd893836d Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 30 Nov 2015 16:01:55 +0100 Subject: [PATCH 16/40] S3 repository: fix spelling error Reported at https://github.com/elastic/elasticsearch-cloud-aws/pull/221 --- docs/plugins/repository-s3.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 57c1e38c928..4c9e93bc714 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -198,7 +198,7 @@ The following settings are supported: request. Beyond this threshold, the S3 repository will use the http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS Multipart Upload API] to split the chunk into several parts, each of `buffer_size` length, and - to upload each part in its own request. Note that positioning a buffer + to upload each part in its own request. Note that setting a buffer size lower than `5mb` is not allowed since it will prevent the use of the Multipart API and may result in upload errors. Defaults to `5mb`. From 02798951ef3a3e7a2ae33e7eb7637e912d566834 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Honza=20Kr=C3=A1l?= Date: Mon, 2 Nov 2015 15:51:22 +0100 Subject: [PATCH 17/40] [docs] Updating the Python client docs --- docs/community-clients/index.asciidoc | 3 -- docs/python/index.asciidoc | 45 ++++++++++++++++++++++--- 2 files changed, 41 insertions(+), 7 deletions(-) diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index 1d06aeddf98..51789221be6 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -144,9 +144,6 @@ Also see the {client}/php-api/current/index.html[official Elasticsearch PHP clie Also see the {client}/python-api/current/index.html[official Elasticsearch Python client]. -* http://github.com/elasticsearch/elasticsearch-dsl-py[elasticsearch-dsl-py] - chainable query and filter construction built on top of official client. - * http://github.com/rhec/pyelasticsearch[pyelasticsearch]: Python client. diff --git a/docs/python/index.asciidoc b/docs/python/index.asciidoc index 66b79d7fa2b..64756adc444 100644 --- a/docs/python/index.asciidoc +++ b/docs/python/index.asciidoc @@ -7,7 +7,29 @@ ground for all Elasticsearch-related code in Python; because of this it tries to be opinion-free and very extendable.
The full documentation is available at http://elasticsearch-py.rtfd.org/ -It can be installed with: +.Elasticsearch DSL +************************************************************************************ +For a higher-level client library with a more limited scope, have a look at +http://elasticsearch-dsl.rtfd.org/[elasticsearch-dsl] - a more pythonic library +sitting on top of `elasticsearch-py`. + +It provides a more convenient and idiomatic way to write and manipulate +http://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html[queries]. It +stays close to the Elasticsearch JSON DSL, mirroring its terminology and +structure while exposing the whole range of the DSL from Python either directly +using defined classes or queryset-like expressions. + +It also provides an optional +http://elasticsearch-dsl.readthedocs.org/en/latest/persistence.html#doctype[persistence +layer] for working with documents as Python objects in an ORM-like fashion: +defining mappings, retrieving and saving documents, wrapping the document data +in user-defined classes. +************************************************************************************ + + +=== Installation + +It can be installed with pip: [source,sh] ------------------------------------ pip install elasticsearch === Versioning -There are two branches for development - `master` and `0.4`. Master branch is -used to track all the changes for Elasticsearch 1.0 and beyond whereas 0.4 -tracks Elasticsearch 0.90. +There are two branches for development - `master` and `1.x`. Master branch is +used to track all the changes for Elasticsearch 2.0 and beyond whereas 1.x +tracks Elasticsearch 1.*. Releases with major version 1 (1.X.Y) are to be used with Elasticsearch 1.* and later, 0.4 releases are meant to work with Elasticsearch 0.90.*. +The recommended way to set your requirements in your `setup.py` or +`requirements.txt` is: + ------------------------------------ + # Elasticsearch 2.x + elasticsearch>=2.0.0,<3.0.0 + + # Elasticsearch 1.x + elasticsearch>=1.0.0,<2.0.0 ------------------------------------ + === Example use Simple use-case: @@ -71,6 +104,10 @@ The client's features include: * pluggable architecture +The client also contains a convenient set of +http://elasticsearch-py.readthedocs.org/en/master/helpers.html[helpers] for +some of the more engaging tasks like bulk indexing and reindexing.
+ === License From 9dbda2af62723d4d53ed300f7cd98e5d42bf20eb Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Mon, 30 Nov 2015 11:12:26 +0100 Subject: [PATCH 18/40] Update scripting.asciidoc Fix script syntax for script_score Closes #15096 --- docs/reference/modules/scripting.asciidoc | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index 50be5fdce48..982097cdf1c 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -121,10 +121,12 @@ curl -XPOST localhost:9200/_search -d '{ "functions": [ { "script_score": { - "lang": "groovy", - "file": "calculate-score", - "params": { - "my_modifier": 8 + "script": { + "lang": "groovy", + "file": "calculate-score", + "params": { + "my_modifier": 8 + } } } } @@ -180,10 +182,12 @@ curl -XPOST localhost:9200/_search -d '{ "functions": [ { "script_score": { - "id": "indexedCalculateScore", - "lang" : "groovy", - "params": { - "my_modifier": 8 + "script": { + "id": "indexedCalculateScore", + "lang" : "groovy", + "params": { + "my_modifier": 8 + } } } } From 4bb1eed7668cadce62fff1697ecb8045eff8f53b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 30 Nov 2015 13:40:39 -0800 Subject: [PATCH 19/40] Build: Increase the number of failed tests shown in test summary We had increased this in maven, but it was lost in the transition to gradle. This change adds it as a configurable setting in the logger for randomized testing and bumps it to 25. --- .../gradle/junit4/TestLoggingConfiguration.groovy | 9 +++++++++ .../gradle/junit4/TestReportLogger.groovy | 11 ++++------- .../org/elasticsearch/gradle/BuildPlugin.groovy | 1 + 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy index d18ac3fbd5a..97251252f54 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy @@ -1,5 +1,6 @@ package com.carrotsearch.gradle.junit4 +import org.gradle.api.tasks.Input import org.gradle.util.ConfigureUtil class TestLoggingConfiguration { @@ -20,6 +21,10 @@ class TestLoggingConfiguration { SlowTestsConfiguration slowTests = new SlowTestsConfiguration() StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration() + /** Summarize the first N failures at the end of the test. */ + @Input + int showNumFailuresAtEnd = 3 // match TextReport default + void slowTests(Closure closure) { ConfigureUtil.configure(closure, slowTests) } @@ -31,4 +36,8 @@ class TestLoggingConfiguration { void outputMode(String mode) { outputMode = mode.toUpperCase() as OutputMode } + + void showNumFailuresAtEnd(int n) { + showNumFailuresAtEnd = n + } } diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy index 22a449cb5f1..b56a22ee2d9 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy @@ -48,9 +48,6 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv /** Format line for JVM ID string.
*/ String jvmIdFormat - /** Summarize the first N failures at the end. */ - int showNumFailuresAtEnd = 3 - /** Output stream that logs messages to the given logger */ LoggingOutputStream outStream LoggingOutputStream errStream @@ -110,13 +107,13 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv @Subscribe void onQuit(AggregatedQuitEvent e) throws IOException { - if (showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) { + if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) { List sublist = this.failedTests StringBuilder b = new StringBuilder() b.append('Tests with failures') - if (sublist.size() > showNumFailuresAtEnd) { - sublist = sublist.subList(0, showNumFailuresAtEnd) - b.append(" (first " + showNumFailuresAtEnd + " out of " + failedTests.size() + ")") + if (sublist.size() > config.showNumFailuresAtEnd) { - sublist = sublist.subList(0, config.showNumFailuresAtEnd) + b.append(" (first " + config.showNumFailuresAtEnd + " out of " + failedTests.size() + ")") } b.append(':\n') for (Description description : sublist) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 4c24710fa23..42098a59510 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -365,6 +365,7 @@ class BuildPlugin implements Plugin { enableSystemAssertions false testLogging { + showNumFailuresAtEnd 25 slowTests { heartbeat 10 summarySize 5 From 44f21b24d7370849e749c803689b2dce33a22839 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 30 Nov 2015 17:22:58 -0500 Subject: [PATCH 20/40] Fix unit tests to bind to port 0. I will follow up with ITs and other modules.
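A minimal JDK-only sketch of the underlying pattern, binding to port 0 and reading back the OS-assigned ephemeral port (the tests in the diffs below do the same through Netty and the transport settings; this snippet is an illustration, not part of the patch):

----------------------------
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortSketch {
    public static void main(String[] args) throws Exception {
        // Port 0 asks the OS to pick any free ephemeral port, so concurrent
        // test JVMs never race each other (or other processes) for a fixed port.
        try (ServerSocket socket = new ServerSocket()) {
            socket.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
            // The test then asks which port it was actually given.
            System.out.println("bound to port " + socket.getLocalPort());
        }
    }
}
----------------------------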
By fixing this, these tests become more reliable (will never sporadically fail due to other stuff on your machine: ports are assigned by the OS), and it allows us to move forward with gradle parallel builds; in my tests this is a nice speedup, but we can't do it until tests are cleaned up --- .../netty/NettyHttpServerPipeliningTests.java | 10 +- .../HttpPipeliningHandlerTests.java | 13 +- .../NettySizeHeaderFrameDecoderTests.java | 6 +- .../netty/NettyScheduledPingTests.java | 4 +- .../netty/NettyTransportMultiPortTests.java | 155 ++++-------------- .../netty/SimpleNettyTransportTests.java | 4 +- 6 files changed, 52 insertions(+), 140 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java index b675d29c9da..95cb5b46b5f 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTests.java @@ -91,7 +91,10 @@ public class NettyHttpServerPipeliningTests extends ESTestCase { } public void testThatHttpPipeliningWorksWhenEnabled() throws Exception { - Settings settings = settingsBuilder().put("http.pipelining", true).build(); + Settings settings = settingsBuilder() + .put("http.pipelining", true) + .put("http.port", "0") + .build(); httpServerTransport = new CustomNettyHttpServerTransport(settings); httpServerTransport.start(); InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses()); @@ -105,7 +108,10 @@ public class NettyHttpServerPipeliningTests extends ESTestCase { } public void testThatHttpPipeliningCanBeDisabled() throws Exception { - Settings settings = settingsBuilder().put("http.pipelining", false).build(); + Settings settings = settingsBuilder() + .put("http.pipelining", false) + .put("http.port", "0") + .build(); httpServerTransport = new CustomNettyHttpServerTransport(settings); httpServerTransport.start(); InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses()); diff --git a/core/src/test/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandlerTests.java b/core/src/test/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandlerTests.java index 166d394a9cb..28cdd241e15 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandlerTests.java @@ -76,8 +76,6 @@ public class HttpPipeliningHandlerTests extends ESTestCase { private static final long RESPONSE_TIMEOUT = 10000L; private static final long CONNECTION_TIMEOUT = 10000L; private static final String CONTENT_TYPE_TEXT = "text/plain; charset=UTF-8"; - // TODO make me random - private static final InetSocketAddress HOST_ADDR = new InetSocketAddress(InetAddress.getLoopbackAddress(), 9080); private static final String PATH1 = "/1"; private static final String PATH2 = "/2"; private static final String SOME_RESPONSE_TEXT = "some response for "; @@ -90,6 +88,8 @@ public class HttpPipeliningHandlerTests extends ESTestCase { private HashedWheelTimer timer; + private InetSocketAddress boundAddress; + @Before public void startBootstraps() { clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory()); @@ -118,7 +118,8 @@ public class HttpPipeliningHandlerTests extends
ESTestCase { } }); - serverBootstrap.bind(HOST_ADDR); + Channel channel = serverBootstrap.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0)); + boundAddress = (InetSocketAddress) channel.getLocalAddress(); timer = new HashedWheelTimer(); } @@ -137,7 +138,7 @@ public class HttpPipeliningHandlerTests extends ESTestCase { responsesIn = new CountDownLatch(1); responses.clear(); - final ChannelFuture connectionFuture = clientBootstrap.connect(HOST_ADDR); + final ChannelFuture connectionFuture = clientBootstrap.connect(boundAddress); assertTrue(connectionFuture.await(CONNECTION_TIMEOUT)); final Channel clientChannel = connectionFuture.getChannel(); @@ -145,11 +146,11 @@ public class HttpPipeliningHandlerTests extends ESTestCase { // NetworkAddress.formatAddress makes a proper HOST header. final HttpRequest request1 = new DefaultHttpRequest( HTTP_1_1, HttpMethod.GET, PATH1); - request1.headers().add(HOST, NetworkAddress.formatAddress(HOST_ADDR)); + request1.headers().add(HOST, NetworkAddress.formatAddress(boundAddress)); final HttpRequest request2 = new DefaultHttpRequest( HTTP_1_1, HttpMethod.GET, PATH2); - request2.headers().add(HOST, NetworkAddress.formatAddress(HOST_ADDR)); + request2.headers().add(HOST, NetworkAddress.formatAddress(boundAddress)); clientChannel.write(request1); clientChannel.write(request2); diff --git a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java index c8a566a7763..3f140b388fd 100644 --- a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java +++ b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -50,7 +50,11 @@ import static org.hamcrest.Matchers.is; */ public class NettySizeHeaderFrameDecoderTests extends ESTestCase { - private final Settings settings = settingsBuilder().put("name", "foo").put("transport.host", "127.0.0.1").build(); + private final Settings settings = settingsBuilder() + .put("name", "foo") + .put("transport.host", "127.0.0.1") + .put("transport.tcp.port", "0") + .build(); private ThreadPool threadPool; private NettyTransport nettyTransport; diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java index afb4d1d75fc..7a939a5a1bc 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java @@ -49,9 +49,7 @@ public class NettyScheduledPingTests extends ESTestCase { public void testScheduledPing() throws Exception { ThreadPool threadPool = new ThreadPool(getClass().getName()); - int startPort = 11000 + randomIntBetween(0, 255); - int endPort = startPort + 10; - Settings settings = Settings.builder().put(NettyTransport.PING_SCHEDULE, "5ms").put("transport.tcp.port", startPort + "-" + endPort).build(); + Settings settings = Settings.builder().put(NettyTransport.PING_SCHEDULE, "5ms").put("transport.tcp.port", 0).build(); final NettyTransport nettyA = new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, new NamedWriteableRegistry()); MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool); diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java 
b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java index 02819525faf..1c8869772e2 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.transport.netty; -import com.carrotsearch.hppc.IntHashSet; - import org.elasticsearch.Version; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.common.component.Lifecycle; @@ -27,176 +25,115 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.rule.RepeatOnExceptionRule; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.TransportService; import org.junit.Before; -import org.junit.Rule; - -import java.io.IOException; -import java.io.OutputStream; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.net.Socket; -import java.nio.charset.StandardCharsets; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.is; public class NettyTransportMultiPortTests extends ESTestCase { - private static final int MAX_RETRIES = 10; private String host; - @Rule - public RepeatOnExceptionRule repeatOnBindExceptionRule = new RepeatOnExceptionRule(logger, MAX_RETRIES, BindTransportException.class); - @Before public void setup() { - if (randomBoolean()) { - host = "localhost"; + if (NetworkUtils.SUPPORTS_V6 && randomBoolean()) { + host = "::1"; } else { - if (NetworkUtils.SUPPORTS_V6 && randomBoolean()) { - host = "::1"; - } else { - host = "127.0.0.1"; - } + host = "127.0.0.1"; } } public void testThatNettyCanBindToMultiplePorts() throws Exception { - int[] ports = getRandomPorts(3); - Settings settings = settingsBuilder() .put("network.host", host) - .put("transport.tcp.port", ports[0]) - .put("transport.profiles.default.port", ports[1]) - .put("transport.profiles.client1.port", ports[2]) + .put("transport.tcp.port", 22) // will not actually bind to this + .put("transport.profiles.default.port", 0) + .put("transport.profiles.client1.port", 0) .build(); ThreadPool threadPool = new ThreadPool("tst"); - try (NettyTransport ignored = startNettyTransport(settings, threadPool)) { - assertConnectionRefused(ports[0]); - assertPortIsBound(ports[1]); - assertPortIsBound(ports[2]); + try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + assertEquals(1, transport.profileBoundAddresses().size()); + assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { terminate(threadPool); } } public void testThatDefaultProfileInheritsFromStandardSettings() throws Exception { - int[] ports = getRandomPorts(2); - Settings settings = settingsBuilder() .put("network.host", host) - .put("transport.tcp.port", ports[0]) - .put("transport.profiles.client1.port", ports[1]) + .put("transport.tcp.port", 0) + 
.put("transport.profiles.client1.port", 0) .build(); ThreadPool threadPool = new ThreadPool("tst"); - try (NettyTransport ignored = startNettyTransport(settings, threadPool)) { - assertPortIsBound(ports[0]); - assertPortIsBound(ports[1]); + try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + assertEquals(1, transport.profileBoundAddresses().size()); + assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { terminate(threadPool); } } public void testThatProfileWithoutPortSettingsFails() throws Exception { - int[] ports = getRandomPorts(1); Settings settings = settingsBuilder() .put("network.host", host) - .put("transport.tcp.port", ports[0]) + .put("transport.tcp.port", 0) .put("transport.profiles.client1.whatever", "foo") .build(); ThreadPool threadPool = new ThreadPool("tst"); - try (NettyTransport ignored = startNettyTransport(settings, threadPool)) { - assertPortIsBound(ports[0]); + try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + assertEquals(0, transport.profileBoundAddresses().size()); + assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { terminate(threadPool); } } public void testThatDefaultProfilePortOverridesGeneralConfiguration() throws Exception { - int[] ports = getRandomPorts(3); - Settings settings = settingsBuilder() .put("network.host", host) - .put("transport.tcp.port", ports[0]) - .put("transport.netty.port", ports[1]) - .put("transport.profiles.default.port", ports[2]) + .put("transport.tcp.port", 22) // will not actually bind to this + .put("transport.netty.port", 23) // will not actually bind to this + .put("transport.profiles.default.port", 0) .build(); ThreadPool threadPool = new ThreadPool("tst"); - try (NettyTransport ignored = startNettyTransport(settings, threadPool)) { - assertConnectionRefused(ports[0]); - assertConnectionRefused(ports[1]); - assertPortIsBound(ports[2]); + try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + assertEquals(0, transport.profileBoundAddresses().size()); + assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { terminate(threadPool); } } public void testThatProfileWithoutValidNameIsIgnored() throws Exception { - int[] ports = getRandomPorts(3); - Settings settings = settingsBuilder() .put("network.host", host) - .put("transport.tcp.port", ports[0]) + .put("transport.tcp.port", 0) // mimics someone trying to define a profile for .local which is the profile for a node request to itself - .put("transport.profiles." + TransportService.DIRECT_RESPONSE_PROFILE + ".port", ports[1]) - .put("transport.profiles..port", ports[2]) + .put("transport.profiles." 
+ TransportService.DIRECT_RESPONSE_PROFILE + ".port", 22) // will not actually bind to this + .put("transport.profiles..port", 23) // will not actually bind to this .build(); ThreadPool threadPool = new ThreadPool("tst"); - try (NettyTransport ignored = startNettyTransport(settings, threadPool)) { - assertPortIsBound(ports[0]); - assertConnectionRefused(ports[1]); - assertConnectionRefused(ports[2]); + try (NettyTransport transport = startNettyTransport(settings, threadPool)) { + assertEquals(0, transport.profileBoundAddresses().size()); + assertEquals(1, transport.boundAddress().boundAddresses().length); } finally { terminate(threadPool); } } - private int[] getRandomPorts(int numberOfPorts) { - IntHashSet ports = new IntHashSet(); - - int nextPort = randomIntBetween(49152, 65535); - for (int i = 0; i < numberOfPorts; i++) { - boolean foundPortInRange = false; - while (!foundPortInRange) { - if (!ports.contains(nextPort)) { - logger.debug("looking to see if port [{}]is available", nextPort); - try (ServerSocket serverSocket = new ServerSocket()) { - // Set SO_REUSEADDR as we may bind here and not be able - // to reuse the address immediately without it. - serverSocket.setReuseAddress(NetworkUtils.defaultReuseAddress()); - serverSocket.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), nextPort)); - - // bind was a success - logger.debug("port [{}] available.", nextPort); - foundPortInRange = true; - ports.add(nextPort); - } catch (IOException e) { - // Do nothing - logger.debug("port [{}] not available.", e, nextPort); - } - } - nextPort = randomIntBetween(49152, 65535); - } - } - return ports.toArray(); - } - private NettyTransport startNettyTransport(Settings settings, ThreadPool threadPool) { BigArrays bigArrays = new MockBigArrays(new PageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); @@ -206,36 +143,4 @@ public class NettyTransportMultiPortTests extends ESTestCase { assertThat(nettyTransport.lifecycleState(), is(Lifecycle.State.STARTED)); return nettyTransport; } - - private void assertConnectionRefused(int port) throws Exception { - try { - trySocketConnection(new InetSocketTransportAddress(InetAddress.getByName(host), port).address()); - fail("Expected to get exception when connecting to port " + port); - } catch (IOException e) { - // expected - logger.info("Got expected connection message {}", e.getMessage()); - } - } - - private void assertPortIsBound(int port) throws Exception { - assertPortIsBound(host, port); - } - - private void assertPortIsBound(String host, int port) throws Exception { - logger.info("Trying to connect to [{}]:[{}]", host, port); - trySocketConnection(new InetSocketTransportAddress(InetAddress.getByName(host), port).address()); - } - - private void trySocketConnection(InetSocketAddress address) throws Exception { - try (Socket socket = new Socket()) { - logger.info("Connecting to {}", address); - socket.connect(address, 500); - - assertThat(socket.isConnected(), is(true)); - try (OutputStream os = socket.getOutputStream()) { - os.write("foo".getBytes(StandardCharsets.UTF_8)); - os.flush(); - } - } - } } diff --git a/core/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java b/core/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java index c18597ff8d4..89702118b49 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java @@ -38,9 +38,7 @@ import 
static org.hamcrest.Matchers.containsString; public class SimpleNettyTransportTests extends AbstractSimpleTransportTestCase { @Override protected MockTransportService build(Settings settings, Version version, NamedWriteableRegistry namedWriteableRegistry) { - int startPort = 11000 + randomIntBetween(0, 255); - int endPort = startPort + 10; - settings = Settings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build(); + settings = Settings.builder().put(settings).put("transport.tcp.port", "0").build(); MockTransportService transportService = new MockTransportService(settings, new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, version, namedWriteableRegistry), threadPool); transportService.start(); return transportService; From e256c6fdd145116c4e06b363be33a4a3b7be5845 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 1 Dec 2015 09:57:22 +0100 Subject: [PATCH 21/40] AwaitsFix testDynamicUpdates pending on https://github.com/elastic/elasticsearch/issues/15129 --- .../mapping/UpdateMappingIntegrationIT.java | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 305688b3a6e..75d4a70320e 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -22,8 +22,8 @@ package org.elasticsearch.indices.mapping; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Priority; @@ -43,22 +43,15 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.not; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; @ClusterScope(randomDynamicTemplates = false) public class UpdateMappingIntegrationIT extends ESIntegTestCase { + + @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/15129") public void testDynamicUpdates() throws Exception { client().admin().indices().prepareCreate("test") .setSettings( From e2f87b0c5295935d5f0b6c4c9350f30a01650b80 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Mon, 30 Nov 2015 16:14:37 +0100 Subject: [PATCH 22/40] multi field names may not contain dots related to #14957 --- .../index/mapper/core/TypeParsers.java | 3 ++ .../mapper/multifield/MultiFieldTests.java | 46 +++++++++++++------ 2 files changed, 34 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index 3f142cc2f9c..0bb0b213f64 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -319,6 +319,9 @@ public class TypeParsers { for (Map.Entry multiFieldEntry : multiFieldsPropNodes.entrySet()) { String multiFieldName = multiFieldEntry.getKey(); + if (multiFieldName.contains(".")) { + throw new MapperParsingException("Field name [" + multiFieldName + "] which is a multi field of [" + name + "] cannot contain '.'"); + } if (!(multiFieldEntry.getValue() instanceof Map)) { throw new MapperParsingException("illegal field [" + multiFieldName + "], only fields can be specified inside fields"); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index 8a6b183b11c..a5a073d147f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -31,34 +31,24 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.ParseContext.Document; -import org.elasticsearch.index.mapper.core.CompletionFieldMapper; -import org.elasticsearch.index.mapper.core.DateFieldMapper; -import org.elasticsearch.index.mapper.core.LongFieldMapper; -import org.elasticsearch.index.mapper.core.StringFieldMapper; -import org.elasticsearch.index.mapper.core.TokenCountFieldMapper; +import org.elasticsearch.index.mapper.core.*; import org.elasticsearch.index.mapper.geo.BaseGeoPointFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.VersionUtils; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.Map; import java.util.TreeMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.mapper.MapperBuilders.doc; -import static org.elasticsearch.index.mapper.MapperBuilders.rootObject; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; +import static org.elasticsearch.index.mapper.MapperBuilders.*; import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; import static 
org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.*; /** * */ @@ -526,4 +516,30 @@ public class MultiFieldTests extends ESSingleNodeTestCase { assertTrue(e.getMessage().contains("cannot be used in multi field")); } } + + public void testMultiFieldWithDot() throws IOException { + XContentBuilder mapping = jsonBuilder(); + mapping.startObject() + .startObject("my_type") + .startObject("properties") + .startObject("city") + .field("type", "string") + .startObject("fields") + .startObject("raw.foo") + .field("type", "string") + .field("index", "not_analyzed") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + + MapperService mapperService = createIndex("test").mapperService(); + try { + mapperService.documentMapperParser().parse(mapping.string()); + fail("this should throw an exception because one field contains a dot"); + } catch (MapperParsingException e) { + assertThat(e.getMessage(), equalTo("Field name [raw.foo] which is a multi field of [city] cannot contain '.'")); + } + } } From 059a675aa5aab0a2993adbfe6456da38fc480b90 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 1 Dec 2015 11:40:20 +0100 Subject: [PATCH 23/40] [TEST] mark test as awaitsfix: RareClusterStateIT.testDeleteCreateInOneBulk() --- .../java/org/elasticsearch/indices/state/RareClusterStateIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index ab3f825cecc..96611aeca8a 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -161,6 +161,7 @@ public class RareClusterStateIT extends ESIntegTestCase { } @TestLogging("cluster.service:TRACE") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932") public void testDeleteCreateInOneBulk() throws Exception { internalCluster().startNodesAsync(2, Settings.builder() .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "zen") From 854099f1d51ca3c9a8b66c6c40c4c45d7dd8ff84 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sat, 28 Nov 2015 15:15:52 +0100 Subject: [PATCH 24/40] Reject refresh usage in bulk items and fix NPE when no source The REST bulk API rejects use of `refresh` at the item level, but the Java API lets the user set it. We need the same behavior and must not let the user think they can define `refresh` per bulk item. Note that the user can still define `refresh` on the bulk request itself. Also, a user can create an IndexRequest through the Java API without any source, which causes an NPE when evaluating the bulk item size. Closes #7361. Closes #15120.
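A minimal sketch of how the new validation surfaces through the Java API, assembled from the tests added in this patch; the wrapper class is a hypothetical stand-in:

----------------------------
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;

public class BulkRefreshSketch {
    public static void main(String[] args) {
        // Refresh on an individual item is now rejected at validate() time.
        BulkRequest perItem = new BulkRequest();
        perItem.add(new IndexRequest("index", "type", "1").source("{}").refresh(true));
        ActionRequestValidationException e = perItem.validate();
        // Contains "Refresh is not supported on an item request, set the
        // refresh flag on the BulkRequest instead."
        System.out.println(e.validationErrors());

        // Refresh on the bulk request as a whole is still supported.
        BulkRequest whole = new BulkRequest();
        whole.add(new IndexRequest("index", "type", "1").source("{}"));
        whole.refresh(true);
        System.out.println(whole.validate()); // null: no validation errors
    }
}
----------------------------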
--- .../action/bulk/BulkRequest.java | 24 ++++++++--- .../action/bulk/BulkRequestTests.java | 40 +++++++++++++++++-- 2 files changed, 56 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 260fd5e732d..2eadbb5a6b2 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -46,9 +46,10 @@ import java.util.List; import static org.elasticsearch.action.ValidateActions.addValidationError; /** - * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes - * it in a single batch. + * A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s + * and allows them to be executed in a single batch. * + * Note that we only support refresh on the bulk request, not per item. * @see org.elasticsearch.client.Client#bulk(BulkRequest) */ public class BulkRequest extends ActionRequest implements CompositeIndicesRequest { @@ -89,6 +90,12 @@ public class BulkRequest extends ActionRequest implements Composite return add(request, null); } + /** + * Add a request to the current BulkRequest. + * @param request Request to add + * @param payload Optional payload + * @return the current bulk request + */ public BulkRequest add(ActionRequest request, @Nullable Object payload) { if (request instanceof IndexRequest) { add((IndexRequest) request, payload); @@ -127,7 +134,8 @@ public class BulkRequest extends ActionRequest implements Composite BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) { requests.add(request); addPayload(payload); - sizeInBytes += request.source().length() + REQUEST_OVERHEAD; + // lack of source is validated in validate() method + sizeInBytes += (request.source() != null ?
request.source().length() : 0) + REQUEST_OVERHEAD; return this; } @@ -478,8 +486,14 @@ public class BulkRequest extends ActionRequest implements Composite if (requests.isEmpty()) { validationException = addValidationError("no requests added", validationException); } - for (int i = 0; i < requests.size(); i++) { - ActionRequestValidationException ex = requests.get(i).validate(); + for (ActionRequest request : requests) { + // We first check if refresh has been set + if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) || + (request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) || + (request instanceof IndexRequest && ((IndexRequest)request).refresh())) { + validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException); + } + ActionRequestValidationException ex = request.validate(); if (ex != null) { if (validationException == null) { validationException = new ActionRequestValidationException(); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 0242edae317..78f96bab7b2 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.bulk; import org.apache.lucene.util.Constants; import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; @@ -36,9 +37,7 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.*; public class BulkRequestTests extends ESTestCase { public void testSimpleBulk1() throws Exception { @@ -171,4 +170,39 @@ public class BulkRequestTests extends ESTestCase { bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null); assertThat(bulkRequest.numberOfActions(), equalTo(9)); } + + // issue 7361 + public void testBulkRequestWithRefresh() throws Exception { + BulkRequest bulkRequest = new BulkRequest(); + // We force here a "id is missing" validation error + bulkRequest.add(new DeleteRequest("index", "type", null).refresh(true)); + // We force here a "type is missing" validation error + bulkRequest.add(new DeleteRequest("index", null, "id")); + bulkRequest.add(new DeleteRequest("index", "type", "id").refresh(true)); + bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}").refresh(true)); + bulkRequest.add(new IndexRequest("index", "type", "id").source("{}").refresh(true)); + ActionRequestValidationException validate = bulkRequest.validate(); + assertThat(validate, notNullValue()); + assertThat(validate.validationErrors(), not(empty())); + assertThat(validate.validationErrors(), contains( + "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", + "id is missing", + "type is missing", + "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", + "Refresh is not supported on an item request, set the 
refresh flag on the BulkRequest instead.", "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.")); + } + + // issue 15120 + public void testBulkNoSource() throws Exception { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new UpdateRequest("index", "type", "id")); + bulkRequest.add(new IndexRequest("index", "type", "id")); + ActionRequestValidationException validate = bulkRequest.validate(); + assertThat(validate, notNullValue()); + assertThat(validate.validationErrors(), not(empty())); + assertThat(validate.validationErrors(), contains( + "script or doc is missing", + "source is missing")); + } } From d17d62942285723e7255935b74a19f768b500e82 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 1 Dec 2015 12:12:56 +0100 Subject: [PATCH 25/40] [TEST] stabilize SimpleSearchIT.testQueryNumericFieldWithRegex Provide mappings explicitly instead of relying on dynamic mapping; also add a missing ensureGreen. Closes #15105 --- .../org/elasticsearch/search/simple/SimpleSearchIT.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 8a81c49f71c..27cc3d3cfb8 100644 --- a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -333,9 +333,9 @@ public class SimpleSearchIT extends ESIntegTestCase { } public void testQueryNumericFieldWithRegex() throws Exception { - createIndex("idx"); - indexRandom(true, client().prepareIndex("idx", "type").setSource("num", 34)); - + assertAcked(prepareCreate("idx").addMapping("type", "num", "type=integer")); + ensureGreen("idx"); + try { client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get(); fail("SearchPhaseExecutionException should have been thrown"); From cebd7bdd7f5eddb80b26afc0f19301520d6e6c68 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 1 Dec 2015 12:22:07 +0100 Subject: [PATCH 26/40] Mappings: Don't ignore merge failures. --- .../index/mapper/MapperService.java | 11 ++- .../mapper/update/UpdateMappingTests.java | 96 +++++++++++++++++++ 2 files changed, 102 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index f617dd5c6f0..90909737805 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -250,13 +250,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable { DocumentMapper oldMapper = mappers.get(mapper.type()); if (oldMapper != null) { - MergeResult result = oldMapper.merge(mapper.mapping(), false, updateAllTypes); + // simulate first + MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes); if (result.hasConflicts()) { - // TODO: What should we do???
-                if (logger.isDebugEnabled()) {
-                    logger.debug("merging mapping for type [{}] resulted in conflicts: [{}]", mapper.type(), Arrays.toString(result.buildConflicts()));
-                }
+                throw new MergeMappingException(result.buildConflicts());
             }
+            // then apply for real
+            result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
+            assert result.hasConflicts() == false; // we already simulated
             return oldMapper;
         } else {
             List newObjectMappers = new ArrayList<>();
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java
index 5149ab10575..7c15875bc11 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java
@@ -29,7 +29,9 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 
 import java.io.IOException;
@@ -107,6 +109,100 @@ public class UpdateMappingTests extends ESSingleNodeTestCase {
         assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate));
     }
 
+    public void testConflictSameType() throws Exception {
+        XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("foo").field("type", "long").endObject()
+                .endObject().endObject().endObject();
+        MapperService mapperService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping).mapperService();
+
+        XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("foo").field("type", "double").endObject()
+                .endObject().endObject().endObject();
+
+        try {
+            mapperService.merge("type", new CompressedXContent(update.string()), false, false);
+            fail();
+        } catch (MergeMappingException e) {
+            // expected
+        }
+
+        try {
+            mapperService.merge("type", new CompressedXContent(update.string()), false, false);
+            fail();
+        } catch (MergeMappingException e) {
+            // expected
+        }
+
+        assertTrue(mapperService.documentMapper("type").mapping().root().getMapper("foo") instanceof LongFieldMapper);
+    }
+
+    public void testConflictNewType() throws Exception {
+        XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+                .startObject("properties").startObject("foo").field("type", "long").endObject()
+                .endObject().endObject().endObject();
+        MapperService mapperService = createIndex("test", Settings.settingsBuilder().build(), "type1", mapping).mapperService();
+
+        XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2")
+                .startObject("properties").startObject("foo").field("type", "double").endObject()
+                .endObject().endObject().endObject();
+
+        try {
+            mapperService.merge("type2", new CompressedXContent(update.string()), false, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // expected
+            assertTrue(e.getMessage().contains("conflicts with existing mapping in other types"));
+        }
+
+        try {
+            mapperService.merge("type2", new CompressedXContent(update.string()), false, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // expected
+            assertTrue(e.getMessage().contains("conflicts with existing mapping in other types"));
+        }
+
+        assertTrue(mapperService.documentMapper("type1").mapping().root().getMapper("foo") instanceof LongFieldMapper);
+        assertNull(mapperService.documentMapper("type2"));
+    }
+
+    // same as the testConflictNewType except that the mapping update is on an existing type
+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/15049")
+    public void testConflictNewTypeUpdate() throws Exception {
+        XContentBuilder mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type1")
+                .startObject("properties").startObject("foo").field("type", "long").endObject()
+                .endObject().endObject().endObject();
+        XContentBuilder mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject();
+        MapperService mapperService = createIndex("test", Settings.settingsBuilder().build()).mapperService();
+
+        mapperService.merge("type1", new CompressedXContent(mapping1.string()), false, false);
+        mapperService.merge("type2", new CompressedXContent(mapping2.string()), false, false);
+
+        XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2")
+                .startObject("properties").startObject("foo").field("type", "double").endObject()
+                .endObject().endObject().endObject();
+
+        try {
+            mapperService.merge("type2", new CompressedXContent(update.string()), false, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // expected
+            assertTrue(e.getMessage().contains("conflicts with existing mapping in other types"));
+        }
+
+        try {
+            mapperService.merge("type2", new CompressedXContent(update.string()), false, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // expected
+            assertTrue(e.getMessage().contains("conflicts with existing mapping in other types"));
+        }
+
+        assertTrue(mapperService.documentMapper("type1").mapping().root().getMapper("foo") instanceof LongFieldMapper);
+        assertNotNull(mapperService.documentMapper("type2"));
+        assertNull(mapperService.documentMapper("type2").mapping().root().getMapper("foo"));
+    }
+
     public void testIndexFieldParsingBackcompat() throws IOException {
         IndexService indexService = createIndex("test", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build());
         XContentBuilder indexMapping = XContentFactory.jsonBuilder();

From eea72a6d8652dac931b42fb075740753bad219ea Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Mon, 30 Nov 2015 18:24:03 -0500
Subject: [PATCH 27/40] Preserve existing mappings on batch mapping updates

This commit addresses an issue introduced in #14899 to apply mapping updates
in batches. The issue is that an existing mapping for a type could be lost if
that type came in a batch that already contained a mapping update for another
type on the same index. The underlying issue was that the existing mapping
would not be merged in because the merging logic was only tripped once per
index, rather than for all types seeing updates for each index. Resolving
this issue is simply a matter of ensuring that all existing types seeing
updates are merged in.
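To make the restored invariant concrete, here is a minimal, hypothetical
sketch of the idea; `batch`, `existingMappings` and `mergedTypes` are
illustrative stand-ins, not the actual MetaDataMappingService code shown in
the diff below:

    // Illustrative sketch only: merge the existing mapping for every type
    // seeing an update in the batch, not just once per index. Assumes
    // existingMappings maps type names to their current MappingMetaData.
    Set<String> mergedTypes = new HashSet<>();
    for (PutMappingClusterStateUpdateRequest request : batch) {
        String type = request.type();
        if (mergedTypes.add(type) && existingMappings.containsKey(type)) {
            indexService.mapperService().merge(type, existingMappings.get(type).source(),
                    false, request.updateAllTypes());
        }
    }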
Closes #15129
---
 .../cluster/metadata/MetaDataMappingService.java | 14 +++++++++-----
 .../mapping/UpdateMappingIntegrationIT.java      |  1 -
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index fb6ed1f0753..d19a087faa6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -221,9 +221,8 @@ public class MetaDataMappingService extends AbstractComponent {
     class PutMappingExecutor implements ClusterStateTaskExecutor {
         @Override
         public BatchResult execute(ClusterState currentState, List tasks) throws Exception {
-            List indicesToClose = new ArrayList<>();
+            Set indicesToClose = new HashSet<>();
             BatchResult.Builder builder = BatchResult.builder();
-            Map executionResults = new HashMap<>();
             try {
                 // precreate incoming indices;
                 for (PutMappingClusterStateUpdateRequest request : tasks) {
@@ -231,10 +230,15 @@ public class MetaDataMappingService extends AbstractComponent {
                     for (String index : request.indices()) {
                         if (currentState.metaData().hasIndex(index)) {
                             // if we don't have the index, we will throw exceptions later;
-                        if (indicesService.hasIndex(index) == false) {
+                        if (indicesService.hasIndex(index) == false || indicesToClose.contains(index)) {
                             final IndexMetaData indexMetaData = currentState.metaData().index(index);
-                            IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
-                            indicesToClose.add(indexMetaData.getIndex());
+                            IndexService indexService;
+                            if (indicesService.hasIndex(index) == false) {
+                                indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
+                                indicesToClose.add(index);
+                            } else {
+                                indexService = indicesService.indexService(index);
+                            }
                             // make sure to add custom default mapping if exists
                             if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
                                 indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index 75d4a70320e..68902fd22f8 100644
--- a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -51,7 +51,6 @@ import static org.hamcrest.Matchers.*;
 
 @ClusterScope(randomDynamicTemplates = false)
 public class UpdateMappingIntegrationIT extends ESIntegTestCase {
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/15129")
    public void testDynamicUpdates() throws Exception {
        client().admin().indices().prepareCreate("test")
                .setSettings(

From 09006ace115b019c8e71223dc1c5990b46fd62ff Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 1 Dec 2015 07:24:19 -0500
Subject: [PATCH 28/40] Add the default mapping at most once on batch mapping
 updates

When creating an index on master for the purpose of updating mappings, the
default mapping could needlessly be added multiple times. This commit ensures
that the default mapping is added at most once while preparing to update
mappings.
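In sketch form, the guard this change introduces looks roughly like the
following (a paraphrase of the diff below, not the verbatim code): the custom
`_default_` mapping is merged only on the branch that actually creates the
IndexService, so later tasks that reuse an already-created service no longer
re-add it.

    // Illustrative sketch: merge _default_ only when the index service is
    // first created for this batch.
    if (indicesService.hasIndex(index) == false) {
        indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
        if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
            indexService.mapperService().merge(MapperService.DEFAULT_MAPPING,
                    indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(),
                    false, request.updateAllTypes());
        }
    } else {
        indexService = indicesService.indexService(index);
    }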
---
 .../cluster/metadata/MetaDataMappingService.java | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index d19a087faa6..96370423ebe 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -234,15 +234,15 @@ public class MetaDataMappingService extends AbstractComponent {
                             final IndexMetaData indexMetaData = currentState.metaData().index(index);
                             IndexService indexService;
                             if (indicesService.hasIndex(index) == false) {
-                                indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
                                 indicesToClose.add(index);
+                                indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
+                                // make sure to add custom default mapping if exists
+                                if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
+                                    indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
+                                }
                             } else {
                                 indexService = indicesService.indexService(index);
                             }
-                            // make sure to add custom default mapping if exists
-                            if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
-                                indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
-                            }
                             // only add the current relevant mapping (if exists)
                             if (indexMetaData.getMappings().containsKey(request.type())) {
                                 indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
                             }

From 13dbed9c92ef7c2ddc9da59c725cd2d8b6ff131e Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 1 Dec 2015 07:43:45 -0500
Subject: [PATCH 29/40] Add each mapping at most once on batch mapping updates

When creating an index on master for the purpose of updating mappings, a
mapping being updated could needlessly be merged multiple times. This commit
ensures that each mapping is merged at most once while preparing to update
mappings.
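Condensed to a sketch (a paraphrase of the condition in the diff below): a
type is only merged if the mapper service has not already seen it while
preparing this batch.

    // Illustrative condition: skip the merge once the type is already mapped.
    if (indexMetaData.getMappings().containsKey(request.type())
            && indexService.mapperService().hasMapping(request.type()) == false) {
        indexService.mapperService().merge(request.type(),
                indexMetaData.getMappings().get(request.type()).source(),
                false, request.updateAllTypes());
    }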
---
 .../cluster/metadata/MetaDataMappingService.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 96370423ebe..c2c05241679 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -243,8 +243,9 @@ public class MetaDataMappingService extends AbstractComponent {
                             } else {
                                 indexService = indicesService.indexService(index);
                             }
-                            // only add the current relevant mapping (if exists)
-                            if (indexMetaData.getMappings().containsKey(request.type())) {
+                            // only add the current relevant mapping (if exists and not yet added)
+                            if (indexMetaData.getMappings().containsKey(request.type()) &&
+                                    !indexService.mapperService().hasMapping(request.type())) {
                                 indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
                             }

From c67a33248688f603cd04c8b996f4de3cbd8ea1ba Mon Sep 17 00:00:00 2001
From: javanna
Date: Tue, 1 Dec 2015 13:42:06 +0100
Subject: [PATCH 30/40] Query DSL: Enforce distance is greater than 0 in geo
 distance query

Validation is now done as part of the distance setter method and tested in
GeoDistanceQueryBuilderTests. Fixed GeoDistanceTests to adapt to the new
validation.

Closes #15135
---
 .../index/query/GeoDistanceQueryBuilder.java  |  8 ++-
 .../query/GeoDistanceQueryBuilderTests.java   | 58 ++++++++++---------
 .../messy/tests/GeoDistanceTests.java         |  4 +-
 3 files changed, 40 insertions(+), 30 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java
index 58d1c4b703c..823362140a1 100644
--- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java
@@ -128,7 +128,11 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder {
@@ -86,7 +84,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase
Date: Thu, 26 Nov 2015 19:45:33 +0100
Subject: [PATCH 31/40] Refactor HighlighterBuilder

This change pulls out the common fields that HighlighterBuilder shares with
its nested Field class into a new abstract CommonHighlighterOptions superclass
which also gets equals() and hashCode() methods and methods to serialize the
common fields to a StreamOutput and read them from a stream.
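The refactoring relies on a self-referential generic parameter (the
"curiously recurring" builder pattern) so that setters declared once on the
abstract class still return the concrete builder for chaining. A minimal,
hypothetical sketch of the pattern; SketchBuilder and its members are
illustrative names, not classes from this change:

    // Shared setters live in the abstract class and return the concrete
    // subtype, so chained calls keep their specific builder type.
    abstract class AbstractSketchBuilder<HB extends AbstractSketchBuilder<HB>> {
        protected String order;

        @SuppressWarnings("unchecked")
        public HB order(String order) {
            this.order = order;
            return (HB) this; // safe because subclasses declare themselves as HB
        }
    }

    class SketchBuilder extends AbstractSketchBuilder<SketchBuilder> {
        private String field;

        public SketchBuilder field(String field) { // subtype-specific option
            this.field = field;
            return this;
        }
    }

    // order(...) returns SketchBuilder, so field(...) chains without a cast:
    SketchBuilder builder = new SketchBuilder().order("score").field("title");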
Relates to #15044 --- .../search/builder/SearchSourceBuilder.java | 2 +- .../highlight/AbstractHighlighterBuilder.java | 509 +++++++++++++++ .../search/highlight/HighlightBuilder.java | 602 ++++-------------- .../highlight/HighlighterParseElement.java | 40 +- .../highlight/HighlightBuilderTests.java | 332 ++++++++++ 5 files changed, 1013 insertions(+), 472 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java create mode 100644 core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index b9663e4a0a0..7963b678fb3 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -408,7 +408,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ try { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); - highlightBuilder.innerXContent(builder, EMPTY_PARAMS); + highlightBuilder.innerXContent(builder); builder.endObject(); this.highlightBuilder = builder.bytes(); return this; diff --git a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java new file mode 100644 index 00000000000..b10e2e8f58f --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java @@ -0,0 +1,509 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.highlight; + +import org.apache.lucene.search.highlight.SimpleFragmenter; +import org.apache.lucene.search.highlight.SimpleSpanFragmenter; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; + +/** + * This abstract class holds parameters shared by {@link HighlightBuilder} and {@link HighlightBuilder.Field} + * and provides the common setters, equality, hashCode calculation and common serialization + */ +public abstract class AbstractHighlighterBuilder { + + protected String[] preTags; + + protected String[] postTags; + + protected Integer fragmentSize; + + protected Integer numOfFragments; + + protected String highlighterType; + + protected String fragmenter; + + protected QueryBuilder highlightQuery; + + protected String order; + + protected Boolean highlightFilter; + + protected Boolean forceSource; + + protected Integer boundaryMaxScan; + + protected char[] boundaryChars; + + protected Integer noMatchSize; + + protected Integer phraseLimit; + + protected Map options; + + protected Boolean requireFieldMatch; + + /** + * Set the pre tags that will be used for highlighting. + */ + @SuppressWarnings("unchecked") + public HB preTags(String... preTags) { + this.preTags = preTags; + return (HB) this; + } + + /** + * @return the value set by {@link #preTags(String...)} + */ + public String[] preTags() { + return this.preTags; + } + + /** + * Set the post tags that will be used for highlighting. + */ + @SuppressWarnings("unchecked") + public HB postTags(String... postTags) { + this.postTags = postTags; + return (HB) this; + } + + /** + * @return the value set by {@link #postTags(String...)} + */ + public String[] postTags() { + return this.postTags; + } + + /** + * Set the fragment size in characters, defaults to {@link HighlighterParseElement#DEFAULT_FRAGMENT_CHAR_SIZE} + */ + @SuppressWarnings("unchecked") + public HB fragmentSize(Integer fragmentSize) { + this.fragmentSize = fragmentSize; + return (HB) this; + } + + /** + * @return the value set by {@link #fragmentSize(Integer)} + */ + public Integer fragmentSize() { + return this.fragmentSize; + } + + /** + * Set the number of fragments, defaults to {@link HighlighterParseElement#DEFAULT_NUMBER_OF_FRAGMENTS} + */ + @SuppressWarnings("unchecked") + public HB numOfFragments(Integer numOfFragments) { + this.numOfFragments = numOfFragments; + return (HB) this; + } + + /** + * @return the value set by {@link #numOfFragments(Integer)} + */ + public Integer numOfFragments() { + return this.numOfFragments; + } + + /** + * Set type of highlighter to use. Out of the box supported types + * are plain, fvh and postings. + * The default option selected is dependent on the mappings defined for your index. + * Details of the different highlighter types are covered in the reference guide. + */ + @SuppressWarnings("unchecked") + public HB highlighterType(String highlighterType) { + this.highlighterType = highlighterType; + return (HB) this; + } + + /** + * @return the value set by {@link #highlighterType(String)} + */ + public String highlighterType() { + return this.highlighterType; + } + + /** + * Sets what fragmenter to use to break up text that is eligible for highlighting. 
+ * This option is only applicable when using the plain highlighterType highlighter. + * Permitted values are "simple" or "span" relating to {@link SimpleFragmenter} and + * {@link SimpleSpanFragmenter} implementations respectively with the default being "span" + */ + @SuppressWarnings("unchecked") + public HB fragmenter(String fragmenter) { + this.fragmenter = fragmenter; + return (HB) this; + } + + /** + * @return the value set by {@link #fragmenter(String)} + */ + public String fragmenter() { + return this.fragmenter; + } + + /** + * Sets a query to be used for highlighting instead of the search query. + */ + @SuppressWarnings("unchecked") + public HB highlightQuery(QueryBuilder highlightQuery) { + this.highlightQuery = highlightQuery; + return (HB) this; + } + + /** + * @return the value set by {@link #highlightQuery(QueryBuilder)} + */ + public QueryBuilder highlightQuery() { + return this.highlightQuery; + } + + /** + * The order of fragments per field. By default, ordered by the order in the + * highlighted text. Can be score, which then it will be ordered + * by score of the fragments. + */ + @SuppressWarnings("unchecked") + public HB order(String order) { + this.order = order; + return (HB) this; + } + + /** + * @return the value set by {@link #order(String)} + */ + public String order() { + return this.order; + } + + /** + * Set this to true when using the highlighterType fvh + * and you want to provide highlighting on filter clauses in your + * query. Default is false. + */ + @SuppressWarnings("unchecked") + public HB highlightFilter(Boolean highlightFilter) { + this.highlightFilter = highlightFilter; + return (HB) this; + } + + /** + * @return the value set by {@link #highlightFilter(Boolean)} + */ + public Boolean highlightFilter() { + return this.highlightFilter; + } + + /** + * When using the highlighterType fvh this setting + * controls how far to look for boundary characters, and defaults to 20. + */ + @SuppressWarnings("unchecked") + public HB boundaryMaxScan(Integer boundaryMaxScan) { + this.boundaryMaxScan = boundaryMaxScan; + return (HB) this; + } + + /** + * @return the value set by {@link #boundaryMaxScan(Integer)} + */ + public Integer boundaryMaxScan() { + return this.boundaryMaxScan; + } + + /** + * When using the highlighterType fvh this setting + * defines what constitutes a boundary for highlighting. It’s a single string with + * each boundary character defined in it. It defaults to .,!? \t\n + */ + @SuppressWarnings("unchecked") + public HB boundaryChars(char[] boundaryChars) { + this.boundaryChars = boundaryChars; + return (HB) this; + } + + /** + * @return the value set by {@link #boundaryChars(char[])} + */ + public char[] boundaryChars() { + return this.boundaryChars; + } + + /** + * Allows to set custom options for custom highlighters. + */ + @SuppressWarnings("unchecked") + public HB options(Map options) { + this.options = options; + return (HB) this; + } + + /** + * @return the value set by {@link #options(Map)} + */ + public Map options() { + return this.options; + } + + /** + * Set to true to cause a field to be highlighted only if a query matches that field. + * Default is false meaning that terms are highlighted on all requested fields regardless + * if the query matches specifically on them. 
+     */
+    @SuppressWarnings("unchecked")
+    public HB requireFieldMatch(Boolean requireFieldMatch) {
+        this.requireFieldMatch = requireFieldMatch;
+        return (HB) this;
+    }
+
+    /**
+     * @return the value set by {@link #requireFieldMatch(Boolean)}
+     */
+    public Boolean requireFieldMatch() {
+        return this.requireFieldMatch;
+    }
+
+    /**
+     * Sets the size of the fragment to return from the beginning of the field if there are no matches to
+     * highlight and the field doesn't also define noMatchSize.
+     * @param noMatchSize integer to set or null to leave out of request. default is null.
+     * @return this for chaining
+     */
+    @SuppressWarnings("unchecked")
+    public HB noMatchSize(Integer noMatchSize) {
+        this.noMatchSize = noMatchSize;
+        return (HB) this;
+    }
+
+    /**
+     * @return the value set by {@link #noMatchSize(Integer)}
+     */
+    public Integer noMatchSize() {
+        return this.noMatchSize;
+    }
+
+    /**
+     * Sets the maximum number of phrases the fvh will consider if the field doesn't also define phraseLimit.
+     * @param phraseLimit maximum number of phrases the fvh will consider
+     * @return this for chaining
+     */
+    @SuppressWarnings("unchecked")
+    public HB phraseLimit(Integer phraseLimit) {
+        this.phraseLimit = phraseLimit;
+        return (HB) this;
+    }
+
+    /**
+     * @return the value set by {@link #phraseLimit(Integer)}
+     */
+    public Integer phraseLimit() {
+        return this.phraseLimit;
+    }
+
+    /**
+     * Forces the highlighting to highlight fields based on the source even if fields are stored separately.
+     */
+    @SuppressWarnings("unchecked")
+    public HB forceSource(Boolean forceSource) {
+        this.forceSource = forceSource;
+        return (HB) this;
+    }
+
+    /**
+     * @return the value set by {@link #forceSource(Boolean)}
+     */
+    public Boolean forceSource() {
+        return this.forceSource;
+    }
+
+    void commonOptionsToXContent(XContentBuilder builder) throws IOException {
+        if (preTags != null) {
+            builder.array("pre_tags", preTags);
+        }
+        if (postTags != null) {
+            builder.array("post_tags", postTags);
+        }
+        if (fragmentSize != null) {
+            builder.field("fragment_size", fragmentSize);
+        }
+        if (numOfFragments != null) {
+            builder.field("number_of_fragments", numOfFragments);
+        }
+        if (highlighterType != null) {
+            builder.field("type", highlighterType);
+        }
+        if (fragmenter != null) {
+            builder.field("fragmenter", fragmenter);
+        }
+        if (highlightQuery != null) {
+            builder.field("highlight_query", highlightQuery);
+        }
+        if (order != null) {
+            builder.field("order", order);
+        }
+        if (highlightFilter != null) {
+            builder.field("highlight_filter", highlightFilter);
+        }
+        if (boundaryMaxScan != null) {
+            builder.field("boundary_max_scan", boundaryMaxScan);
+        }
+        if (boundaryChars != null) {
+            builder.field("boundary_chars", boundaryChars);
+        }
+        if (options != null && options.size() > 0) {
+            builder.field("options", options);
+        }
+        if (forceSource != null) {
+            builder.field("force_source", forceSource);
+        }
+        if (requireFieldMatch != null) {
+            builder.field("require_field_match", requireFieldMatch);
+        }
+        if (noMatchSize != null) {
+            builder.field("no_match_size", noMatchSize);
+        }
+        if (phraseLimit != null) {
+            builder.field("phrase_limit", phraseLimit);
+        }
+    }
+
+    @Override
+    public final int hashCode() {
+        return Objects.hash(getClass(), Arrays.hashCode(preTags), Arrays.hashCode(postTags), fragmentSize,
+                numOfFragments, highlighterType, fragmenter, highlightQuery, order, highlightFilter,
+                forceSource, boundaryMaxScan, Arrays.hashCode(boundaryChars), noMatchSize,
+                phraseLimit, options, requireFieldMatch, doHashCode());
+    }
+
+    /**
+     * internal hashCode calculation to overwrite for the implementing classes.
+     */
+    protected abstract int doHashCode();
+
+    @Override
+    public final boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        @SuppressWarnings("unchecked")
+        HB other = (HB) obj;
+        return Arrays.equals(preTags, other.preTags) &&
+                Arrays.equals(postTags, other.postTags) &&
+                Objects.equals(fragmentSize, other.fragmentSize) &&
+                Objects.equals(numOfFragments, other.numOfFragments) &&
+                Objects.equals(highlighterType, other.highlighterType) &&
+                Objects.equals(fragmenter, other.fragmenter) &&
+                Objects.equals(highlightQuery, other.highlightQuery) &&
+                Objects.equals(order, other.order) &&
+                Objects.equals(highlightFilter, other.highlightFilter) &&
+                Objects.equals(forceSource, other.forceSource) &&
+                Objects.equals(boundaryMaxScan, other.boundaryMaxScan) &&
+                Arrays.equals(boundaryChars, other.boundaryChars) &&
+                Objects.equals(noMatchSize, other.noMatchSize) &&
+                Objects.equals(phraseLimit, other.phraseLimit) &&
+                Objects.equals(options, other.options) &&
+                Objects.equals(requireFieldMatch, other.requireFieldMatch) &&
+                doEquals(other);
+    }
+
+    /**
+     * internal equals to overwrite for the implementing classes.
+     */
+    protected abstract boolean doEquals(HB other);
+
+    /**
+     * read common parameters from {@link StreamInput}
+     */
+    @SuppressWarnings("unchecked")
+    protected HB readOptionsFrom(StreamInput in) throws IOException {
+        preTags(in.readOptionalStringArray());
+        postTags(in.readOptionalStringArray());
+        fragmentSize(in.readOptionalVInt());
+        numOfFragments(in.readOptionalVInt());
+        highlighterType(in.readOptionalString());
+        fragmenter(in.readOptionalString());
+        if (in.readBoolean()) {
+            highlightQuery(in.readQuery());
+        }
+        order(in.readOptionalString());
+        highlightFilter(in.readOptionalBoolean());
+        forceSource(in.readOptionalBoolean());
+        boundaryMaxScan(in.readOptionalVInt());
+        if (in.readBoolean()) {
+            boundaryChars(in.readString().toCharArray());
+        }
+        noMatchSize(in.readOptionalVInt());
+        phraseLimit(in.readOptionalVInt());
+        if (in.readBoolean()) {
+            options(in.readMap());
+        }
+        requireFieldMatch(in.readOptionalBoolean());
+        return (HB) this;
+    }
+
+    /**
+     * write common parameters to {@link StreamOutput}
+     */
+    protected void writeOptionsTo(StreamOutput out) throws IOException {
+        out.writeOptionalStringArray(preTags);
+        out.writeOptionalStringArray(postTags);
+        out.writeOptionalVInt(fragmentSize);
+        out.writeOptionalVInt(numOfFragments);
+        out.writeOptionalString(highlighterType);
+        out.writeOptionalString(fragmenter);
+        boolean hasQuery = highlightQuery != null;
+        out.writeBoolean(hasQuery);
+        if (hasQuery) {
+            out.writeQuery(highlightQuery);
+        }
+        out.writeOptionalString(order);
+        out.writeOptionalBoolean(highlightFilter);
+        out.writeOptionalBoolean(forceSource);
+        out.writeOptionalVInt(boundaryMaxScan);
+        boolean hasBoundaryChars = boundaryChars != null;
+        out.writeBoolean(hasBoundaryChars);
+        if (hasBoundaryChars) {
+            out.writeString(String.valueOf(boundaryChars));
+        }
+        out.writeOptionalVInt(noMatchSize);
+        out.writeOptionalVInt(phraseLimit);
+        boolean hasOptions = options != null;
+        out.writeBoolean(hasOptions);
+        if (hasOptions) {
+            out.writeMap(options);
+        }
+        out.writeOptionalBoolean(requireFieldMatch);
+    }
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
index b321b574d6a..dbae661fde9 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java @@ -19,16 +19,19 @@ package org.elasticsearch.search.highlight; -import org.apache.lucene.search.highlight.SimpleFragmenter; -import org.apache.lucene.search.highlight.SimpleSpanFragmenter; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; -import java.util.Map; +import java.util.Objects; /** * A builder for search highlighting. Settings can control how large fields @@ -36,46 +39,14 @@ import java.util.Map; * * @see org.elasticsearch.search.builder.SearchSourceBuilder#highlight() */ -public class HighlightBuilder implements ToXContent { +public class HighlightBuilder extends AbstractHighlighterBuilder implements Writeable, ToXContent { - private List fields; + public static final HighlightBuilder PROTOTYPE = new HighlightBuilder(); - private String tagsSchema; - - private Boolean highlightFilter; - - private Integer fragmentSize; - - private Integer numOfFragments; - - private String[] preTags; - - private String[] postTags; - - private String order; + private final List fields = new ArrayList<>(); private String encoder; - private Boolean requireFieldMatch; - - private Integer boundaryMaxScan; - - private char[] boundaryChars; - - private String highlighterType; - - private String fragmenter; - - private QueryBuilder highlightQuery; - - private Integer noMatchSize; - - private Integer phraseLimit; - - private Map options; - - private Boolean forceSource; - private boolean useExplicitFieldOrder = false; /** @@ -85,14 +56,9 @@ public class HighlightBuilder implements ToXContent { * @param name The field to highlight */ public HighlightBuilder field(String name) { - if (fields == null) { - fields = new ArrayList<>(); - } - fields.add(new Field(name)); - return this; + return field(new Field(name)); } - /** * Adds a field to be highlighted with a provided fragment size (in characters), and * default number of fragments of 5. @@ -101,11 +67,7 @@ public class HighlightBuilder implements ToXContent { * @param fragmentSize The size of a fragment in characters */ public HighlightBuilder field(String name, int fragmentSize) { - if (fields == null) { - fields = new ArrayList<>(); - } - fields.add(new Field(name).fragmentSize(fragmentSize)); - return this; + return field(new Field(name).fragmentSize(fragmentSize)); } @@ -118,14 +80,9 @@ public class HighlightBuilder implements ToXContent { * @param numberOfFragments The (maximum) number of fragments */ public HighlightBuilder field(String name, int fragmentSize, int numberOfFragments) { - if (fields == null) { - fields = new ArrayList<>(); - } - fields.add(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments)); - return this; + return field(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments)); } - /** * Adds a field to be highlighted with a provided fragment size (in characters), and * a provided (maximum) number of fragments. 
@@ -136,56 +93,38 @@ public class HighlightBuilder implements ToXContent { * @param fragmentOffset The offset from the start of the fragment to the start of the highlight */ public HighlightBuilder field(String name, int fragmentSize, int numberOfFragments, int fragmentOffset) { - if (fields == null) { - fields = new ArrayList<>(); - } - fields.add(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments) + return field(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments) .fragmentOffset(fragmentOffset)); - return this; } public HighlightBuilder field(Field field) { - if (fields == null) { - fields = new ArrayList<>(); - } fields.add(field); return this; } + public List fields() { + return this.fields; + } + /** - * Set a tag scheme that encapsulates a built in pre and post tags. The allows schemes + * Set a tag scheme that encapsulates a built in pre and post tags. The allowed schemes * are styled and default. * * @param schemaName The tag scheme name */ public HighlightBuilder tagsSchema(String schemaName) { - this.tagsSchema = schemaName; - return this; - } - - /** - * Set this to true when using the highlighterType fvh - * and you want to provide highlighting on filter clauses in your - * query. Default is false. - */ - public HighlightBuilder highlightFilter(boolean highlightFilter) { - this.highlightFilter = highlightFilter; - return this; - } - - /** - * Sets the size of a fragment in characters (defaults to 100) - */ - public HighlightBuilder fragmentSize(Integer fragmentSize) { - this.fragmentSize = fragmentSize; - return this; - } - - /** - * Sets the maximum number of fragments returned - */ - public HighlightBuilder numOfFragments(Integer numOfFragments) { - this.numOfFragments = numOfFragments; + switch (schemaName) { + case "default": + preTags(HighlighterParseElement.DEFAULT_PRE_TAGS); + postTags(HighlighterParseElement.DEFAULT_POST_TAGS); + break; + case "styled": + preTags(HighlighterParseElement.STYLED_PRE_TAG); + postTags(HighlighterParseElement.STYLED_POST_TAGS); + break; + default: + throw new IllegalArgumentException("Unknown tag schema ["+ schemaName +"]"); + } return this; } @@ -201,125 +140,10 @@ public class HighlightBuilder implements ToXContent { } /** - * Explicitly set the pre tags that will be used for highlighting. + * Getter for {@link #encoder(String)} */ - public HighlightBuilder preTags(String... preTags) { - this.preTags = preTags; - return this; - } - - /** - * Explicitly set the post tags that will be used for highlighting. - */ - public HighlightBuilder postTags(String... postTags) { - this.postTags = postTags; - return this; - } - - /** - * The order of fragments per field. By default, ordered by the order in the - * highlighted text. Can be score, which then it will be ordered - * by score of the fragments. - */ - public HighlightBuilder order(String order) { - this.order = order; - return this; - } - - /** - * Set to true to cause a field to be highlighted only if a query matches that field. - * Default is false meaning that terms are highlighted on all requested fields regardless - * if the query matches specifically on them. - */ - public HighlightBuilder requireFieldMatch(boolean requireFieldMatch) { - this.requireFieldMatch = requireFieldMatch; - return this; - } - - /** - * When using the highlighterType fvh this setting - * controls how far to look for boundary characters, and defaults to 20. 
- */ - public HighlightBuilder boundaryMaxScan(Integer boundaryMaxScan) { - this.boundaryMaxScan = boundaryMaxScan; - return this; - } - - /** - * When using the highlighterType fvh this setting - * defines what constitutes a boundary for highlighting. It’s a single string with - * each boundary character defined in it. It defaults to .,!? \t\n - */ - public HighlightBuilder boundaryChars(char[] boundaryChars) { - this.boundaryChars = boundaryChars; - return this; - } - - /** - * Set type of highlighter to use. Out of the box supported types - * are plain, fvh and postings. - * The default option selected is dependent on the mappings defined for your index. - * Details of the different highlighter types are covered in the reference guide. - */ - public HighlightBuilder highlighterType(String highlighterType) { - this.highlighterType = highlighterType; - return this; - } - - /** - * Sets what fragmenter to use to break up text that is eligible for highlighting. - * This option is only applicable when using the plain highlighterType highlighter. - * Permitted values are "simple" or "span" relating to {@link SimpleFragmenter} and - * {@link SimpleSpanFragmenter} implementations respectively with the default being "span" - */ - public HighlightBuilder fragmenter(String fragmenter) { - this.fragmenter = fragmenter; - return this; - } - - /** - * Sets a query to be used for highlighting all fields instead of the search query. - */ - public HighlightBuilder highlightQuery(QueryBuilder highlightQuery) { - this.highlightQuery = highlightQuery; - return this; - } - - /** - * Sets the size of the fragment to return from the beginning of the field if there are no matches to - * highlight and the field doesn't also define noMatchSize. - * @param noMatchSize integer to set or null to leave out of request. default is null. - * @return this for chaining - */ - public HighlightBuilder noMatchSize(Integer noMatchSize) { - this.noMatchSize = noMatchSize; - return this; - } - - /** - * Sets the maximum number of phrases the fvh will consider if the field doesn't also define phraseLimit. - * @param phraseLimit maximum number of phrases the fvh will consider - * @return this for chaining - */ - public HighlightBuilder phraseLimit(Integer phraseLimit) { - this.phraseLimit = phraseLimit; - return this; - } - - /** - * Allows to set custom options for custom highlighters. - */ - public HighlightBuilder options(Map options) { - this.options = options; - return this; - } - - /** - * Forces the highlighting to highlight fields based on the source even if fields are stored separately. 
- */ - public HighlightBuilder forceSource(boolean forceSource) { - this.forceSource = forceSource; - return this; + public String encoder() { + return this.encoder; } /** @@ -331,71 +155,29 @@ public class HighlightBuilder implements ToXContent { return this; } + /** + * Gets value set with {@link #useExplicitFieldOrder(boolean)} + */ + public Boolean useExplicitFieldOrder() { + return this.useExplicitFieldOrder; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("highlight"); - innerXContent(builder, params); + innerXContent(builder); builder.endObject(); return builder; } - - public void innerXContent(XContentBuilder builder, Params params) throws IOException { - if (tagsSchema != null) { - builder.field("tags_schema", tagsSchema); - } - if (preTags != null) { - builder.array("pre_tags", preTags); - } - if (postTags != null) { - builder.array("post_tags", postTags); - } - if (order != null) { - builder.field("order", order); - } - if (highlightFilter != null) { - builder.field("highlight_filter", highlightFilter); - } - if (fragmentSize != null) { - builder.field("fragment_size", fragmentSize); - } - if (numOfFragments != null) { - builder.field("number_of_fragments", numOfFragments); - } + public void innerXContent(XContentBuilder builder) throws IOException { + // first write common options + commonOptionsToXContent(builder); + // special options for top-level highlighter if (encoder != null) { builder.field("encoder", encoder); } - if (requireFieldMatch != null) { - builder.field("require_field_match", requireFieldMatch); - } - if (boundaryMaxScan != null) { - builder.field("boundary_max_scan", boundaryMaxScan); - } - if (boundaryChars != null) { - builder.field("boundary_chars", boundaryChars); - } - if (highlighterType != null) { - builder.field("type", highlighterType); - } - if (fragmenter != null) { - builder.field("fragmenter", fragmenter); - } - if (highlightQuery != null) { - builder.field("highlight_query", highlightQuery); - } - if (noMatchSize != null) { - builder.field("no_match_size", noMatchSize); - } - if (phraseLimit != null) { - builder.field("phrase_limit", phraseLimit); - } - if (options != null && options.size() > 0) { - builder.field("options", options); - } - if (forceSource != null) { - builder.field("force_source", forceSource); - } - if (fields != null) { + if (fields.size() > 0) { if (useExplicitFieldOrder) { builder.startArray("fields"); } else { @@ -405,63 +187,7 @@ public class HighlightBuilder implements ToXContent { if (useExplicitFieldOrder) { builder.startObject(); } - builder.startObject(field.name()); - if (field.preTags != null) { - builder.field("pre_tags", field.preTags); - } - if (field.postTags != null) { - builder.field("post_tags", field.postTags); - } - if (field.fragmentSize != -1) { - builder.field("fragment_size", field.fragmentSize); - } - if (field.numOfFragments != -1) { - builder.field("number_of_fragments", field.numOfFragments); - } - if (field.fragmentOffset != -1) { - builder.field("fragment_offset", field.fragmentOffset); - } - if (field.highlightFilter != null) { - builder.field("highlight_filter", field.highlightFilter); - } - if (field.order != null) { - builder.field("order", field.order); - } - if (field.requireFieldMatch != null) { - builder.field("require_field_match", field.requireFieldMatch); - } - if (field.boundaryMaxScan != -1) { - builder.field("boundary_max_scan", field.boundaryMaxScan); - } - if (field.boundaryChars != null) { - 
builder.field("boundary_chars", field.boundaryChars); - } - if (field.highlighterType != null) { - builder.field("type", field.highlighterType); - } - if (field.fragmenter != null) { - builder.field("fragmenter", field.fragmenter); - } - if (field.highlightQuery != null) { - builder.field("highlight_query", field.highlightQuery); - } - if (field.noMatchSize != null) { - builder.field("no_match_size", field.noMatchSize); - } - if (field.matchedFields != null) { - builder.field("matched_fields", field.matchedFields); - } - if (field.phraseLimit != null) { - builder.field("phrase_limit", field.phraseLimit); - } - if (field.options != null && field.options.size() > 0) { - builder.field("options", field.options); - } - if (field.forceSource != null) { - builder.field("force_source", field.forceSource); - } - - builder.endObject(); + field.innerXContent(builder); if (useExplicitFieldOrder) { builder.endObject(); } @@ -474,26 +200,62 @@ public class HighlightBuilder implements ToXContent { } } - public static class Field { - final String name; - String[] preTags; - String[] postTags; - int fragmentSize = -1; + @Override + public final String toString() { + try { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.prettyPrint(); + toXContent(builder, ToXContent.EMPTY_PARAMS); + return builder.string(); + } catch (Exception e) { + return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}"; + } + } + + @Override + protected int doHashCode() { + return Objects.hash(encoder, useExplicitFieldOrder, fields); + } + + @Override + protected boolean doEquals(HighlightBuilder other) { + return Objects.equals(encoder, other.encoder) && + Objects.equals(useExplicitFieldOrder, other.useExplicitFieldOrder) && + Objects.equals(fields, other.fields); + } + + @Override + public HighlightBuilder readFrom(StreamInput in) throws IOException { + HighlightBuilder highlightBuilder = new HighlightBuilder(); + highlightBuilder.readOptionsFrom(in) + .encoder(in.readOptionalString()) + .useExplicitFieldOrder(in.readBoolean()); + int fields = in.readVInt(); + for (int i = 0; i < fields; i++) { + highlightBuilder.field(Field.PROTOTYPE.readFrom(in)); + } + return highlightBuilder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writeOptionsTo(out); + out.writeOptionalString(encoder); + out.writeBoolean(useExplicitFieldOrder); + out.writeVInt(fields.size()); + for (int i = 0; i < fields.size(); i++) { + fields.get(i).writeTo(out); + } + } + + public static class Field extends AbstractHighlighterBuilder implements Writeable { + static final Field PROTOTYPE = new Field("_na_"); + + private final String name; + int fragmentOffset = -1; - int numOfFragments = -1; - Boolean highlightFilter; - String order; - Boolean requireFieldMatch; - int boundaryMaxScan = -1; - char[] boundaryChars; - String highlighterType; - String fragmenter; - QueryBuilder highlightQuery; - Integer noMatchSize; + String[] matchedFields; - Integer phraseLimit; - Map options; - Boolean forceSource; public Field(String name) { this.name = name; @@ -503,118 +265,11 @@ public class HighlightBuilder implements ToXContent { return name; } - /** - * Explicitly set the pre tags for this field that will be used for highlighting. - * This overrides global settings set by {@link HighlightBuilder#preTags(String...)}. - */ - public Field preTags(String... preTags) { - this.preTags = preTags; - return this; - } - - /** - * Explicitly set the post tags for this field that will be used for highlighting. 
- * This overrides global settings set by {@link HighlightBuilder#postTags(String...)}. - */ - public Field postTags(String... postTags) { - this.postTags = postTags; - return this; - } - - public Field fragmentSize(int fragmentSize) { - this.fragmentSize = fragmentSize; - return this; - } - public Field fragmentOffset(int fragmentOffset) { this.fragmentOffset = fragmentOffset; return this; } - public Field numOfFragments(int numOfFragments) { - this.numOfFragments = numOfFragments; - return this; - } - - public Field highlightFilter(boolean highlightFilter) { - this.highlightFilter = highlightFilter; - return this; - } - - /** - * The order of fragments per field. By default, ordered by the order in the - * highlighted text. Can be score, which then it will be ordered - * by score of the fragments. - * This overrides global settings set by {@link HighlightBuilder#order(String)}. - */ - public Field order(String order) { - this.order = order; - return this; - } - - public Field requireFieldMatch(boolean requireFieldMatch) { - this.requireFieldMatch = requireFieldMatch; - return this; - } - - public Field boundaryMaxScan(int boundaryMaxScan) { - this.boundaryMaxScan = boundaryMaxScan; - return this; - } - - public Field boundaryChars(char[] boundaryChars) { - this.boundaryChars = boundaryChars; - return this; - } - - /** - * Set type of highlighter to use. Out of the box supported types - * are plain, fvh and postings. - * This overrides global settings set by {@link HighlightBuilder#highlighterType(String)}. - */ - public Field highlighterType(String highlighterType) { - this.highlighterType = highlighterType; - return this; - } - - /** - * Sets what fragmenter to use to break up text that is eligible for highlighting. - * This option is only applicable when using plain / normal highlighter. - * This overrides global settings set by {@link HighlightBuilder#fragmenter(String)}. - */ - public Field fragmenter(String fragmenter) { - this.fragmenter = fragmenter; - return this; - } - - /** - * Sets a query to use for highlighting this field instead of the search query. - */ - public Field highlightQuery(QueryBuilder highlightQuery) { - this.highlightQuery = highlightQuery; - return this; - } - - /** - * Sets the size of the fragment to return from the beginning of the field if there are no matches to - * highlight. - * @param noMatchSize integer to set or null to leave out of request. default is null. - * @return this for chaining - */ - public Field noMatchSize(Integer noMatchSize) { - this.noMatchSize = noMatchSize; - return this; - } - - /** - * Allows to set custom options for custom highlighters. - * This overrides global settings set by {@link HighlightBuilder#options(Map)}. - */ - public Field options(Map options) { - this.options = options; - return this; - } - /** * Set the matched fields to highlight against this field data. Default to null, meaning just * the named field. If you provide a list of fields here then don't forget to include name as @@ -625,24 +280,47 @@ public class HighlightBuilder implements ToXContent { return this; } - /** - * Sets the maximum number of phrases the fvh will consider. 
- * @param phraseLimit maximum number of phrases the fvh will consider - * @return this for chaining - */ - public Field phraseLimit(Integer phraseLimit) { - this.phraseLimit = phraseLimit; - return this; + public void innerXContent(XContentBuilder builder) throws IOException { + builder.startObject(name); + // write common options + commonOptionsToXContent(builder); + // write special field-highlighter options + if (fragmentOffset != -1) { + builder.field("fragment_offset", fragmentOffset); + } + if (matchedFields != null) { + builder.field("matched_fields", matchedFields); + } + builder.endObject(); } - - /** - * Forces the highlighting to highlight this field based on the source even if this field is stored separately. - */ - public Field forceSource(boolean forceSource) { - this.forceSource = forceSource; - return this; + @Override + protected int doHashCode() { + return Objects.hash(name, fragmentOffset, Arrays.hashCode(matchedFields)); } + @Override + protected boolean doEquals(Field other) { + return Objects.equals(name, other.name) && + Objects.equals(fragmentOffset, other.fragmentOffset) && + Arrays.equals(matchedFields, other.matchedFields); + } + + @Override + public Field readFrom(StreamInput in) throws IOException { + Field field = new Field(in.readString()); + field.fragmentOffset(in.readVInt()); + field.matchedFields(in.readOptionalStringArray()); + field.readOptionsFrom(in); + return field; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeVInt(fragmentOffset); + out.writeOptionalStringArray(matchedFields); + writeOptionsTo(out); + } } } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java index 8fddeaed279..fdf9e2c26dd 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java @@ -52,16 +52,38 @@ import java.util.Set; */ public class HighlighterParseElement implements SearchParseElement { - private static final String[] DEFAULT_PRE_TAGS = new String[]{""}; - private static final String[] DEFAULT_POST_TAGS = new String[]{""}; - - private static final String[] STYLED_PRE_TAG = { + /** default for whether to highlight fields based on the source even if stored separately */ + public static final boolean DEFAULT_FORCE_SOURCE = false; + /** default for whether a field should be highlighted only if a query matches that field */ + public static final boolean DEFAULT_REQUIRE_FIELD_MATCH = true; + /** default for whether fvh should provide highlighting on filter clauses */ + public static final boolean DEFAULT_HIGHLIGHT_FILTER = false; + /** default for highlight fragments being ordered by score */ + public static final boolean DEFAULT_SCORE_ORDERED = false; + /** the default encoder setting */ + public static final String DEFAULT_ENCODER = "default"; + /** default for the maximum number of phrases the fvh will consider */ + public static final int DEFAULT_PHRASE_LIMIT = 256; + /** default for fragment size when there are no matches */ + public static final int DEFAULT_NO_MATCH_SIZE = 0; + /** the default number of fragments for highlighting */ + public static final int DEFAULT_NUMBER_OF_FRAGMENTS = 5; + /** the default number of fragments size in characters */ + public static final int DEFAULT_FRAGMENT_CHAR_SIZE = 100; + /** the default opening tag */ + public static 
final String[] DEFAULT_PRE_TAGS = new String[]{""}; + /** the default closing tag */ + public static final String[] DEFAULT_POST_TAGS = new String[]{""}; + + /** the default opening tags when tag_schema = "styled" */ + public static final String[] STYLED_PRE_TAG = { "", "", "", "", "", "", "", "", "", "" }; - private static final String[] STYLED_POST_TAGS = {""}; + /** the default closing tags when tag_schema = "styled" */ + public static final String[] STYLED_POST_TAGS = {""}; @Override public void parse(XContentParser parser, SearchContext context) throws Exception { @@ -78,11 +100,11 @@ public class HighlighterParseElement implements SearchParseElement { final List> fieldsOptions = new ArrayList<>(); final SearchContextHighlight.FieldOptions.Builder globalOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder() - .preTags(DEFAULT_PRE_TAGS).postTags(DEFAULT_POST_TAGS).scoreOrdered(false).highlightFilter(false) - .requireFieldMatch(true).forceSource(false).fragmentCharSize(100).numberOfFragments(5) - .encoder("default").boundaryMaxScan(SimpleBoundaryScanner.DEFAULT_MAX_SCAN) + .preTags(DEFAULT_PRE_TAGS).postTags(DEFAULT_POST_TAGS).scoreOrdered(DEFAULT_SCORE_ORDERED).highlightFilter(DEFAULT_HIGHLIGHT_FILTER) + .requireFieldMatch(DEFAULT_REQUIRE_FIELD_MATCH).forceSource(DEFAULT_FORCE_SOURCE).fragmentCharSize(DEFAULT_FRAGMENT_CHAR_SIZE).numberOfFragments(DEFAULT_NUMBER_OF_FRAGMENTS) + .encoder(DEFAULT_ENCODER).boundaryMaxScan(SimpleBoundaryScanner.DEFAULT_MAX_SCAN) .boundaryChars(SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS) - .noMatchSize(0).phraseLimit(256); + .noMatchSize(DEFAULT_NO_MATCH_SIZE).phraseLimit(DEFAULT_PHRASE_LIMIT); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java new file mode 100644 index 00000000000..cefc232fddb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java @@ -0,0 +1,332 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.highlight; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.search.highlight.HighlightBuilder.Field; +import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class HighlightBuilderTests extends ESTestCase { + + private static final int NUMBER_OF_TESTBUILDERS = 20; + private static NamedWriteableRegistry namedWriteableRegistry; + + /** + * setup for the whole base test class + */ + @BeforeClass + public static void init() { + if (namedWriteableRegistry == null) { + namedWriteableRegistry = new NamedWriteableRegistry(); + namedWriteableRegistry.registerPrototype(QueryBuilder.class, new MatchAllQueryBuilder()); + namedWriteableRegistry.registerPrototype(QueryBuilder.class, new IdsQueryBuilder()); + namedWriteableRegistry.registerPrototype(QueryBuilder.class, new TermQueryBuilder("field", "value")); + } + } + + @AfterClass + public static void afterClass() throws Exception { + namedWriteableRegistry = null; + } + + /** + * Test serialization and deserialization of the highlighter builder + */ + public void testSerialization() throws IOException { + for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { + HighlightBuilder original = randomHighlighterBuilder(); + HighlightBuilder deserialized = serializedCopy(original); + assertEquals(deserialized, original); + assertEquals(deserialized.hashCode(), original.hashCode()); + assertNotSame(deserialized, original); + } + } + + /** + * Test equality and hashCode properties + */ + public void testEqualsAndHashcode() throws IOException { + for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { + HighlightBuilder firstBuilder = randomHighlighterBuilder(); + assertFalse("highlighter is equal to null", firstBuilder.equals(null)); + assertFalse("highlighter is equal to incompatible type", firstBuilder.equals("")); + assertTrue("highlighter is not equal to self", firstBuilder.equals(firstBuilder)); + assertThat("same highlighter's hashcode returns different values if called multiple times", firstBuilder.hashCode(), + equalTo(firstBuilder.hashCode())); + assertThat("different highlighters should not be equal", mutate(firstBuilder), not(equalTo(firstBuilder))); + + HighlightBuilder secondBuilder = serializedCopy(firstBuilder); + assertTrue("highlighter is not equal to self", secondBuilder.equals(secondBuilder)); + assertTrue("highlighter is not equal to its copy", firstBuilder.equals(secondBuilder)); + assertTrue("equals is not symmetric", secondBuilder.equals(firstBuilder)); + assertThat("highlighter copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(firstBuilder.hashCode())); + + HighlightBuilder thirdBuilder = serializedCopy(secondBuilder); + assertTrue("highlighter is not equal to self", thirdBuilder.equals(thirdBuilder)); + 
assertTrue("highlighter is not equal to its copy", secondBuilder.equals(thirdBuilder)); + assertThat("highlighter copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); + assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder)); + assertThat("highlighter copy's hashcode is different from original hashcode", firstBuilder.hashCode(), equalTo(thirdBuilder.hashCode())); + assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder)); + assertTrue("equals is not symmetric", thirdBuilder.equals(firstBuilder)); + } + } + + /** + * create random shape that is put under test + */ + private static HighlightBuilder randomHighlighterBuilder() { + HighlightBuilder testHighlighter = new HighlightBuilder(); + setRandomCommonOptions(testHighlighter); + testHighlighter.useExplicitFieldOrder(randomBoolean()); + if (randomBoolean()) { + testHighlighter.encoder(randomFrom(Arrays.asList(new String[]{"default", "html"}))); + } + int numberOfFields = randomIntBetween(1,5); + for (int i = 0; i < numberOfFields; i++) { + Field field = new Field(randomAsciiOfLengthBetween(1, 10)); + setRandomCommonOptions(field); + if (randomBoolean()) { + field.fragmentOffset(randomIntBetween(1, 100)); + } + if (randomBoolean()) { + field.matchedFields(randomStringArray(0, 4)); + } + testHighlighter.field(field); + } + return testHighlighter; + } + + private static void setRandomCommonOptions(AbstractHighlighterBuilder highlightBuilder) { + if (randomBoolean()) { + highlightBuilder.preTags(randomStringArray(0, 3)); + } + if (randomBoolean()) { + highlightBuilder.postTags(randomStringArray(0, 3)); + } + if (randomBoolean()) { + highlightBuilder.fragmentSize(randomIntBetween(0, 100)); + } + if (randomBoolean()) { + highlightBuilder.numOfFragments(randomIntBetween(0, 10)); + } + if (randomBoolean()) { + highlightBuilder.highlighterType(randomAsciiOfLengthBetween(1, 10)); + } + if (randomBoolean()) { + highlightBuilder.fragmenter(randomAsciiOfLengthBetween(1, 10)); + } + if (randomBoolean()) { + QueryBuilder highlightQuery; + switch (randomInt(2)) { + case 0: + highlightQuery = new MatchAllQueryBuilder(); + break; + case 1: + highlightQuery = new IdsQueryBuilder(); + break; + default: + case 2: + highlightQuery = new TermQueryBuilder(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)); + break; + } + highlightQuery.boost((float) randomDoubleBetween(0, 10, false)); + highlightBuilder.highlightQuery(highlightQuery); + } + if (randomBoolean()) { + highlightBuilder.order(randomAsciiOfLengthBetween(1, 10)); + } + if (randomBoolean()) { + highlightBuilder.highlightFilter(randomBoolean()); + } + if (randomBoolean()) { + highlightBuilder.forceSource(randomBoolean()); + } + if (randomBoolean()) { + highlightBuilder.boundaryMaxScan(randomIntBetween(0, 10)); + } + if (randomBoolean()) { + highlightBuilder.boundaryChars(randomAsciiOfLengthBetween(1, 10).toCharArray()); + } + if (randomBoolean()) { + highlightBuilder.noMatchSize(randomIntBetween(0, 10)); + } + if (randomBoolean()) { + highlightBuilder.phraseLimit(randomIntBetween(0, 10)); + } + if (randomBoolean()) { + int items = randomIntBetween(0, 5); + Map options = new HashMap(items); + for (int i = 0; i < items; i++) { + Object value = null; + switch (randomInt(2)) { + case 0: + value = randomAsciiOfLengthBetween(1, 10); + break; + case 1: + value = new Integer(randomInt(1000)); + break; + case 2: + value = new Boolean(randomBoolean()); + break; + } + 
options.put(randomAsciiOfLengthBetween(1, 10), value);
+            }
+            highlightBuilder.options(options);
+        }
+        if (randomBoolean()) {
+            highlightBuilder.requireFieldMatch(randomBoolean());
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    private static void mutateCommonOptions(AbstractHighlighterBuilder highlightBuilder) {
+        switch (randomIntBetween(1, 16)) {
+        case 1:
+            highlightBuilder.preTags(randomStringArray(4, 6));
+            break;
+        case 2:
+            highlightBuilder.postTags(randomStringArray(4, 6));
+            break;
+        case 3:
+            highlightBuilder.fragmentSize(randomIntBetween(101, 200));
+            break;
+        case 4:
+            highlightBuilder.numOfFragments(randomIntBetween(11, 20));
+            break;
+        case 5:
+            highlightBuilder.highlighterType(randomAsciiOfLengthBetween(11, 20));
+            break;
+        case 6:
+            highlightBuilder.fragmenter(randomAsciiOfLengthBetween(11, 20));
+            break;
+        case 7:
+            highlightBuilder.highlightQuery(new TermQueryBuilder(randomAsciiOfLengthBetween(11, 20), randomAsciiOfLengthBetween(11, 20)));
+            break;
+        case 8:
+            highlightBuilder.order(randomAsciiOfLengthBetween(11, 20));
+            break;
+        case 9:
+            highlightBuilder.highlightFilter(toggleOrSet(highlightBuilder.highlightFilter()));
+            break;
+        case 10:
+            highlightBuilder.forceSource(toggleOrSet(highlightBuilder.forceSource()));
+            break;
+        case 11:
+            highlightBuilder.boundaryMaxScan(randomIntBetween(11, 20));
+            break;
+        case 12:
+            highlightBuilder.boundaryChars(randomAsciiOfLengthBetween(11, 20).toCharArray());
+            break;
+        case 13:
+            highlightBuilder.noMatchSize(randomIntBetween(11, 20));
+            break;
+        case 14:
+            highlightBuilder.phraseLimit(randomIntBetween(11, 20));
+            break;
+        case 15:
+            int items = 6;
+            Map options = new HashMap(items);
+            for (int i = 0; i < items; i++) {
+                options.put(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
+            }
+            highlightBuilder.options(options);
+            break;
+        case 16:
+            highlightBuilder.requireFieldMatch(toggleOrSet(highlightBuilder.requireFieldMatch()));
+            break;
+        }
+    }
+
+    private static Boolean toggleOrSet(Boolean flag) {
+        if (flag == null) {
+            return randomBoolean();
+        } else {
+            return !flag.booleanValue();
+        }
+    }
+
+    private static String[] randomStringArray(int minSize, int maxSize) {
+        int size = randomIntBetween(minSize, maxSize);
+        String[] randomStrings = new String[size];
+        for (int f = 0; f < size; f++) {
+            randomStrings[f] = randomAsciiOfLengthBetween(1, 10);
+        }
+        return randomStrings;
+    }
+
+    /**
+     * mutate the given highlighter builder so the returned one is different in one aspect
+     */
+    private static HighlightBuilder mutate(HighlightBuilder original) throws IOException {
+        HighlightBuilder mutation = serializedCopy(original);
+        if (randomBoolean()) {
+            mutateCommonOptions(mutation);
+        } else {
+            switch (randomIntBetween(0, 2)) {
+            // change settings that only exist on the top level
+            case 0:
+                mutation.useExplicitFieldOrder(!original.useExplicitFieldOrder()); break;
+            case 1:
+                mutation.encoder(original.encoder() + randomAsciiOfLength(2)); break;
+            case 2:
+                if (randomBoolean()) {
+                    // add another field
+                    mutation.field(new Field(randomAsciiOfLength(10)));
+                } else {
+                    // change an existing field
+                    List originalFields = original.fields();
+                    Field fieldToChange = originalFields.get(randomInt(originalFields.size() - 1));
+                    if (randomBoolean()) {
+                        fieldToChange.fragmentOffset(randomIntBetween(101, 200));
+                    } else {
+                        fieldToChange.matchedFields(randomStringArray(5, 10));
+                    }
+                }
+            }
+        }
+        return mutation;
+    }
+
+    private static HighlightBuilder serializedCopy(HighlightBuilder original) throws IOException {
+        try (BytesStreamOutput output = new BytesStreamOutput()) {
+            original.writeTo(output);
+            try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
+                return HighlightBuilder.PROTOTYPE.readFrom(in);
+            }
+        }
+    }
+}

From f7e7a6bfadfd025d8569a8c02a8ee1b2e85c4372 Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Tue, 1 Dec 2015 09:40:54 -0500
Subject: [PATCH 32/40] compile against compact3 profile

---
 .../main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 42098a59510..25b1ed3faf9 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -295,6 +295,9 @@ class BuildPlugin implements Plugin {
          */
         // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
         options.compilerArgs << '-Werror' << '-Xlint:all,-path' << '-Xdoclint:all' << '-Xdoclint:-missing'
+        // compile with compact 3 profile by default
+        // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
+        options.compilerArgs << '-profile' << 'compact3'
         options.encoding = 'UTF-8'
       }
     }

From 304695e7eea8d80e53b1313019370e2965a13c2a Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Tue, 1 Dec 2015 18:03:11 +0100
Subject: [PATCH 33/40] Don't treat _default_ as a regular type.

This adds safety that you can't index into the `_default_` type (it
was possible before), and can't add default mappers to the field type
lookups (was not happening in tests but I think this is still a good
check).

Also MapperService.types() now excludes `_default_` so that e.g. the
`ids` query does not try to search on this type anymore.
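
For illustration, a minimal sketch of the new failure mode. The index
name and empty source are made-up values; the snippet mirrors the
testIndexIntoDefaultMapping test added in this change, which unwraps
the ExecutionException thrown by execute().get():

```
try {
    // indexing any document under the _default_ type is now rejected
    client().prepareIndex("some_index", MapperService.DEFAULT_MAPPING, "1")
            .setSource("{}").execute().get();
    fail("expected the index request to be rejected");
} catch (ExecutionException e) {
    // cause is an IllegalArgumentException with the message:
    // "It is forbidden to index into the default mapping [_default_]"
}
```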
--- .../index/mapper/DocumentMapper.java | 2 +- .../index/mapper/DocumentParser.java | 4 ++ .../index/mapper/FieldTypeLookup.java | 7 ++- .../index/mapper/MapperService.java | 19 ++++-- .../index/mapper/FieldTypeLookupTests.java | 41 ++++++++----- .../index/mapper/MapperServiceTests.java | 59 +++++++++++++++++++ 6 files changed, 111 insertions(+), 21 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 5f266cbd48f..53e875cea91 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -351,7 +351,7 @@ public class DocumentMapper implements ToXContent { this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers); // finally update for the entire index - mapperService.addMappers(objectMappers, fieldMappers); + mapperService.addMappers(type, objectMappers, fieldMappers); } public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index de4dc387c88..aef8d474a6e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -79,6 +79,10 @@ class DocumentParser implements Closeable { } private ParsedDocument innerParseDocument(SourceToParse source) throws MapperParsingException { + if (docMapper.type().equals(MapperService.DEFAULT_MAPPING)) { + throw new IllegalArgumentException("It is forbidden to index into the default mapping [" + MapperService.DEFAULT_MAPPING + "]"); + } + ParseContext.InternalParseContext context = cache.get(); final Mapping mapping = docMapper.mapping(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index 1b0e827ac35..3fad73ebba6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -27,6 +27,7 @@ import java.util.Collection; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Objects; import java.util.Set; /** @@ -56,7 +57,11 @@ class FieldTypeLookup implements Iterable { * from the provided fields. If a field already exists, the field type will be updated * to use the new mappers field type. 
*/ - public FieldTypeLookup copyAndAddAll(Collection newFieldMappers) { + public FieldTypeLookup copyAndAddAll(String type, Collection newFieldMappers) { + Objects.requireNonNull(type, "type must not be null"); + if (MapperService.DEFAULT_MAPPING.equals(type)) { + throw new IllegalArgumentException("Default mappings should not be added to the lookup"); + } CopyOnWriteHashMap fullName = this.fullNameToFieldType; CopyOnWriteHashMap indexName = this.indexNameToFieldType; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 90909737805..384095ba137 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -267,7 +267,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } MapperUtils.collect(mapper.mapping().root, newObjectMappers, newFieldMappers); checkNewMappersCompatibility(newObjectMappers, newFieldMappers, updateAllTypes); - addMappers(newObjectMappers, newFieldMappers); + addMappers(mapper.type(), newObjectMappers, newFieldMappers); for (DocumentTypeListener typeListener : typeListeners) { typeListener.beforeCreate(mapper); @@ -318,7 +318,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { fieldTypes.checkCompatibility(newFieldMappers, updateAllTypes); } - protected void addMappers(Collection objectMappers, Collection fieldMappers) { + protected void addMappers(String type, Collection objectMappers, Collection fieldMappers) { assert mappingLock.isWriteLockedByCurrentThread(); ImmutableOpenMap.Builder fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers); for (ObjectMapper objectMapper : objectMappers) { @@ -328,7 +328,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } } this.fullPathObjectMappers = fullPathObjectMappers.build(); - this.fieldTypes = this.fieldTypes.copyAndAddAll(fieldMappers); + this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers); } public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException { @@ -345,10 +345,21 @@ public class MapperService extends AbstractIndexComponent implements Closeable { return mappers.containsKey(mappingType); } + /** + * Return the set of concrete types that have a mapping. + * NOTE: this does not return the default mapping. + */ public Collection types() { - return mappers.keySet(); + final Set types = new HashSet<>(mappers.keySet()); + types.remove(DEFAULT_MAPPING); + return Collections.unmodifiableSet(types); } + /** + * Return the {@link DocumentMapper} for the given type. By using the special + * {@value #DEFAULT_MAPPING} type, you can get a {@link DocumentMapper} for + * the default mapping. 
+ */ public DocumentMapper documentMapper(String type) { return mappers.get(type); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index 6ab4ca38d40..8d6a0800461 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -47,10 +48,20 @@ public class FieldTypeLookupTests extends ESTestCase { assertFalse(itr.hasNext()); } + public void testDefaultMapping() { + FieldTypeLookup lookup = new FieldTypeLookup(); + try { + lookup.copyAndAddAll(MapperService.DEFAULT_MAPPING, Collections.emptyList()); + fail(); + } catch (IllegalArgumentException expected) { + assertEquals("Default mappings should not be added to the lookup", expected.getMessage()); + } + } + public void testAddNewField() { FieldTypeLookup lookup = new FieldTypeLookup(); FakeFieldMapper f = new FakeFieldMapper("foo", "bar"); - FieldTypeLookup lookup2 = lookup.copyAndAddAll(newList(f)); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type", newList(f)); assertNull(lookup.get("foo")); assertNull(lookup.get("bar")); assertNull(lookup.getByIndexName("foo")); @@ -67,8 +78,8 @@ public class FieldTypeLookupTests extends ESTestCase { MappedFieldType originalFieldType = f.fieldType(); FakeFieldMapper f2 = new FakeFieldMapper("foo", "foo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f)); - FieldTypeLookup lookup2 = lookup.copyAndAddAll(newList(f2)); + lookup = lookup.copyAndAddAll("type1", newList(f)); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2)); assertNotSame(originalFieldType, f.fieldType()); assertSame(f.fieldType(), f2.fieldType()); @@ -82,8 +93,8 @@ public class FieldTypeLookupTests extends ESTestCase { FakeFieldMapper f2 = new FakeFieldMapper("bar", "foo"); MappedFieldType originalFieldType = f.fieldType(); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f)); - FieldTypeLookup lookup2 = lookup.copyAndAddAll(newList(f2)); + lookup = lookup.copyAndAddAll("type1", newList(f)); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2)); assertNotSame(originalFieldType, f.fieldType()); assertSame(f.fieldType(), f2.fieldType()); @@ -98,8 +109,8 @@ public class FieldTypeLookupTests extends ESTestCase { FakeFieldMapper f2 = new FakeFieldMapper("foo", "bar"); MappedFieldType originalFieldType = f.fieldType(); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f)); - FieldTypeLookup lookup2 = lookup.copyAndAddAll(newList(f2)); + lookup = lookup.copyAndAddAll("type1", newList(f)); + FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2)); assertNotSame(originalFieldType, f.fieldType()); assertSame(f.fieldType(), f2.fieldType()); @@ -113,18 +124,18 @@ public class FieldTypeLookupTests extends ESTestCase { FakeFieldMapper f = new FakeFieldMapper("foo", "foo"); FakeFieldMapper f2 = new FakeFieldMapper("bar", "bar"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f, f2)); + lookup = lookup.copyAndAddAll("type1", newList(f, f2)); try { FakeFieldMapper f3 = new FakeFieldMapper("foo", 
"bar"); - lookup.copyAndAddAll(newList(f3)); + lookup.copyAndAddAll("type2", newList(f3)); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("insane mappings")); } try { FakeFieldMapper f3 = new FakeFieldMapper("bar", "foo"); - lookup.copyAndAddAll(newList(f3)); + lookup.copyAndAddAll("type2", newList(f3)); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("insane mappings")); } @@ -139,7 +150,7 @@ public class FieldTypeLookupTests extends ESTestCase { public void testCheckCompatibilityMismatchedTypes() { FieldMapper f1 = new FakeFieldMapper("foo", "bar"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f1)); + lookup = lookup.copyAndAddAll("type", newList(f1)); MappedFieldType ft2 = FakeFieldMapper.makeOtherFieldType("foo", "foo"); FieldMapper f2 = new FakeFieldMapper("foo", ft2); @@ -161,7 +172,7 @@ public class FieldTypeLookupTests extends ESTestCase { public void testCheckCompatibilityConflict() { FieldMapper f1 = new FakeFieldMapper("foo", "bar"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f1)); + lookup = lookup.copyAndAddAll("type", newList(f1)); MappedFieldType ft2 = FakeFieldMapper.makeFieldType("foo", "bar"); ft2.setBoost(2.0f); @@ -196,7 +207,7 @@ public class FieldTypeLookupTests extends ESTestCase { FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz"); FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f1, f2)); + lookup = lookup.copyAndAddAll("type", newList(f1, f2)); Collection names = lookup.simpleMatchToIndexNames("b*"); assertTrue(names.contains("baz")); assertTrue(names.contains("boo")); @@ -206,7 +217,7 @@ public class FieldTypeLookupTests extends ESTestCase { FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz"); FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f1, f2)); + lookup = lookup.copyAndAddAll("type", newList(f1, f2)); Collection names = lookup.simpleMatchToFullName("b*"); assertTrue(names.contains("foo")); assertTrue(names.contains("bar")); @@ -215,7 +226,7 @@ public class FieldTypeLookupTests extends ESTestCase { public void testIteratorImmutable() { FakeFieldMapper f1 = new FakeFieldMapper("foo", "bar"); FieldTypeLookup lookup = new FieldTypeLookup(); - lookup = lookup.copyAndAddAll(newList(f1)); + lookup = lookup.copyAndAddAll("type", newList(f1)); try { Iterator itr = lookup.iterator(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index b37392821a5..2b200524b8e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -21,6 +21,8 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.IndexService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Rule; import org.junit.rules.ExpectedException; @@ -31,6 +33,11 @@ import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.hasToString; +import java.util.Arrays; 
+import java.util.Collections; +import java.util.HashSet; +import java.util.concurrent.ExecutionException; + public class MapperServiceTests extends ESSingleNodeTestCase { @Rule public ExpectedException expectedException = ExpectedException.none(); @@ -82,4 +89,56 @@ public class MapperServiceTests extends ESSingleNodeTestCase { .execute() .actionGet(); } + + public void testTypes() throws Exception { + IndexService indexService1 = createIndex("index1"); + MapperService mapperService = indexService1.mapperService(); + assertEquals(Collections.emptySet(), mapperService.types()); + + mapperService.merge("type1", new CompressedXContent("{\"type1\":{}}"), true, false); + assertNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); + assertEquals(Collections.singleton("type1"), mapperService.types()); + + mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent("{\"_default_\":{}}"), true, false); + assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); + assertEquals(Collections.singleton("type1"), mapperService.types()); + + mapperService.merge("type2", new CompressedXContent("{\"type2\":{}}"), true, false); + assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING)); + assertEquals(new HashSet<>(Arrays.asList("type1", "type2")), mapperService.types()); + } + + public void testIndexIntoDefaultMapping() throws Throwable { + // 1. test implicit index creation + try { + client().prepareIndex("index1", MapperService.DEFAULT_MAPPING, "1").setSource("{").execute().get(); + fail(); + } catch (Throwable t) { + if (t instanceof ExecutionException) { + t = ((ExecutionException) t).getCause(); + } + if (t instanceof IllegalArgumentException) { + assertEquals("It is forbidden to index into the default mapping [_default_]", t.getMessage()); + } else { + throw t; + } + } + + // 2. already existing index + IndexService indexService = createIndex("index2"); + try { + client().prepareIndex("index2", MapperService.DEFAULT_MAPPING, "2").setSource().execute().get(); + fail(); + } catch (Throwable t) { + if (t instanceof ExecutionException) { + t = ((ExecutionException) t).getCause(); + } + if (t instanceof IllegalArgumentException) { + assertEquals("It is forbidden to index into the default mapping [_default_]", t.getMessage()); + } else { + throw t; + } + } + assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING)); + } } From 9015d0ca73d8b7cfff6c839b643ab3367e411531 Mon Sep 17 00:00:00 2001 From: andrejserafim Date: Tue, 1 Dec 2015 16:33:46 +0000 Subject: [PATCH 34/40] Fix REST test command line instructions Closes #15154 --- TESTING.asciidoc | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index da238c3437b..569c16b0747 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -286,11 +286,20 @@ The REST layer is tested through specific tests that are shared between all the elasticsearch official clients and consist of YAML files that describe the operations to be executed and the obtained results that need to be tested. -The REST tests are run automatically when executing the maven test command. To run only the +The REST tests are run automatically when executing the "gradle check" command. 
To run only the REST tests use the following command: --------------------------------------------------------------------------- -gradle integTest -Dtests.filter="@Rest" +gradle :distribution:tar:integTest \ + -Dtests.class=org.elasticsearch.test.rest.RestIT +--------------------------------------------------------------------------- + +A specific test case can be run with + +--------------------------------------------------------------------------- +gradle :distribution:tar:integTest \ + -Dtests.class=org.elasticsearch.test.rest.RestIT \ + -Dtests.method="test {p0=cat.shards/10_basic/Help}" --------------------------------------------------------------------------- `RestNIT` are the executable test classes that runs all the From e52faa81cc7e028b11017b5b0b14866e5234bc95 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 1 Dec 2015 13:16:28 -0500 Subject: [PATCH 35/40] Fix typo in field name in MetaDataMappingService --- .../cluster/metadata/MetaDataMappingService.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index c2c05241679..c2725359140 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -53,7 +53,7 @@ public class MetaDataMappingService extends AbstractComponent { private final ClusterService clusterService; private final IndicesService indicesService; - final ClusterStateTaskExecutor refreshExectuor = new RefreshTaskExecutor(); + final ClusterStateTaskExecutor refreshExecutor = new RefreshTaskExecutor(); final ClusterStateTaskExecutor putMappingExecutor = new PutMappingExecutor(); private final NodeServicesProvider nodeServicesProvider; @@ -211,10 +211,10 @@ public class MetaDataMappingService extends AbstractComponent { public void refreshMapping(final String index, final String indexUUID, final String... 
types) {
         final RefreshTask refreshTask = new RefreshTask(index, indexUUID, types);
         clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]",
-            refreshTask,
-            ClusterStateTaskConfig.build(Priority.HIGH),
-            refreshExectuor,
-            (source, t) -> logger.warn("failure during [{}]", t, source)
+                refreshTask,
+                ClusterStateTaskConfig.build(Priority.HIGH),
+                refreshExecutor,
+                (source, t) -> logger.warn("failure during [{}]", t, source)
         );
     }

From fcb6f44acf92431a39e503e61261506b528bf20d Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 1 Dec 2015 11:04:46 -0800
Subject: [PATCH 36/40] Build: Add ability to specify the compact profile for javac

---
 .../main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 25b1ed3faf9..b8902e8fa12 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -283,6 +283,7 @@ class BuildPlugin implements Plugin {
 
     /** Adds compiler settings to the project */
     static void configureCompile(Project project) {
+        project.ext.compactProfile = 'compact3'
        project.afterEvaluate {
             // fail on all javac warnings
             project.tasks.withType(JavaCompile) {
@@ -297,7 +298,9 @@ class BuildPlugin implements Plugin {
                 options.compilerArgs << '-Werror' << '-Xlint:all,-path' << '-Xdoclint:all' << '-Xdoclint:-missing'
                 // compile with compact 3 profile by default
                 // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
-                options.compilerArgs << '-profile' << 'compact3'
+                if (project.compactProfile != 'full') {
+                    options.compilerArgs << '-profile' << project.compactProfile
+                }
                 options.encoding = 'UTF-8'
             }
         }

From d68c6673a2cd6ad8eab5d26c57d833f66682facc Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 1 Dec 2015 17:08:27 -0800
Subject: [PATCH 37/40] Build: Cleanup precommit task gradle code

This change attempts to simplify the gradle tasks for precommit. One
major part of that is using a "less groovy style", as well as being
more consistent about how tasks are created and where they are
configured. It also allows the things creating the tasks to set up
inter-task dependencies, instead of assuming them (ie decoupling from
tasks elsewhere in the build).
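
For illustration, the resulting wiring in a consuming build looks
roughly like this (a sketch only; the method and task names are the
ones introduced below, and the mapping example is the one used by
distribution/build.gradle):

```
// precommit is now created explicitly and ordered against tests:
Task precommit = PrecommitTasks.create(project, true) // true => include dependencyLicenses
project.check.dependsOn(precommit)
project.test.mustRunAfter(precommit)

// the dependencyLicenses task it creates can be configured directly:
dependencyLicenses {
    mapping from: /lucene-.*/, to: 'lucene'
}
```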
--- .../elasticsearch/gradle/BuildPlugin.groovy | 9 +- .../precommit/DependencyLicensesTask.groovy | 101 +++++++++++----- .../precommit/ForbiddenPatternsTask.groovy | 41 ++++--- .../gradle/precommit/JarHellTask.groovy | 62 ++++++++++ .../gradle/precommit/PrecommitTasks.groovy | 110 ++++-------------- .../gradle/precommit/UpdateShasTask.groovy | 65 +++++++++++ .../gradle/test/RestIntegTestTask.groovy | 1 + .../test/StandaloneTestBasePlugin.groovy | 3 +- .../gradle/test/StandaloneTestPlugin.groovy | 1 + core/build.gradle | 3 + distribution/build.gradle | 8 +- plugins/build.gradle | 5 - 12 files changed, 269 insertions(+), 140 deletions(-) create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy create mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/UpdateShasTask.groovy diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index b8902e8fa12..c4d0ced6b5c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -62,7 +62,7 @@ class BuildPlugin implements Plugin { configureCompile(project) configureTest(project) - PrecommitTasks.configure(project) + configurePrecommit(project) } /** Performs checks on the build environment and prints information about the build environment. */ @@ -416,4 +416,11 @@ class BuildPlugin implements Plugin { } return test } + + private static configurePrecommit(Project project) { + Task precommit = PrecommitTasks.create(project, true) + project.check.dependsOn(precommit) + project.test.mustRunAfter(precommit) + project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy index 1161fa35666..5b24bd32815 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy @@ -18,63 +18,100 @@ */ package org.elasticsearch.gradle.precommit -import org.gradle.api.DefaultTask -import org.gradle.api.GradleException -import org.gradle.api.InvalidUserDataException -import org.gradle.api.Project -import org.gradle.api.Task +import org.gradle.api.* import org.gradle.api.file.FileCollection import org.gradle.api.tasks.Input import org.gradle.api.tasks.InputDirectory import org.gradle.api.tasks.InputFiles -import org.gradle.api.tasks.StopActionException import org.gradle.api.tasks.TaskAction -import org.gradle.api.tasks.VerificationTask import java.nio.file.Files import java.security.MessageDigest import java.util.regex.Matcher import java.util.regex.Pattern -class DependencyLicensesTask extends DefaultTask { - static final String SHA_EXTENSION = '.sha1' - - static Task configure(Project project, Closure closure) { - DependencyLicensesTask task = project.tasks.create(type: DependencyLicensesTask, name: 'dependencyLicenses') - UpdateShasTask update = project.tasks.create(type: UpdateShasTask, name: 'updateShas') - update.parentTask = task - task.configure(closure) - project.check.dependsOn(task) - return task - } +/** + * A task to check licenses for dependencies. + * + * There are two parts to the check: + *
+ *   - LICENSE and NOTICE files
+ *   - SHA checksums for each dependency jar
+ *
+ * The directory to find the license and sha files in defaults to the dir @{code licenses}
+ * in the project directory for this task. You can override this directory:
+ *
+ *   dependencyLicenses {
+ *     licensesDir = project.file('mybetterlicensedir')
+ *   }
+ *
+ * The jar files to check default to the dependencies from the default configuration. You
+ * can override this, for example, to only check compile dependencies:
+ *
+ *   dependencyLicenses {
+ *     dependencies = project.configurations.compile
+ *   }
+ *
+ * Every jar must have a {@code .sha1} file in the licenses dir. These can be managed
+ * automatically using the {@code updateShas} helper task that is created along
+ * with this task. It will add {@code .sha1} files for new jars that are in dependencies
+ * and remove old {@code .sha1} files that are no longer needed.
+ *
+ * Every jar must also have a LICENSE and NOTICE file. However, multiple jars can share
+ * LICENSE and NOTICE files by mapping a pattern to the same name.
+ *
+ *   dependencyLicenses {
+ *     mapping from: /lucene-.*/, to: 'lucene'
+ *   }
+ */ +public class DependencyLicensesTask extends DefaultTask { + private static final String SHA_EXTENSION = '.sha1' + // TODO: we should be able to default this to eg compile deps, but we need to move the licenses + // check from distribution to core (ie this should only be run on java projects) + /** A collection of jar files that should be checked. */ @InputFiles - FileCollection dependencies + public FileCollection dependencies + /** The directory to find the license and sha files in. */ @InputDirectory - File licensesDir = new File(project.projectDir, 'licenses') + public File licensesDir = new File(project.projectDir, 'licenses') - LinkedHashMap mappings = new LinkedHashMap<>() + /** A map of patterns to prefix, used to find the LICENSE and NOTICE file. */ + private LinkedHashMap mappings = new LinkedHashMap<>() + /** + * Add a mapping from a regex pattern for the jar name, to a prefix to find + * the LICENSE and NOTICE file for that jar. + */ @Input - void mapping(Map props) { - String from = props.get('from') + public void mapping(Map props) { + String from = props.remove('from') if (from == null) { throw new InvalidUserDataException('Missing "from" setting for license name mapping') } - String to = props.get('to') + String to = props.remove('to') if (to == null) { throw new InvalidUserDataException('Missing "to" setting for license name mapping') } + if (props.isEmpty() == false) { + throw new InvalidUserDataException("Unknown properties for mapping on dependencyLicenses: ${props.keySet()}") + } mappings.put(from, to) } @TaskAction - void checkDependencies() { - // TODO: empty license dir (or error when dir exists and no deps) + public void checkDependencies() { if (licensesDir.exists() == false && dependencies.isEmpty() == false) { throw new GradleException("Licences dir ${licensesDir} does not exist, but there are dependencies") } + if (licensesDir.exists() && dependencies.isEmpty()) { + throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies") + } // order is the same for keys and values iteration since we use a linked hashmap List mapped = new ArrayList<>(mappings.values()) @@ -127,7 +164,7 @@ class DependencyLicensesTask extends DefaultTask { } } - void checkSha(File jar, String jarName, Set shaFiles) { + private void checkSha(File jar, String jarName, Set shaFiles) { File shaFile = new File(licensesDir, jarName + SHA_EXTENSION) if (shaFile.exists() == false) { throw new GradleException("Missing SHA for ${jarName}. Run 'gradle updateSHAs' to create") @@ -143,7 +180,7 @@ class DependencyLicensesTask extends DefaultTask { shaFiles.remove(shaFile) } - void checkFile(String name, String jarName, Map counters, String type) { + private void checkFile(String name, String jarName, Map counters, String type) { String fileName = "${name}-${type}" Integer count = counters.get(fileName) if (count == null) { @@ -158,10 +195,12 @@ class DependencyLicensesTask extends DefaultTask { counters.put(fileName, count + 1) } - static class UpdateShasTask extends DefaultTask { - DependencyLicensesTask parentTask + /** A helper task to update the sha files in the license dir. 
*/ + public static class UpdateShasTask extends DefaultTask { + private DependencyLicensesTask parentTask + @TaskAction - void updateShas() { + public void updateShas() { Set shaFiles = new HashSet() parentTask.licensesDir.eachFile { String name = it.getName() diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy index 6ed18f4d18c..5fa63956b57 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy @@ -19,10 +19,11 @@ package org.elasticsearch.gradle.precommit import org.gradle.api.DefaultTask +import org.gradle.api.GradleException +import org.gradle.api.InvalidUserDataException import org.gradle.api.file.FileCollection import org.gradle.api.tasks.InputFiles import org.gradle.api.tasks.OutputFile -import org.gradle.api.tasks.OutputFiles import org.gradle.api.tasks.SourceSet import org.gradle.api.tasks.TaskAction import org.gradle.api.tasks.util.PatternFilterable @@ -33,14 +34,19 @@ import java.util.regex.Pattern /** * Checks for patterns in source files for the project which are forbidden. */ -class ForbiddenPatternsTask extends DefaultTask { - Map patterns = new LinkedHashMap<>() - PatternFilterable filesFilter = new PatternSet() +public class ForbiddenPatternsTask extends DefaultTask { + + /** The rules: a map from the rule name, to a rule regex pattern. */ + private Map patterns = new LinkedHashMap<>() + /** A pattern set of which files should be checked. */ + private PatternFilterable filesFilter = new PatternSet() @OutputFile File outputMarker = new File(project.buildDir, "markers/forbiddenPatterns") - ForbiddenPatternsTask() { + public ForbiddenPatternsTask() { + description = 'Checks source files for invalid patterns like nocommits or tabs' + // we always include all source files, and exclude what should not be checked filesFilter.include('**') // exclude known binary extensions @@ -52,23 +58,28 @@ class ForbiddenPatternsTask extends DefaultTask { filesFilter.exclude('**/*.crt') filesFilter.exclude('**/*.png') - // TODO: add compile and test compile outputs as this tasks outputs, so we don't rerun when source files haven't changed + // add mandatory rules + patterns.put('nocommit', /nocommit/) + patterns.put('tab', /\t/) } /** Adds a file glob pattern to be excluded */ - void exclude(String... excludes) { + public void exclude(String... excludes) { this.filesFilter.exclude(excludes) } - /** Adds pattern to forbid */ + /** Adds a pattern to forbid. 
T */ void rule(Map props) { - String name = props.get('name') + String name = props.remove('name') if (name == null) { - throw new IllegalArgumentException('Missing [name] for invalid pattern rule') + throw new InvalidUserDataException('Missing [name] for invalid pattern rule') } - String pattern = props.get('pattern') + String pattern = props.remove('pattern') if (pattern == null) { - throw new IllegalArgumentException('Missing [pattern] for invalid pattern rule') + throw new InvalidUserDataException('Missing [pattern] for invalid pattern rule') + } + if (props.isEmpty() == false) { + throw new InvalidUserDataException("Unknown arguments for ForbiddenPatterns rule mapping: ${props.keySet()}") } // TODO: fail if pattern contains a newline, it won't work (currently) patterns.put(name, pattern) @@ -89,14 +100,14 @@ class ForbiddenPatternsTask extends DefaultTask { Pattern allPatterns = Pattern.compile('(' + patterns.values().join(')|(') + ')') List failures = new ArrayList<>() for (File f : files()) { - f.eachLine('UTF-8') { line, lineNumber -> + f.eachLine('UTF-8') { String line, int lineNumber -> if (allPatterns.matcher(line).find()) { - addErrorMessages(failures, f, (String)line, (int)lineNumber) + addErrorMessages(failures, f, line, lineNumber) } } } if (failures.isEmpty() == false) { - throw new IllegalArgumentException('Found invalid patterns:\n' + failures.join('\n')) + throw new GradleException('Found invalid patterns:\n' + failures.join('\n')) } outputMarker.setText('done', 'UTF-8') } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy new file mode 100644 index 00000000000..2873fbd4df5 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.precommit + +import org.elasticsearch.gradle.LoggedExec +import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.InputFile +import org.gradle.api.tasks.OutputFile + +/** + * Runs CheckJarHell on a classpath. + */ +public class JarHellTask extends LoggedExec { + + /** + * We use a simple "marker" file that we touch when the task succeeds + * as the task output. This is compared against the modified time of the + * inputs (ie the jars/class files). 
+ */ + @OutputFile + public File successMarker = new File(project.buildDir, 'markers/jarHell') + + /** The classpath to run jarhell check on, defaults to the test runtime classpath */ + @InputFile + public FileCollection classpath = project.sourceSets.test.runtimeClasspath + + public JarHellTask() { + project.afterEvaluate { + dependsOn(classpath) + description = "Runs CheckJarHell on ${classpath}" + executable = new File(project.javaHome, 'bin/java') + doFirst({ + /* JarHell doesn't like getting directories that don't exist but + gradle isn't especially careful about that. So we have to do it + filter it ourselves. */ + FileCollection taskClasspath = classpath.filter { it.exists() } + args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell') + }) + doLast({ + successMarker.parentFile.mkdirs() + successMarker.setText("", 'UTF-8') + }) + } + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index aebc00e038a..8b1a98139ba 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -18,16 +18,10 @@ */ package org.elasticsearch.gradle.precommit -import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis -import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApisExtension import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin -import org.gradle.api.GradleException import org.gradle.api.Project import org.gradle.api.Task -import org.gradle.api.file.FileCollection import org.gradle.api.plugins.JavaBasePlugin -import org.gradle.api.tasks.Exec -import org.gradle.api.tasks.TaskContainer /** * Validation tasks which should be run before committing. These run before tests. @@ -35,36 +29,34 @@ import org.gradle.api.tasks.TaskContainer class PrecommitTasks { /** Adds a precommit task, which depends on non-test verification tasks. 
*/ - static void configure(Project project) { - List precommitTasks = [ - configureForbiddenApis(project), - configureForbiddenPatterns(project.tasks), - configureJarHell(project)] + public static Task create(Project project, boolean includeDependencyLicenses) { - Map precommitOptions = [ - name: 'precommit', - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Runs all non-test checks.', - dependsOn: precommitTasks - ] - Task precommit = project.tasks.create(precommitOptions) - project.check.dependsOn(precommit) + List precommitTasks = [ + configureForbiddenApis(project), + project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), + project.tasks.create('jarHell', JarHellTask.class)] - // delay ordering relative to test tasks, since they may not be setup yet - project.afterEvaluate { - Task test = project.tasks.findByName('test') - if (test != null) { - test.mustRunAfter(precommit) - } - Task integTest = project.tasks.findByName('integTest') - if (integTest != null) { - integTest.mustRunAfter(precommit) - } + // tasks with just tests don't need dependency licenses, so this flag makes adding + // the task optional + if (includeDependencyLicenses) { + DependencyLicensesTask dependencyLicenses = project.tasks.create('dependencyLicenses', DependencyLicensesTask.class) + precommitTasks.add(dependencyLicenses) + // we also create the updateShas helper task that is associated with dependencyLicenses + UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class) + updateShas.parentTask = dependencyLicenses } + + Map precommitOptions = [ + name: 'precommit', + group: JavaBasePlugin.VERIFICATION_GROUP, + description: 'Runs all non-test checks.', + dependsOn: precommitTasks + ] + return project.tasks.create(precommitOptions) } - static Task configureForbiddenApis(Project project) { - project.pluginManager.apply('de.thetaphi.forbiddenapis') + private static Task configureForbiddenApis(Project project) { + project.pluginManager.apply(ForbiddenApisPlugin.class) project.forbiddenApis { internalRuntimeForbidden = true failOnUnsupportedJava = false @@ -75,7 +67,7 @@ class PrecommitTasks { Task mainForbidden = project.tasks.findByName('forbiddenApisMain') if (mainForbidden != null) { mainForbidden.configure { - bundledSignatures += ['jdk-system-out'] + bundledSignatures += 'jdk-system-out' signaturesURLs += [ getClass().getResource('/forbidden/core-signatures.txt'), getClass().getResource('/forbidden/third-party-signatures.txt')] @@ -84,63 +76,11 @@ class PrecommitTasks { Task testForbidden = project.tasks.findByName('forbiddenApisTest') if (testForbidden != null) { testForbidden.configure { - signaturesURLs += [getClass().getResource('/forbidden/test-signatures.txt')] + signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt') } } Task forbiddenApis = project.tasks.findByName('forbiddenApis') forbiddenApis.group = "" // clear group, so this does not show up under verification tasks return forbiddenApis } - - static Task configureForbiddenPatterns(TaskContainer tasks) { - Map options = [ - name: 'forbiddenPatterns', - type: ForbiddenPatternsTask, - description: 'Checks source files for invalid patterns like nocommits or tabs', - ] - return tasks.create(options) { - rule name: 'nocommit', pattern: /nocommit/ - rule name: 'tab', pattern: /\t/ - } - } - - /** - * Adds a task to run jar hell before on the test classpath. - * - * We use a simple "marker" file that we touch when the task succeeds - * as the task output. 
This is compared against the modified time of the - * inputs (ie the jars/class files). - */ - static Task configureJarHell(Project project) { - File successMarker = new File(project.buildDir, 'markers/jarHell') - Exec task = project.tasks.create(name: 'jarHell', type: Exec) - FileCollection testClasspath = project.sourceSets.test.runtimeClasspath - task.dependsOn(testClasspath) - task.inputs.files(testClasspath) - task.outputs.file(successMarker) - task.executable = new File(project.javaHome, 'bin/java') - task.doFirst({ - /* JarHell doesn't like getting directories that don't exist but - gradle isn't especially careful about that. So we have to do it - filter it ourselves. */ - def taskClasspath = testClasspath.filter { it.exists() } - task.args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell') - }) - if (task.logger.isInfoEnabled() == false) { - task.standardOutput = new ByteArrayOutputStream() - task.errorOutput = task.standardOutput - task.ignoreExitValue = true - task.doLast({ - if (execResult.exitValue != 0) { - logger.error(standardOutput.toString()) - throw new GradleException("JarHell failed") - } - }) - } - task.doLast({ - successMarker.parentFile.mkdirs() - successMarker.setText("", 'UTF-8') - }) - return task - } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/UpdateShasTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/UpdateShasTask.groovy new file mode 100644 index 00000000000..d0c73e6ad76 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/UpdateShasTask.groovy @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.gradle.precommit
+
+import org.gradle.api.DefaultTask
+import org.gradle.api.tasks.TaskAction
+
+import java.nio.file.Files
+import java.security.MessageDigest
+
+/**
+ * A task to update shas used by {@code DependencyLicensesTask}
+ */
+public class UpdateShasTask extends DefaultTask {
+
+    /** The parent dependency licenses task to use configuration from */
+    public DependencyLicensesTask parentTask
+
+    public UpdateShasTask() {
+        description = 'Updates the sha files for the dependencyLicenses check'
+    }
+
+    @TaskAction
+    public void updateShas() {
+        Set<File> shaFiles = new HashSet<File>()
+        parentTask.licensesDir.eachFile {
+            String name = it.getName()
+            if (name.endsWith(DependencyLicensesTask.SHA_EXTENSION)) {
+                shaFiles.add(it)
+            }
+        }
+        for (File dependency : parentTask.dependencies) {
+            String jarName = dependency.getName()
+            File shaFile = new File(parentTask.licensesDir, jarName + DependencyLicensesTask.SHA_EXTENSION)
+            if (shaFile.exists() == false) {
+                logger.lifecycle("Adding sha for ${jarName}")
+                String sha = MessageDigest.getInstance("SHA-1").digest(dependency.getBytes()).encodeHex().toString()
+                shaFile.setText(sha, 'UTF-8')
+            } else {
+                shaFiles.remove(shaFile)
+            }
+        }
+        shaFiles.each { shaFile ->
+            logger.lifecycle("Removing unused sha ${shaFile.getName()}")
+            Files.delete(shaFile.toPath())
+        }
+    }
+}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
index 47cbdd5cb48..4dc94c40108 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
@@ -58,6 +58,7 @@ class RestIntegTestTask extends RandomizedTestingTask {
         integTest.testClassesDir = test.testClassesDir
         integTest.mustRunAfter(test)
     }
+    integTest.mustRunAfter(project.precommit)
     project.check.dependsOn(integTest)
     RestSpecHack.configureDependencies(project)
     project.afterEvaluate {
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy
index 271bc5e58be..62f6bd553a4 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy
@@ -56,6 +56,7 @@ class StandaloneTestBasePlugin implements Plugin<Project> {
                 plusConfigurations = [project.configurations.testRuntime]
             }
         }
-        PrecommitTasks.configure(project)
+        PrecommitTasks.create(project, false)
+        project.check.dependsOn(project.precommit)
     }
 }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy
index 21bf7e9a01a..b560500aae3 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy
@@ -45,6 +45,7 @@ class StandaloneTestPlugin implements Plugin<Project> {
             classpath = project.sourceSets.test.runtimeClasspath
            testClassesDir project.sourceSets.test.output.classesDir
         }
+        test.mustRunAfter(project.precommit)
         project.check.dependsOn(test)
     }
 }
diff --git a/core/build.gradle b/core/build.gradle
index 618e252e9a8..62d5cd3a707 100644
--- a/core/build.gradle
+++ b/core/build.gradle
@@ -117,6 +117,9 @@ forbiddenPatterns {
   exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
 }
 
+// dependency licenses are currently checked in the distribution project
+dependencyLicenses.enabled = false
+
 if (isEclipse == false || project.path == ":core-tests") {
   task integTest(type: RandomizedTestingTask,
                  group: JavaBasePlugin.VERIFICATION_GROUP,
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 6ceb940e4a6..deeba3bef5b 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -20,6 +20,7 @@
 import org.apache.tools.ant.filters.FixCrLfFilter
 import org.apache.tools.ant.taskdefs.condition.Os
 import org.elasticsearch.gradle.precommit.DependencyLicensesTask
+import org.elasticsearch.gradle.precommit.UpdateShasTask
 import org.elasticsearch.gradle.test.RunTask
 import org.elasticsearch.gradle.EmptyDirTask
 import org.elasticsearch.gradle.MavenFilteringHack
@@ -293,13 +294,16 @@ configure(subprojects.findAll { it.name == 'deb' || it.name == 'rpm' }) {
 
 // TODO: dependency checks should really be when building the jar itself, which would remove the need
 // for this hackery and instead we can do this inside the BuildPlugin
-task check(group: 'Verification', description: 'Runs all checks.') {} // dummy task!
-DependencyLicensesTask.configure(project) {
+task dependencyLicenses(type: DependencyLicensesTask) {
   dependsOn = [dependencyFiles]
   dependencies = dependencyFiles
   mapping from: /lucene-.*/, to: 'lucene'
   mapping from: /jackson-.*/, to: 'jackson'
 }
+task check(group: 'Verification', description: 'Runs all checks.', dependsOn: dependencyLicenses) {} // dummy task!
+task updateShas(type: UpdateShasTask) {
+  parentTask = dependencyLicenses
+}
 
 RunTask.configure(project)
diff --git a/plugins/build.gradle b/plugins/build.gradle
index fae8113ce96..90429cc83d1 100644
--- a/plugins/build.gradle
+++ b/plugins/build.gradle
@@ -29,9 +29,4 @@ subprojects {
       // for local ES plugins, the name of the plugin is the same as the directory name
       project.name
   }
-
-  Task dependencyLicensesTask = DependencyLicensesTask.configure(project) {
-    dependencies = project.configurations.runtime - project.configurations.provided
-  }
-  project.precommit.dependsOn(dependencyLicensesTask)
 }

From 144225f4e558fae3d1266bacac22a71d429159bd Mon Sep 17 00:00:00 2001
From: Xavier Coulon
Date: Tue, 1 Dec 2015 23:03:28 +0100
Subject: [PATCH 38/40] Fixing typo

Replace "Too shade or not to shade..." with "To shade or not to shade..."

(cherry picked from commit f44c5a4)
(cherry picked from commit 12d5510)
---
 docs/java-api/index.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc
index f976ebc2426..16403d5c147 100644
--- a/docs/java-api/index.asciidoc
+++ b/docs/java-api/index.asciidoc
@@ -48,7 +48,7 @@ third party dependency that in turn depends on an outdated version of a package,
 * The second option is to relocate the troublesome dependencies and to shade them
 either with your own application or with Elasticsearch and any plugins needed by
 the Elasticsearch client.
-The https://www.elastic.co/blog/to-shade-or-not-to-shade["Too shade or not to shade" blog post] describes
+The https://www.elastic.co/blog/to-shade-or-not-to-shade["To shade or not to shade" blog post] describes
 all the steps for doing so.
== Embedding jar with dependencies From 9c77cdc2019b96ecf059eb0f8339875f0648d31d Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 2 Dec 2015 10:08:35 +0100 Subject: [PATCH 39/40] Fix build failure caused by Licenses Check in test-framework module Typical failure: ``` :test-framework:dependencyLicenses (Thread[main,5,main]) started. :test-framework:dependencyLicenses Executing task ':test-framework:dependencyLicenses' (up-to-date check took 0.0 secs) due to: Task has not declared any outputs. :test-framework:dependencyLicenses FAILED :test-framework:dependencyLicenses (Thread[main,5,main]) completed. Took 0.023 secs. FAILURE: Build failed with an exception. * What went wrong: Execution failed for task ':test-framework:dependencyLicenses'. > Licences dir /mnt/jenkins/workspace/es_core_master_strong/test-framework/licenses does not exist, but there are dependencies ``` Related to #15168 --- .../gradle/precommit/DependencyLicensesTask.groovy | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy index 5b24bd32815..d0466d7606d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/DependencyLicensesTask.groovy @@ -106,6 +106,10 @@ public class DependencyLicensesTask extends DefaultTask { @TaskAction public void checkDependencies() { + // TODO REMOVE THIS DIRTY FIX FOR #15168 + if (licensesDir.exists() == false) { + return + } if (licensesDir.exists() == false && dependencies.isEmpty() == false) { throw new GradleException("Licences dir ${licensesDir} does not exist, but there are dependencies") } From d23d8a891ff4f128dad6f0f90263cb2c373fe4cb Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 2 Dec 2015 10:22:52 +0100 Subject: [PATCH 40/40] Remove "empty" licenses dir Follow up #15168 We don't need to have "fake" licenses dir anymore. 
--- plugins/delete-by-query/licenses/no_deps.txt | 1 - plugins/discovery-multicast/licenses/no_deps.txt | 0 plugins/jvm-example/licenses/no_deps.txt | 1 - plugins/mapper-murmur3/licenses/no_deps.txt | 1 - plugins/mapper-size/licenses/no_deps.txt | 1 - plugins/site-example/licenses/no_deps.txt | 1 - plugins/store-smb/licenses/no_deps.txt | 1 - 7 files changed, 6 deletions(-) delete mode 100644 plugins/delete-by-query/licenses/no_deps.txt delete mode 100644 plugins/discovery-multicast/licenses/no_deps.txt delete mode 100644 plugins/jvm-example/licenses/no_deps.txt delete mode 100644 plugins/mapper-murmur3/licenses/no_deps.txt delete mode 100644 plugins/mapper-size/licenses/no_deps.txt delete mode 100644 plugins/site-example/licenses/no_deps.txt delete mode 100644 plugins/store-smb/licenses/no_deps.txt diff --git a/plugins/delete-by-query/licenses/no_deps.txt b/plugins/delete-by-query/licenses/no_deps.txt deleted file mode 100644 index 8cce254d037..00000000000 --- a/plugins/delete-by-query/licenses/no_deps.txt +++ /dev/null @@ -1 +0,0 @@ -This plugin has no third party dependencies diff --git a/plugins/discovery-multicast/licenses/no_deps.txt b/plugins/discovery-multicast/licenses/no_deps.txt deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/plugins/jvm-example/licenses/no_deps.txt b/plugins/jvm-example/licenses/no_deps.txt deleted file mode 100644 index 8cce254d037..00000000000 --- a/plugins/jvm-example/licenses/no_deps.txt +++ /dev/null @@ -1 +0,0 @@ -This plugin has no third party dependencies diff --git a/plugins/mapper-murmur3/licenses/no_deps.txt b/plugins/mapper-murmur3/licenses/no_deps.txt deleted file mode 100644 index 8cce254d037..00000000000 --- a/plugins/mapper-murmur3/licenses/no_deps.txt +++ /dev/null @@ -1 +0,0 @@ -This plugin has no third party dependencies diff --git a/plugins/mapper-size/licenses/no_deps.txt b/plugins/mapper-size/licenses/no_deps.txt deleted file mode 100644 index 8cce254d037..00000000000 --- a/plugins/mapper-size/licenses/no_deps.txt +++ /dev/null @@ -1 +0,0 @@ -This plugin has no third party dependencies diff --git a/plugins/site-example/licenses/no_deps.txt b/plugins/site-example/licenses/no_deps.txt deleted file mode 100644 index 8cce254d037..00000000000 --- a/plugins/site-example/licenses/no_deps.txt +++ /dev/null @@ -1 +0,0 @@ -This plugin has no third party dependencies diff --git a/plugins/store-smb/licenses/no_deps.txt b/plugins/store-smb/licenses/no_deps.txt deleted file mode 100644 index 8cce254d037..00000000000 --- a/plugins/store-smb/licenses/no_deps.txt +++ /dev/null @@ -1 +0,0 @@ -This plugin has no third party dependencies
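
Taken together, the build changes above make the dependency-license check per-project: a module that ships third-party jars declares its own `dependencyLicenses` task and keeps a `licenses/` directory, while a module with no dependencies simply has no such directory and the check skips it. The following is a minimal sketch of that wiring for a hypothetical module, not part of any patch above; the task types, `parentTask` property, and `mapping` syntax are taken from the diffs, and the configuration arithmetic comes from the removed plugins/build.gradle block:

```
import org.elasticsearch.gradle.precommit.DependencyLicensesTask
import org.elasticsearch.gradle.precommit.UpdateShasTask

// Verify that every jar on the runtime classpath has a matching license
// and sha file under licenses/.
task dependencyLicenses(type: DependencyLicensesTask) {
  dependencies = project.configurations.runtime - project.configurations.provided
  // several lucene-* jars are covered by a single 'lucene' license file
  mapping from: /lucene-.*/, to: 'lucene'
}
check.dependsOn(dependencyLicenses)

// Regenerate the per-jar sha files from the current classpath instead of
// editing them by hand.
task updateShas(type: UpdateShasTask) {
  parentTask = dependencyLicenses
}
```

With this in place, `gradle check` fails when a jar is added without a license, and `gradle updateShas` adds missing sha files and deletes stale ones, as implemented in UpdateShasTask above.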