From 3df2a086d4bf3c5608aabf06c1766e10c901af35 Mon Sep 17 00:00:00 2001 From: Masaru Hasegawa Date: Mon, 7 Nov 2016 17:20:06 +0900 Subject: [PATCH 01/41] Resolve index names in indices_boost This change allows specifying alias/wildcard expression in indices_boost. And added another format for specifying indices_boost. It accepts array of index name and boost pair. If an index is included in multiple aliases/wildcard expressions, the first match will be used. With new format, old format is marked as deprecated. Closes #4756 --- .../search/AbstractSearchAsyncAction.java | 13 +- .../SearchDfsQueryAndFetchAsyncAction.java | 9 +- .../SearchDfsQueryThenFetchAsyncAction.java | 10 +- .../SearchQueryAndFetchAsyncAction.java | 4 +- .../SearchQueryThenFetchAsyncAction.java | 6 +- .../action/search/TransportSearchAction.java | 38 +++- .../search/DefaultSearchContext.java | 9 +- .../elasticsearch/search/SearchService.java | 8 - .../search/builder/SearchSourceBuilder.java | 175 +++++++++++----- .../internal/FilteredSearchContext.java | 6 - .../search/internal/SearchContext.java | 2 - .../internal/ShardSearchLocalRequest.java | 33 ++- .../search/internal/ShardSearchRequest.java | 2 + .../internal/ShardSearchTransportRequest.java | 9 +- .../search/internal/SubSearchContext.java | 8 +- .../action/search/SearchAsyncActionTests.java | 2 +- .../index/SearchSlowLogTests.java | 5 + .../search/SearchServiceTests.java | 8 +- .../builder/SearchSourceBuilderTests.java | 74 +++++++ .../SimpleIndicesBoostSearchIT.java | 107 ---------- .../ShardSearchTransportRequestTests.java | 23 +- docs/build.gradle | 15 ++ .../search/request/index-boost.asciidoc | 21 ++ .../test/search/40_indices_boost.yaml | 196 ++++++++++++++++++ .../elasticsearch/test/TestSearchContext.java | 5 - 25 files changed, 555 insertions(+), 233 deletions(-) delete mode 100644 core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchIT.java create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/40_indices_boost.yaml diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index c634381d851..2479ff86750 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -50,6 +50,7 @@ import java.util.function.Function; abstract class AbstractSearchAsyncAction extends AbstractAsyncAction { + private static final float DEFAULT_INDEX_BOOST = 1.0f; protected final Logger logger; protected final SearchTransportService searchTransportService; @@ -66,6 +67,7 @@ abstract class AbstractSearchAsyncAction private final AtomicInteger totalOps = new AtomicInteger(); protected final AtomicArray firstResults; private final Map aliasFilter; + private final Map concreteIndexBoosts; private final long clusterStateVersion; private volatile AtomicArray shardFailures; private final Object shardFailuresMutex = new Object(); @@ -73,9 +75,9 @@ abstract class AbstractSearchAsyncAction protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService, Function nodeIdToDiscoveryNode, - Map aliasFilter, Executor executor, SearchRequest request, - ActionListener listener, GroupShardsIterator shardsIts, long startTime, - long clusterStateVersion, SearchTask task) { + Map aliasFilter, Map concreteIndexBoosts, + Executor executor, SearchRequest request, ActionListener 
listener, + GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { super(startTime); this.logger = logger; this.searchTransportService = searchTransportService; @@ -91,6 +93,7 @@ abstract class AbstractSearchAsyncAction expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); firstResults = new AtomicArray<>(shardsIts.size()); this.aliasFilter = aliasFilter; + this.concreteIndexBoosts = concreteIndexBoosts; } public void start() { @@ -125,8 +128,10 @@ abstract class AbstractSearchAsyncAction } else { AliasFilter filter = this.aliasFilter.get(shard.index().getUUID()); assert filter != null; + + float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST); ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shardIt.shardId(), shardsIts.size(), - filter, startTime()); + filter, indexBoost, startTime()); sendExecuteFirstPhase(node, transportRequest , new ActionListener() { @Override public void onResponse(FirstResult result) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index 54117495cba..9db3a21c485 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -47,10 +47,11 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToDiscoveryNode, - Map aliasFilter, SearchPhaseController searchPhaseController, - Executor executor, SearchRequest request, ActionListener listener, - GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + Map aliasFilter, Map concreteIndexBoosts, + SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, + ActionListener listener, GroupShardsIterator shardsIts, + long startTime, long clusterStateVersion, SearchTask task) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, request, listener, shardsIts, startTime, clusterStateVersion, task); this.searchPhaseController = searchPhaseController; queryFetchResults = new AtomicArray<>(firstResults.length()); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 3f8b20bc1fa..3fe24cc9911 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -55,11 +55,11 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToDiscoveryNode, - Map aliasFilter, SearchPhaseController searchPhaseController, - Executor executor, SearchRequest request, ActionListener listener, - GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, - SearchTask task) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + Map aliasFilter, Map concreteIndexBoosts, + SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, + ActionListener listener, GroupShardsIterator shardsIts, long startTime, + long clusterStateVersion, SearchTask 
task) { + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, request, listener, shardsIts, startTime, clusterStateVersion, task); this.searchPhaseController = searchPhaseController; queryResults = new AtomicArray<>(firstResults.length()); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index 25e7e14bb87..f597ede64bc 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -40,12 +40,12 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToDiscoveryNode, - Map aliasFilter, + Map aliasFilter, Map concreteIndexBoosts, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, request, listener, shardsIts, startTime, clusterStateVersion, task); this.searchPhaseController = searchPhaseController; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 23b744e5de1..7b300063291 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -50,13 +50,13 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToDiscoveryNode, Map aliasFilter, + Function nodeIdToDiscoveryNode, + Map aliasFilter, Map concreteIndexBoosts, SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) { - super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener, + super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, request, listener, shardsIts, startTime, clusterStateVersion, task); this.searchPhaseController = searchPhaseController; fetchResults = new AtomicArray<>(firstResults.length()); diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index b94f4b35096..48ee5cc288b 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -84,6 +85,29 @@ public class TransportSearchAction extends HandledTransportAction resolveIndexBoosts(SearchRequest searchRequest, 
ClusterState clusterState) { + if (searchRequest.source() == null) { + return Collections.emptyMap(); + } + + SearchSourceBuilder source = searchRequest.source(); + if (source.indexBoosts() == null) { + return Collections.emptyMap(); + } + + Map concreteIndexBoosts = new HashMap<>(); + for (SearchSourceBuilder.IndexBoost ib : source.indexBoosts()) { + Index[] concreteIndices = + indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), ib.getIndex()); + + for (Index concreteIndex : concreteIndices) { + concreteIndexBoosts.putIfAbsent(concreteIndex.getUUID(), ib.getBoost()); + } + } + + return Collections.unmodifiableMap(concreteIndexBoosts); + } + @Override protected void doExecute(Task task, SearchRequest searchRequest, ActionListener listener) { // pure paranoia if time goes backwards we are at least positive @@ -107,6 +131,8 @@ public class TransportSearchAction extends HandledTransportAction concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState); + // optimize search type for cases where there is only one shard group to search on if (shardIterators.size() == 1) { // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard @@ -125,7 +151,7 @@ public class TransportSearchAction extends HandledTransportAction aliasFilter, + Map concreteIndexBoosts, ActionListener listener) { final Function nodesLookup = state.nodes()::get; final long clusterStateVersion = state.version(); @@ -143,22 +170,22 @@ public class TransportSearchAction extends HandledTransportAction indexBoostMap = source.indexBoost(); - if (indexBoostMap != null) { - Float indexBoost = indexBoostMap.get(context.shardTarget().index()); - if (indexBoost != null) { - context.queryBoost(indexBoost); - } - } Map innerHitBuilders = new HashMap<>(); if (source.query() != null) { InnerHitBuilder.extractInnerHits(source.query(), innerHitBuilders); diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 34fb29305bd..091ddea07f0 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -19,16 +19,16 @@ package org.elasticsearch.search.builder; -import com.carrotsearch.hppc.ObjectFloatHashMap; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -63,10 +63,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; - -import static org.elasticsearch.common.collect.Tuple.tuple; /** * A search source builder allowing to easily build search source. 
Simple @@ -76,6 +72,8 @@ import static org.elasticsearch.common.collect.Tuple.tuple; * @see org.elasticsearch.action.search.SearchRequest#source(SearchSourceBuilder) */ public final class SearchSourceBuilder extends ToXContentToBytes implements Writeable { + private static final DeprecationLogger DEPRECATION_LOGGER = + new DeprecationLogger(Loggers.getLogger(SearchSourceBuilder.class)); public static final ParseField FROM_FIELD = new ParseField("from"); public static final ParseField SIZE_FIELD = new ParseField("size"); @@ -167,7 +165,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private List rescoreBuilders; - private ObjectFloatHashMap indexBoost = null; + private List indexBoosts = new ArrayList<>(); private List stats; @@ -193,13 +191,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); from = in.readVInt(); highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); - int indexBoostSize = in.readVInt(); - if (indexBoostSize > 0) { - indexBoost = new ObjectFloatHashMap<>(indexBoostSize); - for (int i = 0; i < indexBoostSize; i++) { - indexBoost.put(in.readString(), in.readFloat()); - } - } + indexBoosts = in.readList(IndexBoost::new); minScore = in.readOptionalFloat(); postQueryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); @@ -240,11 +232,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ out.writeOptionalWriteable(storedFieldsContext); out.writeVInt(from); out.writeOptionalWriteable(highlightBuilder); - int indexBoostSize = indexBoost == null ? 0 : indexBoost.size(); - out.writeVInt(indexBoostSize); - if (indexBoostSize > 0) { - writeIndexBoost(out); - } + out.writeList(indexBoosts); out.writeOptionalFloat(minScore); out.writeOptionalNamedWriteable(postQueryBuilder); out.writeOptionalNamedWriteable(queryBuilder); @@ -283,17 +271,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ out.writeOptionalWriteable(sliceBuilder); } - private void writeIndexBoost(StreamOutput out) throws IOException { - List> ibs = StreamSupport - .stream(indexBoost.spliterator(), false) - .map(i -> tuple(i.key, i.value)).sorted((o1, o2) -> o1.v1().compareTo(o2.v1())) - .collect(Collectors.toList()); - for (Tuple ib : ibs) { - out.writeString(ib.v1()); - out.writeFloat(ib.v2()); - } - } - /** * Sets the search query for this request. * @@ -816,28 +793,26 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } /** - * Sets the boost a specific index will receive when the query is executed + * Sets the boost a specific index or alias will receive when the query is executed * against it. * * @param index - * The index to apply the boost against + * The index or alias to apply the boost against * @param indexBoost * The boost to apply to the index */ public SearchSourceBuilder indexBoost(String index, float indexBoost) { - if (this.indexBoost == null) { - this.indexBoost = new ObjectFloatHashMap<>(); - } - this.indexBoost.put(index, indexBoost); + Objects.requireNonNull(index, "index must not be null"); + this.indexBoosts.add(new IndexBoost(index, indexBoost)); return this; } /** - * Gets the boost a specific indices will receive when the query is + * Gets the boost a specific indices or aliases will receive when the query is * executed against them. 
*/ - public ObjectFloatHashMap indexBoost() { - return indexBoost; + public List indexBoosts() { + return indexBoosts; } /** @@ -916,7 +891,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ rewrittenBuilder.storedFieldsContext = storedFieldsContext; rewrittenBuilder.from = from; rewrittenBuilder.highlightBuilder = highlightBuilder; - rewrittenBuilder.indexBoost = indexBoost; + rewrittenBuilder.indexBoosts = indexBoosts; rewrittenBuilder.minScore = minScore; rewrittenBuilder.postQueryBuilder = postQueryBuilder; rewrittenBuilder.profile = profile; @@ -1002,15 +977,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ scriptFields.add(new ScriptField(context)); } } else if (context.getParseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) { - indexBoost = new ObjectFloatHashMap<>(); + DEPRECATION_LOGGER.deprecated( + "Object format in indices_boost is deprecated, please use array format instead"); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token.isValue()) { - indexBoost.put(currentFieldName, parser.floatValue()); + indexBoosts.add(new IndexBoost(currentFieldName, parser.floatValue())); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + - " in [" + currentFieldName + "].", parser.getTokenLocation()); + " in [" + currentFieldName + "].", parser.getTokenLocation()); } } } else if (context.getParseFieldMatcher().match(currentFieldName, AGGREGATIONS_FIELD) @@ -1059,9 +1035,13 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ docValueFields.add(parser.text()); } else { throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING + - "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); + "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); } } + } else if (context.getParseFieldMatcher().match(currentFieldName, INDICES_BOOST_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + indexBoosts.add(new IndexBoost(context)); + } } else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) { sorts = new ArrayList<>(SortBuilder.fromXContent(context)); } else if (context.getParseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) { @@ -1191,18 +1171,13 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.field(SLICE.getPreferredName(), sliceBuilder); } - if (indexBoost != null) { - builder.startObject(INDICES_BOOST_FIELD.getPreferredName()); - assert !indexBoost.containsKey(null); - final Object[] keys = indexBoost.keys; - final float[] values = indexBoost.values; - for (int i = 0; i < keys.length; i++) { - if (keys[i] != null) { - builder.field((String) keys[i], values[i]); - } - } + builder.startArray(INDICES_BOOST_FIELD.getPreferredName()); + for (IndexBoost ib : indexBoosts) { + builder.startObject(); + builder.field(ib.index, ib.boost); builder.endObject(); } + builder.endArray(); if (aggregations != null) { builder.field(AGGREGATIONS_FIELD.getPreferredName(), aggregations); @@ -1237,6 +1212,91 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } } + public static class IndexBoost implements Writeable, ToXContent { + private final String index; + private final float boost; + + 
IndexBoost(String index, float boost) { + this.index = index; + this.boost = boost; + } + + IndexBoost(StreamInput in) throws IOException { + index = in.readString(); + boost = in.readFloat(); + } + + IndexBoost(QueryParseContext context) throws IOException { + XContentParser parser = context.parser(); + XContentParser.Token token = parser.currentToken(); + + if (token == XContentParser.Token.START_OBJECT) { + token = parser.nextToken(); + if (token == XContentParser.Token.FIELD_NAME) { + index = parser.currentName(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.FIELD_NAME + + "] in [" + INDICES_BOOST_FIELD + "] but found [" + token + "]", parser.getTokenLocation()); + } + token = parser.nextToken(); + if (token == XContentParser.Token.VALUE_NUMBER) { + boost = parser.floatValue(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_NUMBER + + "] in [" + INDICES_BOOST_FIELD + "] but found [" + token + "]", parser.getTokenLocation()); + } + token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.END_OBJECT + + "] in [" + INDICES_BOOST_FIELD + "] but found [" + token + "]", parser.getTokenLocation()); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.START_OBJECT + + "] in [" + parser.currentName() + "] but found [" + token + "]", parser.getTokenLocation()); + } + } + + public String getIndex() { + return index; + } + + public float getBoost() { + return boost; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeFloat(boost); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(index, boost); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(index, boost); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + IndexBoost other = (IndexBoost) obj; + return Objects.equals(index, other.index) + && Objects.equals(boost, other.boost); + } + + } public static class ScriptField implements Writeable, ToXContent { private final boolean ignoreFailure; @@ -1352,8 +1412,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ @Override public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, docValueFields, storedFieldsContext, from, highlightBuilder, - indexBoost, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, size, sorts, searchAfterBuilder, - sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, profile, extBuilders); + indexBoosts, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, size, + sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, + profile, extBuilders); } @Override @@ -1372,7 +1433,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ && Objects.equals(storedFieldsContext, other.storedFieldsContext) && Objects.equals(from, other.from) && Objects.equals(highlightBuilder, other.highlightBuilder) - && Objects.equals(indexBoost, other.indexBoost) + && 
Objects.equals(indexBoosts, other.indexBoosts) && Objects.equals(minScore, other.minScore) && Objects.equals(postQueryBuilder, other.postQueryBuilder) && Objects.equals(queryBuilder, other.queryBuilder) diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index ec5cbf145d3..ad86b0b69fa 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -37,7 +37,6 @@ import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.SearchContextAggregations; @@ -144,11 +143,6 @@ public abstract class FilteredSearchContext extends SearchContext { return in.queryBoost(); } - @Override - public SearchContext queryBoost(float queryBoost) { - return in.queryBoost(queryBoost); - } - @Override public long getOriginNanoTime() { return in.getOriginNanoTime(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index ce845a84169..c89a4a4892b 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -148,8 +148,6 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas public abstract float queryBoost(); - public abstract SearchContext queryBoost(float queryBoost); - public abstract long getOriginNanoTime(); public abstract ScrollContext scrollContext(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 5cc06ed055f..1f3868c0dba 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.internal; +import org.elasticsearch.Version; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Strings; @@ -34,6 +35,7 @@ import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; +import java.util.Optional; /** * Shard level search request that gets created and consumed on the local node. 
@@ -63,6 +65,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { private Scroll scroll; private String[] types = Strings.EMPTY_ARRAY; private AliasFilter aliasFilter; + private float indexBoost; private SearchSourceBuilder source; private Boolean requestCache; private long nowInMillis; @@ -73,9 +76,9 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } ShardSearchLocalRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards, - AliasFilter aliasFilter, long nowInMillis) { + AliasFilter aliasFilter, float indexBoost, long nowInMillis) { this(shardId, numberOfShards, searchRequest.searchType(), - searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter); + searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost); this.scroll = searchRequest.scroll(); this.nowInMillis = nowInMillis; } @@ -85,10 +88,11 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { this.nowInMillis = nowInMillis; this.aliasFilter = aliasFilter; this.shardId = shardId; + indexBoost = 1.0f; } public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, - Boolean requestCache, AliasFilter aliasFilter) { + Boolean requestCache, AliasFilter aliasFilter, float indexBoost) { this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; @@ -96,6 +100,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { this.types = types; this.requestCache = requestCache; this.aliasFilter = aliasFilter; + this.indexBoost = indexBoost; } @@ -134,6 +139,11 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { return aliasFilter.getQueryBuilder(); } + @Override + public float indexBoost() { + return indexBoost; + } + @Override public long nowInMillis() { return nowInMillis; @@ -167,6 +177,20 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { source = in.readOptionalWriteable(SearchSourceBuilder::new); types = in.readStringArray(); aliasFilter = new AliasFilter(in); + if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) { + indexBoost = in.readFloat(); + } else { + // Nodes < 5.2.0 doesn't send index boost. Read it from source. + if (source != null) { + Optional boost = source.indexBoosts() + .stream() + .filter(ib -> ib.getIndex().equals(shardId.getIndexName())) + .findFirst(); + indexBoost = boost.isPresent() ? 
boost.get().getBoost() : 1.0f; + } else { + indexBoost = 1.0f; + } + } nowInMillis = in.readVLong(); requestCache = in.readOptionalBoolean(); } @@ -181,6 +205,9 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { out.writeOptionalWriteable(source); out.writeStringArray(types); aliasFilter.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) { + out.writeFloat(indexBoost); + } if (!asKey) { out.writeVLong(nowInMillis); } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 01852506cdc..2491091e678 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -62,6 +62,8 @@ public interface ShardSearchRequest { QueryBuilder filteringAliases(); + float indexBoost(); + long nowInMillis(); Boolean requestCache(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 41f060cc1f7..0c4a0515dfd 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -54,8 +54,8 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha } public ShardSearchTransportRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards, - AliasFilter aliasFilter, long nowInMillis) { - this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardId, numberOfShards, aliasFilter, nowInMillis); + AliasFilter aliasFilter, float indexBoost, long nowInMillis) { + this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardId, numberOfShards, aliasFilter, indexBoost, nowInMillis); this.originalIndices = new OriginalIndices(searchRequest); } @@ -111,6 +111,11 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha return shardSearchLocalRequest.filteringAliases(); } + @Override + public float indexBoost() { + return shardSearchLocalRequest.indexBoost(); + } + @Override public long nowInMillis() { return shardSearchLocalRequest.nowInMillis(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index f9b6aeb482a..26d47a44f80 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -22,14 +22,13 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; -import org.elasticsearch.search.lookup.SearchLookup; import 
org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.sort.SortAndFormats; @@ -85,11 +84,6 @@ public class SubSearchContext extends FilteredSearchContext { throw new UnsupportedOperationException("this context should be read only"); } - @Override - public SearchContext queryBoost(float queryBoost) { - throw new UnsupportedOperationException("Not supported"); - } - @Override public SearchContext scrollContext(ScrollContext scrollContext) { throw new UnsupportedOperationException("Not supported"); diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index c4645e20a5e..86a8a8fa7c3 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -88,7 +88,7 @@ public class SearchAsyncActionTests extends ESTestCase { lookup.put(primaryNode.getId(), primaryNode); Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction(logger, transportService, lookup::get, - aliasFilters, null, request, responseListener, shardsIter, 0, 0, null) { + aliasFilters, Collections.emptyMap(), null, request, responseListener, shardsIter, 0, 0, null) { TestSearchResponse response = new TestSearchResponse(); @Override diff --git a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index cc647f5ff8d..39ccfd9f463 100644 --- a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -87,6 +87,11 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase { return null; } + @Override + public float indexBoost() { + return 1.0f; + } + @Override public long nowInMillis() { return 0; diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 2db44c9d0ba..13751ae4539 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -185,7 +185,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase { try { QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase( new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY)), + new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), new SearchTask(123L, "", "", "", null)); IntArrayList intCursors = new IntArrayList(1); intCursors.add(0); @@ -220,7 +220,8 @@ public class SearchServiceTests extends ESSingleNodeTestCase { new SearchSourceBuilder(), new String[0], false, - new AliasFilter(null, Strings.EMPTY_ARRAY)), + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f), null); // the search context should inherit the default timeout assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5))); @@ -234,7 +235,8 @@ public class SearchServiceTests extends ESSingleNodeTestCase { new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds)), new String[0], false, - new AliasFilter(null, 
Strings.EMPTY_ARRAY)), + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f), null); // the search context should inherit the query timeout assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds))); diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index c494e7c14eb..7e0d47dfcd4 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -301,4 +301,78 @@ public class SearchSourceBuilderTests extends AbstractSearchTestCase { String query = "{ \"query\": {} }"; assertParseSearchSource(builder, new BytesArray(query), ParseFieldMatcher.EMPTY); } + + public void testParseIndicesBoost() throws IOException { + { + String restContent = " { \"indices_boost\": {\"foo\": 1.0, \"bar\": 2.0}}"; + try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser), + searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers); + assertEquals(2, searchSourceBuilder.indexBoosts().size()); + assertEquals(new SearchSourceBuilder.IndexBoost("foo", 1.0f), searchSourceBuilder.indexBoosts().get(0)); + assertEquals(new SearchSourceBuilder.IndexBoost("bar", 2.0f), searchSourceBuilder.indexBoosts().get(1)); + } + } + + { + String restContent = "{" + + " \"indices_boost\" : [\n" + + " { \"foo\" : 1.0 },\n" + + " { \"bar\" : 2.0 },\n" + + " { \"baz\" : 3.0 }\n" + + " ]}"; + try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(createParseContext(parser), + searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers); + assertEquals(3, searchSourceBuilder.indexBoosts().size()); + assertEquals(new SearchSourceBuilder.IndexBoost("foo", 1.0f), searchSourceBuilder.indexBoosts().get(0)); + assertEquals(new SearchSourceBuilder.IndexBoost("bar", 2.0f), searchSourceBuilder.indexBoosts().get(1)); + assertEquals(new SearchSourceBuilder.IndexBoost("baz", 3.0f), searchSourceBuilder.indexBoosts().get(2)); + } + } + + { + String restContent = "{" + + " \"indices_boost\" : [\n" + + " { \"foo\" : 1.0, \"bar\": 2.0}\n" + // invalid format + " ]}"; + + assertIndicesBoostParseErrorMessage(restContent, "Expected [END_OBJECT] in [indices_boost] but found [FIELD_NAME]"); + } + + { + String restContent = "{" + + " \"indices_boost\" : [\n" + + " {}\n" + // invalid format + " ]}"; + + assertIndicesBoostParseErrorMessage(restContent, "Expected [FIELD_NAME] in [indices_boost] but found [END_OBJECT]"); + } + + { + String restContent = "{" + + " \"indices_boost\" : [\n" + + " { \"foo\" : \"bar\"}\n" + // invalid format + " ]}"; + + assertIndicesBoostParseErrorMessage(restContent, "Expected [VALUE_NUMBER] in [indices_boost] but found [VALUE_STRING]"); + } + + { + String restContent = "{" + + " \"indices_boost\" : [\n" + + " { \"foo\" : {\"bar\": 1}}\n" + // invalid format + " ]}"; + + assertIndicesBoostParseErrorMessage(restContent, "Expected [VALUE_NUMBER] in [indices_boost] but found [START_OBJECT]"); + } + } + + private void assertIndicesBoostParseErrorMessage(String restContent, String expectedErrorMessage) throws IOException { + try 
(XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { + ParsingException e = expectThrows(ParsingException.class, () -> SearchSourceBuilder.fromXContent(createParseContext(parser), + searchRequestParsers.aggParsers, searchRequestParsers.suggesters, searchRequestParsers.searchExtParsers)); + assertEquals(expectedErrorMessage, e.getMessage()); + } + } } diff --git a/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchIT.java b/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchIT.java deleted file mode 100644 index 168729d5c0b..00000000000 --- a/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchIT.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.indicesboost; - -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.test.ESIntegTestCase; - -import static org.elasticsearch.client.Requests.indexRequest; -import static org.elasticsearch.client.Requests.searchRequest; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.equalTo; - -public class SimpleIndicesBoostSearchIT extends ESIntegTestCase { - public void testIndicesBoost() throws Exception { - assertHitCount(client().prepareSearch().setQuery(termQuery("test", "value")).get(), 0); - - try { - client().prepareSearch("test").setQuery(termQuery("test", "value")).execute().actionGet(); - fail("should fail"); - } catch (Exception e) { - // ignore, no indices - } - - createIndex("test1", "test2"); - ensureGreen(); - client().index(indexRequest("test1").type("type1").id("1") - .source(jsonBuilder().startObject().field("test", "value check").endObject())).actionGet(); - client().index(indexRequest("test2").type("type1").id("1") - .source(jsonBuilder().startObject().field("test", "value beck").endObject())).actionGet(); - refresh(); - - float indexBoost = 1.1f; - - logger.info("--- QUERY_THEN_FETCH"); - - logger.info("Query with test1 boosted"); - SearchResponse response = client().search(searchRequest() - .searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().explain(true).indexBoost("test1", indexBoost).query(termQuery("test", "value"))) - ).actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(2L)); - logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), 
response.getHits().getAt(0).explanation()); - logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation()); - assertThat(response.getHits().getAt(0).index(), equalTo("test1")); - assertThat(response.getHits().getAt(1).index(), equalTo("test2")); - - logger.info("Query with test2 boosted"); - response = client().search(searchRequest() - .searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().explain(true).indexBoost("test2", indexBoost).query(termQuery("test", "value"))) - ).actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(2L)); - logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation()); - logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation()); - assertThat(response.getHits().getAt(0).index(), equalTo("test2")); - assertThat(response.getHits().getAt(1).index(), equalTo("test1")); - - logger.info("--- DFS_QUERY_THEN_FETCH"); - - logger.info("Query with test1 boosted"); - response = client().search(searchRequest() - .searchType(SearchType.DFS_QUERY_THEN_FETCH) - .source(searchSource().explain(true).indexBoost("test1", indexBoost).query(termQuery("test", "value"))) - ).actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(2L)); - logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation()); - logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation()); - assertThat(response.getHits().getAt(0).index(), equalTo("test1")); - assertThat(response.getHits().getAt(1).index(), equalTo("test2")); - - logger.info("Query with test2 boosted"); - response = client().search(searchRequest() - .searchType(SearchType.DFS_QUERY_THEN_FETCH) - .source(searchSource().explain(true).indexBoost("test2", indexBoost).query(termQuery("test", "value"))) - ).actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(2L)); - logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation()); - logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation()); - assertThat(response.getHits().getAt(0).index(), equalTo("test2")); - assertThat(response.getHits().getAt(1).index(), equalTo("test1")); - } -} diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index 168d8f869ba..2eeb0a1813b 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -81,6 +81,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey()); assertNotSame(deserializedRequest, shardSearchTransportRequest); assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases()); + assertEquals(deserializedRequest.indexBoost(), shardSearchTransportRequest.indexBoost(), 0.0f); } } } @@ -96,7 +97,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY); } return new 
ShardSearchTransportRequest(searchRequest, shardId, - randomIntBetween(1, 100), filteringAliases, Math.abs(randomLong())); + randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 1.0f : randomFloat(), Math.abs(randomLong())); } public void testFilteringAliases() throws Exception { @@ -213,4 +214,24 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { } } + // BWC test for changes from #21393 + public void testSerialize50RequestForIndexBoost() throws IOException { + BytesArray requestBytes = new BytesArray(Base64.getDecoder() + // this is a base64 encoded request generated with the same input + .decode("AAZpbmRleDEWTjEyM2trbHFUT21XZDY1Z2VDYlo5ZwABBAABAAIA/wD/////DwABBmluZGV4MUAAAAAAAAAAAP////8PAAAAAAAAAgAAAA" + + "AAAPa/q8mOKwIAJg==")); + + try (StreamInput in = new NamedWriteableAwareStreamInput(requestBytes.streamInput(), namedWriteableRegistry)) { + in.setVersion(Version.V_5_0_0); + ShardSearchTransportRequest readRequest = new ShardSearchTransportRequest(); + readRequest.readFrom(in); + assertEquals(0, in.available()); + assertEquals(2.0f, readRequest.indexBoost(), 0); + + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(Version.V_5_0_0); + readRequest.writeTo(output); + assertEquals(output.bytes().toBytesRef(), requestBytes.toBytesRef()); + } + } } diff --git a/docs/build.gradle b/docs/build.gradle index 5ba625cad94..5347e529fa3 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -368,3 +368,18 @@ buildRestTests.setups['range_index'] = ''' body: | {"index":{"_id": 1}} {"expected_attendees": {"gte": 10, "lte": 20}, "time_frame": {"gte": "2015-10-31 12:00:00", "lte": "2015-11-01"}}''' + +// Used by index boost doc +buildRestTests.setups['index_boost'] = ''' + - do: + indices.create: + index: index1 + - do: + indices.create: + index: index2 + + - do: + indices.put_alias: + index: index1 + name: alias1 +''' diff --git a/docs/reference/search/request/index-boost.asciidoc b/docs/reference/search/request/index-boost.asciidoc index bf766ce8a8c..683fe910f5e 100644 --- a/docs/reference/search/request/index-boost.asciidoc +++ b/docs/reference/search/request/index-boost.asciidoc @@ -6,6 +6,7 @@ across more than one indices. This is very handy when hits coming from one index matter more than hits coming from another index (think social graph where each user has an index). +deprecated[5.2.0, This format is deprecated. Please use array format instead.] [source,js] -------------------------------------------------- GET /_search @@ -17,3 +18,23 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[setup:index_boost warning:Object format in indices_boost is deprecated, please use array format instead] + +You can also specify it as an array to control the order of boosts. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "indices_boost" : [ + { "alias1" : 1.4 }, + { "index*" : 1.3 } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +This is important when you use aliases or wildcard expression. +If multiple matches are found, the first match will be used. +For example, if an index is included in both `alias1` and `index*`, boost value of `1.4` is applied. 
\ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/40_indices_boost.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/40_indices_boost.yaml new file mode 100644 index 00000000000..8271b0583f7 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/40_indices_boost.yaml @@ -0,0 +1,196 @@ +setup: + - do: + indices.create: + index: test_1 + - do: + indices.create: + index: test_2 + + - do: + indices.put_alias: + index: test_1 + name: alias_1 + + - do: + indices.put_alias: + index: test_2 + name: alias_2 + + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + + - do: + index: + index: test_2 + type: test + id: 1 + body: { foo: bar } + + - do: + indices.refresh: + index: [test_1, test_2] + +--- +"Indices boost using object": + - skip: + version: " - 5.1.99" + reason: deprecation was added in 5.2.0 + features: "warnings" + + - do: + warnings: + - 'Object format in indices_boost is deprecated, please use array format instead' + search: + index: _all + body: + indices_boost: {test_1: 2.0, test_2: 1.0} + + - match: { hits.total: 2 } + - match: { hits.hits.0._index: test_1 } + - match: { hits.hits.1._index: test_2 } + + - do: + warnings: + - 'Object format in indices_boost is deprecated, please use array format instead' + search: + index: _all + body: + indices_boost: {test_1: 1.0, test_2: 2.0} + + - match: { hits.total: 2 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.1._index: test_1 } + +--- +"Indices boost using array": + - skip: + version: " - 5.1.99" + reason: array format was added in 5.2.0 + + - do: + search: + index: _all + body: + indices_boost: [{test_1: 2.0}, {test_2: 1.0}] + + - match: { hits.total: 2 } + - match: { hits.hits.0._index: test_1 } + - match: { hits.hits.1._index: test_2 } + + - do: + search: + index: _all + body: + indices_boost: [{test_1: 1.0}, {test_2: 2.0}] + + - match: { hits.total: 2 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.1._index: test_1 } + +--- +"Indices boost using array with alias": + - skip: + version: " - 5.1.99" + reason: array format was added in 5.2.0 + + - do: + search: + index: _all + body: + indices_boost: [{alias_1: 2.0}] + + - match: { hits.total: 2} + - match: { hits.hits.0._index: test_1 } + - match: { hits.hits.1._index: test_2 } + + - do: + search: + index: _all + body: + indices_boost: [{alias_2: 2.0}] + + - match: { hits.total: 2} + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.1._index: test_1 } + +--- +"Indices boost using array with wildcard": + - skip: + version: " - 5.1.99" + reason: array format was added in 5.2.0 + + - do: + search: + index: _all + body: + indices_boost: [{"*_1": 2.0}] + + - match: { hits.total: 2} + - match: { hits.hits.0._index: test_1 } + - match: { hits.hits.1._index: test_2 } + + - do: + search: + index: _all + body: + indices_boost: [{"*_2": 2.0}] + + - match: { hits.total: 2} + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.1._index: test_1 } + +--- +"Indices boost using array multiple match": + - skip: + version: " - 5.1.99" + reason: array format was added in 5.2.0 + + - do: + search: + index: _all + body: + # First match (3.0) is used for test_1 + indices_boost: [{"*_1": 3.0}, {alias_1: 1.0}, {test_2: 2.0}] + + - match: { hits.total: 2} + - match: { hits.hits.0._index: test_1 } + - match: { hits.hits.1._index: test_2 } + + - do: + search: + index: _all + body: + # First match (1.0) is used for test_1 + indices_boost: [{"*_1": 
1.0}, {test_2: 2.0}, {alias_1: 3.0}] + + - match: { hits.total: 2} + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.1._index: test_1 } + +--- +"Indices boost for nonexistent index/alias": + - skip: + version: " - 5.1.99" + reason: array format was added in 5.2.0 + + - do: + catch: /no such index/ + search: + index: _all + body: + indices_boost: [{nonexistent: 2.0}, {test_1: 1.0}, {test_2: 2.0}] + + - do: + search: + index: _all + ignore_unavailable: true + body: + indices_boost: [{nonexistent: 2.0}, {test_1: 1.0}, {test_2: 2.0}] + + - match: { hits.total: 2} + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.1._index: test_1 } diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 1e86f940a11..b21142bd407 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -156,11 +156,6 @@ public class TestSearchContext extends SearchContext { return 0; } - @Override - public SearchContext queryBoost(float queryBoost) { - return null; - } - @Override public long getOriginNanoTime() { return originNanoTime; From 87a016a155b3c0ea99af076766d6bd8675d7faa9 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 12 Dec 2016 14:21:10 +0100 Subject: [PATCH 02/41] Update search template doc for 5.1 From 5.1, we can't use anymore `ScriptService.ScriptType` but `ScriptType`. Related to https://github.com/elastic/elasticsearch/pull/21136#issuecomment-266429243 --- docs/java-api/search.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/java-api/search.asciidoc b/docs/java-api/search.asciidoc index 4b858105a26..e0029930e6d 100644 --- a/docs/java-api/search.asciidoc +++ b/docs/java-api/search.asciidoc @@ -216,7 +216,7 @@ To execute a stored templates, use `ScriptService.ScriptType.STORED`: -------------------------------------------------- SearchResponse sr = new SearchTemplateRequestBuilder(client) .setScript("template_gender") <1> - .setScriptType(ScriptService.ScriptType.STORED) <2> + .setScriptType(ScriptType.STORED) <2> .setScriptParams(template_params) <3> .setRequest(new SearchRequest()) <4> .get() <5> @@ -241,7 +241,7 @@ sr = new SearchTemplateRequestBuilder(client) " }\n" + " }\n" + "}") - .setScriptType(ScriptService.ScriptType.INLINE) <2> + .setScriptType(ScriptType.INLINE) <2> .setScriptParams(template_params) <3> .setRequest(new SearchRequest()) <4> .get() <5> From 11a62483449c294380c214d8825cbc1837c54924 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Mon, 12 Dec 2016 14:41:42 +0100 Subject: [PATCH 03/41] Update script query doc for 5.1 From 5.1, we changed the order of Script class ctor. 
Related to https://github.com/elastic/elasticsearch/pull/21321#issuecomment-266432519 --- docs/java-api/query-dsl/script-query.asciidoc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/java-api/query-dsl/script-query.asciidoc b/docs/java-api/query-dsl/script-query.asciidoc index 5378a6ae37e..a79af0f2cef 100644 --- a/docs/java-api/query-dsl/script-query.asciidoc +++ b/docs/java-api/query-dsl/script-query.asciidoc @@ -25,13 +25,13 @@ You can use it then with: -------------------------------------------------- QueryBuilder qb = scriptQuery( new Script( - "myscript", <1> - ScriptType.FILE, <2> - "painless", <3> + ScriptType.FILE, <1> + "painless", <2> + "myscript", <3> Collections.singletonMap("param1", 5)) <4> ); -------------------------------------------------- -<1> Script name -<2> Script type: either `ScriptType.FILE`, `ScriptType.INLINE` or `ScriptType.INDEXED` -<3> Scripting engine +<1> Script type: either `ScriptType.FILE`, `ScriptType.INLINE` or `ScriptType.INDEXED` +<2> Scripting engine +<3> Script name <4> Parameters as a `Map` of `` From 320dacd6d26be58ee3322521fd53d87e8f5065e3 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 14 Dec 2016 11:35:33 +0100 Subject: [PATCH 04/41] Update script metric aggregation for 5.0 From 5.0, we are now using painless. --- .../scripted-metric-aggregation.asciidoc | 39 +++++++------------ 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/docs/java-api/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/java-api/aggregations/metrics/scripted-metric-aggregation.asciidoc index 4ac9701a2f4..b23a683b056 100644 --- a/docs/java-api/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/java-api/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -12,43 +12,32 @@ Here is an example on how to create the aggregation request: [source,java] -------------------------------------------------- ScriptedMetricAggregationBuilder aggregation = AggregationBuilders - .scriptedMetric("agg") - .initScript(new Script("_agg['heights'] = []")) - .mapScript(new Script("if (doc['gender'].value == \"male\") " + - "{ _agg.heights.add(doc['height'].value) } " + - "else " + - "{ _agg.heights.add(-1 * doc['height'].value) }")); + .scriptedMetric("agg") + .initScript(new Script("params._agg.heights = []")) + .mapScript(new Script("params._agg.heights.add(doc.gender.value == 'male' ? doc.height.value : -1.0 * doc.height.value)")); -------------------------------------------------- You can also specify a `combine` script which will be executed on each shard: [source,java] -------------------------------------------------- -ScriptedMetricAggregationBuilder aggregation = - AggregationBuilders - .scriptedMetric("agg") - .initScript(new Script("_agg['heights'] = []")) - .mapScript(new Script("if (doc['gender'].value == \"male\") " + - "{ _agg.heights.add(doc['height'].value) } " + - "else " + - "{ _agg.heights.add(-1 * doc['height'].value) }")) - .combineScript(new Script("heights_sum = 0; for (t in _agg.heights) { heights_sum += t }; return heights_sum")); +ScriptedMetricAggregationBuilder aggregation = AggregationBuilders + .scriptedMetric("agg") + .initScript(new Script("params._agg.heights = []")) + .mapScript(new Script("params._agg.heights.add(doc.gender.value == 'male' ? 
doc.height.value : -1.0 * doc.height.value)")) + .combineScript(new Script("double heights_sum = 0.0; for (t in params._agg.heights) { heights_sum += t } return heights_sum")); -------------------------------------------------- You can also specify a `reduce` script which will be executed on the node which gets the request: [source,java] -------------------------------------------------- -ScriptedMetricAggregationBuilder aggregation = - AggregationBuilders - .scriptedMetric("agg") - .initScript(new Script("_agg['heights'] = []")) - .mapScript(new Script("if (doc['gender'].value == \"male\") " + - "{ _agg.heights.add(doc['height'].value) } " + - "else " + - "{ _agg.heights.add(-1 * doc['height'].value) }")) - .combineScript(new Script("heights_sum = 0; for (t in _agg.heights) { heights_sum += t }; return heights_sum")) - .reduceScript(new Script("heights_sum = 0; for (a in _aggs) { heights_sum += a }; return heights_sum")); +ScriptedMetricAggregationBuilder aggregation = AggregationBuilders + .scriptedMetric("agg") + .initScript(new Script("params._agg.heights = []")) + .mapScript(new Script("params._agg.heights.add(doc.gender.value == 'male' ? doc.height.value : -1.0 * doc.height.value)")) + .combineScript(new Script("double heights_sum = 0.0; for (t in params._agg.heights) { heights_sum += t } return heights_sum")) + .reduceScript(new Script("double heights_sum = 0.0; for (a in params._aggs) { heights_sum += a } return heights_sum")); -------------------------------------------------- From 3005366b1322a251a49eadd8d4dad2cb9644ee4f Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 15 Dec 2016 11:08:17 +0100 Subject: [PATCH 05/41] Fix boost_mode propagation when the function score query builder is rewritten (#22172) This change fixes the cloning of the FunctionScoreQueryBuilder when the inner query or functions are rewritten. Fixes #22138 --- .../functionscore/FunctionScoreQueryBuilder.java | 1 + .../FunctionScoreQueryBuilderTests.java | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java index 17422ce4f0a..b037261d1f0 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java @@ -425,6 +425,7 @@ public class FunctionScoreQueryBuilder extends AbstractQueryBuilder Date: Thu, 15 Dec 2016 12:41:50 +0100 Subject: [PATCH 06/41] Handle race-condition when connection is closed before handshake listener was added Today sending a message on a closed channel doesn't throw an exception. The channel might just swallow the exception and instead inform the internal async exception handler that a channel got disconnected. This change adds a safety check that we fail the handshake if we registered a handler but the channel has already been closed, for instance due to a reset by peer.
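As an aside for reviewers, here is a minimal, self-contained sketch of the race and of the guard this commit adds. The names and types are hypothetical simplifications; the real logic lives in TcpTransport#executeHandshake and the channel close listener shown in the diff below.

[source,java]
--------------------------------------------------
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class HandshakeGuardSketch {
    private final Map<Long, CountDownLatch> pendingHandshakes = new ConcurrentHashMap<>();

    void handshake(long requestId, AtomicBoolean channelOpen) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        pendingHandshakes.put(requestId, latch);
        // Re-check *after* registering: the close listener may already have
        // fired, in which case nothing will ever count this latch down and
        // we would block until the handshake timeout expires.
        if (channelOpen.get() == false) {
            pendingHandshakes.remove(requestId);
            throw new IllegalStateException("handshake failed, channel already closed");
        }
        if (latch.await(30, TimeUnit.SECONDS) == false) {
            throw new IllegalStateException("handshake timed out");
        }
    }

    // Invoked from the channel's close listener: completes any handler that
    // was registered before the channel died.
    void onChannelClosed(long requestId) {
        CountDownLatch latch = pendingHandshakes.remove(requestId);
        if (latch != null) {
            latch.countDown();
        }
    }
}
--------------------------------------------------

Checking the channel state before the put would leave a mirror-image window open; with the check placed after registration, every close either completes the already-registered handler or is caught by the re-check.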
--- .../elasticsearch/transport/TcpTransport.java | 11 ++++++++- .../AbstractSimpleTransportTestCase.java | 24 ++++++++----------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 0617ecd64bb..0feca563c8d 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -570,6 +570,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } } catch (IOException e) { logger.warn("failed to close channel", e); + } finally { + onChannelClosed(channel); + } }); } @@ -1527,6 +1529,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i pendingHandshakes.put(requestId, handler); boolean success = false; try { + if (isOpen(channel) == false) { + // we have to protect ourselves here since sendRequestToChannel won't barf if the channel is closed. + // it's weird, but changing it would have a large impact on the exception handling code all over the codebase. + // yet, if we don't check the state here we might have registered a pending handshake handler but the close + // listener calling #onChannelClosed might have already run and we would be waiting on the latch below until we time out. + throw new IllegalStateException("handshake failed, channel already closed"); + } // for the request we use the minCompatVersion since we don't know what's the version of the node we talk to // we also have no payload on the request but the response will contain the actual version of the node we talk // to as the payload. @@ -1575,7 +1584,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } /** - * Called by sub-classes for each channel that is closed + * Called once the channel is closed, for instance due to a disconnect or a closed socket.
*/ protected final void onChannelClosed(Channel channel) { Optional> first = pendingHandshakes.entrySet().stream() diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 1d19e538807..b13bde5d134 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -51,6 +50,7 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; +import java.nio.channels.ClosedChannelException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -67,11 +67,13 @@ import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @@ -828,15 +830,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { assertTrue(inFlight.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS)); } - @TestLogging(value = "org.elasticsearch.test.transport.tracer:TRACE") public void testTracerLog() throws InterruptedException { - TransportRequestHandler handler = new TransportRequestHandler() { - @Override - public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { - channel.sendResponse(new StringMessageResponse("")); - } - }; - + TransportRequestHandler handler = (request, channel) -> channel.sendResponse(new StringMessageResponse("")); TransportRequestHandler handlerWithError = new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { @@ -1860,9 +1855,10 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Thread t = new Thread() { @Override public void run() { - try { - Socket accept = socket.accept(); - accept.close(); + try (Socket accept = socket.accept()) { + if (randomBoolean()) { // sometimes wait until the other side sends the message + accept.getInputStream().read(); + } } catch (IOException e) { throw new UncheckedIOException(e); } @@ -1879,8 +1875,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { builder.setHandshakeTimeout(TimeValue.timeValueHours(1)); ConnectTransportException ex = expectThrows(ConnectTransportException.class, () -> serviceA.connectToNode(dummy, builder.build())); - assertEquals("[][" + dummy.getAddress() +"] general node connection failure", ex.getMessage()); - assertEquals("handshake failed", 
ex.getCause().getMessage()); + assertEquals(ex.getMessage(), "[][" + dummy.getAddress() +"] general node connection failure"); + assertThat(ex.getCause().getMessage(), startsWith("handshake failed")); t.join(); } } From ef610636b6a2e4b39dfd422d5156f4876b43b48a Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 15 Dec 2016 12:47:01 +0100 Subject: [PATCH 07/41] Remove TCP handshake BWC from master (#22151) Since #22094 has been back-ported to 5.2, we can remove all BWC layers from master, as all supported versions will handle handshake requests. Relates to #22094 --- .../elasticsearch/transport/TcpTransport.java | 22 +++---------------- .../AbstractSimpleTransportTestCase.java | 9 +++----- 2 files changed, 6 insertions(+), 25 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 0feca563c8d..c3ad1f5afe1 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -245,7 +245,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i private static class HandshakeResponseHandler implements TransportResponseHandler { final AtomicReference versionRef = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); - final AtomicBoolean handshakeNotSupported = new AtomicBoolean(false); final AtomicReference exceptionRef = new AtomicReference<>(); final Channel channel; @@ -261,24 +260,15 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i @Override public void handleResponse(VersionHandshakeResponse response) { final boolean success = versionRef.compareAndSet(null, response.version); - assert success; latch.countDown(); + assert success; } @Override public void handleException(TransportException exp) { - Throwable cause = exp.getCause(); - if (cause != null - && cause instanceof ActionNotFoundTransportException - // this will happen if we talk to a node (pre 5.2) that doesn't have a handshake handler - // we will just treat the node as a 5.0.0 node unless the discovery node that is used to connect has a higher version.
- && cause.getMessage().equals("No handler for action [internal:tcp/handshake]")) { - handshakeNotSupported.set(true); - } else { - final boolean success = exceptionRef.compareAndSet(null, exp); - assert success; - } + final boolean success = exceptionRef.compareAndSet(null, exp); latch.countDown(); + assert success; } @Override @@ -1546,12 +1536,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i throw new ConnectTransportException(node, "handshake_timeout[" + timeout + "]"); } success = true; - if (handler.handshakeNotSupported.get()) { - // this is a BWC layer, if we talk to a pre 5.2 node then the handshake is not supported - // this will go away in master once it's all ported to 5.2 but for now we keep this to make - // the backport straight forward - return null; - } if (exceptionRef.get() != null) { throw new IllegalStateException("handshake failed", exceptionRef.get()); } else { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index b13bde5d134..2beff7890b3 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1805,12 +1805,9 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // this acts like a node that doesn't have support for handshakes DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); - serviceA.connectToNode(node); - TcpTransport.NodeChannels connection = originalTransport.getConnection(node); - Version version = originalTransport.executeHandshake(node, connection.channel(TransportRequestOptions.Type.PING), - TimeValue.timeValueSeconds(10)); - assertNull(version); - serviceA.disconnectFromNode(node); + ConnectTransportException exception = expectThrows(ConnectTransportException.class, () -> serviceA.connectToNode(node)); + assertTrue(exception.getCause() instanceof IllegalStateException); + assertEquals("handshake failed", exception.getCause().getMessage()); } try (TransportService service = buildService("TS_TPC", Version.CURRENT, null)) { From 80d3d790ae291476bbb66671b0c5b551e4717c26 Mon Sep 17 00:00:00 2001 From: Aaron Spiegel Date: Thu, 15 Dec 2016 04:29:11 -0800 Subject: [PATCH 08/41] Fix handling of segment file sizes in stats API This commit addresses an issue in the stats APIs where include_segment_file_sizes was not being consumed leading to requests containing this parameter being rejected. 
Relates #21879 --- .../admin/cluster/RestNodesStatsAction.java | 5 +- .../admin/indices/RestIndicesStatsAction.java | 5 +- .../rest-api-spec/api/indices.stats.json | 5 + .../rest-api-spec/api/nodes.stats.json | 5 + .../test/indices.stats/11_metric.yaml | 43 ++++ .../test/nodes.stats/11_indices_metrics.yaml | 211 ++++++++++++++++++ 6 files changed, 268 insertions(+), 6 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java index 93094844025..dce22c0139d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -34,7 +34,6 @@ import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; import java.io.IOException; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Locale; import java.util.Map; import java.util.Set; @@ -180,8 +179,8 @@ public class RestNodesStatsAction extends BaseRestHandler { if (nodesStatsRequest.indices().isSet(Flag.Indexing) && (request.hasParam("types"))) { nodesStatsRequest.indices().types(request.paramAsStringArray("types", null)); } - if (nodesStatsRequest.indices().isSet(Flag.Segments) && (request.hasParam("include_segment_file_sizes"))) { - nodesStatsRequest.indices().includeSegmentFileSizes(true); + if (nodesStatsRequest.indices().isSet(Flag.Segments)) { + nodesStatsRequest.indices().includeSegmentFileSizes(request.paramAsBoolean("include_segment_file_sizes", false)); } return channel -> client.admin().cluster().nodesStats(nodesStatsRequest, new NodesResponseRestListener<>(channel)); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java index 041a15b4119..5fddbe0dff9 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java @@ -37,7 +37,6 @@ import org.elasticsearch.rest.action.RestBuilderListener; import java.io.IOException; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Locale; import java.util.Map; import java.util.Set; @@ -136,8 +135,8 @@ public class RestIndicesStatsAction extends BaseRestHandler { request.paramAsStringArray("fielddata_fields", request.paramAsStringArray("fields", Strings.EMPTY_ARRAY))); } - if (indicesStatsRequest.segments() && request.hasParam("include_segment_file_sizes")) { - indicesStatsRequest.includeSegmentFileSizes(true); + if (indicesStatsRequest.segments()) { + indicesStatsRequest.includeSegmentFileSizes(request.paramAsBoolean("include_segment_file_sizes", false)); } return channel -> client.admin().indices().stats(indicesStatsRequest, new RestBuilderListener(channel) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json index 7099f3e2fd2..de1e9246f7b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json @@ -52,6 +52,11 @@ "types" : { "type" 
: "list", "description" : "A comma-separated list of document types for the `indexing` index metric" + }, + "include_segment_file_sizes": { + "type": "boolean", + "description": "Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)", + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index 665d6bd7a2c..a5910c9f328 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -63,6 +63,11 @@ "timeout": { "type" : "time", "description" : "Explicit operation timeout" + }, + "include_segment_file_sizes": { + "type": "boolean", + "description": "Whether to report the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested)", + "default": false } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml index 7b88ac57080..0f373b7177c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml @@ -121,3 +121,46 @@ setup: - is_false: _all.total.translog - is_true: _all.total.recovery +--- +"Metric - _all include_segment_file_sizes": + - do: + indices.stats: { metric: _all, include_segment_file_sizes: true } + + - is_true: _all.total.docs + - is_true: _all.total.store + - is_true: _all.total.indexing + - is_true: _all.total.get + - is_true: _all.total.search + - is_true: _all.total.merges + - is_true: _all.total.refresh + - is_true: _all.total.flush + - is_true: _all.total.warmer + - is_true: _all.total.query_cache + - is_true: _all.total.fielddata + - is_true: _all.total.completion + - is_true: _all.total.segments + - is_true: _all.total.translog + - is_true: _all.total.recovery + - is_true: _all.total.segments.file_sizes + +--- +"Metric - segments include_segment_file_sizes": + - do: + indices.stats: { metric: segments, include_segment_file_sizes: true } + + - is_false: _all.total.docs + - is_false: _all.total.store + - is_false: _all.total.indexing + - is_false: _all.total.get + - is_false: _all.total.search + - is_false: _all.total.merges + - is_false: _all.total.refresh + - is_false: _all.total.flush + - is_false: _all.total.warmer + - is_false: _all.total.query_cache + - is_false: _all.total.fielddata + - is_false: _all.total.completion + - is_true: _all.total.segments + - is_false: _all.total.translog + - is_false: _all.total.recovery + - is_true: _all.total.segments.file_sizes diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml new file mode 100644 index 00000000000..998909dd9cf --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml @@ -0,0 +1,211 @@ +--- +"Metric - blank": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: {} + + - is_true: nodes.$master.indices.docs + - is_true: nodes.$master.indices.store + - is_true: nodes.$master.indices.indexing + - is_true: nodes.$master.indices.get + - is_true: nodes.$master.indices.search + - is_true: nodes.$master.indices.merges + - is_true: 
nodes.$master.indices.refresh + - is_true: nodes.$master.indices.flush + - is_true: nodes.$master.indices.warmer + - is_true: nodes.$master.indices.query_cache + - is_true: nodes.$master.indices.fielddata + - is_true: nodes.$master.indices.completion + - is_true: nodes.$master.indices.segments + - is_true: nodes.$master.indices.translog + - is_true: nodes.$master.indices.recovery + +--- +"Metric - _all": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: { metric: _all } + + - is_true: nodes.$master.indices.docs + - is_true: nodes.$master.indices.store + - is_true: nodes.$master.indices.indexing + - is_true: nodes.$master.indices.get + - is_true: nodes.$master.indices.search + - is_true: nodes.$master.indices.merges + - is_true: nodes.$master.indices.refresh + - is_true: nodes.$master.indices.flush + - is_true: nodes.$master.indices.warmer + - is_true: nodes.$master.indices.query_cache + - is_true: nodes.$master.indices.fielddata + - is_true: nodes.$master.indices.completion + - is_true: nodes.$master.indices.segments + - is_true: nodes.$master.indices.translog + - is_true: nodes.$master.indices.recovery + +--- +"Metric - indices _all": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: { metric: indices, index_metric: _all } + + - is_true: nodes.$master.indices.docs + - is_true: nodes.$master.indices.store + - is_true: nodes.$master.indices.indexing + - is_true: nodes.$master.indices.get + - is_true: nodes.$master.indices.search + - is_true: nodes.$master.indices.merges + - is_true: nodes.$master.indices.refresh + - is_true: nodes.$master.indices.flush + - is_true: nodes.$master.indices.warmer + - is_true: nodes.$master.indices.query_cache + - is_true: nodes.$master.indices.fielddata + - is_true: nodes.$master.indices.completion + - is_true: nodes.$master.indices.segments + - is_true: nodes.$master.indices.translog + - is_true: nodes.$master.indices.recovery + +--- +"Metric - one": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: { metric: indices, index_metric: docs } + + - is_true: nodes.$master.indices.docs + - is_false: nodes.$master.indices.store + - is_false: nodes.$master.indices.indexing + - is_false: nodes.$master.indices.get + - is_false: nodes.$master.indices.search + - is_false: nodes.$master.indices.merges + - is_false: nodes.$master.indices.refresh + - is_false: nodes.$master.indices.flush + - is_false: nodes.$master.indices.warmer + - is_false: nodes.$master.indices.query_cache + - is_false: nodes.$master.indices.fielddata + - is_false: nodes.$master.indices.completion + - is_false: nodes.$master.indices.segments + - is_false: nodes.$master.indices.translog + - is_false: nodes.$master.indices.recovery + +--- +"Metric - multi": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: { metric: indices, index_metric: [ store, get, merge ] } + + - is_false: nodes.$master.indices.docs + - is_true: nodes.$master.indices.store + - is_false: nodes.$master.indices.indexing + - is_true: nodes.$master.indices.get + - is_false: nodes.$master.indices.search + - is_true: nodes.$master.indices.merges + - is_false: nodes.$master.indices.refresh + - is_false: nodes.$master.indices.flush + - is_false: nodes.$master.indices.warmer + - is_false: nodes.$master.indices.query_cache + - is_false: nodes.$master.indices.fielddata + - is_false: nodes.$master.indices.completion + - is_false: nodes.$master.indices.segments + - is_false: 
nodes.$master.indices.translog + - is_false: nodes.$master.indices.recovery + + +--- +"Metric - recovery": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: { metric: indices, index_metric: [ recovery ] } + + - is_false: nodes.$master.indices.docs + - is_false: nodes.$master.indices.store + - is_false: nodes.$master.indices.indexing + - is_false: nodes.$master.indices.get + - is_false: nodes.$master.indices.search + - is_false: nodes.$master.indices.merges + - is_false: nodes.$master.indices.refresh + - is_false: nodes.$master.indices.flush + - is_false: nodes.$master.indices.warmer + - is_false: nodes.$master.indices.query_cache + - is_false: nodes.$master.indices.fielddata + - is_false: nodes.$master.indices.completion + - is_false: nodes.$master.indices.segments + - is_false: nodes.$master.indices.translog + - is_true: nodes.$master.indices.recovery + +--- +"Metric - _all include_segment_file_sizes": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: { metric: indices, index_metric: _all, include_segment_file_sizes: true } + + - is_true: nodes.$master.indices.docs + - is_true: nodes.$master.indices.store + - is_true: nodes.$master.indices.indexing + - is_true: nodes.$master.indices.get + - is_true: nodes.$master.indices.search + - is_true: nodes.$master.indices.merges + - is_true: nodes.$master.indices.refresh + - is_true: nodes.$master.indices.flush + - is_true: nodes.$master.indices.warmer + - is_true: nodes.$master.indices.query_cache + - is_true: nodes.$master.indices.fielddata + - is_true: nodes.$master.indices.completion + - is_true: nodes.$master.indices.segments + - is_true: nodes.$master.indices.translog + - is_true: nodes.$master.indices.recovery + - is_true: nodes.$master.indices.segments.file_sizes + +--- +"Metric - segments include_segment_file_sizes": + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.stats: { metric: indices, index_metric: segments, include_segment_file_sizes: true } + + - is_false: nodes.$master.indices.docs + - is_false: nodes.$master.indices.store + - is_false: nodes.$master.indices.indexing + - is_false: nodes.$master.indices.get + - is_false: nodes.$master.indices.search + - is_false: nodes.$master.indices.merges + - is_false: nodes.$master.indices.refresh + - is_false: nodes.$master.indices.flush + - is_false: nodes.$master.indices.warmer + - is_false: nodes.$master.indices.query_cache + - is_false: nodes.$master.indices.fielddata + - is_false: nodes.$master.indices.completion + - is_true: nodes.$master.indices.segments + - is_false: nodes.$master.indices.translog + - is_false: nodes.$master.indices.recovery + - is_true: nodes.$master.indices.segments.file_sizes + From 391d3a20f3554109cbf3326acd0f97f733802559 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 15 Dec 2016 16:12:33 +0100 Subject: [PATCH 09/41] Add unit tests for toXContent methods in ReplicationResponse (#22188) This commit adds unit tests for the toXContent() methods of the inner classes ReplicationResponse.ShardInfo and ReplicationResponse.ShardInfo.Failure. 
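For orientation, the sketch below renders a ShardInfo the same way the new tests do, using the 5.x-era XContent API that appears in the diff; the wrapping startObject()/endObject() calls mirror the test code. When failures are present, each failure entry additionally carries the `_index`, `_shard`, `_node`, `reason`, `status` and `primary` fields asserted further down.

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class ShardInfoXContentSketch {
    public static void main(String[] args) throws IOException {
        // 5 shards in total, 3 of them responded successfully.
        ReplicationResponse.ShardInfo info = new ReplicationResponse.ShardInfo(5, 3);

        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        info.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();

        // Prints {"_shards":{"total":5,"successful":3,"failed":0}} - note that
        // "failed" counts the explicit Failure entries, not total minus successful.
        System.out.println(builder.bytes().utf8ToString());
    }
}
--------------------------------------------------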
--- .../replication/ReplicationResponse.java | 56 +++--- .../replication/ReplicationResponseTests.java | 162 +++++++++++++++++- 2 files changed, 182 insertions(+), 36 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java index e2cd8e3a817..6e44c4f158c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationResponse.java @@ -67,6 +67,12 @@ public class ReplicationResponse extends ActionResponse { public static class ShardInfo implements Streamable, ToXContent { + private static final String _SHARDS = "_shards"; + private static final String TOTAL = "total"; + private static final String SUCCESSFUL = "successful"; + private static final String FAILED = "failed"; + private static final String FAILURES = "failures"; + private int total; private int successful; private Failure[] failures = EMPTY; @@ -165,12 +171,12 @@ public class ReplicationResponse extends ActionResponse { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields._SHARDS); - builder.field(Fields.TOTAL, total); - builder.field(Fields.SUCCESSFUL, successful); - builder.field(Fields.FAILED, getFailed()); + builder.startObject(_SHARDS); + builder.field(TOTAL, total); + builder.field(SUCCESSFUL, successful); + builder.field(FAILED, getFailed()); if (failures.length > 0) { - builder.startArray(Fields.FAILURES); + builder.startArray(FAILURES); for (Failure failure : failures) { failure.toXContent(builder, params); } @@ -197,6 +203,13 @@ public class ReplicationResponse extends ActionResponse { public static class Failure implements ShardOperationFailedException, ToXContent { + private static final String _INDEX = "_index"; + private static final String _SHARD = "_shard"; + private static final String _NODE = "_node"; + private static final String REASON = "reason"; + private static final String STATUS = "status"; + private static final String PRIMARY = "primary"; + private ShardId shardId; private String nodeId; private Exception cause; @@ -313,39 +326,18 @@ public class ReplicationResponse extends ActionResponse { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(Fields._INDEX, shardId.getIndexName()); - builder.field(Fields._SHARD, shardId.id()); - builder.field(Fields._NODE, nodeId); - builder.field(Fields.REASON); + builder.field(_INDEX, shardId.getIndexName()); + builder.field(_SHARD, shardId.id()); + builder.field(_NODE, nodeId); + builder.field(REASON); builder.startObject(); ElasticsearchException.toXContent(builder, params, cause); builder.endObject(); - builder.field(Fields.STATUS, status); - builder.field(Fields.PRIMARY, primary); + builder.field(STATUS, status); + builder.field(PRIMARY, primary); builder.endObject(); return builder; } - - private static class Fields { - - private static final String _INDEX = "_index"; - private static final String _SHARD = "_shard"; - private static final String _NODE = "_node"; - private static final String REASON = "reason"; - private static final String STATUS = "status"; - private static final String PRIMARY = "primary"; - - } - } - - private static class Fields { - - private static final String _SHARDS = "_shards"; - private static final 
String TOTAL = "total"; - private static final String SUCCESSFUL = "successful"; - private static final String FAILED = "failed"; - private static final String FAILURES = "failures"; - } } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java index 0c805d6d32c..7d82b744133 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationResponseTests.java @@ -21,6 +21,11 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.RoutingMissingException; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.IndexShardRecoveringException; import org.elasticsearch.index.shard.ShardId; @@ -28,13 +33,13 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.function.Supplier; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; -import static org.hamcrest.CoreMatchers.equalTo; public class ReplicationResponseTests extends ESTestCase { @@ -42,9 +47,7 @@ public class ReplicationResponseTests extends ESTestCase { final int total = 5; final int successful = randomIntBetween(1, total); final ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(total, successful); - assertThat( - shardInfo.toString(), - equalTo(String.format(Locale.ROOT, "ShardInfo{total=5, successful=%d, failures=[]}", successful))); + assertEquals(String.format(Locale.ROOT, "ShardInfo{total=5, successful=%d, failures=[]}", successful), shardInfo.toString()); } public void testShardInfoEqualsAndHashcode() { @@ -113,6 +116,157 @@ public class ReplicationResponseTests extends ESTestCase { checkEqualsAndHashCode(randomFailure(), copy, mutate); } + public void testShardInfoToXContent() throws IOException { + ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3); + + final XContent xContent = randomFrom(XContentType.values()).xContent(); + try (XContentBuilder builder = XContentBuilder.builder(xContent)) { + builder.startObject(); + shardInfo.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + // Expected JSON is {"_shards":{"total":5,"successful":3,"failed":0}} + try (XContentParser parser = xContent.createParser(builder.bytes())) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("_shards", parser.currentName()); + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("total", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); + assertEquals(shardInfo.getTotal(), parser.intValue()); + assertEquals(XContentParser.Token.FIELD_NAME, 
parser.nextToken()); + assertEquals("successful", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); + assertEquals(shardInfo.getSuccessful(), parser.intValue()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("failed", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); + assertEquals(shardInfo.getFailed(), parser.intValue()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + } + } + + public void testRandomShardInfoToXContent() throws IOException { + final ReplicationResponse.ShardInfo shardInfo = randomShardInfo(); + + final XContent xContent = randomFrom(XContentType.values()).xContent(); + try (XContentBuilder builder = XContentBuilder.builder(xContent)) { + builder.startObject(); + shardInfo.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = xContent.createParser(builder.bytes())) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("_shards", parser.currentName()); + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("total", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); + assertEquals(shardInfo.getTotal(), parser.intValue()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("successful", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); + assertEquals(shardInfo.getSuccessful(), parser.intValue()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("failed", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); + assertEquals(shardInfo.getFailed(), parser.intValue()); + + if (shardInfo.getFailures() != null && shardInfo.getFailures().length > 0) { + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("failures", parser.currentName()); + assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken()); + + for (int i = 0; i < shardInfo.getFailures().length; i++) { + assertFailure(parser, shardInfo.getFailures()[i]); + } + assertEquals(XContentParser.Token.END_ARRAY, parser.nextToken()); + } + + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + } + } + + public void testRandomFailureToXContent() throws IOException { + ReplicationResponse.ShardInfo.Failure shardInfoFailure = randomFailure(); + + final XContent xContent = randomFrom(XContentType.values()).xContent(); + try (XContentBuilder builder = XContentBuilder.builder(xContent)) { + shardInfoFailure.toXContent(builder, ToXContent.EMPTY_PARAMS); + + try (XContentParser parser = xContent.createParser(builder.bytes())) { + assertFailure(parser, shardInfoFailure); + } + } + } + + private static void assertFailure(XContentParser parser, ReplicationResponse.ShardInfo.Failure failure) throws IOException { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("_index", parser.currentName()); 
+ assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(failure.index(), parser.text()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("_shard", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); + assertEquals(failure.shardId(), parser.intValue()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("_node", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(failure.nodeId(), parser.text()); + + Throwable cause = failure.getCause(); + if (cause != null) { + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("reason", parser.currentName()); + assertThrowable(parser, cause); + } + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("status", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(failure.status(), RestStatus.valueOf(parser.text())); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("primary", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_BOOLEAN, parser.nextToken()); + assertEquals(failure.primary(), parser.booleanValue()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + } + + private static void assertThrowable(XContentParser parser, Throwable cause) throws IOException { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("type", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(ElasticsearchException.getExceptionName(cause), parser.text()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("reason", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(cause.getMessage(), parser.text()); + if (cause instanceof ElasticsearchException) { + ElasticsearchException ex = (ElasticsearchException) cause; + for (String name : ex.getHeaderKeys()) { + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(name.replaceFirst("es.", ""), parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(ex.getHeader(name).get(0), parser.text()); + } + if (ex.getCause() != null) { + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("caused_by", parser.currentName()); + assertThrowable(parser, ex.getCause()); + } + } + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + } + private static ReplicationResponse.ShardInfo randomShardInfo() { int total = randomIntBetween(1, 10); int successful = randomIntBetween(0, total); From b6cbcc49ba590ead3cd6d95953eabe22f7d47fef Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 15 Dec 2016 17:06:25 +0100 Subject: [PATCH 10/41] ClusterService should expose "applied" cluster states (i.e., remove ClusterStateStatus) (#21817) `ClusterService` is responsible for updating the cluster state on every node (as a response to an API call on the master and when non-masters receive a new state from the master). When a new cluster state is processed, it is made visible via the `ClusterService#state` method and is sent to a series of listeners.
Those listeners come in two flavours - one changes the state of the node in response to the new cluster state (call these cluster state appliers), the other starts a secondary process. Examples of the latter include an indexing operation waiting for a shard to be started, or a master node action waiting for a master to be elected. The fact that we expose the state before applying it means that samplers of the cluster state had to worry about two things - working based on a stale CS and working based on a future, i.e., "being applied" CS. The `ClusterStateStatus` was used to allow distinguishing between the two. Working with a stale cluster state is unavoidable. This PR changes things so that consumers no longer need to worry about a future CS, removing the need for the status and simplifying the waiting logic. This change does come with a price: "cluster state appliers" can't sample the cluster state from `ClusterService` whenever they want, as the cluster state isn't exposed yet. However, recent clean-ups have made this situation easier, and this PR takes the last steps to remove such sampling. This also helps clarify the "information flow" and helps component separation (and thus potential unit testing). It also adds an assertion that will trigger if the cluster state is sampled by such appliers. Note that there are still many "appliers" that could be turned into simpler, unrestricted "listeners", but this can be done in smaller bits in the future. The commit also makes it clear which components are `appliers` and which are `listeners` by giving each a dedicated interface, as sketched below. Also, since I had to change the listener types, I went ahead and changed the data structure for temporary/timeout listeners (used for the observer) so that addition and removal are no longer O(n) operations.
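To make the applier/listener split concrete, here is a rough sketch of the two dedicated interfaces; the applier side corresponds to the ClusterStateApplier.java introduced by the rename in the diffstat below, and the exact signatures are an assumption rather than a copy of the patch.

[source,java]
--------------------------------------------------
import org.elasticsearch.cluster.ClusterChangedEvent;

// An applier mutates node-level state and runs *before* the new cluster
// state is exposed via ClusterService#state, so it must not sample the
// cluster state from ClusterService while it is running.
interface ClusterStateApplier {
    void applyClusterState(ClusterChangedEvent event);
}

// A listener merely reacts to an already-applied cluster state (e.g. it
// kicks off a secondary process) and may safely sample ClusterService#state.
interface ClusterStateListener {
    void clusterChanged(ClusterChangedEvent event);
}
--------------------------------------------------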
--- .../health/TransportClusterHealthAction.java | 16 +- .../TransportRestoreSnapshotAction.java | 6 +- .../action/bulk/TransportBulkAction.java | 27 +-- .../action/index/TransportIndexAction.java | 4 +- .../action/ingest/IngestActionForwarder.java | 10 +- .../action/support/ActiveShardsObserver.java | 12 +- .../master/TransportMasterNodeAction.java | 30 +-- .../TransportReplicationAction.java | 2 +- ...ransportInstanceSingleOperationAction.java | 4 +- ...teStatus.java => ClusterStateApplier.java} | 18 +- .../cluster/ClusterStateObserver.java | 142 ++++------- .../cluster/InternalClusterInfoService.java | 19 +- .../cluster/MasterNodeChangePredicate.java | 37 +-- .../action/shard/ShardStateAction.java | 26 +- .../metadata/MetaDataCreateIndexService.java | 13 +- .../metadata/MetaDataIndexAliasesService.java | 4 +- .../MetaDataIndexTemplateService.java | 5 +- .../metadata/MetaDataMappingService.java | 5 +- .../MetaDataUpdateSettingsService.java | 2 +- .../routing/DelayedAllocationService.java | 4 +- .../cluster/service/ClusterService.java | 226 +++++++++--------- .../cluster/service/ClusterServiceState.java | 49 ---- .../zen/PublishClusterStateAction.java | 1 - .../gateway/DanglingIndicesState.java | 15 +- .../org/elasticsearch/gateway/Gateway.java | 10 +- .../gateway/GatewayAllocator.java | 31 +-- .../gateway/GatewayMetaState.java | 11 +- .../elasticsearch/gateway/GatewayService.java | 6 +- .../index/CompositeIndexEventListener.java | 37 +-- .../org/elasticsearch/index/IndexService.java | 1 - .../index/query/QueryRewriteContext.java | 15 +- .../index/query/QueryShardContext.java | 7 +- .../index/shard/IndexEventListener.java | 45 +--- .../elasticsearch/index/shard/IndexShard.java | 4 +- .../elasticsearch/indices/IndicesService.java | 64 +---- .../cluster/IndicesClusterStateService.java | 126 ++++++---- .../recovery/PeerRecoveryTargetService.java | 15 +- .../indices/store/IndicesStore.java | 25 +- .../ingest/PipelineExecutionService.java | 6 +- .../elasticsearch/ingest/PipelineStore.java | 20 +- .../java/org/elasticsearch/node/Node.java | 9 +- .../node/service/NodeService.java | 4 +- .../repositories/RepositoriesService.java | 8 +- .../elasticsearch/search/SearchService.java | 25 +- .../snapshots/RestoreService.java | 9 +- .../snapshots/SnapshotShardsService.java | 54 +++-- .../snapshots/SnapshotsService.java | 12 +- .../org/elasticsearch/tasks/TaskManager.java | 6 +- .../org/elasticsearch/tribe/TribeService.java | 2 +- .../node/tasks/TaskManagerTestCase.java | 2 +- .../bulk/TransportBulkActionIngestTests.java | 18 +- .../TransportIndexActionIngestTests.java | 14 +- .../health/ClusterStateHealthTests.java | 22 +- .../DelayedAllocationServiceTests.java | 2 +- .../cluster/service/ClusterServiceTests.java | 40 +++- .../DiscoveryWithServiceDisruptionsIT.java | 39 ++- .../discovery/zen/ZenDiscoveryIT.java | 2 +- .../gateway/DanglingIndicesStateTests.java | 17 +- .../elasticsearch/index/IndexModuleTests.java | 5 +- .../AbstractGeoFieldDataTestCase.java | 1 - .../index/mapper/DateFieldTypeTests.java | 18 +- .../index/mapper/RangeFieldTypeTests.java | 2 +- .../index/query/QueryShardContextTests.java | 3 +- .../index/query/RangeQueryRewriteTests.java | 6 +- .../index/query/SimpleQueryParserTests.java | 24 +- ...dicesLifecycleListenerSingleNodeTests.java | 58 +++-- ...actIndicesClusterStateServiceTestCase.java | 21 +- ...ClusterStateServiceRandomUpdatesTests.java | 23 +- .../search/SearchServiceTests.java | 3 +- .../bucket/histogram/ExtendedBoundsTests.java | 2 +- 
.../highlight/HighlightBuilderTests.java | 2 +- .../ShardSearchTransportRequestTests.java | 3 +- .../rescore/QueryRescoreBuilderTests.java | 2 +- .../search/sort/AbstractSortTestCase.java | 2 +- .../DedicatedClusterSnapshotRestoreIT.java | 4 +- .../SharedClusterSnapshotRestoreIT.java | 4 +- .../test/AbstractQueryTestCase.java | 6 +- .../test/MockIndexEventListener.java | 21 +- .../search/MockSearchServiceTests.java | 2 +- 79 files changed, 740 insertions(+), 857 deletions(-) rename core/src/main/java/org/elasticsearch/cluster/{service/ClusterStateStatus.java => ClusterStateApplier.java} (59%) delete mode 100644 core/src/main/java/org/elasticsearch/cluster/service/ClusterServiceState.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 9773410aacc..7dd2c0df84c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -34,8 +34,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.ClusterServiceState; -import org.elasticsearch.cluster.service.ClusterStateStatus; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -46,6 +44,8 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.function.Predicate; + public class TransportClusterHealthAction extends TransportMasterNodeReadAction { private final GatewayAllocator gatewayAllocator; @@ -142,19 +142,13 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< assert waitFor >= 0; final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext()); - final ClusterServiceState observedState = observer.observedState(); - final ClusterState state = observedState.getClusterState(); + final ClusterState state = observer.observedState(); if (request.timeout().millis() == 0) { listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0)); return; } final int concreteWaitFor = waitFor; - final ClusterStateObserver.ChangePredicate validationPredicate = new ClusterStateObserver.ValidationPredicate() { - @Override - protected boolean validate(ClusterServiceState newState) { - return newState.getClusterStateStatus() == ClusterStateStatus.APPLIED && validateRequest(request, newState.getClusterState(), concreteWaitFor); - } - }; + final Predicate validationPredicate = newState -> validateRequest(request, newState, concreteWaitFor); final ClusterStateObserver.Listener stateListener = new ClusterStateObserver.Listener() { @Override @@ -174,7 +168,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< listener.onResponse(response); } }; - if (observedState.getClusterStateStatus() == ClusterStateStatus.APPLIED && validateRequest(request, state, concreteWaitFor)) { + if (validationPredicate.test(state)) { stateListener.onNewClusterState(state); } else { 
observer.waitForNextChange(stateListener, validationPredicate, request.timeout()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index 3ef12dfff0e..5ff5bd17fe5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -101,10 +101,10 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction shards = prevEntry.shards(); assert prevEntry.state().completed() : "expected completed snapshot state but was " + prevEntry.state(); assert RestoreService.completed(shards) : "expected all restore entries to be completed"; @@ -121,7 +121,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction shardsAllocatedPredicate = newState -> activeShardCount.enoughShardsActive(newState, indexName); final ClusterStateObserver.Listener observerListener = new ClusterStateObserver.Listener() { @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 47c3f6cf8e0..ecb03b5c222 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -34,7 +34,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.ClusterServiceState; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; @@ -46,6 +45,7 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportService; +import java.util.function.Predicate; import java.util.function.Supplier; /** @@ -111,14 +111,6 @@ public abstract class TransportMasterNodeAction listener) { this.task = task; this.request = request; @@ -134,7 +126,8 @@ public abstract class TransportMasterNodeAction masterChangePredicate = MasterNodeChangePredicate.build(clusterState); final DiscoveryNodes nodes = clusterState.nodes(); if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { // check for block, if blocked, retry, else, execute locally @@ -144,7 +137,10 @@ public abstract class TransportMasterNodeAction { + ClusterBlockException newException = checkBlock(request, newState); + return (newException == null || !newException.retryable()); + }); } } else { ActionListener delegate = new ActionListener() { @@ -158,7 +154,7 @@ public abstract class TransportMasterNodeAction) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); - retry(t, MasterNodeChangePredicate.INSTANCE); + retry(t, masterChangePredicate); } else { listener.onFailure(t); } @@ -168,14 +164,14 @@ public abstract class TransportMasterNodeAction(listener, TransportMasterNodeAction.this::newResponse) 
{ @@ -186,7 +182,7 @@ public abstract class TransportMasterNodeAction statePredicate) { observer.waitForNextChange( new ClusterStateObserver.Listener() { @Override @@ -214,7 +210,7 @@ public abstract class TransportMasterNodeAction) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure); listener.onFailure(new MasterNotDiscoveredException(failure)); } - }, changePredicate + }, statePredicate ); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 085b3c10018..8cff657e3dd 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -630,7 +630,7 @@ public abstract class TransportReplicationAction< @Override protected void doRun() { setPhase(task, "routing"); - final ClusterState state = observer.observedState().getClusterState(); + final ClusterState state = observer.observedState(); if (handleBlockExceptions(state)) { return; } diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index c5014adf570..7670eab102a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -39,12 +39,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -124,7 +124,7 @@ public abstract class TransportInstanceSingleOperationAction MATCH_ALL_CHANGES_PREDICATE = state -> true; private final ClusterService clusterService; private final ThreadContext contextHolder; volatile TimeValue timeOutValue; - final AtomicReference lastObservedState; + final AtomicReference lastObservedState; final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener(); // observingContext is not null when waiting on cluster state changes final AtomicReference observingContext = new AtomicReference<>(null); @@ -70,8 +63,17 @@ public class ClusterStateObserver { * to wait indefinitely */ public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) { + this(clusterService.state(), clusterService, timeout, logger, contextHolder); + } + /** + * @param timeout a global timeout for this observer. After it has expired the observer + * will fail any existing or new #waitForNextChange calls. 
Set to null + * to wait indefinitely + */ + public ClusterStateObserver(ClusterState initialState, ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, + ThreadContext contextHolder) { this.clusterService = clusterService; - this.lastObservedState = new AtomicReference<>(clusterService.clusterServiceState()); + this.lastObservedState = new AtomicReference<>(initialState); this.timeOutValue = timeout; if (timeOutValue != null) { this.startTimeNS = System.nanoTime(); @@ -81,8 +83,8 @@ public class ClusterStateObserver { } /** last cluster state and status observed by this observer. Note that this may not be the current one */ - public ClusterServiceState observedState() { - ClusterServiceState state = lastObservedState.get(); + public ClusterState observedState() { + ClusterState state = lastObservedState.get(); assert state != null; return state; } @@ -100,18 +102,18 @@ public class ClusterStateObserver { waitForNextChange(listener, MATCH_ALL_CHANGES_PREDICATE, timeOutValue); } - public void waitForNextChange(Listener listener, ChangePredicate changePredicate) { - waitForNextChange(listener, changePredicate, null); + public void waitForNextChange(Listener listener, Predicate statePredicate) { + waitForNextChange(listener, statePredicate, null); } /** - * Wait for the next cluster state which satisfies changePredicate + * Wait for the next cluster state which satisfies statePredicate * * @param listener callback listener - * @param changePredicate predicate to check whether cluster state changes are relevant and the callback should be called + * @param statePredicate predicate to check whether cluster state changes are relevant and the callback should be called * @param timeOutValue a timeout for waiting. If null the global observer timeout will be used. */ - public void waitForNextChange(Listener listener, ChangePredicate changePredicate, @Nullable TimeValue timeOutValue) { + public void waitForNextChange(Listener listener, Predicate statePredicate, @Nullable TimeValue timeOutValue) { if (observingContext.get() != null) { throw new ElasticsearchException("already waiting for a cluster state change"); @@ -128,7 +130,7 @@ public class ClusterStateObserver { logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS)); // update to latest, in case people want to retry timedOut = true; - lastObservedState.set(clusterService.clusterServiceState()); + lastObservedState.set(clusterService.state()); listener.onTimeout(timeOutValue); return; } @@ -143,33 +145,24 @@ public class ClusterStateObserver { } // sample a new state - ClusterServiceState newState = clusterService.clusterServiceState(); - ClusterServiceState lastState = lastObservedState.get(); - if (changePredicate.apply(lastState, newState)) { + ClusterState newState = clusterService.state(); + ClusterState lastState = lastObservedState.get(); + if (newState != lastState && statePredicate.test(newState)) { // good enough, let's go. logger.trace("observer: sampled state accepted by predicate ({})", newState); lastObservedState.set(newState); - listener.onNewClusterState(newState.getClusterState()); + listener.onNewClusterState(newState); } else { logger.trace("observer: sampled state rejected by predicate ({}). 
adding listener to ClusterService", newState); - ObservingContext context = new ObservingContext(new ContextPreservingListener(listener, contextHolder.newStoredContext()), changePredicate); + ObservingContext context = + new ObservingContext(new ContextPreservingListener(listener, contextHolder.newStoredContext()), statePredicate); if (!observingContext.compareAndSet(null, context)) { throw new ElasticsearchException("already waiting for a cluster state change"); } - clusterService.add(timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS), clusterStateListener); + clusterService.addTimeoutListener(timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS), clusterStateListener); } } - /** - * reset this observer to the give cluster state. Any pending waits will be canceled. - */ - public void reset(ClusterServiceState state) { - if (observingContext.getAndSet(null) != null) { - clusterService.remove(clusterStateListener); - } - lastObservedState.set(state); - } - class ObserverClusterStateListener implements TimeoutClusterStateListener { @Override @@ -179,18 +172,18 @@ public class ClusterStateObserver { // No need to remove listener as it is the responsibility of the thread that set observingContext to null return; } - if (context.changePredicate.apply(event)) { + final ClusterState state = event.state(); + if (context.statePredicate.test(state)) { if (observingContext.compareAndSet(context, null)) { - clusterService.remove(this); - ClusterServiceState state = new ClusterServiceState(event.state(), ClusterStateStatus.APPLIED); + clusterService.removeTimeoutListener(this); logger.trace("observer: accepting cluster state change ({})", state); lastObservedState.set(state); - context.listener.onNewClusterState(state.getClusterState()); + context.listener.onNewClusterState(state); } else { - logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", event.state().version()); + logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", state.version()); } } else { - logger.trace("observer: predicate rejected change (new cluster state version [{}])", event.state().version()); + logger.trace("observer: predicate rejected change (new cluster state version [{}])", state.version()); } } @@ -201,15 +194,15 @@ public class ClusterStateObserver { // No need to remove listener as it is the responsibility of the thread that set observingContext to null return; } - ClusterServiceState newState = clusterService.clusterServiceState(); - ClusterServiceState lastState = lastObservedState.get(); - if (context.changePredicate.apply(lastState, newState)) { + ClusterState newState = clusterService.state(); + ClusterState lastState = lastObservedState.get(); + if (newState != lastState && context.statePredicate.test(newState)) { // double check we're still listening if (observingContext.compareAndSet(context, null)) { logger.trace("observer: post adding listener: accepting current cluster state ({})", newState); - clusterService.remove(this); + clusterService.removeTimeoutListener(this); lastObservedState.set(newState); - context.listener.onNewClusterState(newState.getClusterState()); + context.listener.onNewClusterState(newState); } else { logger.trace("observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", newState); } @@ -224,7 +217,7 @@ public class ClusterStateObserver { if (context != null) { 
logger.trace("observer: cluster service closed. notifying listener."); - clusterService.remove(this); + clusterService.removeTimeoutListener(this); context.listener.onClusterServiceClose(); } } @@ -233,11 +226,11 @@ public class ClusterStateObserver { public void onTimeout(TimeValue timeout) { ObservingContext context = observingContext.getAndSet(null); if (context != null) { - clusterService.remove(this); + clusterService.removeTimeoutListener(this); long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS); logger.trace("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS)); // update to latest, in case people want to retry - lastObservedState.set(clusterService.clusterServiceState()); + lastObservedState.set(clusterService.state()); timedOut = true; context.listener.onTimeout(timeOutValue); } @@ -255,58 +248,13 @@ public class ClusterStateObserver { void onTimeout(TimeValue timeout); } - public interface ChangePredicate { - - /** - * a rough check used when starting to monitor for a new change. Called infrequently can be less accurate. - * - * @return true if newState should be accepted - */ - boolean apply(ClusterServiceState previousState, - ClusterServiceState newState); - - /** - * called to see whether a cluster change should be accepted - * - * @return true if changedEvent.state() should be accepted - */ - boolean apply(ClusterChangedEvent changedEvent); - } - - - public abstract static class ValidationPredicate implements ChangePredicate { - - @Override - public boolean apply(ClusterServiceState previousState, ClusterServiceState newState) { - return (previousState.getClusterState() != newState.getClusterState() || - previousState.getClusterStateStatus() != newState.getClusterStateStatus()) && - validate(newState); - } - - protected abstract boolean validate(ClusterServiceState newState); - - @Override - public boolean apply(ClusterChangedEvent changedEvent) { - return changedEvent.previousState().version() != changedEvent.state().version() && - validate(new ClusterServiceState(changedEvent.state(), ClusterStateStatus.APPLIED)); - } - } - - public abstract static class EventPredicate implements ChangePredicate { - @Override - public boolean apply(ClusterServiceState previousState, ClusterServiceState newState) { - return previousState.getClusterState() != newState.getClusterState() || previousState.getClusterStateStatus() != newState.getClusterStateStatus(); - } - - } - static class ObservingContext { public final Listener listener; - public final ChangePredicate changePredicate; + public final Predicate statePredicate; - public ObservingContext(Listener listener, ChangePredicate changePredicate) { + public ObservingContext(Listener listener, Predicate statePredicate) { this.listener = listener; - this.changePredicate = changePredicate; + this.statePredicate = statePredicate; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 70656bb56bd..b8ac2a5eb50 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -19,11 +19,6 @@ package org.elasticsearch.cluster; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - import 
org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -53,6 +48,11 @@ import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ReceiveTimeoutTransportException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + /** * InternalClusterInfoService provides the ClusterInfoService interface, * routinely updated on a timer. The timer can be dynamically changed by @@ -64,7 +64,8 @@ import org.elasticsearch.transport.ReceiveTimeoutTransportException; * Every time the timer runs, gathers information about the disk usage and * shard sizes across the cluster. */ -public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { +public class InternalClusterInfoService extends AbstractComponent + implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), @@ -105,9 +106,9 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu clusterSettings.addSettingsUpdateConsumer(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); // Add InternalClusterInfoService to listen for Master changes - this.clusterService.add((LocalNodeMasterListener)this); + this.clusterService.addLocalNodeMasterListener(this); // Add to listen for state changes (when nodes are added) - this.clusterService.add((ClusterStateListener)this); + this.clusterService.addListener(this); } private void setEnabled(boolean enabled) { @@ -167,7 +168,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu } } - if (this.isMaster && dataNodeAdded && clusterService.state().getNodes().getDataNodes().size() > 1) { + if (this.isMaster && dataNodeAdded && event.state().getNodes().getDataNodes().size() > 1) { if (logger.isDebugEnabled()) { logger.debug("data node was added, retrieving new cluster info"); } diff --git a/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java b/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java index 0ee5c891282..2fec24ec488 100644 --- a/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java +++ b/core/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java @@ -19,23 +19,32 @@ package org.elasticsearch.cluster; -import org.elasticsearch.cluster.service.ClusterServiceState; +import java.util.function.Predicate; -public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePredicate { - INSTANCE; +public final class MasterNodeChangePredicate { + + private MasterNodeChangePredicate() { - @Override - public boolean apply( - ClusterServiceState previousState, - ClusterServiceState newState) { - // checking if the masterNodeId changed is insufficient as the - // same master node might get re-elected after a disruption - return newState.getClusterState().nodes().getMasterNodeId() != null && - newState.getClusterState() != previousState.getClusterState(); } - @Override - public boolean apply(ClusterChangedEvent changedEvent) { - return changedEvent.nodesDelta().masterNodeChanged(); + /** + 
* builds a predicate that will accept a cluster state only if it was generated after the current node has + * (re-)joined the master + */ + public static Predicate build(ClusterState currentState) { + final long currentVersion = currentState.version(); + final String currentMaster = currentState.nodes().getMasterNodeId(); + return newState -> { + final String newMaster = newState.nodes().getMasterNodeId(); + final boolean accept; + if (newMaster == null) { + accept = false; + } else if (newMaster.equals(currentMaster) == false) { + accept = true; + } else { + accept = newState.version() > currentVersion; + } + return accept; + }; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index d7964f0c429..0cf124612d0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -92,7 +92,7 @@ public class ShardStateAction extends AbstractComponent { } private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardEntry shardEntry, final Listener listener) { - DiscoveryNode masterNode = observer.observedState().getClusterState().nodes().getMasterNode(); + DiscoveryNode masterNode = observer.observedState().nodes().getMasterNode(); if (masterNode == null) { logger.warn("{} no master known for action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry); waitForNewMasterAndRetry(actionName, observer, shardEntry, listener); @@ -142,18 +142,27 @@ public class ShardStateAction extends AbstractComponent { */ public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) { assert primaryTerm > 0L : "primary term should be strictly positive"; - shardFailed(shardId, allocationId, primaryTerm, message, failure, listener); + shardFailed(shardId, allocationId, primaryTerm, message, failure, listener, clusterService.state()); } /** * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. */ public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) { - shardFailed(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, listener); + localShardFailed(shardRouting, message, failure, listener, clusterService.state()); } - private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) { - ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); + /** + * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed.
+ */ + public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener, + final ClusterState currentState) { + shardFailed(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, listener, currentState); + } + + private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, + @Nullable final Exception failure, Listener listener, ClusterState currentState) { + ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); ShardEntry shardEntry = new ShardEntry(shardId, allocationId, primaryTerm, message, failure); sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardEntry, listener); } @@ -180,7 +189,7 @@ public class ShardStateAction extends AbstractComponent { // we wait indefinitely for a new master assert false; } - }, MasterNodeChangePredicate.INSTANCE); + }, MasterNodeChangePredicate.build(observer.observedState())); } private static class ShardFailedTransportHandler implements TransportRequestHandler { @@ -342,7 +351,10 @@ public class ShardStateAction extends AbstractComponent { } public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) { - ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); + shardStarted(shardRouting, message, listener, clusterService.state()); + } + public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener, ClusterState currentState) { + ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, null); sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardEntry, listener); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index b1ffccf6aeb..422492e396b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -71,6 +71,7 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.IndexCreationException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexNameException; +import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.threadpool.ThreadPool; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -223,7 +224,8 @@ public class MetaDataCreateIndexService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) throws Exception { Index createdIndex = null; - String removalReason = null; + String removalExtraInfo = null; + IndexRemovalReason removalReason = IndexRemovalReason.FAILURE; try { validate(request, currentState); @@ -356,7 +358,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { try { mapperService.merge(mappings, request.updateAllTypes()); } catch (MapperParsingException mpe) { - removalReason = "failed on parsing default mapping/mappings on index creation"; + removalExtraInfo = "failed on parsing default 
mapping/mappings on index creation"; throw mpe; } @@ -407,7 +409,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { try { indexMetaData = indexMetaDataBuilder.build(); } catch (Exception e) { - removalReason = "failed to build index metadata"; + removalExtraInfo = "failed to build index metadata"; throw e; } @@ -440,12 +442,13 @@ public class MetaDataCreateIndexService extends AbstractComponent { ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), "index [" + request.index() + "] created"); } - removalReason = "cleaning up after validating index on master"; + removalExtraInfo = "cleaning up after validating index on master"; + removalReason = IndexRemovalReason.NO_LONGER_ASSIGNED; return updatedState; } finally { if (createdIndex != null) { // Index was already partially created - need to clean up - indicesService.removeIndex(createdIndex, removalReason != null ? removalReason : "failed to create index"); + indicesService.removeIndex(createdIndex, removalReason, removalExtraInfo); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 0c0d3f576a5..c1de936d9c7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; @@ -50,6 +49,7 @@ import java.util.Set; import java.util.function.Function; import static java.util.Collections.emptyList; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; /** * Service responsible for submitting add and remove aliases requests @@ -172,7 +172,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { return currentState; } finally { for (Index index : indicesToClose) { - indicesService.removeIndex(index, "created for alias processing"); + indicesService.removeIndex(index, NO_LONGER_ASSIGNED, "created for alias processing"); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 08ba1dea67e..020a1d75231 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -53,6 +52,8 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; + /** * Service responsible for submitting index templates updates */ @@ -225,7 +226,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent { } finally { if (createdIndex != null) { - 
indicesService.removeIndex(createdIndex, " created for parsing template mapping"); + indicesService.removeIndex(createdIndex, NO_LONGER_ASSIGNED, " created for parsing template mapping"); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 15e384df3eb..4e9b114ff13 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -52,6 +52,9 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; + +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; + /** * Service responsible for submitting mapping changes */ @@ -158,7 +161,7 @@ public class MetaDataMappingService extends AbstractComponent { } } finally { if (removeIndex) { - indicesService.removeIndex(index, "created for mapping processing"); + indicesService.removeIndex(index, NO_LONGER_ASSIGNED, "created for mapping processing"); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 2b937328106..6b19e2d4bf3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -71,7 +71,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements IndexScopedSettings indexScopedSettings, IndicesService indicesService) { super(settings); this.clusterService = clusterService; - this.clusterService.add(this); + this.clusterService.addListener(this); this.allocationService = allocationService; this.indexScopedSettings = indexScopedSettings; this.indicesService = indicesService; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java index 55c6750b825..4522dfcf98f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java @@ -133,7 +133,7 @@ public class DelayedAllocationService extends AbstractLifecycleComponent impleme this.threadPool = threadPool; this.clusterService = clusterService; this.allocationService = allocationService; - clusterService.addFirst(this); + clusterService.addListener(this); } @Override @@ -146,7 +146,7 @@ public class DelayedAllocationService extends AbstractLifecycleComponent impleme @Override protected void doClose() { - clusterService.remove(this); + clusterService.removeListener(this); removeTaskAndCancel(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index 6d26918d012..d213cea4d33 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState.Builder; +import 
org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; @@ -77,6 +78,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Queue; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executor; import java.util.concurrent.Future; @@ -87,6 +89,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.UnaryOperator; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; @@ -113,20 +116,22 @@ public class ClusterService extends AbstractLifecycleComponent { /** * Those 3 state listeners are changing infrequently - CopyOnWriteArrayList is just fine */ - private final Collection priorityClusterStateListeners = new CopyOnWriteArrayList<>(); - private final Collection clusterStateListeners = new CopyOnWriteArrayList<>(); - private final Collection lastClusterStateListeners = new CopyOnWriteArrayList<>(); + private final Collection highPriorityStateAppliers = new CopyOnWriteArrayList<>(); + private final Collection normalPriorityStateAppliers = new CopyOnWriteArrayList<>(); + private final Collection lowPriorityStateAppliers = new CopyOnWriteArrayList<>(); final Map> updateTasksPerExecutor = new HashMap<>(); - // TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API - private final Collection postAppliedListeners = new CopyOnWriteArrayList<>(); - private final Iterable preAppliedListeners = Iterables.concat(priorityClusterStateListeners, - clusterStateListeners, lastClusterStateListeners); + private final Iterable clusterStateAppliers = Iterables.concat(highPriorityStateAppliers, + normalPriorityStateAppliers, lowPriorityStateAppliers); + + private final Collection clusterStateListeners = new CopyOnWriteArrayList<>(); + private final Collection timeoutClusterStateListeners = + Collections.newSetFromMap(new ConcurrentHashMap()); private final LocalNodeMasterListeners localNodeMasterListeners; private final Queue onGoingTimeouts = ConcurrentCollections.newQueue(); - private final AtomicReference state; + private final AtomicReference state; private final ClusterBlocks.Builder initialBlocks; @@ -140,7 +145,7 @@ public class ClusterService extends AbstractLifecycleComponent { this.clusterSettings = clusterSettings; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); // will be replaced on doStart. 
- this.state = new AtomicReference<>(new ClusterServiceState(ClusterState.builder(clusterName).build(), ClusterStateStatus.UNKNOWN)); + this.state = new AtomicReference<>(ClusterState.builder(clusterName).build()); this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold); @@ -161,43 +166,15 @@ public class ClusterService extends AbstractLifecycleComponent { } public synchronized void setLocalNode(DiscoveryNode localNode) { - assert clusterServiceState().getClusterState().nodes().getLocalNodeId() == null : "local node is already set"; - updateState(css -> { - ClusterState clusterState = css.getClusterState(); + assert state().nodes().getLocalNodeId() == null : "local node is already set"; + updateState(clusterState -> { DiscoveryNodes nodes = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()).build(); - return new ClusterServiceState(ClusterState.builder(clusterState).nodes(nodes).build(), css.getClusterStateStatus()); + return ClusterState.builder(clusterState).nodes(nodes).build(); }); } - private void updateState(UnaryOperator updateFunction) { - this.state.getAndUpdate(oldClusterServiceState -> { - ClusterServiceState newClusterServiceState = updateFunction.apply(oldClusterServiceState); - assert validStateTransition(oldClusterServiceState, newClusterServiceState) : - "Invalid cluster service state transition from " + oldClusterServiceState + " to " + newClusterServiceState; - return newClusterServiceState; - }); - } - - private static boolean validStateTransition(ClusterServiceState oldClusterServiceState, ClusterServiceState newClusterServiceState) { - if (oldClusterServiceState == null || newClusterServiceState == null) { - return false; - } - ClusterStateStatus oldStatus = oldClusterServiceState.getClusterStateStatus(); - ClusterStateStatus newStatus = newClusterServiceState.getClusterStateStatus(); - // only go from UNKNOWN to UNKNOWN or BEING_APPLIED - if (oldStatus == ClusterStateStatus.UNKNOWN && newStatus == ClusterStateStatus.APPLIED) { - return false; - } - // only go from BEING_APPLIED to APPLIED - if (oldStatus == ClusterStateStatus.BEING_APPLIED && newStatus != ClusterStateStatus.APPLIED) { - return false; - } - // only go from APPLIED to BEING_APPLIED - if (oldStatus == ClusterStateStatus.APPLIED && newStatus != ClusterStateStatus.BEING_APPLIED) { - return false; - } - boolean identicalClusterState = oldClusterServiceState.getClusterState() == newClusterServiceState.getClusterState(); - return identicalClusterState == (oldStatus == ClusterStateStatus.BEING_APPLIED && newStatus == ClusterStateStatus.APPLIED); + private void updateState(UnaryOperator updateFunction) { + this.state.getAndUpdate(updateFunction); } public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { @@ -235,12 +212,10 @@ public class ClusterService extends AbstractLifecycleComponent { @Override protected synchronized void doStart() { Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); - Objects.requireNonNull(clusterServiceState().getClusterState().nodes().getLocalNode(), "please set the local node before starting"); + Objects.requireNonNull(state().nodes().getLocalNode(), "please set the local node before starting"); Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting"); - add(localNodeMasterListeners); - updateState(css -> new 
ClusterServiceState( - ClusterState.builder(css.getClusterState()).blocks(initialBlocks).build(), - css.getClusterStateStatus())); + addListener(localNodeMasterListeners); + updateState(state -> ClusterState.builder(state).blocks(initialBlocks).build()); this.threadPoolExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext()); } @@ -258,12 +233,8 @@ public class ClusterService extends AbstractLifecycleComponent { } ThreadPool.terminate(threadPoolExecutor, 10, TimeUnit.SECONDS); // close timeout listeners that did not have an ongoing timeout - postAppliedListeners - .stream() - .filter(listener -> listener instanceof TimeoutClusterStateListener) - .map(listener -> (TimeoutClusterStateListener) listener) - .forEach(TimeoutClusterStateListener::onClose); - remove(localNodeMasterListeners); + timeoutClusterStateListeners.forEach(TimeoutClusterStateListener::onClose); + removeListener(localNodeMasterListeners); } @Override @@ -289,45 +260,59 @@ public class ClusterService extends AbstractLifecycleComponent { * The current cluster state. */ public ClusterState state() { - return clusterServiceState().getClusterState(); - } - - /** - * The current cluster service state comprising cluster state and cluster state status. - */ - public ClusterServiceState clusterServiceState() { + assert assertNotCalledFromClusterStateApplier("the applied cluster state is not yet available"); return this.state.get(); } /** - * Adds a priority listener for updated cluster states. + * Adds a high priority applier of updated cluster states. */ - public void addFirst(ClusterStateListener listener) { - priorityClusterStateListeners.add(listener); + public void addHighPriorityApplier(ClusterStateApplier applier) { + highPriorityStateAppliers.add(applier); } /** - * Adds last listener. + * Adds an applier which will be called after all high priority and normal appliers have been called. */ - public void addLast(ClusterStateListener listener) { - lastClusterStateListeners.add(listener); + public void addLowPriorityApplier(ClusterStateApplier applier) { + lowPriorityStateAppliers.add(applier); } /** - * Adds a listener for updated cluster states. + * Adds an applier of updated cluster states. */ - public void add(ClusterStateListener listener) { + public void addStateApplier(ClusterStateApplier applier) { + normalPriorityStateAppliers.add(applier); + } + + /** + * Removes an applier of updated cluster states. */ + public void removeApplier(ClusterStateApplier applier) { + normalPriorityStateAppliers.remove(applier); + highPriorityStateAppliers.remove(applier); + lowPriorityStateAppliers.remove(applier); + } + + /** + * Adds a listener for updated cluster states. + */ + public void addListener(ClusterStateListener listener) { clusterStateListeners.add(listener); } /** * Removes a listener for updated cluster states. */ - public void remove(ClusterStateListener listener) { + public void removeListener(ClusterStateListener listener) { clusterStateListeners.remove(listener); - priorityClusterStateListeners.remove(listener); - lastClusterStateListeners.remove(listener); - postAppliedListeners.remove(listener); + } + + /** + * Removes a timeout listener for updated cluster states.
+ */ + public void removeTimeoutListener(TimeoutClusterStateListener listener) { + timeoutClusterStateListeners.remove(listener); for (Iterator it = onGoingTimeouts.iterator(); it.hasNext(); ) { NotifyTimeout timeout = it.next(); if (timeout.listener.equals(listener)) { @@ -340,25 +325,24 @@ public class ClusterService extends AbstractLifecycleComponent { /** * Add a listener for on/off local node master events */ - public void add(LocalNodeMasterListener listener) { + public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { localNodeMasterListeners.add(listener); } /** * Remove the given listener for on/off local master events */ - public void remove(LocalNodeMasterListener listener) { + public void removeLocalNodeMasterListener(LocalNodeMasterListener listener) { localNodeMasterListeners.remove(listener); } /** - * Adds a cluster state listener that will timeout after the provided timeout, - * and is executed after the clusterstate has been successfully applied ie. is - * in state {@link ClusterStateStatus#APPLIED} - * NOTE: a {@code null} timeout means that the listener will never be removed - * automatically + * Adds a cluster state listener that is expected to be removed within a short period of time. + * If a timeout is provided, the listener will be notified once that time has elapsed. + * + * NOTE: the listener is not removed on timeout. This is the responsibility of the caller. */ - public void add(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) { + public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) { if (lifecycle.stoppedOrClosed()) { listener.onClose(); return; @@ -373,7 +357,7 @@ public class ClusterService extends AbstractLifecycleComponent { notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout); onGoingTimeouts.add(notifyTimeout); } - postAppliedListeners.add(listener); + timeoutClusterStateListeners.add(listener); listener.postAdded(); } }); @@ -572,6 +556,19 @@ public class ClusterService extends AbstractLifecycleComponent { return true; } + /** asserts that the current stack trace does NOT involve a cluster state applier */ + private static boolean assertNotCalledFromClusterStateApplier(String reason) { + if (Thread.currentThread().getName().contains(UPDATE_THREAD_NAME)) { + for (StackTraceElement element: Thread.currentThread().getStackTrace()) { + if (element.getClassName().equals(ClusterService.class.getName()) + && element.getMethodName().equals("callClusterStateAppliers")) { + throw new AssertionError("should not be called by a cluster state applier.
reason [" + reason + "]"); + } + } + } + return true; + } + public ClusterName getClusterName() { return clusterName; } @@ -596,8 +593,7 @@ public class ClusterService extends AbstractLifecycleComponent { } logger.debug("processing [{}]: execute", taskInputs.summary); - ClusterServiceState previousClusterServiceState = clusterServiceState(); - ClusterState previousClusterState = previousClusterServiceState.getClusterState(); + ClusterState previousClusterState = state(); if (!previousClusterState.nodes().isLocalNodeElectedMaster() && taskInputs.runOnlyOnMaster()) { logger.debug("failing [{}]: local node is no longer master", taskInputs.summary); @@ -606,7 +602,7 @@ public class ClusterService extends AbstractLifecycleComponent { } long startTimeNS = currentTimeInNanos(); - TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterServiceState, startTimeNS); + TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterState, startTimeNS); taskOutputs.notifyFailedTasks(); if (taskOutputs.clusterStateUnchanged()) { @@ -615,7 +611,7 @@ public class ClusterService extends AbstractLifecycleComponent { logger.debug("processing [{}]: took [{}] no change in cluster_state", taskInputs.summary, executionTime); warnAboutSlowTaskIfNeeded(executionTime, taskInputs.summary); } else { - ClusterState newClusterState = taskOutputs.newClusterServiceState.getClusterState(); + ClusterState newClusterState = taskOutputs.newClusterState; if (logger.isTraceEnabled()) { logger.trace("cluster state updated, source [{}]\n{}", taskInputs.summary, newClusterState); } else if (logger.isDebugEnabled()) { @@ -646,8 +642,7 @@ public class ClusterService extends AbstractLifecycleComponent { } } - public TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterServiceState previousClusterServiceState, long startTimeNS) { - ClusterState previousClusterState = previousClusterServiceState.getClusterState(); + public TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, long startTimeNS) { BatchResult batchResult = executeTasks(taskInputs, startTimeNS, previousClusterState); ClusterState newClusterState = batchResult.resultingState; // extract those that are waiting for results @@ -662,9 +657,7 @@ public class ClusterService extends AbstractLifecycleComponent { } newClusterState = patchVersions(previousClusterState, newClusterState); - ClusterServiceState newClusterServiceState = new ClusterServiceState(newClusterState, ClusterStateStatus.BEING_APPLIED); - - return new TaskOutputs(taskInputs, previousClusterServiceState, newClusterServiceState, nonFailedTasks, + return new TaskOutputs(taskInputs, previousClusterState, newClusterState, nonFailedTasks, batchResult.executionResults); } @@ -728,8 +721,8 @@ public class ClusterService extends AbstractLifecycleComponent { } private void publishAndApplyChanges(TaskInputs taskInputs, TaskOutputs taskOutputs) { - ClusterState previousClusterState = taskOutputs.previousClusterServiceState.getClusterState(); - ClusterState newClusterState = taskOutputs.newClusterServiceState.getClusterState(); + ClusterState previousClusterState = taskOutputs.previousClusterState; + ClusterState newClusterState = taskOutputs.newClusterState; ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(taskInputs.summary, newClusterState, previousClusterState); // new cluster state, notify all listeners @@ -767,9 +760,7 @@ public class ClusterService extends AbstractLifecycleComponent { } } - // update the current cluster state 
- updateState(css -> taskOutputs.newClusterServiceState); - logger.debug("set local cluster state to version {}", newClusterState.version()); + logger.debug("applying cluster state version {}", newClusterState.version()); try { // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) { @@ -779,27 +770,22 @@ public class ClusterService extends AbstractLifecycleComponent { } catch (Exception ex) { logger.warn("failed to apply cluster settings", ex); } - for (ClusterStateListener listener : preAppliedListeners) { - try { - logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version()); - listener.clusterChanged(clusterChangedEvent); - } catch (Exception ex) { - logger.warn("failed to notify ClusterStateListener", ex); - } - } + + logger.debug("set local cluster state to version {}", newClusterState.version()); + callClusterStateAppliers(newClusterState, clusterChangedEvent); nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes()); - updateState(css -> new ClusterServiceState(css.getClusterState(), ClusterStateStatus.APPLIED)); + updateState(css -> newClusterState); - for (ClusterStateListener listener : postAppliedListeners) { + Stream.concat(clusterStateListeners.stream(), timeoutClusterStateListeners.stream()).forEach(listener -> { try { logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version()); listener.clusterChanged(clusterChangedEvent); } catch (Exception ex) { logger.warn("failed to notify ClusterStateListener", ex); } - } + }); //manual ack only from the master at the end of the publish if (newClusterState.nodes().isLocalNodeElectedMaster()) { @@ -826,6 +812,17 @@ public class ClusterService extends AbstractLifecycleComponent { } } + private void callClusterStateAppliers(ClusterState newClusterState, ClusterChangedEvent clusterChangedEvent) { + for (ClusterStateApplier applier : clusterStateAppliers) { + try { + logger.trace("calling [{}] with change to version [{}]", applier, newClusterState.version()); + applier.applyClusterState(clusterChangedEvent); + } catch (Exception ex) { + logger.warn("failed to notify ClusterStateApplier", ex); + } + } + } + /** * Represents a set of tasks to be processed together with their executor */ @@ -854,17 +851,17 @@ public class ClusterService extends AbstractLifecycleComponent { */ class TaskOutputs { public final TaskInputs taskInputs; - public final ClusterServiceState previousClusterServiceState; - public final ClusterServiceState newClusterServiceState; + public final ClusterState previousClusterState; + public final ClusterState newClusterState; public final List nonFailedTasks; public final Map executionResults; - public TaskOutputs(TaskInputs taskInputs, ClusterServiceState previousClusterServiceState, - ClusterServiceState newClusterServiceState, List nonFailedTasks, + public TaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, + ClusterState newClusterState, List nonFailedTasks, Map executionResults) { this.taskInputs = taskInputs; - this.previousClusterServiceState = previousClusterServiceState; - this.newClusterServiceState = newClusterServiceState; + this.previousClusterState = previousClusterState; + this.newClusterState = newClusterState; this.nonFailedTasks = nonFailedTasks; this.executionResults = executionResults; } @@ -907,7 +904,7 @@ 
public class ClusterService extends AbstractLifecycleComponent { } public boolean clusterStateUnchanged() { - return previousClusterServiceState.getClusterState() == newClusterServiceState.getClusterState(); + return previousClusterState == newClusterState; } public void notifyFailedTasks() { @@ -922,13 +919,12 @@ public class ClusterService extends AbstractLifecycleComponent { } public void notifySuccessfulTasksOnUnchangedClusterState() { - ClusterState clusterState = newClusterServiceState.getClusterState(); nonFailedTasks.forEach(task -> { if (task.listener instanceof AckedClusterStateTaskListener) { //no need to wait for ack if nothing changed, the update can be counted as acknowledged ((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null); } - task.listener.clusterStateProcessed(task.source, clusterState, clusterState); + task.listener.clusterStateProcessed(task.source, newClusterState, newClusterState); }); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterServiceState.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterServiceState.java deleted file mode 100644 index 3002941b482..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterServiceState.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.service; - -import org.elasticsearch.cluster.ClusterState; - -/** - * A simple immutable container class that comprises a cluster state and cluster state status. Used by {@link ClusterService} - * to provide a snapshot view on which cluster state is currently being applied / already applied. 
- */ -public class ClusterServiceState { - private final ClusterState clusterState; - private final ClusterStateStatus clusterStateStatus; - - public ClusterServiceState(ClusterState clusterState, ClusterStateStatus clusterStateStatus) { - this.clusterState = clusterState; - this.clusterStateStatus = clusterStateStatus; - } - - public ClusterState getClusterState() { - return clusterState; - } - - public ClusterStateStatus getClusterStateStatus() { - return clusterStateStatus; - } - - @Override - public String toString() { - return "version [" + clusterState.version() + "], status [" + clusterStateStatus + "]"; - } -} diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 58ba7bb177e..9f0d3576c4b 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.IncompatibleClusterStateVersionException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterStateStatus; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 0c829e88182..acfcadb2f51 100644 --- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -20,9 +20,12 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -48,7 +51,7 @@ import static java.util.Collections.unmodifiableMap; * their state written on disk, but don't exists in the metadata of the cluster), and importing * them into the cluster. 
*/ -public class DanglingIndicesState extends AbstractComponent { +public class DanglingIndicesState extends AbstractComponent implements ClusterStateListener { private final NodeEnvironment nodeEnv; private final MetaStateService metaStateService; @@ -58,11 +61,12 @@ public class DanglingIndicesState extends AbstractComponent { @Inject public DanglingIndicesState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, - LocalAllocateDangledIndices allocateDangledIndices) { + LocalAllocateDangledIndices allocateDangledIndices, ClusterService clusterService) { super(settings); this.nodeEnv = nodeEnv; this.metaStateService = metaStateService; this.allocateDangledIndices = allocateDangledIndices; + clusterService.addListener(this); } /** @@ -174,4 +178,11 @@ public class DanglingIndicesState extends AbstractComponent { logger.warn("failed to send allocate dangled", e); } } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.state().blocks().disableStatePersistence() == false) { + processDanglingIndices(event.state().metaData()); + } + } } diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java index 3a6bfa7aec1..0d562fa4b2b 100644 --- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -25,7 +25,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; @@ -40,7 +40,7 @@ import java.util.Arrays; import java.util.Map; import java.util.function.Supplier; -public class Gateway extends AbstractComponent implements ClusterStateListener { +public class Gateway extends AbstractComponent implements ClusterStateApplier { private final ClusterService clusterService; @@ -60,7 +60,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { this.metaState = metaState; this.listGatewayMetaState = listGatewayMetaState; this.minimumMasterNodesProvider = discovery::getMinimumMasterNodes; - clusterService.addLast(this); + clusterService.addLowPriorityApplier(this); } public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException { @@ -176,10 +176,10 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { } @Override - public void clusterChanged(final ClusterChangedEvent event) { + public void applyClusterState(final ClusterChangedEvent event) { // order is important, first metaState, and then shardsState // so dangling indices will be recorded - metaState.clusterChanged(event); + metaState.applyClusterState(event); } public interface GatewayStateRecoveredListener { diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 5ab3179579b..605fe20a33a 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -22,8 +22,6 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.Logger; 
import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingService; @@ -79,24 +77,21 @@ public class GatewayAllocator extends AbstractComponent { public void setReallocation(final ClusterService clusterService, final RoutingService routingService) { this.routingService = routingService; - clusterService.add(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - boolean cleanCache = false; - DiscoveryNode localNode = event.state().nodes().getLocalNode(); - if (localNode != null) { - if (localNode.isMasterNode() && event.localNodeMaster() == false) { - cleanCache = true; - } - } else { + clusterService.addStateApplier(event -> { + boolean cleanCache = false; + DiscoveryNode localNode = event.state().nodes().getLocalNode(); + if (localNode != null) { + if (localNode.isMasterNode() && event.localNodeMaster() == false) { cleanCache = true; } - if (cleanCache) { - Releasables.close(asyncFetchStarted.values()); - asyncFetchStarted.clear(); - Releasables.close(asyncFetchStore.values()); - asyncFetchStore.clear(); - } + } else { + cleanCache = true; + } + if (cleanCache) { + Releasables.close(asyncFetchStarted.values()); + asyncFetchStarted.clear(); + Releasables.close(asyncFetchStore.values()); + asyncFetchStore.clear(); } }); } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 98b39cd2c8a..153ca8b3a87 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; @@ -54,11 +54,10 @@ import java.util.Set; import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableSet; -public class GatewayMetaState extends AbstractComponent implements ClusterStateListener { +public class GatewayMetaState extends AbstractComponent implements ClusterStateApplier { private final NodeEnvironment nodeEnv; private final MetaStateService metaStateService; - private final DanglingIndicesState danglingIndicesState; @Nullable private volatile MetaData previousMetaData; @@ -67,13 +66,12 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL @Inject public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, - DanglingIndicesState danglingIndicesState, TransportNodesListGatewayMetaState nodesListGatewayMetaState, + TransportNodesListGatewayMetaState nodesListGatewayMetaState, MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader) throws Exception { super(settings); this.nodeEnv = nodeEnv; this.metaStateService = 
metaStateService; - this.danglingIndicesState = danglingIndicesState; nodesListGatewayMetaState.init(this); if (DiscoveryNode.isDataNode(settings)) { @@ -117,7 +115,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { final ClusterState state = event.state(); if (state.blocks().disableStatePersistence()) { @@ -181,7 +179,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } - danglingIndicesState.processDanglingIndices(newMetaData); if (success) { previousMetaData = newMetaData; previouslyWrittenIndices = unmodifiableSet(relevantIndices); diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index 2e351b2e6bf..c215585c45c 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -130,12 +129,13 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste @Override protected void doStart() { - clusterService.addLast(this); + // use post applied so that the state will be visible to the background recovery thread we spawn in performStateRecovery + clusterService.addListener(this); } @Override protected void doStop() { - clusterService.remove(this); + clusterService.removeListener(this); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 3b2cf5cbd07..90d8a205e8b 100644 --- a/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/core/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import java.util.ArrayList; import java.util.Collection; @@ -176,48 +177,24 @@ final class CompositeIndexEventListener implements IndexEventListener { } @Override - public void beforeIndexClosed(IndexService indexService) { + public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) { for (IndexEventListener listener : listeners) { try { - listener.beforeIndexClosed(indexService); + listener.beforeIndexRemoved(indexService, reason); } catch (Exception e) { - logger.warn("failed to invoke before index closed callback", e); + logger.warn("failed to invoke before index removed callback", e); throw e; } } } @Override - public void beforeIndexDeleted(IndexService indexService) { + public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRemovalReason reason) { for (IndexEventListener listener : listeners) { try { - listener.beforeIndexDeleted(indexService); + listener.afterIndexRemoved(index, indexSettings, reason); } catch (Exception e) { - logger.warn("failed to invoke before index deleted callback", e); - 
throw e; - } - } - } - - @Override - public void afterIndexDeleted(Index index, Settings indexSettings) { - for (IndexEventListener listener : listeners) { - try { - listener.afterIndexDeleted(index, indexSettings); - } catch (Exception e) { - logger.warn("failed to invoke after index deleted callback", e); - throw e; - } - } - } - - @Override - public void afterIndexClosed(Index index, Settings indexSettings) { - for (IndexEventListener listener : listeners) { - try { - listener.afterIndexClosed(index, indexSettings); - } catch (Exception e) { - logger.warn("failed to invoke after index closed callback", e); + logger.warn("failed to invoke after index removed callback", e); throw e; } } diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index dce125cbdf4..5ba2889f4bb 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -471,7 +471,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust shardId, indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(), similarityService(), scriptService, queryRegistry, client, indexReader, - clusterService.state(), nowInMillis); } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 090a49aa7a6..6521369453c 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -19,11 +19,9 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.client.Client; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcherSupplier; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexSettings; @@ -33,9 +31,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptSettings; -import java.util.Collections; import java.util.function.LongSupplier; /** @@ -48,19 +44,17 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier { protected final IndicesQueriesRegistry indicesQueriesRegistry; protected final Client client; protected final IndexReader reader; - protected final ClusterState clusterState; protected final LongSupplier nowInMillis; public QueryRewriteContext(IndexSettings indexSettings, MapperService mapperService, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, Client client, IndexReader reader, - ClusterState clusterState, LongSupplier nowInMillis) { + LongSupplier nowInMillis) { this.mapperService = mapperService; this.scriptService = scriptService; this.indexSettings = indexSettings; this.indicesQueriesRegistry = indicesQueriesRegistry; this.client = client; this.reader = reader; - this.clusterState = clusterState; this.nowInMillis = nowInMillis; } @@ -98,13 +92,6 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier { return this.indexSettings.getParseFieldMatcher(); } - /** - * Returns the cluster state as is when 
the operation started. - */ - public ClusterState getClusterState() { - return clusterState; - } - /** * Returns a new {@link QueryParseContext} that wraps the provided parser, using the ParseFieldMatcher settings that * are configured in the index settings. The default script language will always default to Painless. diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index e71322eb0ad..1d5fc1bc9a1 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -29,7 +29,6 @@ import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -101,8 +100,8 @@ public class QueryShardContext extends QueryRewriteContext { public QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, IndexFieldDataService indexFieldDataService, MapperService mapperService, SimilarityService similarityService, ScriptService scriptService, final IndicesQueriesRegistry indicesQueriesRegistry, Client client, - IndexReader reader, ClusterState clusterState, LongSupplier nowInMillis) { - super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, clusterState, nowInMillis); + IndexReader reader, LongSupplier nowInMillis) { + super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, nowInMillis); this.shardId = shardId; this.indexSettings = indexSettings; this.similarityService = similarityService; @@ -118,7 +117,7 @@ public class QueryShardContext extends QueryRewriteContext { public QueryShardContext(QueryShardContext source) { this(source.shardId, source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, source.similarityService, source.scriptService, source.indicesQueriesRegistry, source.client, - source.reader, source.clusterState, source.nowInMillis); + source.reader, source.nowInMillis); this.types = source.getTypes(); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java b/core/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java index f5c6dca7d2f..24ed98e9aff 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java @@ -23,6 +23,8 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; /** * An index event listener is the primary extension point for plugins and built-in services @@ -103,31 +105,32 @@ public interface IndexEventListener { } - /** - * Called before the index shard gets created. - */ - default void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) { - } - - /** * Called before the index gets closed.
* * @param indexService The index service + * @param reason the reason for index removal */ - default void beforeIndexClosed(IndexService indexService) { + default void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) { } /** - * Called after the index has been closed. + * Called after the index has been removed. * * @param index The index + * @param reason the reason for index removal */ - default void afterIndexClosed(Index index, Settings indexSettings) { + default void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRemovalReason reason) { } + /** + * Called before the index shard gets created. + */ + default void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) { + } + /** * Called before the index shard gets deleted from disk * Note: this method is only executed on the first attempt of deleting the shard. Retries will not invoke @@ -149,28 +152,6 @@ public interface IndexEventListener { default void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) { } - /** - * Called after the index has been deleted. - * This listener method is invoked after {@link #afterIndexClosed(org.elasticsearch.index.Index, org.elasticsearch.common.settings.Settings)} - * when an index is deleted - * - * @param index The index - */ - default void afterIndexDeleted(Index index, Settings indexSettings) { - - } - - /** - * Called before the index gets deleted. - * This listener method is invoked after - * {@link #beforeIndexClosed(org.elasticsearch.index.IndexService)} when an index is deleted - * - * @param indexService The index service - */ - default void beforeIndexDeleted(IndexService indexService) { - - } - /** * Called on the Master node only before the {@link IndexService} instance is created to simulate an index creation.
* This happens right before the index and its metadata is registered in the cluster state diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 45a72d5a8e7..fc6eac196d8 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1612,7 +1612,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } }); } else { - final Exception e; + final RuntimeException e; if (numShards == -1) { e = new IndexNotFoundException(mergeSourceIndex); } else { @@ -1620,7 +1620,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl + " are started yet, expected " + numShards + " found " + startedShards.size() + " can't recover shard " + shardId()); } - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); + throw e; } break; default: diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 664743efaef..a3361f6b2ed 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -232,9 +232,7 @@ public class IndicesService extends AbstractLifecycleComponent for (final Index index : indices) { indicesStopExecutor.execute(() -> { try { - removeIndex(index, "shutdown", false); - } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to remove index on stop [{}]", index), e); + removeIndex(index, IndexRemovalReason.NO_LONGER_ASSIGNED, "shutdown"); } finally { latch.countDown(); } @@ -525,22 +523,8 @@ public class IndicesService extends AbstractLifecycleComponent return indexShard; } - /** - * Removes the given index from this service and releases all associated resources. Persistent parts of the index - * like the shards files, state and transaction logs are kept around in the case of a disaster recovery. - * @param index the index to remove - * @param reason the high level reason causing this removal - */ @Override - public void removeIndex(Index index, String reason) { - try { - removeIndex(index, reason, false); - } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to remove index ({})", reason), e); - } - } - - private void removeIndex(Index index, String reason, boolean delete) { + public void removeIndex(final Index index, final IndexRemovalReason reason, final String extraInfo) { final String indexName = index.getName(); try { final IndexService indexService; @@ -558,22 +542,18 @@ public class IndicesService extends AbstractLifecycleComponent listener = indexService.getIndexEventListener(); } - listener.beforeIndexClosed(indexService); - if (delete) { - listener.beforeIndexDeleted(indexService); - } - logger.debug("{} closing index service (reason [{}])", index, reason); - indexService.close(reason, delete); - logger.debug("{} closed...
(reason [{}])", index, reason); - listener.afterIndexClosed(indexService.index(), indexService.getIndexSettings().getSettings()); - if (delete) { - final IndexSettings indexSettings = indexService.getIndexSettings(); - listener.afterIndexDeleted(indexService.index(), indexSettings.getSettings()); + listener.beforeIndexRemoved(indexService, reason); + logger.debug("{} closing index service (reason [{}][{}])", index, reason, extraInfo); + indexService.close(extraInfo, reason == IndexRemovalReason.DELETED); + logger.debug("{} closed... (reason [{}][{}])", index, reason, extraInfo); + final IndexSettings indexSettings = indexService.getIndexSettings(); + listener.afterIndexRemoved(indexService.index(), indexSettings, reason); + if (reason == IndexRemovalReason.DELETED) { // now we are done - try to wipe data on disk if possible - deleteIndexStore(reason, indexService.index(), indexSettings); + deleteIndexStore(extraInfo, indexService.index(), indexSettings); } - } catch (IOException ex) { - throw new ElasticsearchException("failed to remove index " + index, ex); + } catch (Exception e) { + logger.warn((Supplier) () -> new ParameterizedMessage("failed to remove index {} ([{}][{}])", index, reason, extraInfo), e); } } @@ -613,27 +593,9 @@ public class IndicesService extends AbstractLifecycleComponent } } - /** - * Deletes the given index. Persistent parts of the index - * like the shards files, state and transaction logs are removed once all resources are released. - * - * Equivalent to {@link #removeIndex(Index, String)} but fires - * different lifecycle events to ensure pending resources of this index are immediately removed. - * @param index the index to delete - * @param reason the high level reason causing this delete - */ - @Override - public void deleteIndex(Index index, String reason) { - try { - removeIndex(index, reason, true); - } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to delete index ({})", reason), e); - } - } - /** * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index - * but does not deal with in-memory structures. For those call {@link #deleteIndex(Index, String)} + * but does not deal with in-memory structures. 
For those call {@link #removeIndex(Index, IndexRemovalReason, String)} */ @Override public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) { diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index aaecba91f3e..a49c2a97cb0 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -26,7 +26,7 @@ import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -68,7 +68,6 @@ import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.search.SearchService; -import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.threadpool.ThreadPool; @@ -86,7 +85,12 @@ import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.stream.Collectors; -public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateListener { +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.CLOSED; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.FAILURE; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; + +public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier { final AllocatedIndices> indicesService; private final ClusterService clusterService; @@ -102,7 +106,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple // a list of shards that failed during recovery // we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update final ConcurrentMap failedShardsCache = ConcurrentCollections.newConcurrentMap(); - private final RestoreService restoreService; private final RepositoriesService repositoriesService; private final FailedShardHandler failedShardHandler = new FailedShardHandler(); @@ -115,13 +118,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple ThreadPool threadPool, PeerRecoveryTargetService recoveryTargetService, ShardStateAction shardStateAction, NodeMappingRefreshAction nodeMappingRefreshAction, - RepositoriesService repositoriesService, RestoreService restoreService, + RepositoriesService repositoriesService, SearchService searchService, SyncedFlushService syncedFlushService, PeerRecoverySourceService peerRecoverySourceService, SnapshotShardsService snapshotShardsService, 
GlobalCheckpointSyncAction globalCheckpointSyncAction) { - this(settings, (AllocatedIndices>) indicesService, + this(settings, indicesService, clusterService, threadPool, recoveryTargetService, shardStateAction, - nodeMappingRefreshAction, repositoriesService, restoreService, searchService, syncedFlushService, peerRecoverySourceService, + nodeMappingRefreshAction, repositoriesService, searchService, syncedFlushService, peerRecoverySourceService, snapshotShardsService, globalCheckpointSyncAction::updateCheckpointForShard); } @@ -132,7 +135,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple ThreadPool threadPool, PeerRecoveryTargetService recoveryTargetService, ShardStateAction shardStateAction, NodeMappingRefreshAction nodeMappingRefreshAction, - RepositoriesService repositoriesService, RestoreService restoreService, + RepositoriesService repositoriesService, SearchService searchService, SyncedFlushService syncedFlushService, PeerRecoverySourceService peerRecoverySourceService, SnapshotShardsService snapshotShardsService, Consumer globalCheckpointSyncer) { @@ -145,7 +148,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple this.recoveryTargetService = recoveryTargetService; this.shardStateAction = shardStateAction; this.nodeMappingRefreshAction = nodeMappingRefreshAction; - this.restoreService = restoreService; this.repositoriesService = repositoriesService; this.sendRefreshMapping = this.settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); this.globalCheckpointSyncer = globalCheckpointSyncer; @@ -155,14 +157,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple protected void doStart() { // Doesn't make sense to manage shards on non-master and non-data nodes if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { - clusterService.addFirst(this); + clusterService.addHighPriorityApplier(this); } } @Override protected void doStop() { if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { - clusterService.remove(this); + clusterService.removeApplier(this); } } @@ -171,7 +173,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple } @Override - public synchronized void clusterChanged(final ClusterChangedEvent event) { + public synchronized void applyClusterState(final ClusterChangedEvent event) { if (!lifecycle.started()) { return; } @@ -183,7 +185,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple // TODO: feels hacky, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks? if (state.blocks().disableStatePersistence()) { for (AllocatedIndex indexService : indicesService) { - indicesService.removeIndex(indexService.index(), "cleaning index (disabled block persistence)"); // also cleans shards + indicesService.removeIndex(indexService.index(), NO_LONGER_ASSIGNED, + "cleaning index (disabled block persistence)"); // also cleans shards } return; } @@ -231,7 +234,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple if (masterNode != null) { // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction? String message = "master " + masterNode + " has not removed previously failed shard. 
resending shard failure"; logger.trace("[{}] re-sending failed shard [{}], reason [{}]", matchedRouting.shardId(), matchedRouting, message); - shardStateAction.localShardFailed(matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER); + shardStateAction.localShardFailed(matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER, state); } } } @@ -256,7 +259,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple final IndexSettings indexSettings; if (indexService != null) { indexSettings = indexService.getIndexSettings(); - indicesService.deleteIndex(index, "index no longer part of the metadata"); + indicesService.removeIndex(index, DELETED, "index no longer part of the metadata"); } else if (previousState.metaData().hasIndex(index.getName())) { // The deleted index was part of the previous cluster state, but not loaded on the local node final IndexMetaData metaData = previousState.metaData().index(index); @@ -330,11 +333,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple // to remove the in-memory structures for the index and not delete the // contents on disk because the index will later be re-imported as a // dangling index - assert state.metaData().index(index) != null || event.isNewCluster() : + final IndexMetaData indexMetaData = state.metaData().index(index); + assert indexMetaData != null || event.isNewCluster() : "index " + index + " does not exist in the cluster state, it should either " + "have been deleted or the cluster must be new"; - logger.debug("{} removing index, no shards allocated", index); - indicesService.removeIndex(index, "removing index (no shards allocated)"); + final AllocatedIndices.IndexRemovalReason reason = + indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE ? 
CLOSED : NO_LONGER_ASSIGNED; + logger.debug("{} removing index, [{}]", index, reason); + indicesService.removeIndex(index, reason, "removing index (no shards allocated)"); } } } @@ -355,7 +361,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple failedShardsCache.containsKey(shardId) == false && indicesService.getShardOrNull(shardId) == null) { // the master thinks we are active, but we don't have this shard at all, mark it as failed - sendFailShard(shardRouting, "master marked shard as active, but shard has not been created, mark shard as failed", null); + sendFailShard(shardRouting, "master marked shard as active, but shard has not been created, mark shard as failed", null, + state); } } } @@ -451,10 +458,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple failShardReason = "failed to create index"; } else { failShardReason = "failed to update mapping for index"; - indicesService.removeIndex(index, "removing index (mapping update failed)"); + indicesService.removeIndex(index, FAILURE, "removing index (mapping update failed)"); } for (ShardRouting shardRouting : entry.getValue()) { - sendFailShard(shardRouting, failShardReason, e); + sendFailShard(shardRouting, failShardReason, e, state); } } } @@ -480,14 +487,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple ); } } catch (Exception e) { - indicesService.removeIndex(indexService.index(), "removing index (mapping update failed)"); + indicesService.removeIndex(indexService.index(), FAILURE, "removing index (mapping update failed)"); // fail shards that would be created or updated by createOrUpdateShards RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId()); if (localRoutingNode != null) { for (final ShardRouting shardRouting : localRoutingNode) { if (shardRouting.index().equals(index) && failedShardsCache.containsKey(shardRouting.shardId()) == false) { - sendFailShard(shardRouting, "failed to update mapping for index", e); + sendFailShard(shardRouting, "failed to update mapping for index", e, state); } } } @@ -513,15 +520,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple Shard shard = indexService.getShardOrNull(shardId.id()); if (shard == null) { assert shardRouting.initializing() : shardRouting + " should have been removed by failMissingShards"; - createShard(nodes, routingTable, shardRouting); + createShard(nodes, routingTable, shardRouting, state); } else { - updateShard(nodes, shardRouting, shard, routingTable); + updateShard(nodes, shardRouting, shard, routingTable, state); } } } } - private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardRouting shardRouting) { + private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardRouting shardRouting, ClusterState state) { assert shardRouting.initializing() : "only allow shard creation for initializing shard but was " + shardRouting; DiscoveryNode sourceNode = null; @@ -539,11 +546,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple indicesService.createShard(shardRouting, recoveryState, recoveryTargetService, new RecoveryListener(shardRouting), repositoriesService, failedShardHandler); } catch (Exception e) { - failAndRemoveShard(shardRouting, true, "failed to create shard", e); + failAndRemoveShard(shardRouting, true, "failed to create shard", e, state); } } - private void updateShard(DiscoveryNodes nodes, ShardRouting shardRouting, 
Shard shard, RoutingTable routingTable) { + private void updateShard(DiscoveryNodes nodes, ShardRouting shardRouting, Shard shard, RoutingTable routingTable, + ClusterState clusterState) { final ShardRouting currentRoutingEntry = shard.routingEntry(); assert currentRoutingEntry.isSameAllocation(shardRouting) : "local shard has a different allocation id but wasn't cleaned by removeShards. " @@ -560,7 +568,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple shard.updateAllocationIdsFromMaster(activeIds, initializingIds); } } catch (Exception e) { - failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e); + failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState); return; } @@ -576,7 +584,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple if (nodes.getMasterNode() != null) { shardStateAction.shardStarted(shardRouting, "master " + nodes.getMasterNode() + " marked shard as initializing, but shard state is [" + state + "], mark shard as started", - SHARD_STATE_ACTION_LISTENER); + SHARD_STATE_ACTION_LISTENER, clusterState); } } } @@ -632,10 +640,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple } private synchronized void handleRecoveryFailure(ShardRouting shardRouting, boolean sendShardFailure, Exception failure) { - failAndRemoveShard(shardRouting, sendShardFailure, "failed recovery", failure); + failAndRemoveShard(shardRouting, sendShardFailure, "failed recovery", failure, clusterService.state()); } - private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFailure, String message, @Nullable Exception failure) { + private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFailure, String message, @Nullable Exception failure, + ClusterState state) { try { AllocatedIndex indexService = indicesService.indexService(shardRouting.shardId().getIndex()); if (indexService != null) { @@ -654,17 +663,17 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple inner); } if (sendShardFailure) { - sendFailShard(shardRouting, message, failure); + sendFailShard(shardRouting, message, failure, state); } } - private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure) { + private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure, ClusterState state) { try { logger.warn( (Supplier) () -> new ParameterizedMessage( "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure); failedShardsCache.put(shardRouting.shardId(), shardRouting); - shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER); + shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER, state); } catch (Exception inner) { if (failure != null) inner.addSuppressed(failure); logger.warn( @@ -683,7 +692,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple final ShardRouting shardRouting = shardFailure.routing; threadPool.generic().execute(() -> { synchronized (IndicesClusterStateService.this) { - failAndRemoveShard(shardRouting, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause); + failAndRemoveShard(shardRouting, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause, + clusterService.state()); } }); } @@ -783,20 +793,10 @@ public class
IndicesClusterStateService extends AbstractLifecycleComponent imple */ IndexMetaData verifyIndexIsDeleted(Index index, ClusterState clusterState); - /** - * Deletes the given index. Persistent parts of the index - * like the shards files, state and transaction logs are removed once all resources are released. - * - * Equivalent to {@link #removeIndex(Index, String)} but fires - * different lifecycle events to ensure pending resources of this index are immediately removed. - * @param index the index to delete - * @param reason the high level reason causing this delete - */ - void deleteIndex(Index index, String reason); /** * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index - * but does not deal with in-memory structures. For those call {@link #deleteIndex(Index, String)} + * but does not deal with in-memory structures. For those call {@link #removeIndex(Index, IndexRemovalReason, String)} */ void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState); @@ -804,9 +804,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple * Removes the given index from this service and releases all associated resources. Persistent parts of the index * like the shards files, state and transaction logs are kept around in the case of a disaster recovery. * @param index the index to remove - * @param reason the high level reason causing this removal + * @param reason the reason to remove the index + * @param extraInfo extra information that will be used for logging and reporting */ - void removeIndex(Index index, String reason); + void removeIndex(Index index, IndexRemovalReason reason, String extraInfo); /** * Returns an IndexService for the specified index if it exists, otherwise returns null. @@ -833,5 +834,32 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeValue) throws IOException, InterruptedException, ShardLockObtainFailedException; + + enum IndexRemovalReason { + /** + * Shards of this index were previously assigned to this node but all shards have been relocated. + * The index should be removed and all associated resources released. Persistent parts of the index + * like the shards files, state and transaction logs are kept around in the case of a disaster recovery. + */ + NO_LONGER_ASSIGNED, + /** + * The index is deleted. Persistent parts of the index like the shards files, state and transaction logs are removed once + * all resources are released. + */ + DELETED, + + /** + * The index has been closed. The index should be removed and all associated resources released. Persistent parts of the index + * like the shards files, state and transaction logs are kept around in the case of a disaster recovery. + */ + CLOSED, + + /** + * Something around index management has failed and the index should be removed. + * Persistent parts of the index like the shards files, state and transaction logs are kept around in the + * case of a disaster recovery.
+ */ + FAILURE + } } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index e5f8b531f67..84f35ebca43 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -31,7 +31,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.ClusterServiceState; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -400,7 +399,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde private void waitForClusterState(long clusterStateVersion) { ClusterStateObserver observer = new ClusterStateObserver(clusterService, TimeValue.timeValueMinutes(5), logger, threadPool.getThreadContext()); - final ClusterState clusterState = observer.observedState().getClusterState(); + final ClusterState clusterState = observer.observedState(); if (clusterState.getVersion() >= clusterStateVersion) { logger.trace("node has cluster state with version higher than {} (current: {})", clusterStateVersion, clusterState.getVersion()); @@ -424,23 +423,17 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde public void onTimeout(TimeValue timeout) { future.onFailure(new IllegalStateException("cluster state never updated to version " + clusterStateVersion)); } - }, new ClusterStateObserver.ValidationPredicate() { - - @Override - protected boolean validate(ClusterServiceState newState) { - return newState.getClusterState().getVersion() >= clusterStateVersion; - } - }); + }, newState -> newState.getVersion() >= clusterStateVersion); try { future.get(); logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, - observer.observedState().getClusterState().getVersion()); + observer.observedState().getVersion()); } catch (Exception e) { logger.debug( (Supplier) () -> new ParameterizedMessage( "failed waiting for cluster state with version {} (current: {})", clusterStateVersion, - observer.observedState().getClusterState().getVersion()), + observer.observedState().getVersion()), e); throw ExceptionsHelper.convertToRuntime(e); } diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 324903eb43a..589d8348981 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.ClusterServiceState; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -102,14 +101,17 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe this.deleteShardTimeout = 
INDICES_STORE_DELETE_SHARD_TIMEOUT.get(settings); // Doesn't make sense to delete shards on non-data nodes if (DiscoveryNode.isDataNode(settings)) { - clusterService.add(this); + // we double check nothing has changed when responses come back from other nodes. + // it's easier to do that check when the current cluster state is visible. + // also it's good in general to let things settle down + clusterService.addListener(this); } } @Override public void close() { if (DiscoveryNode.isDataNode(settings)) { - clusterService.remove(this); + clusterService.removeListener(this); } } @@ -358,16 +360,13 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); } } - }, new ClusterStateObserver.ValidationPredicate() { - @Override - protected boolean validate(ClusterServiceState newState) { - // the shard is not there in which case we want to send back a false (shard is not active), so the cluster state listener must be notified - // or the shard is active in which case we want to send back that the shard is active - // here we could also evaluate the cluster state and get the information from there. we - // don't do it because we would have to write another method for this that would have the same effect - IndexShard indexShard = getShard(request); - return indexShard == null || shardActive(indexShard); - } + }, newState -> { + // the shard is not there in which case we want to send back a false (shard is not active), so the cluster state listener must be notified + // or the shard is active in which case we want to send back that the shard is active + // here we could also evaluate the cluster state and get the information from there. 
we + // don't do it because we would have to write another method for this that would have the same effect + IndexShard currentShard = getShard(request); + return currentShard == null || shardActive(currentShard); }); } } diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 0ca89ea37b9..c1b46e49567 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -22,7 +22,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; @@ -38,7 +38,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; -public class PipelineExecutionService implements ClusterStateListener { +public class PipelineExecutionService implements ClusterStateApplier { private final PipelineStore store; private final ThreadPool threadPool; @@ -112,7 +112,7 @@ public class PipelineExecutionService implements ClusterStateListener { } @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE); if (ingestMetadata != null) { updatePipelineStats(ingestMetadata); diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java index 94850674e75..1e938581b8c 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java @@ -19,13 +19,6 @@ package org.elasticsearch.ingest; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; - import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; @@ -36,7 +29,7 @@ import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -45,7 +38,14 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; -public class PipelineStore extends AbstractComponent implements ClusterStateListener { +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class PipelineStore extends AbstractComponent implements ClusterStateApplier { private final Pipeline.Factory factory = new Pipeline.Factory(); 
private final Map processorFactories; @@ -62,7 +62,7 @@ public class PipelineStore extends AbstractComponent implements ClusterStateList } @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { innerUpdatePipelines(event.previousState(), event.state()); } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 8756e09e8d8..ec64c0f710a 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -40,7 +40,6 @@ import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.InternalClusterInfoService; -import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.MetaData; @@ -322,7 +321,7 @@ public class Node implements Closeable { final NetworkService networkService = new NetworkService(settings, getCustomNameResolvers(pluginsService.filterPlugins(DiscoveryPlugin.class))); final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool); - clusterService.add(scriptModule.getScriptService()); + clusterService.addListener(scriptModule.getScriptService()); resourcesToClose.add(clusterService); final TribeService tribeService = new TribeService(settings, clusterService, nodeId, s -> newTribeClientNode(s, classpathPlugins)); @@ -578,7 +577,7 @@ public class Node implements Closeable { // playing nice with the life cycle interfaces clusterService.setLocalNode(localNode); transportService.setLocalNode(localNode); - clusterService.add(transportService.getTaskManager()); + clusterService.addStateApplier(transportService.getTaskManager()); clusterService.start(); @@ -591,7 +590,7 @@ public class Node implements Closeable { if (initialStateTimeout.millis() > 0) { final ThreadPool thread = injector.getInstance(ThreadPool.class); ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, thread.getThreadContext()); - if (observer.observedState().getClusterState().nodes().getMasterNodeId() == null) { + if (observer.observedState().nodes().getMasterNodeId() == null) { logger.debug("waiting to join the cluster. 
timeout [{}]", initialStateTimeout); final CountDownLatch latch = new CountDownLatch(1); observer.waitForNextChange(new ClusterStateObserver.Listener() { @@ -609,7 +608,7 @@ public class Node implements Closeable { initialStateTimeout); latch.countDown(); } - }, MasterNodeChangePredicate.INSTANCE, initialStateTimeout); + }, state -> state.nodes().getMasterNodeId() != null, initialStateTimeout); try { latch.await(); diff --git a/core/src/main/java/org/elasticsearch/node/service/NodeService.java b/core/src/main/java/org/elasticsearch/node/service/NodeService.java index 36a399321e1..7d9a148b271 100644 --- a/core/src/main/java/org/elasticsearch/node/service/NodeService.java +++ b/core/src/main/java/org/elasticsearch/node/service/NodeService.java @@ -78,8 +78,8 @@ public class NodeService extends AbstractComponent implements Closeable { this.ingestService = ingestService; this.settingsFilter = settingsFilter; this.scriptService = scriptService; - clusterService.add(ingestService.getPipelineStore()); - clusterService.add(ingestService.getPipelineExecutionService()); + clusterService.addStateApplier(ingestService.getPipelineStore()); + clusterService.addStateApplier(ingestService.getPipelineExecutionService()); } public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool, diff --git a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index e5951d48a00..300ba811a1b 100644 --- a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -25,7 +25,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.metadata.MetaData; @@ -53,7 +53,7 @@ import java.util.stream.Collectors; /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. 
*/ -public class RepositoriesService extends AbstractComponent implements ClusterStateListener { +public class RepositoriesService extends AbstractComponent implements ClusterStateApplier { private final Map typesRegistry; @@ -72,7 +72,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta // Doesn't make sense to maintain repositories on non-master and non-data nodes // Nothing happens there anyway if (DiscoveryNode.isDataNode(settings) || DiscoveryNode.isMasterNode(settings)) { - clusterService.add(this); + clusterService.addStateApplier(this); } this.verifyAction = new VerifyNodeRepositoryAction(settings, transportService, clusterService, this); } @@ -253,7 +253,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta * @param event cluster changed event */ @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { try { RepositoriesMetaData oldMetaData = event.previousState().getMetaData().custom(RepositoriesMetaData.TYPE); RepositoriesMetaData newMetaData = event.state().getMetaData().custom(RepositoriesMetaData.TYPE); diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 55e4f100e76..fd2779d585d 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -27,7 +27,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; @@ -35,13 +34,13 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.QueryShardContext; @@ -49,6 +48,7 @@ import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.SearchScript; @@ -183,23 +183,16 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } @Override - public void afterIndexClosed(Index index, Settings indexSettings) { - // once an index is closed we can just clean up all the pending search context information + public void afterIndexRemoved(Index index, IndexSettings indexSettings, 
IndexRemovalReason reason) { + // once an index is removed due to deletion or closing, we can just clean up all the pending search context information // to release memory and let references to the filesystem go etc. + // it's fine to keep the contexts open if the index is still "alive"; closing them then can cause unexpected search failures along the way. + // unfortunately we don't have a clear way to signal today why an index is closed. - IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index); - if (idxMeta != null && idxMeta.getState() == IndexMetaData.State.CLOSE) { - // we need to check if it's really closed - // since sometimes due to a relocation we already closed the shard and that causes the index to be closed - // if we then close all the contexts we can get some search failures along the way which are not expected. - // it's fine to keep the contexts open if the index is still "alive" - // unfortunately we don't have a clear way to signal today why an index is closed. - afterIndexDeleted(index, indexSettings); + if (reason == IndexRemovalReason.DELETED || reason == IndexRemovalReason.CLOSED) { + freeAllContextForIndex(index); } - } - @Override - public void afterIndexDeleted(Index index, Settings indexSettings) { - freeAllContextForIndex(index); } protected void putContext(SearchContext context) { diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 7ef62ef2db5..7f84f2d66c8 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -30,7 +30,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; @@ -83,7 +83,6 @@ import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; -import static java.util.Collections.min; import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; @@ -115,7 +114,7 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; * which removes {@link RestoreInProgress} when all shards are completed. In case of * restore failure a normal recovery fail-over process kicks in.
*/ -public class RestoreService extends AbstractComponent implements ClusterStateListener { +public class RestoreService extends AbstractComponent implements ClusterStateApplier { private static final Set UNMODIFIABLE_SETTINGS = unmodifiableSet(newHashSet( SETTING_NUMBER_OF_SHARDS, @@ -160,7 +159,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis this.allocationService = allocationService; this.createIndexService = createIndexService; this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; - clusterService.add(this); + clusterService.addStateApplier(this); this.clusterSettings = clusterSettings; this.cleanRestoreStateTaskExecutor = new CleanRestoreStateTaskExecutor(logger); } @@ -793,7 +792,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { cleanupRestoreState(event); diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index d939236732e..d6598eb3a12 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -26,10 +26,11 @@ import org.apache.lucene.index.IndexCommit; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -84,7 +85,7 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed; * This service runs on data and master nodes and controls currently snapshotted shards on these nodes. 
It is responsible for * starting and stopping shard level snapshots */ -public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { +public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateApplier, IndexEventListener { public static final String UPDATE_SNAPSHOT_ACTION_NAME = "internal:cluster/snapshot/update_snapshot"; @@ -118,8 +119,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements this.threadPool = threadPool; if (DiscoveryNode.isDataNode(settings)) { // this is only useful on the nodes that can hold data - // addLast to make sure that Repository will be created before snapshot - clusterService.addLast(this); + // addLowPriorityApplier to make sure that Repository will be created before snapshot + clusterService.addLowPriorityApplier(this); } if (DiscoveryNode.isMasterNode(settings)) { @@ -151,11 +152,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements @Override protected void doClose() { - clusterService.remove(this); + clusterService.removeApplier(this); } @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { try { SnapshotsInProgress prev = event.previousState().custom(SnapshotsInProgress.TYPE); SnapshotsInProgress curr = event.state().custom(SnapshotsInProgress.TYPE); @@ -234,19 +235,20 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements // snapshots in the future Map> newSnapshots = new HashMap<>(); // Now go through all snapshots and update existing or create missing - final String localNodeId = clusterService.localNode().getId(); + final String localNodeId = event.state().nodes().getLocalNodeId(); + final DiscoveryNode masterNode = event.state().nodes().getMasterNode(); final Map> snapshotIndices = new HashMap<>(); if (snapshotsInProgress != null) { for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { snapshotIndices.put(entry.snapshot(), entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity()))); - if (entry.state() == SnapshotsInProgress.State.STARTED) { + if (entry.state() == State.STARTED) { Map startedShards = new HashMap<>(); SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot()); for (ObjectObjectCursor shard : entry.shards()) { // Add all new shards to start processing on if (localNodeId.equals(shard.value.nodeId())) { - if (shard.value.state() == SnapshotsInProgress.State.INIT && (snapshotShards == null || !snapshotShards.shards.containsKey(shard.key))) { + if (shard.value.state() == State.INIT && (snapshotShards == null || !snapshotShards.shards.containsKey(shard.key))) { logger.trace("[{}] - Adding shard to the queue", shard.key); startedShards.put(shard.key, new IndexShardSnapshotStatus()); } @@ -267,7 +269,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements survivors.put(entry.snapshot(), new SnapshotShards(unmodifiableMap(startedShards))); } } - } else if (entry.state() == SnapshotsInProgress.State.ABORTED) { + } else if (entry.state() == State.ABORTED) { // Abort all running shards for this snapshot SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot()); if (snapshotShards != null) { @@ -285,12 +287,12 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements case DONE: logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, 
updating status on the master", entry.snapshot(), shard.key); updateIndexShardSnapshotStatus(entry.snapshot(), shard.key, - new ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.SUCCESS)); + new ShardSnapshotStatus(localNodeId, State.SUCCESS), masterNode); break; case FAILURE: logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, updating status on the master", entry.snapshot(), shard.key); updateIndexShardSnapshotStatus(entry.snapshot(), shard.key, - new ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.FAILED, snapshotStatus.failure())); + new ShardSnapshotStatus(localNodeId, State.FAILED, snapshotStatus.failure()), masterNode); break; default: throw new IllegalStateException("Unknown snapshot shard stage " + snapshotStatus.stage()); @@ -331,18 +333,21 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements @Override public void doRun() { snapshot(indexShard, entry.getKey(), indexId, shardEntry.getValue()); - updateIndexShardSnapshotStatus(entry.getKey(), shardId, new ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.SUCCESS)); + updateIndexShardSnapshotStatus(entry.getKey(), shardId, + new ShardSnapshotStatus(localNodeId, State.SUCCESS), masterNode); } @Override public void onFailure(Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to create snapshot", shardId, entry.getKey()), e); - updateIndexShardSnapshotStatus(entry.getKey(), shardId, new ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e))); + updateIndexShardSnapshotStatus(entry.getKey(), shardId, + new ShardSnapshotStatus(localNodeId, State.FAILED, ExceptionsHelper.detailedMessage(e)), masterNode); } }); } catch (Exception e) { - updateIndexShardSnapshotStatus(entry.getKey(), shardId, new ShardSnapshotStatus(localNodeId, SnapshotsInProgress.State.FAILED, ExceptionsHelper.detailedMessage(e))); + updateIndexShardSnapshotStatus(entry.getKey(), shardId, + new ShardSnapshotStatus(localNodeId, State.FAILED, ExceptionsHelper.detailedMessage(e)), masterNode); } } } @@ -401,8 +406,10 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements if (snapshotsInProgress == null) { return; } + final String localNodeId = event.state().nodes().getLocalNodeId(); + final DiscoveryNode masterNode = event.state().nodes().getMasterNode(); for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) { - if (snapshot.state() == SnapshotsInProgress.State.STARTED || snapshot.state() == SnapshotsInProgress.State.ABORTED) { + if (snapshot.state() == State.STARTED || snapshot.state() == State.ABORTED) { Map localShards = currentSnapshotShards(snapshot.snapshot()); if (localShards != null) { ImmutableOpenMap masterShards = snapshot.shards(); @@ -416,12 +423,12 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements // but we think the shard is done - we need to make new master know that the shard is done logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard is done locally, updating status on the master", snapshot.snapshot(), shardId); updateIndexShardSnapshotStatus(snapshot.snapshot(), shardId, - new ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.SUCCESS)); + new ShardSnapshotStatus(localNodeId, State.SUCCESS), masterNode); } else if (localShard.getValue().stage() == Stage.FAILURE) { // but we think 
the shard failed - we need to make new master know that the shard failed logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard failed locally, updating status on master", snapshot.snapshot(), shardId); updateIndexShardSnapshotStatus(snapshot.snapshot(), shardId, - new ShardSnapshotStatus(event.state().nodes().getLocalNodeId(), SnapshotsInProgress.State.FAILED, localShardStatus.failure())); + new ShardSnapshotStatus(localNodeId, State.FAILED, localShardStatus.failure()), masterNode); } } @@ -508,15 +515,10 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements /** * Updates the shard status */ - public void updateIndexShardSnapshotStatus(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status) { + public void updateIndexShardSnapshotStatus(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status, DiscoveryNode master) { UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status); try { - if (clusterService.state().nodes().isLocalNodeElectedMaster()) { - innerUpdateSnapshotState(request); - } else { - transportService.sendRequest(clusterService.state().nodes().getMasterNode(), - UPDATE_SNAPSHOT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); - } + transportService.sendRequest(master, UPDATE_SNAPSHOT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); } catch (Exception e) { logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", request.snapshot(), request.status()), e); } @@ -579,7 +581,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements } else { // Snapshot is finished - mark it as done // TODO: Add PARTIAL_SUCCESS status? 
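Threading masterNode through from the event is the core of the applier contract: while applyClusterState runs, clusterService.state() still returns the previous state (and, per the new assertion exercised in the tests below, must not be sampled), so everything the handler needs has to come from the ClusterChangedEvent itself. A minimal sketch of that idiom, with ExampleApplier and notifyMaster as hypothetical names, not part of this patch:

    import org.elasticsearch.cluster.ClusterChangedEvent;
    import org.elasticsearch.cluster.ClusterStateApplier;
    import org.elasticsearch.cluster.node.DiscoveryNode;

    // Sketch only: read node identity from the state being applied instead of
    // calling back into clusterService.state(), which appliers may not do.
    class ExampleApplier implements ClusterStateApplier {
        @Override
        public void applyClusterState(ClusterChangedEvent event) {
            final String localNodeId = event.state().nodes().getLocalNodeId();
            final DiscoveryNode masterNode = event.state().nodes().getMasterNode();
            if (masterNode != null) {
                notifyMaster(masterNode, localNodeId); // hypothetical helper
            }
        }

        private void notifyMaster(DiscoveryNode master, String localNodeId) {
            // e.g. a transport request to the master, as updateIndexShardSnapshotStatus now does
        }
    }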
- SnapshotsInProgress.Entry updatedEntry = new SnapshotsInProgress.Entry(entry, SnapshotsInProgress.State.SUCCESS, shards.build()); + SnapshotsInProgress.Entry updatedEntry = new SnapshotsInProgress.Entry(entry, State.SUCCESS, shards.build()); entries.add(updatedEntry); // Finalize snapshot in the repository snapshotsService.endSnapshot(updatedEntry); diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index d84e3c79f66..056b2e7b10d 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -30,7 +30,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -102,7 +102,7 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed; * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state * */ -public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateListener { +public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { private final ClusterService clusterService; @@ -123,8 +123,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus this.threadPool = threadPool; if (DiscoveryNode.isMasterNode(settings)) { - // addLast to make sure that Repository will be created before snapshot - clusterService.addLast(this); + // addLowPriorityApplier to make sure that Repository will be created before snapshot + clusterService.addLowPriorityApplier(this); } } @@ -592,7 +592,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus } @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { if (event.nodesRemoved()) { @@ -1273,7 +1273,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus @Override protected void doClose() { - clusterService.remove(this); + clusterService.removeApplier(this); } public RepositoriesService getRepositoriesService() { diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java index 0f2165824fe..61c36f9015d 100644 --- a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -27,7 +27,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; @@ -53,7 +53,7 @@ import 
static org.elasticsearch.common.unit.TimeValue.timeValueMillis; /** * Task Manager service for keeping track of currently running tasks on the nodes */ -public class TaskManager extends AbstractComponent implements ClusterStateListener { +public class TaskManager extends AbstractComponent implements ClusterStateApplier { private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); private final ConcurrentMapLong tasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); @@ -314,7 +314,7 @@ public class TaskManager extends AbstractComponent implements ClusterStateListen } @Override - public void clusterChanged(ClusterChangedEvent event) { + public void applyClusterState(ClusterChangedEvent event) { lastDiscoveryNodes = event.state().getNodes(); if (event.nodesRemoved()) { synchronized (banedParents) { diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index f8d2ceb56d0..d976f9229b8 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -293,7 +293,7 @@ public class TribeService extends AbstractLifecycleComponent { public void startNodes() { for (Node node : nodes) { try { - getClusterService(node).add(new TribeClusterStateListener(node)); + getClusterService(node).addListener(new TribeClusterStateListener(node)); node.start(); } catch (Exception e) { // calling close is safe for non started nodes, we can just iterate over all diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 19e38343efa..43adf182c8b 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -183,7 +183,7 @@ public abstract class TaskManagerTestCase extends ESTestCase { } }; transportService.start(); - clusterService.add(transportService.getTaskManager()); + clusterService.addStateApplier(transportService.getTaskManager()); discoveryNode = new DiscoveryNode(name, transportService.boundAddress().publishAddress(), emptyMap(), emptySet(), Version.CURRENT); IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 4106ae2177d..132b770ca15 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -19,19 +19,13 @@ package org.elasticsearch.action.bulk; -import java.util.Collections; -import java.util.Iterator; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.BiConsumer; -import java.util.function.Consumer; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import 
org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -49,6 +43,12 @@ import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.MockitoAnnotations; +import java.util.Collections; +import java.util.Iterator; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Matchers.any; @@ -134,9 +134,9 @@ public class TransportBulkActionIngestTests extends ESTestCase { doAnswer(invocation -> { ClusterChangedEvent event = mock(ClusterChangedEvent.class); when(event.state()).thenReturn(state); - ((ClusterStateListener)invocation.getArguments()[0]).clusterChanged(event); + ((ClusterStateApplier)invocation.getArguments()[0]).applyClusterState(event); return null; - }).when(clusterService).add(any(ClusterStateListener.class)); + }).when(clusterService).addStateApplier(any(ClusterStateApplier.class)); // setup the mocked ingest service for capturing calls ingestService = mock(IngestService.class); executionService = mock(PipelineExecutionService.class); diff --git a/core/src/test/java/org/elasticsearch/action/index/TransportIndexActionIngestTests.java b/core/src/test/java/org/elasticsearch/action/index/TransportIndexActionIngestTests.java index 2ac08f9e894..ebc765243c4 100644 --- a/core/src/test/java/org/elasticsearch/action/index/TransportIndexActionIngestTests.java +++ b/core/src/test/java/org/elasticsearch/action/index/TransportIndexActionIngestTests.java @@ -19,15 +19,11 @@ package org.elasticsearch.action.index; -import java.util.Collections; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -44,6 +40,10 @@ import org.mockito.ArgumentCaptor; import org.mockito.Captor; import org.mockito.MockitoAnnotations; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; @@ -126,9 +126,9 @@ public class TransportIndexActionIngestTests extends ESTestCase { doAnswer(invocation -> { ClusterChangedEvent event = mock(ClusterChangedEvent.class); when(event.state()).thenReturn(state); - ((ClusterStateListener)invocation.getArguments()[0]).clusterChanged(event); + ((ClusterStateApplier)invocation.getArguments()[0]).applyClusterState(event); return null; - }).when(clusterService).add(any(ClusterStateListener.class)); + }).when(clusterService).addStateApplier(any(ClusterStateApplier.class)); // setup the mocked ingest service for capturing calls ingestService = mock(IngestService.class); executionService = mock(PipelineExecutionService.class); diff --git a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java 
b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index feaeee703b6..937658736d6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RecoverySource; @@ -68,6 +69,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.allOf; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -114,7 +116,11 @@ public class ClusterStateHealthTests extends ESTestCase { public void testClusterHealthWaitsForClusterStateApplication() throws InterruptedException, ExecutionException { final CountDownLatch applyLatch = new CountDownLatch(1); final CountDownLatch listenerCalled = new CountDownLatch(1); - clusterService.add(event -> { + + setState(clusterService, ClusterState.builder(clusterService.state()) + .nodes(DiscoveryNodes.builder(clusterService.state().nodes()).masterNodeId(null)).build()); + + clusterService.addStateApplier(event -> { listenerCalled.countDown(); try { applyLatch.await(); @@ -123,16 +129,23 @@ public class ClusterStateHealthTests extends ESTestCase { } }); - clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + logger.info("--> submit task to restore master"); + clusterService.submitStateUpdateTask("restore master", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { - return ClusterState.builder(currentState).build(); + final DiscoveryNodes nodes = currentState.nodes(); + return ClusterState.builder(currentState).nodes(DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId())).build(); } @Override public void onFailure(String source, Exception e) { logger.warn("unexpected failure", e); } + + @Override + public boolean runOnlyOnMaster() { + return false; + } }); logger.info("--> waiting for listener to be called and cluster state being blocked"); @@ -141,10 +154,11 @@ public class ClusterStateHealthTests extends ESTestCase { TransportClusterHealthAction action = new TransportClusterHealthAction(Settings.EMPTY, transportService, clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver, new TestGatewayAllocator()); PlainActionFuture listener = new PlainActionFuture<>(); - action.execute(new ClusterHealthRequest(), listener); + action.execute(new ClusterHealthRequest().waitForGreenStatus(), listener); assertFalse(listener.isDone()); + logger.info("--> releasing task to restore master"); applyLatch.countDown(); listener.get(); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java index a41ecdec79e..994510ec56c 100644 ---
a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java @@ -70,7 +70,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { clusterService = mock(ClusterService.class); allocationService = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator()); delayedAllocationService = new TestDelayAllocationService(Settings.EMPTY, threadPool, clusterService, allocationService); - verify(clusterService).addFirst(delayedAllocationService); + verify(clusterService).addListener(delayedAllocationService); } @After diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index a5d4790f742..57086a0d1bf 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -1152,7 +1152,7 @@ public class ClusterServiceTests extends ESTestCase { TimedClusterService timedClusterService = createTimedClusterService(false); AtomicBoolean isMaster = new AtomicBoolean(); - timedClusterService.add(new LocalNodeMasterListener() { + timedClusterService.addLocalNodeMasterListener(new LocalNodeMasterListener() { @Override public void onMaster() { isMaster.set(true); @@ -1190,6 +1190,44 @@ public class ClusterServiceTests extends ESTestCase { timedClusterService.close(); } + public void testClusterStateApplierCantSampleClusterState() throws InterruptedException { + AtomicReference error = new AtomicReference<>(); + AtomicBoolean applierCalled = new AtomicBoolean(); + clusterService.addStateApplier(event -> { + try { + applierCalled.set(true); + clusterService.state(); + error.set(new AssertionError("successfully sampled state")); + } catch (AssertionError e) { + if (e.getMessage().contains("should not be called by a cluster state applier") == false) { + error.set(e); + } + } + }); + + CountDownLatch latch = new CountDownLatch(1); + clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(String source, Exception e) { + error.compareAndSet(null, e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + }); + + latch.await(); + assertNull(error.get()); + assertTrue(applierCalled.get()); + } + private static class SimpleTask { private final int id; diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index ce57e7be05b..90d7e5e5a9e 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -28,9 +28,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; import 
org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlock; @@ -43,8 +41,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.ClusterServiceState; -import org.elasticsearch.cluster.service.ClusterStateStatus; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; @@ -689,29 +685,23 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { String>>>()); for (final String node : majoritySide) { masters.put(node, new ArrayList>()); - internalCluster().getInstance(ClusterService.class, node).add(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (!Objects.equals(previousMaster, currentMaster)) { - logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), - event.previousState()); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } + internalCluster().getInstance(ClusterService.class, node).addListener(event -> { + DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); + DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); + if (!Objects.equals(previousMaster, currentMaster)) { + logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), + event.previousState()); + String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; + String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; + masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); } }); } final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).add(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } + internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { + if (event.state().nodes().getMasterNodeId() == null) { + oldMasterNodeSteppedDown.countDown(); } }); @@ -1199,9 +1189,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Don't restart the master node until we know the index deletion has taken effect on master and the master eligible node. 
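The lambda conversions above rely on ClusterStateListener now being a functional interface registered through addListener, distinct from appliers. A rough sketch of the two registration paths and the timing semantics this patch assumes:

    // Sketch: the two hook types after this refactor.
    clusterService.addStateApplier(event -> {
        // runs while the new state is being installed on this node;
        // must not sample clusterService.state()
    });
    clusterService.addListener(event -> {
        // runs once the state is fully applied and visible via clusterService.state()
    });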
assertBusy(() -> { for (String masterNode : allMasterEligibleNodes) { - final ClusterServiceState masterState = internalCluster().clusterService(masterNode).clusterServiceState(); - assertTrue("index not deleted on " + masterNode, masterState.getClusterState().metaData().hasIndex(idxName) == false && - masterState.getClusterStateStatus() == ClusterStateStatus.APPLIED); + final ClusterState masterState = internalCluster().clusterService(masterNode).state(); + assertTrue("index not deleted on " + masterNode, masterState.metaData().hasIndex(idxName) == false); } }); internalCluster().restartNode(masterNode1, InternalTestCluster.EMPTY_CALLBACK); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index 27fb48f764c..cc8d43cc79e 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -133,7 +133,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master); final ArrayList statesFound = new ArrayList<>(); final CountDownLatch nodesStopped = new CountDownLatch(1); - clusterService.add(event -> { + clusterService.addStateApplier(event -> { statesFound.add(event.state()); try { // block until both nodes have stopped to accumulate node failures diff --git a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java index 581ef0f99a3..91d6d3f08fe 100644 --- a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -34,6 +35,7 @@ import java.nio.file.StandardCopyOption; import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; public class DanglingIndicesStateTests extends ESTestCase { @@ -46,7 +48,7 @@ public class DanglingIndicesStateTests extends ESTestCase { public void testCleanupWhenEmpty() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); - DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null); + DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); assertTrue(danglingState.getDanglingIndices().isEmpty()); MetaData metaData = MetaData.builder().build(); @@ -57,7 +59,7 @@ public class DanglingIndicesStateTests extends ESTestCase { public void testDanglingIndicesDiscovery() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); - DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null); + DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); 
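The ZenDiscoveryIT change above exploits a property the refactor makes explicit: an applier blocks the whole state-application pipeline until it returns, so a test can hold every subsequent update behind a latch. A stripped-down sketch of that trick (the latch name is hypothetical):

    // Sketch: because appliers run synchronously during state application,
    // awaiting a latch inside one stalls all further cluster state updates.
    CountDownLatch proceed = new CountDownLatch(1);
    clusterService.addStateApplier(event -> {
        try {
            proceed.await(); // hold the pipeline until the test says go
        } catch (InterruptedException e) {
            throw new AssertionError(e);
        }
    });
    // ... trigger and verify behaviour while application is blocked ...
    proceed.countDown(); // release state application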
assertTrue(danglingState.getDanglingIndices().isEmpty()); MetaData metaData = MetaData.builder().build(); @@ -75,7 +77,7 @@ public class DanglingIndicesStateTests extends ESTestCase { public void testInvalidIndexFolder() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); - DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null); + DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); MetaData metaData = MetaData.builder().build(); final String uuid = "test1UUID"; @@ -99,7 +101,7 @@ public class DanglingIndicesStateTests extends ESTestCase { public void testDanglingProcessing() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); - DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null); + DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); MetaData metaData = MetaData.builder().build(); @@ -143,7 +145,7 @@ public class DanglingIndicesStateTests extends ESTestCase { public void testDanglingIndicesNotImportedWhenTombstonePresent() throws Exception { try (NodeEnvironment env = newNodeEnvironment()) { MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env); - DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null); + DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService); final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID"); IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build(); @@ -155,4 +157,9 @@ public class DanglingIndicesStateTests extends ESTestCase { } } + + private DanglingIndicesState createDanglingIndicesState(NodeEnvironment env, MetaStateService metaStateService) { + return new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null, + mock(ClusterService.class)); + } } diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index d747e63a695..6b24c2e356c 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -61,6 +61,7 @@ import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; @@ -189,7 +190,7 @@ public class IndexModuleTests extends ESTestCase { final AtomicBoolean atomicBoolean = new AtomicBoolean(false); final IndexEventListener eventListener = new IndexEventListener() { @Override - public void beforeIndexDeleted(IndexService indexService) { + public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) { atomicBoolean.set(true); } }; @@ -201,7 +202,7 @@ public class IndexModuleTests extends ESTestCase { IndexSettings x = 
indexService.getIndexSettings(); assertEquals(x.getSettings().getAsMap(), indexSettings.getSettings().getAsMap()); assertEquals(x.getIndex(), index); - indexService.getIndexEventListener().beforeIndexDeleted(null); + indexService.getIndexEventListener().beforeIndexRemoved(null, null); assertTrue(atomicBoolean.get()); indexService.close("simon says", false); } diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java index df7df5771cb..21d816e83ea 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java @@ -22,7 +22,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.StringField; -import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index 8ca6aeba30a..b22b48767a8 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -18,9 +18,6 @@ */ package org.elasticsearch.index.mapper; -import java.io.IOException; -import java.util.Locale; - import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexOptions; @@ -36,8 +33,6 @@ import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Relation; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -46,6 +41,9 @@ import org.elasticsearch.index.query.QueryShardContext; import org.joda.time.DateTimeZone; import org.junit.Before; +import java.io.IOException; +import java.util.Locale; + public class DateFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { @@ -73,7 +71,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase { } public void testIsFieldWithinQueryEmptyReader() throws IOException { - QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis); + QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, () -> nowInMillis); IndexReader reader = new MultiReader(); DateFieldType ft = new DateFieldType(); ft.setName("my_date"); @@ -83,7 +81,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase { private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, DateTimeZone zone, DateMathParser alternateFormat) throws IOException { - QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis); + QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, () -> nowInMillis); assertEquals(Relation.INTERSECTS, 
ft.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", randomBoolean(), randomBoolean(), null, null, context)); assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2016-01-02", "2016-06-20", @@ -130,7 +128,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase { DateFieldType ft2 = new DateFieldType(); ft2.setName("my_date2"); - QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis); + QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, () -> nowInMillis); assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, context)); IOUtils.close(reader, w, dir); } @@ -165,7 +163,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase { QueryShardContext context = new QueryShardContext(0, new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), - null, null, null, null, null, null, null, null, null, () -> nowInMillis); + null, null, null, null, null, null, null, null, () -> nowInMillis); MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); String date = "2015-10-12T14:10:55"; @@ -184,7 +182,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build(); QueryShardContext context = new QueryShardContext(0, new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), - null, null, null, null, null, null, null, null, null, () -> nowInMillis); + null, null, null, null, null, null, null, null, () -> nowInMillis); MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); String date1 = "2015-10-12T14:10:55"; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index b4bdea30c30..9010164fe03 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -74,7 +74,7 @@ public class RangeFieldTypeTests extends FieldTypeTestCase { Settings indexSettings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAsciiOfLengthBetween(1, 10), indexSettings); - QueryShardContext context = new QueryShardContext(0, idxSettings, null, null, null, null, null, null, null, null, null, + QueryShardContext context = new QueryShardContext(0, idxSettings, null, null, null, null, null, null, null, null, () -> nowInMillis); RangeFieldMapper.RangeFieldType ft = new RangeFieldMapper.RangeFieldType(type); ft.setName(FIELDNAME); diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java index 29fe3af19c0..73449dbf5d9 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryShardContextTests.java @@ -48,8 +48,7 @@ public class QueryShardContextTests extends ESTestCase { when(mapperService.getIndexSettings()).thenReturn(indexSettings); final long nowInMillis = randomPositiveLong(); QueryShardContext context = new QueryShardContext( - 0, indexSettings, null, null, mapperService, null, null, null, null, null, null, - () -> 
nowInMillis); + 0, indexSettings, null, null, mapperService, null, null, null, null, null, () -> nowInMillis); context.setAllowUnmappedFields(false); MappedFieldType fieldType = new TextFieldMapper.TextFieldType(); diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java index 6c5fe046a4b..fd6f0670a57 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryRewriteTests.java @@ -37,7 +37,7 @@ public class RangeQueryRewriteTests extends ESSingleNodeTestCase { IndexService indexService = createIndex("test"); IndexReader reader = new MultiReader(); QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, indexService.mapperService(), - null, null, null, null, reader, null, null); + null, null, null, null, reader, null); RangeQueryBuilder range = new RangeQueryBuilder("foo"); assertEquals(Relation.DISJOINT, range.getRelation(context)); } @@ -54,7 +54,7 @@ public class RangeQueryRewriteTests extends ESSingleNodeTestCase { indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, indexService.mapperService(), - null, null, null, null, null, null, null); + null, null, null, null, null, null); RangeQueryBuilder range = new RangeQueryBuilder("foo"); // can't make assumptions on a missing reader, so it must return INTERSECT assertEquals(Relation.INTERSECTS, range.getRelation(context)); @@ -73,7 +73,7 @@ public class RangeQueryRewriteTests extends ESSingleNodeTestCase { new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); IndexReader reader = new MultiReader(); QueryRewriteContext context = new QueryShardContext(0, indexService.getIndexSettings(), null, null, indexService.mapperService(), - null, null, null, null, reader, null, null); + null, null, null, null, reader, null); RangeQueryBuilder range = new RangeQueryBuilder("foo"); // no values -> DISJOINT assertEquals(Relation.DISJOINT, range.getRelation(context)); diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryParserTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryParserTests.java index 253bc95ffb8..5e971abb2bd 100644 --- a/core/src/test/java/org/elasticsearch/index/query/SimpleQueryParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/SimpleQueryParserTests.java @@ -22,27 +22,23 @@ package org.elasticsearch.index.query; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.Term; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.SynonymQuery; -import org.apache.lucene.search.PrefixQuery; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; -import 
org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MockFieldMapper; -import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; import java.util.Collections; import java.util.HashMap; @@ -152,7 +148,7 @@ public class SimpleQueryParserTests extends ESTestCase { IndexMetaData indexState = IndexMetaData.builder("index").settings(indexSettings).build(); IndexSettings settings = new IndexSettings(indexState, Settings.EMPTY); QueryShardContext mockShardContext = new QueryShardContext(0, settings, null, null, null, null, null, indicesQueriesRegistry, - null, null, null, System::currentTimeMillis) { + null, null, System::currentTimeMillis) { @Override public MappedFieldType fieldMapper(String name) { return new MockFieldMapper.FakeFieldType(); @@ -166,7 +162,7 @@ public class SimpleQueryParserTests extends ESTestCase { // Now check what happens if foo.quote does not exist mockShardContext = new QueryShardContext(0, settings, null, null, null, null, null, indicesQueriesRegistry, - null, null, null, System::currentTimeMillis) { + null, null, System::currentTimeMillis) { @Override public MappedFieldType fieldMapper(String name) { if (name.equals("foo.quote")) { diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 46cd30112b5..a2a90a8c753 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -28,9 +28,11 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -41,11 +43,12 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCase { - public void testCloseDeleteCallback() throws Throwable { + public void testStartDeleteIndexEventCallback() throws Throwable { IndicesService indicesService = getInstanceFromNode(IndicesService.class); 
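The renamed test below drives an index through create, shard start, and delete, using a counter to pin the exact callback order: beforeIndexCreated (1), afterIndexCreated (2), beforeIndexShardCreated (3), afterIndexShardCreated (4), afterIndexShardStarted (5), beforeIndexRemoved (6), beforeIndexShardDeleted (7), afterIndexShardDeleted (8), afterIndexRemoved (9). For a listener that only cares about the new removal reason, a minimal sketch (variable name hypothetical) could be:

    // Sketch: the new hooks carry an IndexRemovalReason, so a single callback
    // can distinguish deletion from closing instead of two separate methods.
    IndexEventListener reasonAware = new IndexEventListener() {
        @Override
        public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) {
            if (reason == IndexRemovalReason.DELETED) {
                // the index is going away for good
            } else if (reason == IndexRemovalReason.CLOSED) {
                // the index may be reopened later
            }
        }
    };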
assertAcked(client().admin().indices().prepareCreate("test") .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)); @@ -55,45 +58,70 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas ShardRouting shardRouting = indicesService.indexService(idx).getShard(0).routingEntry(); final AtomicInteger counter = new AtomicInteger(1); IndexEventListener countingListener = new IndexEventListener() { + @Override - public void afterIndexClosed(Index index, Settings indexSettings) { - assertEquals(counter.get(), 5); + public void beforeIndexCreated(Index index, Settings indexSettings) { + assertEquals("test", index.getName()); + assertEquals(1, counter.get()); counter.incrementAndGet(); } @Override - public void beforeIndexClosed(IndexService indexService) { - assertEquals(counter.get(), 1); + public void afterIndexCreated(IndexService indexService) { + assertEquals("test", indexService.index().getName()); + assertEquals(2, counter.get()); counter.incrementAndGet(); } @Override - public void afterIndexDeleted(Index index, Settings indexSettings) { - assertEquals(counter.get(), 6); + public void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) { + assertEquals(3, counter.get()); counter.incrementAndGet(); } @Override - public void beforeIndexDeleted(IndexService indexService) { - assertEquals(counter.get(), 2); + public void afterIndexShardCreated(IndexShard indexShard) { + assertEquals(4, counter.get()); + counter.incrementAndGet(); + } + + @Override + public void afterIndexShardStarted(IndexShard indexShard) { + assertEquals(5, counter.get()); + counter.incrementAndGet(); + } + + @Override + public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) { + assertEquals(DELETED, reason); + assertEquals(6, counter.get()); counter.incrementAndGet(); } @Override public void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) { - assertEquals(counter.get(), 3); + assertEquals(7, counter.get()); counter.incrementAndGet(); } @Override public void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) { - assertEquals(counter.get(), 4); + assertEquals(8, counter.get()); counter.incrementAndGet(); } + + @Override + public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRemovalReason reason) { + assertEquals(DELETED, reason); + assertEquals(9, counter.get()); + counter.incrementAndGet(); + } + }; - indicesService.deleteIndex(idx, "simon says"); + indicesService.removeIndex(idx, DELETED, "simon says"); try { IndexService index = indicesService.createIndex(metaData, Arrays.asList(countingListener), s -> {}); + assertEquals(3, counter.get()); idx = index.index(); ShardRouting newRouting = shardRouting; String nodeId = newRouting.currentNodeId(); @@ -103,16 +131,18 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); IndexShard shard = index.createShard(newRouting); shard.updateRoutingEntry(newRouting); + assertEquals(5, counter.get()); final DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); shard.markAsRecovering("store", new RecoveryState(newRouting, localNode, null)); shard.recoverFromStore(); newRouting = ShardRoutingHelper.moveToStarted(newRouting); shard.updateRoutingEntry(newRouting); + assertEquals(6, counter.get()); } finally { - indicesService.deleteIndex(idx, "simon says"); + indicesService.removeIndex(idx, 
DELETED, "simon says"); } - assertEquals(7, counter.get()); + assertEquals(10, counter.get()); } } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 97b3924303d..2449fdfc290 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -197,23 +197,12 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC } @Override - public synchronized void deleteIndex(Index index, String reason) { - if (hasIndex(index) == false) { - return; + public synchronized void removeIndex(Index index, IndexRemovalReason reason, String extraInfo) { + if (hasIndex(index)) { + Map newIndices = new HashMap<>(indices); + newIndices.remove(index.getUUID()); + indices = unmodifiableMap(newIndices); } - Map newIndices = new HashMap<>(indices); - newIndices.remove(index.getUUID()); - indices = unmodifiableMap(newIndices); - } - - @Override - public synchronized void removeIndex(Index index, String reason) { - if (hasIndex(index) == false) { - return; - } - Map newIndices = new HashMap<>(indices); - newIndices.remove(index.getUUID()); - indices = unmodifiableMap(newIndices); } @Override diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index fdbd2d0548a..4b21b8820c7 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -104,7 +104,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node); final ClusterChangedEvent event = new ClusterChangedEvent("simulated change " + i, localState, previousLocalState); try { - indicesClusterStateService.clusterChanged(event); + indicesClusterStateService.applyClusterState(event); } catch (AssertionError error) { logger.error((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}", @@ -127,7 +127,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice * are all removed but the on disk contents of those indices remain so that they can later be * imported as dangling indices. Normally, the first cluster state update that the node * receives from the new cluster would contain a cluster block that would cause all in-memory - * structures to be removed (see {@link IndicesClusterStateService#clusterChanged(ClusterChangedEvent)}), + * structures to be removed (see {@link IndicesClusterStateService#applyClusterState(ClusterChangedEvent)}), * but in the case where the node joined and was a few cluster state updates behind, it would * not have received the cluster block, in which case we still need to remove the in-memory * structures while ensuring the data remains on disk. 
This test executes this particular @@ -155,7 +155,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice ClusterState previousLocalState = adaptClusterStateToLocalNode(initialState, node); IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(node, RecordingIndicesService::new); indicesCSSvc.start(); - indicesCSSvc.clusterChanged(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState)); + indicesCSSvc.applyClusterState(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState)); // create a new empty cluster state with a brand new cluster UUID ClusterState newClusterState = ClusterState.builder(initialState) @@ -165,8 +165,8 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice // simulate the cluster state change on the node localState = adaptClusterStateToLocalNode(newClusterState, node); previousLocalState = adaptClusterStateToLocalNode(stateWithIndex, node); - indicesCSSvc.clusterChanged(new ClusterChangedEvent("cluster state change with a new cluster UUID (and doesn't contain the index)", - localState, previousLocalState)); + indicesCSSvc.applyClusterState(new ClusterChangedEvent( + "cluster state change with a new cluster UUID (and doesn't contain the index)", localState, previousLocalState)); // check that in memory data structures have been removed once the new cluster state is applied, // but the persistent data is still there @@ -388,7 +388,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice null, null, null, - null, shardId -> {}); } @@ -396,11 +395,13 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice private Set deletedIndices = Collections.emptySet(); @Override - public synchronized void deleteIndex(Index index, String reason) { - super.deleteIndex(index, reason); - Set newSet = Sets.newHashSet(deletedIndices); - newSet.add(index); - deletedIndices = Collections.unmodifiableSet(newSet); + public synchronized void removeIndex(Index index, IndexRemovalReason reason, String extraInfo) { + super.removeIndex(index, reason, extraInfo); + if (reason == IndexRemovalReason.DELETED) { + Set newSet = Sets.newHashSet(deletedIndices); + newSet.add(index); + deletedIndices = Collections.unmodifiableSet(newSet); + } } public synchronized boolean isDeleted(Index index) { diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 2db44c9d0ba..0d4d740aa35 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -61,6 +61,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.singletonList; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.is; @@ -152,7 +153,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase { public void run() { startGun.countDown(); while(running.get()) { - service.afterIndexDeleted(indexService.index(), indexService.getIndexSettings().getSettings()); + 
service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED); if (randomBoolean()) { // here we trigger some refreshes to ensure the IR go out of scope such that we hit ACE if we access a search // context in a non-sane way. diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java index cd19785f3e0..c232068fffd 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java @@ -100,7 +100,7 @@ public class ExtendedBoundsTests extends ESTestCase { SearchContext context = mock(SearchContext.class); QueryShardContext qsc = new QueryShardContext(0, new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), null, null, null, null, - null, null, null, null, null, () -> now); + null, null, null, null, () -> now); when(context.getQueryShardContext()).thenReturn(qsc); FormatDateTimeFormatter formatter = Joda.forPattern("dateOptionalTime"); DocValueFormat format = new DocValueFormat.DateTime(formatter, DateTimeZone.UTC); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index 88f696f1697..07a70c91ef9 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -266,7 +266,7 @@ public class HighlightBuilderTests extends ESTestCase { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in highlighter QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, null, null, null, null, null, indicesQueriesRegistry, - null, null, null, System::currentTimeMillis) { + null, null, System::currentTimeMillis) { @Override public MappedFieldType fieldMapper(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); diff --git a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index 168d8f869ba..735c84a3319 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -197,8 +197,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase { IndexSettings indexSettings = new IndexSettings(indexMetadata.build(), Settings.EMPTY); final long nowInMillis = randomPositiveLong(); QueryShardContext context = new QueryShardContext( - 0, indexSettings, null, null, null, null, null, queriesRegistry, null, null, null, - () -> nowInMillis); + 0, indexSettings, null, null, null, null, null, queriesRegistry, null, null, () -> nowInMillis); readRequest.rewrite(context); QueryBuilder queryBuilder = readRequest.filteringAliases(); assertEquals(queryBuilder, QueryBuilders.boolQuery() diff --git a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java 
b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java index 5fc9e6c6dae..22f2dd53808 100644 --- a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java @@ -139,7 +139,7 @@ public class QueryRescoreBuilderTests extends ESTestCase { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAsciiOfLengthBetween(1, 10), indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, null, null, null, null, null, indicesQueriesRegistry, - null, null, null, () -> nowInMillis) { + null, null, () -> nowInMillis) { @Override public MappedFieldType fieldMapper(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index ec29911d8ca..d545a082b55 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -211,7 +211,7 @@ public abstract class AbstractSortTestCase> extends EST }); long nowInMillis = randomPositiveLong(); return new QueryShardContext(0, idxSettings, bitsetFilterCache, ifds, null, null, scriptService, - indicesQueriesRegistry, null, null, null, () -> nowInMillis) { + indicesQueriesRegistry, null, null, () -> nowInMillis) { @Override public MappedFieldType fieldMapper(String name) { return provideMappedFieldType(name); diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b1c46b35b62..1d0ddf39b01 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -714,7 +714,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest final ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName()); BlockingClusterStateListener snapshotListener = new BlockingClusterStateListener(clusterService, "update_snapshot [", "update snapshot state", Priority.HIGH); try { - clusterService.addFirst(snapshotListener); + clusterService.addListener(snapshotListener); logger.info("--> snapshot"); dataNodeClient().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); @@ -728,7 +728,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest snapshotListener.unblock(); } finally { - clusterService.remove(snapshotListener); + clusterService.removeListener(snapshotListener); } logger.info("--> wait until the snapshot is done"); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 4765292be1d..dda634023b6 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -2335,7 +2335,7 @@ public class SharedClusterSnapshotRestoreIT extends 
AbstractSnapshotIntegTestCas final ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName()); BlockingClusterStateListener snapshotListener = new BlockingClusterStateListener(clusterService, "update_snapshot [", "update snapshot state", Priority.HIGH); try { - clusterService.addFirst(snapshotListener); + clusterService.addListener(snapshotListener); logger.info("--> snapshot"); ListenableActionFuture snapshotFuture = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute(); @@ -2350,7 +2350,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertEquals(numberOfShards, createSnapshotResponse.getSnapshotInfo().successfulShards()); } finally { - clusterService.remove(snapshotListener); + clusterService.removeListener(snapshotListener); } // Check that we didn't timeout diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index ba4a33d4496..2055cde567e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -20,7 +20,6 @@ package org.elasticsearch.test; import com.fasterxml.jackson.core.io.JsonStringEncoder; - import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -36,8 +35,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParseFieldMatcher; @@ -1135,9 +1132,8 @@ public abstract class AbstractQueryTestCase> } QueryShardContext createShardContext() { - ClusterState state = ClusterState.builder(new ClusterName("_name")).build(); return new QueryShardContext(0, idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, - scriptService, indicesQueriesRegistry, this.client, null, state, () -> nowInMillis); + scriptService, indicesQueriesRegistry, this.client, null, () -> nowInMillis); } ScriptModule createScriptModule(List scriptPlugins) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java index c4a9515f545..13caec4adc8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java @@ -24,14 +24,15 @@ import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import 
org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.plugins.Plugin; import java.util.Arrays; @@ -133,13 +134,13 @@ public final class MockIndexEventListener { } @Override - public void beforeIndexClosed(IndexService indexService) { - delegate.beforeIndexClosed(indexService); + public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) { + delegate.beforeIndexRemoved(indexService, reason); } @Override - public void afterIndexClosed(Index index, Settings indexSettings) { - delegate.afterIndexClosed(index, indexSettings); + public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRemovalReason reason) { + delegate.afterIndexRemoved(index, indexSettings, reason); } @Override @@ -152,16 +153,6 @@ public final class MockIndexEventListener { delegate.afterIndexShardDeleted(shardId, indexSettings); } - @Override - public void afterIndexDeleted(Index index, Settings indexSettings) { - delegate.afterIndexDeleted(index, indexSettings); - } - - @Override - public void beforeIndexDeleted(IndexService indexService) { - delegate.beforeIndexDeleted(indexService); - } - @Override public void beforeIndexAddedToCluster(Index index, Settings indexSettings) { delegate.beforeIndexAddedToCluster(index, indexSettings); diff --git a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java index 45ea62f31ea..0776fa1edb2 100644 --- a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java @@ -35,7 +35,7 @@ public class MockSearchServiceTests extends ESTestCase { public void testAssertNoInFlightContext() { final long nowInMillis = randomPositiveLong(); SearchContext s = new TestSearchContext(new QueryShardContext(0, new IndexSettings(IndexMetaData.PROTO, Settings.EMPTY), null, null, - null, null, null, null, null, null, null, () -> nowInMillis)) { + null, null, null, null, null, null, () -> nowInMillis)) { @Override public SearchShardTarget shardTarget() { return new SearchShardTarget("node", new Index("idx", "ignored"), 0); From 2aa89820f3ee2bcaff02d05e9740b478c07f054d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 15 Dec 2016 11:19:51 -0500 Subject: [PATCH 11/41] Don't use null charset in RequestLogger (#22197) If the response comes back with a content type with a `null` charset we were blindly using it, causing `NullPointerException`s. 
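For reference, a minimal sketch of the defensive charset selection this change makes, assuming the Apache HttpCore `ContentType` API (the helper name `charsetOrUtf8` is invented for illustration):

```java
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;

final class CharsetHelper {
    // A content type such as "application/json" can carry no charset parameter,
    // in which case ContentType.getCharset() returns null; fall back to UTF-8
    // instead of handing a null charset to InputStreamReader.
    static Charset charsetOrUtf8(HttpEntity entity) {
        ContentType contentType = ContentType.get(entity);
        if (contentType != null && contentType.getCharset() != null) {
            return contentType.getCharset();
        }
        return StandardCharsets.UTF_8;
    }
}
```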
Closes #22190 --- .../elasticsearch/client/RequestLogger.java | 2 +- .../client/RequestLoggerTests.java | 21 +++++++++++++++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java index 7312d33c9fb..ad2348762dd 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -152,7 +152,7 @@ final class RequestLogger { httpResponse.setEntity(entity); ContentType contentType = ContentType.get(entity); Charset charset = StandardCharsets.UTF_8; - if (contentType != null) { + if (contentType != null && contentType.getCharset() != null) { charset = contentType.getCharset(); } try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent(), charset))) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java index b66a7bcea2d..68717dfe223 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java @@ -44,6 +44,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import static org.hamcrest.CoreMatchers.equalTo; @@ -51,7 +52,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; public class RequestLoggerTests extends RestClientTestCase { - public void testTraceRequest() throws IOException, URISyntaxException { HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? 
"http" : "https"); String expectedEndpoint = "/index/type/_api"; @@ -69,7 +69,7 @@ public class RequestLoggerTests extends RestClientTestCase { expected += " -d '" + requestBody + "'"; HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; HttpEntity entity; - switch(randomIntBetween(0, 3)) { + switch(randomIntBetween(0, 4)) { case 0: entity = new StringEntity(requestBody, StandardCharsets.UTF_8); break; @@ -82,6 +82,10 @@ public class RequestLoggerTests extends RestClientTestCase { case 3: entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8)); break; + case 4: + // Evil entity without a charset + entity = new StringEntity(requestBody, (Charset) null); + break; default: throw new UnsupportedOperationException(); } @@ -116,11 +120,20 @@ public class RequestLoggerTests extends RestClientTestCase { expected += "\n# \"field\": \"value\""; expected += "\n# }"; HttpEntity entity; - if (getRandom().nextBoolean()) { + switch(randomIntBetween(0, 2)) { + case 0: entity = new StringEntity(responseBody, StandardCharsets.UTF_8); - } else { + break; + case 1: //test a non repeatable entity entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8))); + break; + case 2: + // Evil entity without a charset + entity = new StringEntity(responseBody, (Charset) null); + break; + default: + throw new UnsupportedOperationException(); } httpResponse.setEntity(entity); } From df43c268dad30bdb1c406763a92ca3c926b58a74 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 15 Dec 2016 13:21:49 -0500 Subject: [PATCH 12/41] Eagerly initialize Netty 4 Today we initialize Netty in a static initializer. We trigger this method via static initializers from Netty-related classes, but we can trigger this method earlier than we do to ensure that Netty is initialized how we want it to be. --- .../main/java/org/elasticsearch/transport/Netty4Plugin.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java index 08f87a2779e..32170a7d499 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java @@ -32,6 +32,7 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty4.Netty4Transport; +import org.elasticsearch.transport.netty4.Netty4Utils; import java.util.Arrays; import java.util.Collections; @@ -41,6 +42,10 @@ import java.util.function.Supplier; public class Netty4Plugin extends Plugin implements NetworkPlugin { + static { + Netty4Utils.setup(); + } + public static final String NETTY_TRANSPORT_NAME = "netty4"; public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; From 61597f2c20aa668d75f3330b49b05f64ed4ee293 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 15 Dec 2016 13:35:14 -0500 Subject: [PATCH 13/41] Send error_trace by default when testing (#22195) Sends the `error_trace` parameter with all requests sent by the yaml test framework, including the doc snippet tests. This can be overridden by settings `error_trace: false`. 
While this makes core's handling of the yaml tests drift slightly from the clients', it should only be a problem for tests that rely on the default value, both of which I've fixed by setting the value explicitly. This also escapes `\n` and `\t` in the `Stash dump on failure` so the `stack_trace` is more readable. Also fixes `RestUpdateSettingsAction` to not think of the `error_trace` parameter as a setting. --- .../action/admin/indices/RestUpdateSettingsAction.java | 1 + .../rest/action/search/RestExplainAction.java | 2 -- docs/reference/api-conventions.asciidoc | 3 ++- .../modules/scripting/painless-debugging.asciidoc | 6 ++++-- .../elasticsearch/test/rest/yaml/ClientYamlTestClient.java | 7 +++++-- .../test/rest/yaml/ClientYamlTestExecutionContext.java | 1 + .../test/rest/yaml/ESClientYamlSuiteTestCase.java | 4 +++- 7 files changed, 16 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 4ba011e698e..4c6730d5b15 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -40,6 +40,7 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; public class RestUpdateSettingsAction extends BaseRestHandler { private static final Set VALUES_TO_EXCLUDE = unmodifiableSet(newHashSet( + "error_trace", "pretty", "timeout", "master_timeout", diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java index 95c429d19dc..7d31ed7cad5 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestExplainAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.rest.action.search; import org.apache.lucene.search.Explanation; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.client.node.NodeClient; @@ -28,7 +27,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.indices.query.IndicesQueriesRegistry; diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 0398b99f25e..ef0b8f37958 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -600,7 +600,8 @@ invalid `size` parameter to the `_search` API: POST /twitter/_search?size=surprise_me ---------------------------------------------------------------------- // CONSOLE -// TEST[catch:request] +// TEST[s/surprise_me/surprise_me&error_trace=false/ catch:request] +// Since the test system sends error_trace=true by default we have to override The response looks like: diff --git a/docs/reference/modules/scripting/painless-debugging.asciidoc b/docs/reference/modules/scripting/painless-debugging.asciidoc index 00fe16600fb..eafe291d259 100644 --- 
a/docs/reference/modules/scripting/painless-debugging.asciidoc +++ b/docs/reference/modules/scripting/painless-debugging.asciidoc @@ -33,7 +33,9 @@ POST /hockey/player/1/_explain } --------------------------------------------------------- // CONSOLE -// TEST[catch:/painless_explain_error/] +// TEST[s/_explain/_explain?error_trace=false/ catch:/painless_explain_error/] +// The test system sends error_trace=true by default for easier debugging so +// we have to override it to get a normal shaped response Which shows that the class of `doc.first` is `org.elasticsearch.index.fielddata.ScriptDocValues$Longs` by responding with: @@ -63,7 +65,7 @@ POST /hockey/player/1/_update } --------------------------------------------------------- // CONSOLE -// TEST[continued catch:/painless_explain_error/] +// TEST[continued s/_update/_update?error_trace=false/ catch:/painless_explain_error/] The response looks like: diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index ff829aafee8..7b216a148be 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -55,8 +55,11 @@ import java.util.Set; */ public class ClientYamlTestClient { private static final Logger logger = Loggers.getLogger(ClientYamlTestClient.class); - //query_string params that don't need to be declared in the spec, they are supported by default - private static final Set ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet("pretty", "source", "filter_path"); + /** + * Query params that don't need to be declared in the spec, they are supported by default. 
+ */ + private static final Set ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet( + "error_trace", "filter_path", "pretty", "source"); private final ClientYamlSuiteRestSpec restSpec; private final RestClient restClient; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 5bc380c3c2d..8215105887d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -55,6 +55,7 @@ public class ClientYamlTestExecutionContext { Map headers) throws IOException { //makes a copy of the parameters before modifying them for this specific request HashMap requestParams = new HashMap<>(params); + requestParams.putIfAbsent("error_trace", "true"); // By default ask for error traces, this may be overridden by params for (Map.Entry entry : requestParams.entrySet()) { if (stash.containsStashedValue(entry.getValue())) { entry.setValue(stash.getValue(entry.getValue()).toString()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index a9b61aae54f..24f977c2740 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -180,7 +180,9 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { @Override protected void afterIfFailed(List errors) { - logger.info("Stash dump on failure [{}]", XContentHelper.toString(restTestExecutionContext.stash())); + // Dump the stash on failure. Instead of dumping it in true json we escape `\n`s so stack traces are easier to read + logger.info("Stash dump on failure [{}]", + XContentHelper.toString(restTestExecutionContext.stash()).replace("\\n", "\n").replace("\\t", "\t")); super.afterIfFailed(errors); } From 43f71015a8d117265a54e01658302f9ad7903821 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 15 Dec 2016 21:02:09 -0500 Subject: [PATCH 14/41] Add skip for include segment file sizes REST tests This commit adds a skip for the include segment file sizes REST tests on nodes less than or equal to version 5.1.1 as the stats APIs did not correctly account for this parameter prior to version 5.1.2. 
Relates #21879 --- .../rest-api-spec/test/indices.stats/11_metric.yaml | 6 ++++++ .../rest-api-spec/test/nodes.stats/11_indices_metrics.yaml | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml index 0f373b7177c..e030a27165d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml @@ -123,6 +123,9 @@ setup: --- "Metric - _all include_segment_file_sizes": + - skip: + version: " - 5.1.1" + reason: including segment file sizes triggered an unrecognized parameter in <= 5.1.1 - do: indices.stats: { metric: _all, include_segment_file_sizes: true } @@ -145,6 +148,9 @@ setup: --- "Metric - segments include_segment_file_sizes": + - skip: + version: " - 5.1.1" + reason: including segment file sizes triggered an unrecognized parameter in <= 5.1.1 - do: indices.stats: { metric: segments, include_segment_file_sizes: true } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml index 998909dd9cf..c9046758b35 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yaml @@ -157,6 +157,9 @@ --- "Metric - _all include_segment_file_sizes": + - skip: + version: " - 5.1.1" + reason: including segment file sizes triggered an unrecognized parameter in <= 5.1.1 - do: cluster.state: {} @@ -184,6 +187,9 @@ --- "Metric - segments include_segment_file_sizes": + - skip: + version: " - 5.1.1" + reason: including segment file sizes triggered an unrecognized parameter in <= 5.1.1 - do: cluster.state: {} From 41ffb008ad285735f8bc3c9b97b23c6589a17fef Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 15 Dec 2016 23:20:03 -0500 Subject: [PATCH 15/41] Fix doc bug for cgroup cpuacct usage metric This commit fixes a silly doc bug where the field that represents the total CPU time consumed by all tasks in the same cgroup was mistakenly reported as "usage" instead of "usage_nanos". 
Relates #21029 --- docs/reference/cluster/nodes-stats.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 3cae4302922..46474c9a436 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -220,7 +220,7 @@ the operating system: The `cpuacct` control group to which the Elasticsearch process belongs -`os.cgroup.cpuacct.usage` (Linux only):: +`os.cgroup.cpuacct.usage_nanos` (Linux only):: The total CPU time (in nanoseconds) consumed by all tasks in the same cgroup as the Elasticsearch process From 6fe83fb52498b3bdfd902b14ca73b1fb220ecd68 Mon Sep 17 00:00:00 2001 From: Masaru Hasegawa Date: Fri, 16 Dec 2016 16:19:34 +0900 Subject: [PATCH 16/41] Don't print empty indices_boost --- .../search/builder/SearchSourceBuilder.java | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index f84740a8b09..f7ffcfea137 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -1171,13 +1171,15 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.field(SLICE.getPreferredName(), sliceBuilder); } - builder.startArray(INDICES_BOOST_FIELD.getPreferredName()); - for (IndexBoost ib : indexBoosts) { - builder.startObject(); - builder.field(ib.index, ib.boost); - builder.endObject(); + if (!indexBoosts.isEmpty()) { + builder.startArray(INDICES_BOOST_FIELD.getPreferredName()); + for (IndexBoost ib : indexBoosts) { + builder.startObject(); + builder.field(ib.index, ib.boost); + builder.endObject(); + } + builder.endArray(); } - builder.endArray(); if (aggregations != null) { builder.field(AGGREGATIONS_FIELD.getPreferredName(), aggregations); From 25b79cd46bd274a7dd142da8a384a80ff1616412 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 16 Dec 2016 09:03:19 +0100 Subject: [PATCH 17/41] Only notify handshake handler onClose if it can be successfully removed Depending on how the connection is closed, the `#onChannelClosed` callback might be invoked more than once, or the handler might already have been processed by the response of the handshake. This commit only notifies the handler if it was removed from the pending map. --- .../org/elasticsearch/transport/TcpTransport.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index c3ad1f5afe1..cd23fc9b074 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -1571,13 +1571,16 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i * Called once the channel is closed for instance due to a disconnect or a closed socket etc. 
*/ protected final void onChannelClosed(Channel channel) { - Optional> first = pendingHandshakes.entrySet().stream() - .filter((entry) -> entry.getValue().channel == channel).findFirst(); + final Optional first = pendingHandshakes.entrySet().stream() + .filter((entry) -> entry.getValue().channel == channel).map((e) -> e.getKey()).findFirst(); if(first.isPresent()) { - final Long requestId = first.get().getKey(); - HandshakeResponseHandler handler = first.get().getValue(); - pendingHandshakes.remove(requestId); - handler.handleException(new TransportException("connection reset")); + final Long requestId = first.get(); + final HandshakeResponseHandler handler = pendingHandshakes.remove(requestId); + if (handler != null) { + // there might be a race removing this or this method might be called twice concurrently depending on how + // the channel is closed ie. due to connection reset or broken pipes + handler.handleException(new TransportException("connection reset")); + } } } } From 0e18782d11b6641a42707e7498bedad8453ba6b6 Mon Sep 17 00:00:00 2001 From: Florian Hopf Date: Fri, 16 Dec 2016 12:38:51 +0100 Subject: [PATCH 18/41] Update bucket-script-aggregation.asciidoc (#22219) Example is missing "params." for painless --- .../aggregations/pipeline/bucket-script-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc index 6a010650e5c..74dee3f9ca5 100644 --- a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc @@ -18,7 +18,7 @@ A `bucket_script` aggregation looks like this in isolation: "my_var1": "the_sum", <1> "my_var2": "the_value_count" }, - "script": "my_var1 / my_var2" + "script": "params.my_var1 / params.my_var2" } } -------------------------------------------------- From 0c0353fc7d333a6f3eec3eca0706f3fe82f3f534 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 16 Dec 2016 14:25:17 +0100 Subject: [PATCH 19/41] [TEST] Add some testlogging --- .../transport/AbstractSimpleTransportTestCase.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 2beff7890b3..542dcfb8b8b 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Constants; @@ -30,6 +31,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -38,6 +40,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import 
org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -830,6 +833,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { assertTrue(inFlight.tryAcquire(Integer.MAX_VALUE, 10, TimeUnit.SECONDS)); } + @TestLogging("org.elasticsearch.transport:DEBUG") public void testTracerLog() throws InterruptedException { TransportRequestHandler handler = (request, channel) -> channel.sendResponse(new StringMessageResponse("")); TransportRequestHandler handlerWithError = new TransportRequestHandler() { @@ -943,7 +947,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public volatile boolean sawResponseReceived; public AtomicReference expectedEvents = new AtomicReference<>(); - + private final Logger logger = Loggers.getLogger(getClass()); @Override public void receivedRequest(long requestId, String action) { @@ -962,6 +966,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { @Override public void responseSent(long requestId, String action) { super.responseSent(requestId, action); + logger.debug("#### responseSent for action: {}", action); sawResponseSent = true; expectedEvents.get().countDown(); } From 152efe95e667edf546103958bfde2e2519abe38a Mon Sep 17 00:00:00 2001 From: Pablo Musa Date: Fri, 16 Dec 2016 14:50:06 +0100 Subject: [PATCH 20/41] Small typo fix in the docs. (#22180) There is a small typo in the convert processor code example. --- docs/reference/ingest/ingest-node.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 808fe549ae9..ce92e7c8e74 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -767,7 +767,7 @@ such a case, `target_field` will still be updated with the unconverted field val -------------------------------------------------- { "convert": { - "field" : "foo" + "field" : "foo", "type": "integer" } } From 14976555f2d37a2469638ef4aaa576b309129b6a Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 16 Dec 2016 15:25:41 +0100 Subject: [PATCH 21/41] Fix Link to virtual hosting of buckets Was using markdown style instead of asciidoc --- docs/plugins/repository-s3.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 9c4e13560c7..fcac3105930 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -262,7 +262,7 @@ The following settings are supported: `path_style_access`:: - Activate path style access for [virtual hosting of buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). + Activate path style access for http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html[virtual hosting of buckets]. The default behaviour is to detect which access style to use based on the configured endpoint (an IP will result in path-style access) and the bucket being accessed (some buckets are not valid DNS names). 
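For illustration, forcing path-style access when registering a repository might look like the following sketch with the Java client (repository and bucket names are invented, and the `repository-s3` plugin is assumed to be installed):

```java
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

class PathStyleRepositoryExample {
    // Force path-style access instead of relying on the endpoint/bucket
    // detection described above, e.g. for S3-compatible stores that expose
    // all buckets behind a single hostname.
    static void registerRepository(Client client) {
        client.admin().cluster().preparePutRepository("my_s3_repository")
            .setType("s3")
            .setSettings(Settings.builder()
                .put("bucket", "my-bucket")
                .put("path_style_access", true))
            .get();
    }
}
```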
From e32c7f1d72ff8eca8b1529c5bfc7a83aff8c6c9b Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 16 Dec 2016 16:45:56 +0100 Subject: [PATCH 22/41] Explain how to use bulk processor in a test context When using a bulk processor in tests, you might write something like: ```java BulkProcessor bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() { @Override public void beforeBulk(long executionId, BulkRequest request) {} @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {} @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) {} }) .setBulkActions(10000) .setFlushInterval(TimeValue.timeValueSeconds(10)) .build(); for (int i = 0; i < 10000; i++) { bulkProcessor.add(new IndexRequest("foo", "bar", "doc_" + i) .source(jsonBuilder().startObject().field("foo", "bar").endObject() )); } bulkProcessor.flush(); client.admin().indices().prepareRefresh("foo").get(); SearchResponse response = client.prepareSearch("foo").get(); // response does not contain any hits ``` The problem is that, by default, `BulkProcessor` sets the number of concurrent requests to 1, which means it uses an async BulkRequestHandler behind the scenes. When you call `flush()` in a test, you expect it to flush all the content of the bulk so you can search for your docs. But because of the async handling, there is a great chance that none of the documents has been indexed yet when you call the `refresh` method. We should advise in our Java guide to explicitly set concurrent requests to `0` so users will use the sync BulkRequestHandler behind the scenes. ```java BulkProcessor bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() { @Override public void beforeBulk(long executionId, BulkRequest request) {} @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {} @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) {} }) .setBulkActions(5000) .setFlushInterval(TimeValue.timeValueSeconds(10)) .setConcurrentRequests(0) .build(); ``` Closes #22158. --- docs/java-api/docs/bulk.asciidoc | 52 +++++++++++++++++++++++++++----- 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/docs/java-api/docs/bulk.asciidoc b/docs/java-api/docs/bulk.asciidoc index 288bd8415ab..07849164a68 100644 --- a/docs/java-api/docs/bulk.asciidoc +++ b/docs/java-api/docs/bulk.asciidoc @@ -87,13 +87,24 @@ BulkProcessor bulkProcessor = BulkProcessor.builder( <5> We want to execute the bulk every 10 000 requests <6> We want to flush the bulk every 5mb <7> We want to flush the bulk every 5 seconds whatever the number of requests -<8> Set the number of concurrent requests. A value of 0 means that only a single request will be allowed to be +<8> Set the number of concurrent requests. A value of 0 means that only a single request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed while accumulating new bulk requests. <9> Set a custom backoff policy which will initially wait for 100ms, increase exponentially and retries up to three times. A retry is attempted whenever one or more bulk item requests have failed with an `EsRejectedExecutionException` which indicates that there were too little compute resources available for processing the request. To disable backoff, pass `BackoffPolicy.noBackoff()`. 
+By default, `BulkProcessor`: + +* sets bulkActions to `1000` +* sets bulkSize to `5mb` +* does not set flushInterval +* sets concurrentRequests to 1, which means an asynchronous execution of the flush operation. +* sets backoffPolicy to an exponential backoff with 8 retries and a start delay of 50ms. The total wait time is roughly 5.1 seconds. + +[[java-docs-bulk-processor-requests]] +==== Add requests + Then you can simply add your requests to the `BulkProcessor`: [source,java] @@ -102,13 +113,8 @@ bulkProcessor.add(new IndexRequest("twitter", "tweet", "1").source(/* your doc h bulkProcessor.add(new DeleteRequest("twitter", "tweet", "2")); -------------------------------------------------- -By default, `BulkProcessor`: - -* sets bulkActions to `1000` -* sets bulkSize to `5mb` -* does not set flushInterval -* sets concurrentRequests to 1 -* sets backoffPolicy to an exponential backoff with 8 retries and a start delay of 50ms. The total wait time is roughly 5.1 seconds. +[[java-docs-bulk-processor-close]] +==== Closing the Bulk Processor When all documents are loaded to the `BulkProcessor` it can be closed by using `awaitClose` or `close` methods: @@ -129,3 +135,33 @@ Both methods flush any remaining documents and disable all other scheduled flush all bulk requests to complete then returns `true`, if the specified waiting time elapses before all bulk requests complete, `false` is returned. The `close` method doesn't wait for any remaining bulk requests to complete and exits immediately. +[[java-docs-bulk-processor-tests]] +==== Using Bulk Processor in tests + +If you are running tests with elasticsearch and are using the `BulkProcessor` to populate your dataset +you should set the number of concurrent requests to `0` so that the flush operation of the bulk +is executed synchronously: + +[source,java] +-------------------------------------------------- +BulkProcessor bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() { /* Listener methods */ }) + .setBulkActions(10000) + .setConcurrentRequests(0) + .build(); + +// Add your requests +bulkProcessor.add(/* Your requests */); + +// Flush any remaining requests +bulkProcessor.flush(); + +// Or close the bulkProcessor if you don't need it anymore +bulkProcessor.close(); + +// Refresh your indices +client.admin().indices().prepareRefresh().get(); + +// Now you can start searching! +client.prepareSearch().get(); +-------------------------------------------------- + From d44de0cecc0a94a40c06b233336272a5ae6e0f01 Mon Sep 17 00:00:00 2001 From: Areek Zillur Date: Fri, 16 Dec 2016 12:06:02 -0500 Subject: [PATCH 23/41] Remove deprecated _suggest endpoint (#22203) In #20305, the `_suggest` endpoint was deprecated in favour of the `_search` endpoint. This commit removes the dedicated _suggest endpoint entirely from master. 
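A hedged sketch of the replacement with the Java client, reusing the field and text from the yaml test below (the index and suggestion names are only for illustration):

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestBuilders;

class SuggestViaSearch {
    // The same term suggestion that used to go to /_suggest, now expressed
    // as the suggest section of a search request against /_search.
    static SearchResponse suggest(Client client) {
        return client.prepareSearch("test")
            .setSize(0) // we only want suggestions, not hits
            .suggest(new SuggestBuilder().addSuggestion("test_suggestion",
                SuggestBuilders.termSuggestion("body").text("The Amsterdma meetpu")))
            .get();
    }
}
```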
--- .../elasticsearch/action/ActionModule.java | 2 - .../rest/action/search/RestSuggestAction.java | 97 ------ .../elasticsearch/search/suggest/Suggest.java | 10 +- .../resources/rest-api-spec/api/suggest.json | 44 --- .../rest-api-spec/test/suggest/10_basic.yaml | 16 - .../test/suggest/110_completion.yaml | 314 ------------------ 6 files changed, 1 insertion(+), 482 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/rest/action/search/RestSuggestAction.java delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/suggest.json delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/suggest/110_completion.yaml diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 1d0f816c52e..a24ed5f8083 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -310,7 +310,6 @@ import org.elasticsearch.rest.action.search.RestExplainAction; import org.elasticsearch.rest.action.search.RestMultiSearchAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.rest.action.search.RestSearchScrollAction; -import org.elasticsearch.rest.action.search.RestSuggestAction; import org.elasticsearch.threadpool.ThreadPool; import static java.util.Collections.unmodifiableList; @@ -550,7 +549,6 @@ public class ActionModule extends AbstractModule { registerRestHandler(handlers, RestMultiGetAction.class); registerRestHandler(handlers, RestDeleteAction.class); registerRestHandler(handlers, org.elasticsearch.rest.action.document.RestCountAction.class); - registerRestHandler(handlers, RestSuggestAction.class); registerRestHandler(handlers, RestTermVectorsAction.class); registerRestHandler(handlers, RestMultiTermVectorsAction.class); registerRestHandler(handlers, RestBulkAction.class); diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSuggestAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSuggestAction.java deleted file mode 100644 index e1b4f945e89..00000000000 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSuggestAction.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.search; - -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.search.SearchRequestParsers; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.search.suggest.SuggestBuilder; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; - -public class RestSuggestAction extends BaseRestHandler { - - private final SearchRequestParsers searchRequestParsers; - - @Inject - public RestSuggestAction(Settings settings, RestController controller, - SearchRequestParsers searchRequestParsers) { - super(settings); - this.searchRequestParsers = searchRequestParsers; - controller.registerAsDeprecatedHandler(POST, "/_suggest", this, - "[POST /_suggest] is deprecated! Use [POST /_search] instead.", deprecationLogger); - controller.registerAsDeprecatedHandler(GET, "/_suggest", this, - "[GET /_suggest] is deprecated! Use [GET /_search] instead.", deprecationLogger); - controller.registerAsDeprecatedHandler(POST, "/{index}/_suggest", this, - "[POST /{index}/_suggest] is deprecated! Use [POST /{index}/_search] instead.", deprecationLogger); - controller.registerAsDeprecatedHandler(GET, "/{index}/_suggest", this, - "[GET /{index}/_suggest] is deprecated! 
Use [GET /{index}/_search] instead.", deprecationLogger); - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final SearchRequest searchRequest = new SearchRequest( - Strings.splitStringByCommaToArray(request.param("index")), new SearchSourceBuilder()); - searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); - try (XContentParser parser = request.contentOrSourceParamParser()) { - final QueryParseContext context = new QueryParseContext(searchRequestParsers.queryParsers, parser, parseFieldMatcher); - searchRequest.source().suggest(SuggestBuilder.fromXContent(context, searchRequestParsers.suggesters)); - } - searchRequest.routing(request.param("routing")); - searchRequest.preference(request.param("preference")); - return channel -> client.search(searchRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(SearchResponse response, XContentBuilder builder) throws Exception { - RestStatus restStatus = RestStatus.status(response.getSuccessfulShards(), - response.getTotalShards(), response.getShardFailures()); - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response.getTotalShards(), - response.getSuccessfulShards(), response.getFailedShards(), response.getShardFailures()); - Suggest suggest = response.getSuggest(); - if (suggest != null) { - suggest.toInnerXContent(builder, request); - } - builder.endObject(); - return new BytesRestResponse(restStatus, builder); - } - }); - } -} diff --git a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java index c40b1441000..fc372ee6b2d 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -150,18 +150,10 @@ public class Suggest implements IterableNAME object - */ - public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException { for (Suggestion suggestion : suggestions) { suggestion.toXContent(builder, params); } + builder.endObject(); return builder; } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/suggest.json b/rest-api-spec/src/main/resources/rest-api-spec/api/suggest.json deleted file mode 100644 index 72ed3aa6db1..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/suggest.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "suggest": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-suggesters.html", - "methods": ["POST"], - "url": { - "path": "/_suggest", - "paths": ["/_suggest", "/{index}/_suggest"], - "parts": { - "index": { - "type" : "list", - "description" : "A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices" - } - }, - "params": { - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "preference": { - "type" : "string", - "description" : "Specify the node or shard the operation should be performed on (default: random)" - }, - "routing": { - "type" : "string", - "description" : "Specific routing value" - } - } - }, - "body": { - "description" : "The request definition", - "required" : true - } - } -} \ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/10_basic.yaml index 8df87865aae..44ba197f9e0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/10_basic.yaml @@ -24,19 +24,3 @@ setup: - match: {suggest.test_suggestion.1.options.0.text: amsterdam} - match: {suggest.test_suggestion.2.options.0.text: meetup} ---- -"Suggest API should have deprecation warning": - - skip: - features: 'warnings' - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." - suggest: - body: - test_suggestion: - text: "The Amsterdma meetpu" - term: - field: body - - - match: {test_suggestion.1.options.0.text: amsterdam} - - match: {test_suggestion.2.options.0.text: meetup} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/110_completion.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/110_completion.yaml deleted file mode 100644 index dbc0b5381ad..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/110_completion.yaml +++ /dev/null @@ -1,314 +0,0 @@ -# This test creates one huge mapping in the setup -# Every test should use its own field to make sure it works - -setup: - - - do: - indices.create: - index: test - body: - mappings: - test: - "properties": - "suggest_1": - "type" : "completion" - "suggest_2": - "type" : "completion" - "suggest_3": - "type" : "completion" - "suggest_4": - "type" : "completion" - "suggest_5a": - "type" : "completion" - "suggest_5b": - "type" : "completion" - "suggest_6": - "type" : "completion" - title: - type: keyword - ---- -"Simple suggestion should work": - - skip: - features: 'warnings' - - - do: - index: - index: test - type: test - id: 1 - body: - suggest_1: "bar" - - - do: - index: - index: test - type: test - id: 2 - body: - suggest_1: "baz" - - - do: - indices.refresh: {} - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." - suggest: - body: - result: - text: "b" - completion: - field: suggest_1 - - - length: { result: 1 } - - length: { result.0.options: 2 } - ---- -"Simple suggestion array should work": - - skip: - features: 'warnings' - - - do: - index: - index: test - type: test - id: 1 - body: - suggest_2: ["bar", "foo"] - - - do: - indices.refresh: {} - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." - suggest: - body: - result: - text: "f" - completion: - field: suggest_2 - - - length: { result: 1 } - - length: { result.0.options: 1 } - - match: { result.0.options.0.text: "foo" } - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." 
- suggest: - body: - result: - text: "b" - completion: - field: suggest_2 - - - length: { result: 1 } - - length: { result.0.options: 1 } - - match: { result.0.options.0.text: "bar" } - ---- -"Suggestion entry should work": - - skip: - features: 'warnings' - - - do: - index: - index: test - type: test - id: 1 - body: - suggest_3: - input: "bar" - weight: 2 - - - do: - index: - index: test - type: test - id: 2 - body: - suggest_3: - input: "baz" - weight: 3 - - - do: - indices.refresh: {} - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." - suggest: - body: - result: - text: "b" - completion: - field: suggest_3 - - - length: { result: 1 } - - length: { result.0.options: 2 } - - match: { result.0.options.0.text: "baz" } - - match: { result.0.options.1.text: "bar" } - ---- -"Suggestion entry array should work": - - skip: - features: 'warnings' - - - do: - index: - index: test - type: test - id: 1 - body: - suggest_4: - - input: "bar" - weight: 3 - - input: "fo" - weight: 3 - - - do: - index: - index: test - type: test - id: 2 - body: - suggest_4: - - input: "baz" - weight: 2 - - input: "foo" - weight: 1 - - - do: - indices.refresh: {} - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." - suggest: - body: - result: - text: "b" - completion: - field: suggest_4 - - - length: { result: 1 } - - length: { result.0.options: 2 } - - match: { result.0.options.0.text: "bar" } - - match: { result.0.options.1.text: "baz" } - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." - suggest: - body: - result: - text: "f" - completion: - field: suggest_4 - - - length: { result: 1 } - - length: { result.0.options: 2 } - - match: { result.0.options.0.text: "fo" } - - match: { result.0.options.1.text: "foo" } - ---- -"Multiple Completion fields should work": - - skip: - features: 'warnings' - - - do: - index: - index: test - type: test - id: 1 - body: - suggest_5a: "bar" - suggest_5b: "baz" - - - do: - indices.refresh: {} - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." - suggest: - body: - result: - text: "b" - completion: - field: suggest_5a - - - length: { result: 1 } - - length: { result.0.options: 1 } - - match: { result.0.options.0.text: "bar" } - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." - suggest: - body: - result: - text: "b" - completion: - field: suggest_5b - - - length: { result: 1 } - - length: { result.0.options: 1 } - - match: { result.0.options.0.text: "baz" } - ---- -"Suggestions with source should work": - - skip: - features: 'warnings' - - - do: - index: - index: test - type: test - id: 1 - body: - suggest_6: - input: "bar" - weight: 2 - title: "title_bar" - count: 4 - - - do: - index: - index: test - type: test - id: 2 - body: - suggest_6: - input: "baz" - weight: 3 - title: "title_baz" - count: 3 - - - do: - indices.refresh: {} - - - do: - warnings: - - "[POST /_suggest] is deprecated! Use [POST /_search] instead." 
- suggest: - body: - result: - text: "b" - completion: - field: suggest_6 - - - length: { result: 1 } - - length: { result.0.options: 2 } - - match: { result.0.options.0.text: "baz" } - - match: { result.0.options.0._index: "test" } - - match: { result.0.options.0._type: "test" } - - match: { result.0.options.0._source.title: "title_baz" } - - match: { result.0.options.0._source.count: 3 } - - match: { result.0.options.1.text: "bar" } - - match: { result.0.options.1._index: "test" } - - match: { result.0.options.1._type: "test" } - - match: { result.0.options.1._source.title: "title_bar" } - - match: { result.0.options.1._source.count: 4 } From bb3716794632b4ac91d76434089415eb825f157a Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Fri, 16 Dec 2016 10:17:27 -0800 Subject: [PATCH 24/41] Enables the ability to inject serialized json fields into root of document. (#22179) The JSON processor has an optional field called "target_field". If you don't specify target_field, then target_field becomes what you specified as "field". There isn't any way to add the fields to the root of a document. By setting `add_to_root`, serialized fields will now be inserted into the top-level fields of the ingest document. Closes #21898. --- docs/reference/ingest/ingest-node.asciidoc | 1 + .../ingest/common/JsonProcessor.java | 33 ++++++++++++++++--- .../common/JsonProcessorFactoryTests.java | 25 ++++++++++++++ .../ingest/common/JsonProcessorTests.java | 29 ++++++++++++++-- 4 files changed, 81 insertions(+), 7 deletions(-) diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index ce92e7c8e74..3cdcfd5d2cd 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1473,6 +1473,7 @@ Converts a JSON string into a structured JSON object. | Name | Required | Default | Description | `field` | yes | - | The field to be parsed | `target_field` | no | `field` | The field to insert the converted structured object into +| `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen. |====== [source,js] diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java index 024c3aef941..cb734e7bef4 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java @@ -28,6 +28,8 @@ import org.elasticsearch.ingest.Processor; import java.util.Map; +import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException; + /** * Processor that serializes a string-valued field into a * map of maps.
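Before the diff continues, a minimal, self-contained Java sketch of what the new add_to_root branch does; a plain java.util.Map stands in for the ingest document, and a hard-coded map stands in for the JSON parser's output. The sample data mirrors the testAddToRoot case later in this patch.

import java.util.HashMap;
import java.util.Map;

public class AddToRootSketch {
    public static void main(String[] args) {
        // Document before the processor runs: field "a" holds a serialized
        // JSON string and "c" is an ordinary top-level field.
        Map<String, Object> document = new HashMap<>();
        document.put("a", "{\"a\": 1, \"b\": 2}");
        document.put("c", "see");

        // Stand-in for the map the JSON parser would produce for field "a".
        Map<String, Object> parsed = new HashMap<>();
        parsed.put("a", 1);
        parsed.put("b", 2);

        // add_to_root == true: copy every top-level key of the parsed JSON
        // onto the document itself instead of under a single target field.
        for (Map.Entry<String, Object> entry : parsed.entrySet()) {
            document.put(entry.getKey(), entry.getValue());
        }

        // The document now holds a=1, b=2, c=see: the string in "a" was
        // replaced by its parsed value and "b" was injected at the root.
        System.out.println(document);
    }
}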
@@ -38,11 +40,13 @@ public final class JsonProcessor extends AbstractProcessor { private final String field; private final String targetField; + private final boolean addToRoot; - JsonProcessor(String tag, String field, String targetField) { + JsonProcessor(String tag, String field, String targetField, boolean addToRoot) { super(tag); this.field = field; this.targetField = targetField; + this.addToRoot = addToRoot; } public String getField() { @@ -53,12 +57,22 @@ public final class JsonProcessor extends AbstractProcessor { return targetField; } + boolean isAddToRoot() { + return addToRoot; + } + @Override public void execute(IngestDocument document) throws Exception { String stringValue = document.getFieldValue(field, String.class); try { Map mapValue = JsonXContent.jsonXContent.createParser(stringValue).map(); - document.setFieldValue(targetField, mapValue); + if (addToRoot) { + for (Map.Entry entry : mapValue.entrySet()) { + document.setFieldValue(entry.getKey(), entry.getValue()); + } + } else { + document.setFieldValue(targetField, mapValue); + } } catch (JsonParseException e) { throw new IllegalArgumentException(e); } @@ -74,8 +88,19 @@ public final class JsonProcessor extends AbstractProcessor { public JsonProcessor create(Map registry, String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); - String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", field); - return new JsonProcessor(processorTag, field, targetField); + String targetField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "target_field"); + boolean addToRoot = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "add_to_root", false); + + if (addToRoot && targetField != null) { + throw newConfigurationException(TYPE, processorTag, "target_field", + "Cannot set a target field while also setting `add_to_root` to true"); + } + + if (targetField == null) { + targetField = field; + } + + return new JsonProcessor(processorTag, field, targetField, addToRoot); } } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java index 6b935b8795c..456b31f8720 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java @@ -48,6 +48,19 @@ public class JsonProcessorFactoryTests extends ESTestCase { assertThat(jsonProcessor.getTargetField(), equalTo(randomTargetField)); } + public void testCreateWithAddToRoot() throws Exception { + String processorTag = randomAsciiOfLength(10); + String randomField = randomAsciiOfLength(10); + Map config = new HashMap<>(); + config.put("field", randomField); + config.put("add_to_root", true); + JsonProcessor jsonProcessor = FACTORY.create(null, processorTag, config); + assertThat(jsonProcessor.getTag(), equalTo(processorTag)); + assertThat(jsonProcessor.getField(), equalTo(randomField)); + assertThat(jsonProcessor.getTargetField(), equalTo(randomField)); + assertTrue(jsonProcessor.isAddToRoot()); + } + public void testCreateWithDefaultTarget() throws Exception { String processorTag = randomAsciiOfLength(10); String randomField = randomAsciiOfLength(10); @@ -66,4 +79,16 @@ public class JsonProcessorFactoryTests extends 
ESTestCase { () -> FACTORY.create(null, processorTag, config)); assertThat(exception.getMessage(), equalTo("[field] required property is missing")); } + + public void testCreateWithBothTargetFieldAndAddToRoot() throws Exception { + String randomField = randomAsciiOfLength(10); + String randomTargetField = randomAsciiOfLength(5); + Map config = new HashMap<>(); + config.put("field", randomField); + config.put("target_field", randomTargetField); + config.put("add_to_root", true); + ElasticsearchException exception = expectThrows(ElasticsearchParseException.class, + () -> FACTORY.create(null, randomAsciiOfLength(10), config)); + assertThat(exception.getMessage(), equalTo("[target_field] Cannot set a target field while also setting `add_to_root` to true")); + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java index c62ebbb12ab..2b2c521417c 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java @@ -39,7 +39,7 @@ public class JsonProcessorTests extends ESTestCase { String processorTag = randomAsciiOfLength(3); String randomField = randomAsciiOfLength(3); String randomTargetField = randomAsciiOfLength(2); - JsonProcessor jsonProcessor = new JsonProcessor(processorTag, randomField, randomTargetField); + JsonProcessor jsonProcessor = new JsonProcessor(processorTag, randomField, randomTargetField, false); Map document = new HashMap<>(); Map randomJsonMap = RandomDocumentPicks.randomSource(random()); @@ -54,7 +54,7 @@ public class JsonProcessorTests extends ESTestCase { } public void testInvalidJson() { - JsonProcessor jsonProcessor = new JsonProcessor("tag", "field", "target_field"); + JsonProcessor jsonProcessor = new JsonProcessor("tag", "field", "target_field", false); Map document = new HashMap<>(); document.put("field", "invalid json"); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); @@ -66,11 +66,34 @@ public class JsonProcessorTests extends ESTestCase { } public void testFieldMissing() { - JsonProcessor jsonProcessor = new JsonProcessor("tag", "field", "target_field"); + JsonProcessor jsonProcessor = new JsonProcessor("tag", "field", "target_field", false); Map document = new HashMap<>(); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); Exception exception = expectThrows(IllegalArgumentException.class, () -> jsonProcessor.execute(ingestDocument)); assertThat(exception.getMessage(), equalTo("field [field] not present as part of path [field]")); } + + @SuppressWarnings("unchecked") + public void testAddToRoot() throws Exception { + String processorTag = randomAsciiOfLength(3); + String randomTargetField = randomAsciiOfLength(2); + JsonProcessor jsonProcessor = new JsonProcessor(processorTag, "a", randomTargetField, true); + Map document = new HashMap<>(); + + String json = "{\"a\": 1, \"b\": 2}"; + document.put("a", json); + document.put("c", "see"); + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + jsonProcessor.execute(ingestDocument); + + Map expected = new HashMap<>(); + expected.put("a", 1); + expected.put("b", 2); + expected.put("c", "see"); + IngestDocument expectedIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), expected); + + 
assertIngestDocument(ingestDocument, expectedIngestDocument); + } } From 2265be69d244b04b9c36dde6c6f66aa295691c67 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Fri, 16 Dec 2016 19:33:12 +0100 Subject: [PATCH 25/41] Deprecate XContentType auto detection methods in XContentFactory (#22181) With recent changes to our parsing code we have drastically reduced the places where we auto-detect the content type from the input. The usage of these methods spread in our codebase for no reason, given that in most cases we know the content type upfront and we don't need any auto-detection mechanism. Deprecating these methods is a way to try to make sure that these methods are carefully used, and hopefully not introduced in newly written code. We have yet to fix the REST layer to read the Content-Type header, which is the long-term solution, but for now we just want to make sure that the usage of these methods doesn't spread any further. Relates to #19388 --- .../common/xcontent/XContentFactory.java | 53 +++++++++++++++++-- 1 file changed, 50 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java index a5350e3c662..dc9d1c493a3 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java @@ -141,7 +141,12 @@ public class XContentFactory { /** * Guesses the content type based on the provided char sequence. + * + * @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. */ + @Deprecated public static XContentType xContentType(CharSequence content) { int length = content.length() < GUESS_HEADER_LENGTH ? content.length() : GUESS_HEADER_LENGTH; if (length == 0) { @@ -174,8 +179,13 @@ public class XContentFactory { } /** - * Guesses the content (type) based on the provided char sequence. + * Guesses the content (type) based on the provided char sequence and returns the corresponding {@link XContent} + * + * @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. */ + @Deprecated public static XContent xContent(CharSequence content) { XContentType type = xContentType(content); if (type == null) { @@ -185,15 +195,24 @@ public class XContentFactory { } /** - * Guesses the content type based on the provided bytes. + * Guesses the content type based on the provided bytes and returns the corresponding {@link XContent} + * + * @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons.
*/ + @Deprecated public static XContent xContent(byte[] data) { return xContent(data, 0, data.length); } /** - * Guesses the content type based on the provided bytes. + * Guesses the content type based on the provided bytes and returns the corresponding {@link XContent} + * + * @deprecated ideally, guessing the content type should not be needed. We should rather know the content type upfront or read it + * from headers. Until we fix the REST layer to read the Content-Type header, that should be the only place where guessing is needed. */ + @Deprecated public static XContent xContent(byte[] data, int offset, int length) { XContentType type = xContentType(data, offset, length); if (type == null) { @@ -204,14 +223,24 @@ public class XContentFactory { /** * Guesses the content type based on the provided bytes. + * + * @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. */ + @Deprecated public static XContentType xContentType(byte[] data) { return xContentType(data, 0, data.length); } /** * Guesses the content type based on the provided input stream without consuming it. + * + * @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. */ + @Deprecated public static XContentType xContentType(InputStream si) throws IOException { if (si.markSupported() == false) { throw new IllegalArgumentException("Cannot guess the xcontent type without mark/reset support on " + si.getClass()); } @@ -228,11 +257,24 @@ public class XContentFactory { /** * Guesses the content type based on the provided bytes. + * + * @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. */ + @Deprecated public static XContentType xContentType(byte[] data, int offset, int length) { return xContentType(new BytesArray(data, offset, length)); } + /** + * Guesses the content type based on the provided bytes and returns the corresponding {@link XContent} + * + * @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. + */ + @Deprecated public static XContent xContent(BytesReference bytes) { XContentType type = xContentType(bytes); if (type == null) { @@ -243,7 +285,12 @@ public class XContentFactory { /** * Guesses the content type based on the provided bytes. + * + * @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
+ * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. */ + @Deprecated public static XContentType xContentType(BytesReference bytes) { int length = bytes.length(); if (length == 0) { From 30806af6bdde6ef0cf26a633b822de769161d2c0 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 16 Dec 2016 18:20:11 -0500 Subject: [PATCH 26/41] Rename bootstrap.seccomp to bootstrap.system_call_filter We try to install a system call filter on various operating systems (Linux, macOS, BSD, Solaris, and Windows) but the setting (bootstrap.seccomp) to control this is named after the Linux implementation (seccomp). This commit replaces this setting with bootstrap.system_call_filter. For backwards compatibility reasons, we fall back to bootstrap.seccomp and log a deprecation message if bootstrap.seccomp is set. We intend to remove this fallback in 6.0.0. Note that now is the time to make this change: it's likely that most users are not setting this anyway, as prior to version 5.2.0 (currently unreleased) it was not necessary to configure anything for a node to start up if the system call filter failed to install (we marched on anyway), but starting in 5.2.0 it will be necessary in this case. Relates #22226 --- .../main/java/org/elasticsearch/bootstrap/Bootstrap.java | 6 +++++- .../java/org/elasticsearch/bootstrap/BootstrapChecks.java | 6 +++--- .../java/org/elasticsearch/bootstrap/BootstrapSettings.java | 4 ++-- .../org/elasticsearch/common/settings/ClusterSettings.java | 2 +- .../org/elasticsearch/bootstrap/BootstrapCheckTests.java | 6 +++--- .../org/elasticsearch/bootstrap/BootstrapSettingsTests.java | 2 +- docs/reference/setup/bootstrap-checks.asciidoc | 2 +- 7 files changed, 16 insertions(+), 12 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index aadb0c3593a..b440ece38cd 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -30,11 +30,13 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.PidFile; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.inject.CreationException; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.logging.Loggers; @@ -56,7 +58,9 @@ import java.net.URISyntaxException; import java.nio.file.Path; import java.security.NoSuchAlgorithmException; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.concurrent.CountDownLatch; /** @@ -177,7 +181,7 @@ final class Bootstrap { initializeNatives( environment.tmpFile(), BootstrapSettings.MEMORY_LOCK_SETTING.get(settings), - BootstrapSettings.SECCOMP_SETTING.get(settings), + BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings), BootstrapSettings.CTRLHANDLER_SETTING.get(settings)); // initialize probes before the security manager is installed
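As a side note on the backwards-compatibility behavior described in the commit message, here is a rough plain-Java sketch of the fall-back-with-deprecation-warning pattern; the helper, its setting map, and the log line are hypothetical stand-ins, not the Setting infrastructure this patch actually modifies.

import java.util.Collections;
import java.util.Map;

public class RenamedSettingSketch {
    // Prefer the new key, fall back to the deprecated one with a warning,
    // and default to true (the filter is enabled by default).
    static boolean systemCallFilterEnabled(Map<String, String> settings) {
        if (settings.containsKey("bootstrap.system_call_filter")) {
            return Boolean.parseBoolean(settings.get("bootstrap.system_call_filter"));
        }
        if (settings.containsKey("bootstrap.seccomp")) {
            System.err.println("[deprecation] [bootstrap.seccomp] is deprecated," +
                    " use [bootstrap.system_call_filter] instead");
            return Boolean.parseBoolean(settings.get("bootstrap.seccomp"));
        }
        return true;
    }

    public static void main(String[] args) {
        // Old-style configuration still takes effect, but warns.
        System.out.println(systemCallFilterEnabled(Collections.singletonMap("bootstrap.seccomp", "false")));
    }
}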
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index bd62db6a9d8..930c6afdc90 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -166,7 +166,7 @@ final class BootstrapChecks { } checks.add(new ClientJvmCheck()); checks.add(new UseSerialGCCheck()); - checks.add(new SystemCallFilterCheck(BootstrapSettings.SECCOMP_SETTING.get(settings))); + checks.add(new SystemCallFilterCheck(BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings))); checks.add(new OnErrorCheck()); checks.add(new OnOutOfMemoryErrorCheck()); checks.add(new G1GCCheck()); @@ -521,7 +521,7 @@ final class BootstrapChecks { "OnError [%s] requires forking but is prevented by system call filters ([%s=true]);" + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError", onError(), - BootstrapSettings.SECCOMP_SETTING.getKey()); + BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.getKey()); } } @@ -546,7 +546,7 @@ final class BootstrapChecks { "OnOutOfMemoryError [%s] requires forking but is prevented by system call filters ([%s=true]);" + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError", onOutOfMemoryError(), - BootstrapSettings.SECCOMP_SETTING.getKey()); + BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.getKey()); } } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java index e8015d83af3..fce50ef2e6f 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java @@ -33,8 +33,8 @@ public final class BootstrapSettings { public static final Setting MEMORY_LOCK_SETTING = Setting.boolSetting("bootstrap.memory_lock", false, Property.NodeScope); - public static final Setting SECCOMP_SETTING = - Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope); + public static final Setting SYSTEM_CALL_FILTER_SETTING = + Setting.boolSetting("bootstrap.system_call_filter", true, Property.NodeScope); public static final Setting CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope); diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index e00b4bc44f1..c9106b8cdba 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -390,7 +390,7 @@ public final class ClusterSettings extends AbstractScopedSettings { PluginsService.MANDATORY_SETTING, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING, BootstrapSettings.MEMORY_LOCK_SETTING, - BootstrapSettings.SECCOMP_SETTING, + BootstrapSettings.SYSTEM_CALL_FILTER_SETTING, BootstrapSettings.CTRLHANDLER_SETTING, IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING, IndexingMemoryController.MIN_INDEX_BUFFER_SIZE_SETTING, diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java index 95d5afcb402..c1f97438894 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java @@ -492,8 +492,8 @@ public class BootstrapCheckTests extends 
ESTestCase { e -> assertThat( e.getMessage(), containsString( - "OnError [" + command + "] requires forking but is prevented by system call filters ([bootstrap.seccomp=true]);" - + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError"))); + "OnError [" + command + "] requires forking but is prevented by system call filters " + + "([bootstrap.system_call_filter=true]); upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError"))); } public void testOnOutOfMemoryErrorCheck() throws NodeValidationException { @@ -521,7 +521,7 @@ public class BootstrapCheckTests extends ESTestCase { e.getMessage(), containsString( "OnOutOfMemoryError [" + command + "]" - + " requires forking but is prevented by system call filters ([bootstrap.seccomp=true]);" + + " requires forking but is prevented by system call filters ([bootstrap.system_call_filter=true]);" + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError"))); } diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapSettingsTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapSettingsTests.java index 128c82e9533..fb3d3628dd1 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapSettingsTests.java @@ -27,7 +27,7 @@ public class BootstrapSettingsTests extends ESTestCase { public void testDefaultSettings() { assertTrue(BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(Settings.EMPTY)); assertFalse(BootstrapSettings.MEMORY_LOCK_SETTING.get(Settings.EMPTY)); - assertTrue(BootstrapSettings.SECCOMP_SETTING.get(Settings.EMPTY)); + assertTrue(BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(Settings.EMPTY)); assertTrue(BootstrapSettings.CTRLHANDLER_SETTING.get(Settings.EMPTY)); } diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index 834cb7b2f43..f766e2dacb2 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -156,7 +156,7 @@ The system call filter check ensures that if system call filters are enabled, then they were successfully installed. To pass the system call filter check you must either fix any configuration errors on your system that prevented system call filters from installing (check your logs), or *at your own risk* disable -system call filters by setting `bootstrap.seccomp` to `false`. +system call filters by setting `bootstrap.system_call_filter` to `false`. === OnError and OnOutOfMemoryError checks From f7d43132b2d050e795508e9218e68229843ff386 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 16 Dec 2016 18:30:19 -0500 Subject: [PATCH 27/41] Refer to system call filter instead of seccomp Today in the codebase we refer to seccomp everywhere instead of system call filter even if we are not specifically referring to Linux. This commit is a purely mechanical change to refer to system call filter where appropriate instead of the general seccomp, and only leaves seccomp in place when actually referring to the Linux implementation. 
Relates #22243 --- .../elasticsearch/bootstrap/Bootstrap.java | 8 +-- .../bootstrap/BootstrapChecks.java | 12 ++-- .../bootstrap/BootstrapInfo.java | 20 +++---- .../elasticsearch/bootstrap/JNANatives.java | 14 ++--- .../org/elasticsearch/bootstrap/Natives.java | 10 ++-- .../org/elasticsearch/bootstrap/Spawner.java | 2 +- .../{Seccomp.java => SystemCallFilter.java} | 7 +-- .../bootstrap/BootstrapCheckTests.java | 57 +++++++++---------- .../elasticsearch/bootstrap/SpawnerTests.java | 3 +- ...pTests.java => SystemCallFilterTests.java} | 20 +++---- .../bootstrap/SpawnerNoBootstrapTests.java | 5 +- 11 files changed, 78 insertions(+), 80 deletions(-) rename core/src/main/java/org/elasticsearch/bootstrap/{Seccomp.java => SystemCallFilter.java} (99%) rename qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/{SeccompTests.java => SystemCallFilterTests.java} (89%) diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index b440ece38cd..7cac8415e6e 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -97,7 +97,7 @@ final class Bootstrap { } /** initialize native resources */ - public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) { + public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean systemCallFilter, boolean ctrlHandler) { final Logger logger = Loggers.getLogger(Bootstrap.class); // check if the user is running as root, and bail @@ -105,9 +105,9 @@ final class Bootstrap { throw new RuntimeException("can not run elasticsearch as root"); } - // enable secure computing mode - if (seccomp) { - Natives.trySeccomp(tmpFile); + // enable system call filter + if (systemCallFilter) { + Natives.tryInstallSystemCallFilter(tmpFile); } // mlockall if requested diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 930c6afdc90..cb1b93aef39 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -463,12 +463,12 @@ final class BootstrapChecks { @Override public boolean check() { - return areSystemCallFiltersEnabled && !isSeccompInstalled(); + return areSystemCallFiltersEnabled && !isSystemCallFilterInstalled(); } // visible for testing - boolean isSeccompInstalled() { - return Natives.isSeccompInstalled(); + boolean isSystemCallFilterInstalled() { + return Natives.isSystemCallFilterInstalled(); } @Override @@ -483,12 +483,12 @@ final class BootstrapChecks { @Override public boolean check() { - return isSeccompInstalled() && mightFork(); + return isSystemCallFilterInstalled() && mightFork(); } // visible for testing - boolean isSeccompInstalled() { - return Natives.isSeccompInstalled(); + boolean isSystemCallFilterInstalled() { + return Natives.isSystemCallFilterInstalled(); } // visible for testing diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java index 791836bf8a4..3ff6639de9a 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java @@ -24,16 +24,16 @@ import org.elasticsearch.common.SuppressForbidden; import java.util.Dictionary; import java.util.Enumeration; -/** - * 
Exposes system startup information +/** + * Exposes system startup information */ @SuppressForbidden(reason = "exposes read-only view of system properties") public final class BootstrapInfo { /** no instantiation */ private BootstrapInfo() {} - - /** + + /** * Returns true if we successfully loaded native libraries. *
<p>
* If this returns false, then native operations such as locking @@ -42,19 +42,19 @@ public final class BootstrapInfo { public static boolean isNativesAvailable() { return Natives.JNA_AVAILABLE; } - - /** + + /** * Returns true if we were able to lock the process's address space. */ public static boolean isMemoryLocked() { return Natives.isMemoryLocked(); } - + /** - * Returns true if secure computing mode is enabled (supported systems only) + * Returns true if system call filter is installed (supported systems only) */ - public static boolean isSeccompInstalled() { - return Natives.isSeccompInstalled(); + public static boolean isSystemCallFilterInstalled() { + return Natives.isSystemCallFilterInstalled(); } /** diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index 5f3e357ff5f..d4e11af71ac 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -43,11 +43,11 @@ class JNANatives { // Set to true, in case native mlockall call was successful static boolean LOCAL_MLOCKALL = false; - // Set to true, in case native seccomp call was successful - static boolean LOCAL_SECCOMP = false; + // Set to true, in case native system call filter install was successful + static boolean LOCAL_SYSTEM_CALL_FILTER = false; // Set to true, in case policy can be applied to all threads of the process (even existing ones) // otherwise they are only inherited for new threads (ES app threads) - static boolean LOCAL_SECCOMP_ALL = false; + static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false; // set to the maximum number of threads that can be created for // the user ID that owns the running Elasticsearch process static long MAX_NUMBER_OF_THREADS = -1; @@ -210,12 +210,12 @@ class JNANatives { } } - static void trySeccomp(Path tmpFile) { + static void tryInstallSystemCallFilter(Path tmpFile) { try { - int ret = Seccomp.init(tmpFile); - LOCAL_SECCOMP = true; + int ret = SystemCallFilter.init(tmpFile); + LOCAL_SYSTEM_CALL_FILTER = true; if (ret == 1) { - LOCAL_SECCOMP_ALL = true; + LOCAL_SYSTEM_CALL_FILTER_ALL = true; } } catch (Exception e) { // this is likely to happen unless the kernel is newish, its a best effort at the moment diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Natives.java b/core/src/main/java/org/elasticsearch/bootstrap/Natives.java index 9fad34e329f..ad6ec985ca1 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Natives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Natives.java @@ -91,12 +91,12 @@ final class Natives { return JNANatives.LOCAL_MLOCKALL; } - static void trySeccomp(Path tmpFile) { + static void tryInstallSystemCallFilter(Path tmpFile) { if (!JNA_AVAILABLE) { - logger.warn("cannot install syscall filters because JNA is not available"); + logger.warn("cannot install system call filter because JNA is not available"); return; } - JNANatives.trySeccomp(tmpFile); + JNANatives.tryInstallSystemCallFilter(tmpFile); } static void trySetMaxNumberOfThreads() { @@ -115,10 +115,10 @@ final class Natives { JNANatives.trySetMaxSizeVirtualMemory(); } - static boolean isSeccompInstalled() { + static boolean isSystemCallFilterInstalled() { if (!JNA_AVAILABLE) { return false; } - return JNANatives.LOCAL_SECCOMP; + return JNANatives.LOCAL_SYSTEM_CALL_FILTER; } } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java b/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java 
index a518f32bb40..44cf2d2b0aa 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java @@ -34,7 +34,7 @@ import java.util.List; import java.util.Locale; /** - * Spawns native plugin controller processes if present. Will only work prior to seccomp being set up. + * Spawns native plugin controller processes if present. Will only work prior to a system call filter being installed. */ final class Spawner implements Closeable { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java b/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java similarity index 99% rename from core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java rename to core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java index a510e964b7e..38951c510db 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java @@ -43,8 +43,7 @@ import java.util.List; import java.util.Map; /** - * Installs a limited form of secure computing mode, - * to filters system calls to block process execution. + * Installs a system call filter to block process execution. *
<p>
* This is supported on Linux, Solaris, FreeBSD, OpenBSD, Mac OS X, and Windows. *
<p>
@@ -91,8 +90,8 @@ import java.util.Map; * https://docs.oracle.com/cd/E23824_01/html/821-1456/prbac-2.html */ // not an example of how to write code!!! -final class Seccomp { - private static final Logger logger = Loggers.getLogger(Seccomp.class); +final class SystemCallFilter { + private static final Logger logger = Loggers.getLogger(SystemCallFilter.class); // Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java index c1f97438894..bc810254920 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java @@ -409,11 +409,11 @@ public class BootstrapCheckTests extends ESTestCase { } public void testSystemCallFilterCheck() throws NodeValidationException { - final AtomicBoolean isSecompInstalled = new AtomicBoolean(); + final AtomicBoolean isSystemCallFilterInstalled = new AtomicBoolean(); final BootstrapChecks.SystemCallFilterCheck systemCallFilterEnabledCheck = new BootstrapChecks.SystemCallFilterCheck(true) { @Override - boolean isSeccompInstalled() { - return isSecompInstalled.get(); + boolean isSystemCallFilterInstalled() { + return isSystemCallFilterInstalled.get(); } }; @@ -425,28 +425,28 @@ public class BootstrapCheckTests extends ESTestCase { containsString("system call filters failed to install; " + "check the logs and fix your configuration or disable system call filters at your own risk")); - isSecompInstalled.set(true); + isSystemCallFilterInstalled.set(true); BootstrapChecks.check(true, Collections.singletonList(systemCallFilterEnabledCheck), "testSystemCallFilterCheck"); final BootstrapChecks.SystemCallFilterCheck systemCallFilterNotEnabledCheck = new BootstrapChecks.SystemCallFilterCheck(false) { @Override - boolean isSeccompInstalled() { - return isSecompInstalled.get(); + boolean isSystemCallFilterInstalled() { + return isSystemCallFilterInstalled.get(); } }; - isSecompInstalled.set(false); + isSystemCallFilterInstalled.set(false); BootstrapChecks.check(true, Collections.singletonList(systemCallFilterNotEnabledCheck), "testSystemCallFilterCheck"); - isSecompInstalled.set(true); + isSystemCallFilterInstalled.set(true); BootstrapChecks.check(true, Collections.singletonList(systemCallFilterNotEnabledCheck), "testSystemCallFilterCheck"); } public void testMightForkCheck() throws NodeValidationException { - final AtomicBoolean isSeccompInstalled = new AtomicBoolean(); + final AtomicBoolean isSystemCallFilterInstalled = new AtomicBoolean(); final AtomicBoolean mightFork = new AtomicBoolean(); final BootstrapChecks.MightForkCheck check = new BootstrapChecks.MightForkCheck() { @Override - boolean isSeccompInstalled() { - return isSeccompInstalled.get(); + boolean isSystemCallFilterInstalled() { + return isSystemCallFilterInstalled.get(); } @Override @@ -462,19 +462,19 @@ public class BootstrapCheckTests extends ESTestCase { runMightForkTest( check, - isSeccompInstalled, + isSystemCallFilterInstalled, () -> mightFork.set(false), () -> mightFork.set(true), e -> assertThat(e.getMessage(), containsString("error"))); } public void testOnErrorCheck() throws NodeValidationException { - final AtomicBoolean isSeccompInstalled = new AtomicBoolean(); + final AtomicBoolean isSystemCallFilterInstalled = new AtomicBoolean(); final AtomicReference onError = new AtomicReference<>(); final BootstrapChecks.MightForkCheck check 
= new BootstrapChecks.OnErrorCheck() { @Override - boolean isSeccompInstalled() { - return isSeccompInstalled.get(); + boolean isSystemCallFilterInstalled() { + return isSystemCallFilterInstalled.get(); } @Override @@ -486,7 +486,7 @@ public class BootstrapCheckTests extends ESTestCase { final String command = randomAsciiOfLength(16); runMightForkTest( check, - isSeccompInstalled, + isSystemCallFilterInstalled, () -> onError.set(randomBoolean() ? "" : null), () -> onError.set(command), e -> assertThat( @@ -497,12 +497,12 @@ public class BootstrapCheckTests extends ESTestCase { } public void testOnOutOfMemoryErrorCheck() throws NodeValidationException { - final AtomicBoolean isSeccompInstalled = new AtomicBoolean(); + final AtomicBoolean isSystemCallFilterInstalled = new AtomicBoolean(); final AtomicReference onOutOfMemoryError = new AtomicReference<>(); final BootstrapChecks.MightForkCheck check = new BootstrapChecks.OnOutOfMemoryErrorCheck() { @Override - boolean isSeccompInstalled() { - return isSeccompInstalled.get(); + boolean isSystemCallFilterInstalled() { + return isSystemCallFilterInstalled.get(); } @Override @@ -514,7 +514,7 @@ public class BootstrapCheckTests extends ESTestCase { final String command = randomAsciiOfLength(16); runMightForkTest( check, - isSeccompInstalled, + isSystemCallFilterInstalled, () -> onOutOfMemoryError.set(randomBoolean() ? "" : null), () -> onOutOfMemoryError.set(command), e -> assertThat( @@ -527,15 +527,15 @@ public class BootstrapCheckTests extends ESTestCase { private void runMightForkTest( final BootstrapChecks.MightForkCheck check, - final AtomicBoolean isSeccompInstalled, + final AtomicBoolean isSystemCallFilterInstalled, final Runnable disableMightFork, final Runnable enableMightFork, final Consumer consumer) throws NodeValidationException { final String methodName = Thread.currentThread().getStackTrace()[2].getMethodName(); - // if seccomp is disabled, nothing should happen - isSeccompInstalled.set(false); + // if system call filter is disabled, nothing should happen + isSystemCallFilterInstalled.set(false); if (randomBoolean()) { disableMightFork.run(); } else { @@ -543,16 +543,15 @@ public class BootstrapCheckTests extends ESTestCase { } BootstrapChecks.check(true, Collections.singletonList(check), methodName); - // if seccomp is enabled, but we will not fork, nothing should + // if system call filter is enabled, but we will not fork, nothing should // happen - isSeccompInstalled.set(true); + isSystemCallFilterInstalled.set(true); disableMightFork.run(); BootstrapChecks.check(true, Collections.singletonList(check), methodName); - // if seccomp is enabled, and we might fork, the check should - // be enforced, regardless of bootstrap checks being enabled or - // not - isSeccompInstalled.set(true); + // if system call filter is enabled, and we might fork, the check should be enforced, regardless of bootstrap checks being enabled + // or not + isSystemCallFilterInstalled.set(true); enableMightFork.run(); final NodeValidationException e = expectThrows( diff --git a/core/src/test/java/org/elasticsearch/bootstrap/SpawnerTests.java b/core/src/test/java/org/elasticsearch/bootstrap/SpawnerTests.java index 680bd4f5562..58c112ba96d 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/SpawnerTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/SpawnerTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.Locale; /** - * Doesn't actually test spawning a process, as seccomp is installed before tests run and 
forbids it. + * Doesn't actually test spawning a process, as a system call filter is installed before tests run and forbids it. */ public class SpawnerTests extends ESTestCase { @@ -48,4 +48,5 @@ public class SpawnerTests extends ESTestCase { assertEquals("windows-x86_64", Spawner.makePlatformName("Windows 8.1", "amd64")); assertEquals("sunos-x86_64", Spawner.makePlatformName("SunOS", "amd64")); } + } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SeccompTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java similarity index 89% rename from qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SeccompTests.java rename to qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java index d028dfd573a..244f98356d9 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SeccompTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SystemCallFilterTests.java @@ -22,30 +22,30 @@ package org.elasticsearch.bootstrap; import org.apache.lucene.util.Constants; import org.elasticsearch.test.ESTestCase; -/** Simple tests seccomp filter is working. */ -public class SeccompTests extends ESTestCase { - +/** Simple tests system call filter is working. */ +public class SystemCallFilterTests extends ESTestCase { + /** command to try to run in tests */ static final String EXECUTABLE = Constants.WINDOWS ? "calc" : "ls"; @Override public void setUp() throws Exception { super.setUp(); - assumeTrue("requires seccomp filter installation", Natives.isSeccompInstalled()); + assumeTrue("requires system call filter installation", Natives.isSystemCallFilterInstalled()); // otherwise security manager will block the execution, no fun assumeTrue("cannot test with security manager enabled", System.getSecurityManager() == null); // otherwise, since we don't have TSYNC support, rules are not applied to the test thread // (randomizedrunner class initialization happens in its own thread, after the test thread is created) // instead we just forcefully run it for the test thread here. - if (!JNANatives.LOCAL_SECCOMP_ALL) { + if (!JNANatives.LOCAL_SYSTEM_CALL_FILTER_ALL) { try { - Seccomp.init(createTempDir()); + SystemCallFilter.init(createTempDir()); } catch (Exception e) { - throw new RuntimeException("unable to forcefully apply seccomp to test thread", e); + throw new RuntimeException("unable to forcefully apply system call filter to test thread", e); } } } - + public void testNoExecution() throws Exception { try { Runtime.getRuntime().exec(EXECUTABLE); @@ -63,11 +63,11 @@ public class SeccompTests extends ESTestCase { at java.lang.UNIXProcess.(UNIXProcess.java:248) at java.lang.ProcessImpl.start(ProcessImpl.java:134) at java.lang.ProcessBuilder.start(ProcessBuilder.java:1029) - ... + ... 
*/ } } - + // make sure thread inherits this too (its documented that way) public void testNoExecutionFromThread() throws Exception { Thread t = new Thread() { diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index d1556d02758..743d2408b9d 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -39,9 +39,8 @@ import java.util.concurrent.TimeUnit; /** * Create a simple "daemon controller", put it in the right place and check that it runs. * - * Extends LuceneTestCase rather than ESTestCase as ESTestCase installs seccomp, and that - * prevents the Spawner class doing its job. Also needs to run in a separate JVM to other - * tests that extend ESTestCase for the same reason. + * Extends LuceneTestCase rather than ESTestCase as ESTestCase installs a system call filter, and that prevents the Spawner class doing its + * job. Also needs to run in a separate JVM to other tests that extend ESTestCase for the same reason. */ public class SpawnerNoBootstrapTests extends LuceneTestCase { From 9e5cedae2385750e3f01b85e9bc83a25a865195c Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 16 Dec 2016 22:18:56 -0800 Subject: [PATCH 28/41] Fix line lengths in renamed seccomp file --- .../resources/checkstyle_suppressions.xml | 1 - .../bootstrap/SystemCallFilter.java | 27 ++++++++++++------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 4bbd52affdd..eba6dbfc819 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -216,7 +216,6 @@ - diff --git a/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java b/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java index 38951c510db..e6c5b2e6dd1 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/SystemCallFilter.java @@ -268,7 +268,8 @@ final class SystemCallFilter { // we couldn't link methods, could be some really ancient kernel (e.g. < 2.1.57) or some bug if (linux_libc == null) { - throw new UnsupportedOperationException("seccomp unavailable: could not link methods. requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in"); + throw new UnsupportedOperationException("seccomp unavailable: could not link methods. 
requires kernel 3.5+ " + + "with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in"); } // pure paranoia: @@ -318,7 +319,8 @@ final class SystemCallFilter { switch (errno) { case ENOSYS: break; // ok case EINVAL: break; // ok - default: throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + JNACLibrary.strerror(errno)); + default: throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + + JNACLibrary.strerror(errno)); } } @@ -345,7 +347,8 @@ final class SystemCallFilter { int errno = Native.getLastError(); if (errno == EINVAL) { // friendly error, this will be the typical case for an old kernel - throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in"); + throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with" + + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in"); } else { throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno)); } @@ -357,7 +360,8 @@ final class SystemCallFilter { default: int errno = Native.getLastError(); if (errno == EINVAL) { - throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed"); + throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP not compiled into kernel," + + " CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed"); } else { throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + JNACLibrary.strerror(errno)); } @@ -367,7 +371,8 @@ final class SystemCallFilter { int errno = Native.getLastError(); switch (errno) { case EFAULT: break; // available - case EINVAL: throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP_FILTER not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed"); + case EINVAL: throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP_FILTER not" + + " compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed"); default: throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + JNACLibrary.strerror(errno)); } } @@ -379,10 +384,12 @@ final class SystemCallFilter { // check it worked if (linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) { - throw new UnsupportedOperationException("seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError())); + throw new UnsupportedOperationException("seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + + JNACLibrary.strerror(Native.getLastError())); } - // BPF installed to check arch, limit, then syscall. See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details. + // BPF installed to check arch, limit, then syscall. + // See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details. 
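        // For readers unfamiliar with the classic-BPF macros used in this block: BPF_STMT(BPF_LD + BPF_W + BPF_ABS, off)
        // loads the 32-bit word at offset 'off' of the seccomp_data argument into the accumulator, and
        // BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, k, jt, jf) compares the accumulator against the constant 'k',
        // skipping the next 'jt' instructions on a match and 'jf' otherwise.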
SockFilter insns[] = { /* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), // /* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail;
@@ -407,7 +414,8 @@ final class SystemCallFilter { method = 0; int errno1 = Native.getLastError(); if (logger.isDebugEnabled()) { - logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1)); + logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", + JNACLibrary.strerror(errno1)); } if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) { int errno2 = Native.getLastError();
@@ -418,7 +426,8 @@ // now check that the filter was really installed, we should be in filter mode. if (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) { - throw new UnsupportedOperationException("seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " + JNACLibrary.strerror(Native.getLastError())); + throw new UnsupportedOperationException("seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " + + JNACLibrary.strerror(Native.getLastError())); } logger.debug("Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app" );
From 0b338bf52394e49c6f4e653e8a6b7479dc71b561 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sat, 17 Dec 2016 11:45:55 +0100 Subject: [PATCH 29/41] Cleanup random stats serialization code (#22223)
Some of our stats serialization code duplicates complicated serialization logic or could use existing building blocks from StreamOutput/Input. This commit cleans up some of the serialization code.
--- .../admin/indices/stats/CommonStats.java | 199 +++--------------- .../indices/stats/IndicesStatsResponse.java | 10 +- .../admin/indices/stats/ShardStats.java | 3 +- .../node/TransportBroadcastByNodeAction.java | 31 +-- .../common/FieldMemoryStats.java | 132 ++++++++++++ .../common/io/stream/StreamOutput.java | 30 ++- .../index/cache/query/QueryCacheStats.java | 7 - .../index/engine/CommitStats.java | 7 - .../index/engine/SegmentsStats.java | 11 +- .../index/fielddata/FieldDataStats.java | 93 +++----- .../index/fielddata/ShardFieldData.java | 4 +- .../elasticsearch/index/flush/FlushStats.java | 6 - .../org/elasticsearch/index/get/GetStats.java | 6 - .../elasticsearch/index/merge/MergeStats.java | 6 - .../index/refresh/RefreshStats.java | 6 - .../index/search/stats/SearchStats.java | 34 +-- .../elasticsearch/index/shard/DocsStats.java | 6 - .../index/shard/IndexingStats.java | 30 +-- .../elasticsearch/index/store/StoreStats.java | 6 - .../index/warmer/WarmerStats.java | 6 - .../org/elasticsearch/ingest/IngestStats.java | 2 +- .../completion/CompletionFieldStats.java | 3 +- .../suggest/completion/CompletionStats.java | 83 ++------ .../common/FieldMemoryStatsTests.java | 102 +++++++++ .../common/io/stream/BytesStreamsTests.java | 16 ++ .../index/fielddata/FieldDataStatsTests.java | 45 ++++ .../suggest/stats/CompletionsStatsTests.java | 45 ++++ .../indices/stats/IndexStatsIT.java | 33 ++- .../suggest/CompletionSuggestSearchIT.java | 3 +- 29 files changed, 488 insertions(+), 477 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/common/FieldMemoryStats.java create mode 100644 core/src/test/java/org/elasticsearch/common/FieldMemoryStatsTests.java create mode 100644 core/src/test/java/org/elasticsearch/index/fielddata/FieldDataStatsTests.java create mode 100644 
core/src/test/java/org/elasticsearch/index/suggest/stats/CompletionsStatsTests.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index ce90858f49a..b5e91ddf2a1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -46,6 +46,9 @@ import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.search.suggest.completion.CompletionStats; import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.Stream; public class CommonStats implements Writeable, ToXContent { @@ -225,45 +228,19 @@ public class CommonStats implements Writeable, ToXContent { } public CommonStats(StreamInput in) throws IOException { - if (in.readBoolean()) { - docs = DocsStats.readDocStats(in); - } - if (in.readBoolean()) { - store = StoreStats.readStoreStats(in); - } - if (in.readBoolean()) { - indexing = IndexingStats.readIndexingStats(in); - } - if (in.readBoolean()) { - get = GetStats.readGetStats(in); - } - if (in.readBoolean()) { - search = SearchStats.readSearchStats(in); - } - if (in.readBoolean()) { - merge = MergeStats.readMergeStats(in); - } - if (in.readBoolean()) { - refresh = RefreshStats.readRefreshStats(in); - } - if (in.readBoolean()) { - flush = FlushStats.readFlushStats(in); - } - if (in.readBoolean()) { - warmer = WarmerStats.readWarmerStats(in); - } - if (in.readBoolean()) { - queryCache = QueryCacheStats.readQueryCacheStats(in); - } - if (in.readBoolean()) { - fieldData = FieldDataStats.readFieldDataStats(in); - } - if (in.readBoolean()) { - completion = CompletionStats.readCompletionStats(in); - } - if (in.readBoolean()) { - segments = SegmentsStats.readSegmentsStats(in); - } + docs = in.readOptionalStreamable(DocsStats::new); + store = in.readOptionalStreamable(StoreStats::new); + indexing = in.readOptionalStreamable(IndexingStats::new); + get = in.readOptionalStreamable(GetStats::new); + search = in.readOptionalStreamable(SearchStats::new); + merge = in.readOptionalStreamable(MergeStats::new); + refresh = in.readOptionalStreamable(RefreshStats::new); + flush = in.readOptionalStreamable(FlushStats::new); + warmer = in.readOptionalStreamable(WarmerStats::new); + queryCache = in.readOptionalStreamable(QueryCacheStats::new); + fieldData = in.readOptionalStreamable(FieldDataStats::new); + completion = in.readOptionalStreamable(CompletionStats::new); + segments = in.readOptionalStreamable(SegmentsStats::new); translog = in.readOptionalStreamable(TranslogStats::new); requestCache = in.readOptionalStreamable(RequestCacheStats::new); recoveryStats = in.readOptionalStreamable(RecoveryStats::new); @@ -271,84 +248,19 @@ public class CommonStats implements Writeable, ToXContent { @Override public void writeTo(StreamOutput out) throws IOException { - if (docs == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - docs.writeTo(out); - } - if (store == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - store.writeTo(out); - } - if (indexing == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - indexing.writeTo(out); - } - if (get == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - get.writeTo(out); - } - if (search == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - search.writeTo(out); 
- } - if (merge == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - merge.writeTo(out); - } - if (refresh == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - refresh.writeTo(out); - } - if (flush == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - flush.writeTo(out); - } - if (warmer == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - warmer.writeTo(out); - } - if (queryCache == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - queryCache.writeTo(out); - } - if (fieldData == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - fieldData.writeTo(out); - } - if (completion == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - completion.writeTo(out); - } - if (segments == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - segments.writeTo(out); - } + out.writeOptionalStreamable(docs); + out.writeOptionalStreamable(store); + out.writeOptionalStreamable(indexing); + out.writeOptionalStreamable(get); + out.writeOptionalStreamable(search); + out.writeOptionalStreamable(merge); + out.writeOptionalStreamable(refresh); + out.writeOptionalStreamable(flush); + out.writeOptionalStreamable(warmer); + out.writeOptionalStreamable(queryCache); + out.writeOptionalStreamable(fieldData); + out.writeOptionalStreamable(completion); + out.writeOptionalStreamable(segments); out.writeOptionalStreamable(translog); out.writeOptionalStreamable(requestCache); out.writeOptionalStreamable(recoveryStats);
@@ -590,53 +502,12 @@ public class CommonStats implements Writeable, ToXContent { // note, requires a wrapping object @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (docs != null) { - docs.toXContent(builder, params); - } - if (store != null) { - store.toXContent(builder, params); - } - if (indexing != null) { - indexing.toXContent(builder, params); - } - if (get != null) { - get.toXContent(builder, params); - } - if (search != null) { - search.toXContent(builder, params); - } - if (merge != null) { - merge.toXContent(builder, params); - } - if (refresh != null) { - refresh.toXContent(builder, params); - } - if (flush != null) { - flush.toXContent(builder, params); - } - if (warmer != null) { - warmer.toXContent(builder, params); - } - if (queryCache != null) { - queryCache.toXContent(builder, params); - } - if (fieldData != null) { - fieldData.toXContent(builder, params); - } - if (completion != null) { - completion.toXContent(builder, params); - } - if (segments != null) { - segments.toXContent(builder, params); - } - if (translog != null) { - translog.toXContent(builder, params); - } - if (requestCache != null) { - requestCache.toXContent(builder, params); - } - if (recoveryStats != null) { - recoveryStats.toXContent(builder, params); + final Stream<ToXContent> stream = Arrays.stream(new ToXContent[] { + docs, store, indexing, get, search, merge, refresh, flush, warmer, queryCache, + fieldData, completion, segments, translog, requestCache, recoveryStats}) + .filter(Objects::nonNull); + for (ToXContent toXContent : ((Iterable<ToXContent>)stream::iterator)) { + toXContent.toXContent(builder, params); } return builder; }
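The constructor and writeTo above now lean on a building block that already exists on StreamInput/StreamOutput: an optional Streamable is a one-byte presence flag followed by the object, which is exactly what the thirteen hand-rolled if (x == null) writeBoolean(false) ... blocks encoded. A minimal round-trip sketch (the BytesStreamOutput scratch buffer is just for illustration):

    BytesStreamOutput out = new BytesStreamOutput();
    DocsStats docs = null;                                       // stats sections may legitimately be absent
    out.writeOptionalStreamable(docs);                           // writes a single 'false' byte
    DocsStats read = out.bytes().streamInput()
                        .readOptionalStreamable(DocsStats::new); // returns null when the flag was false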
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 839c27e0b8a..5b2c024c6b8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
@@ -135,19 +135,13 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shards = new ShardStats[in.readVInt()]; - for (int i = 0; i < shards.length; i++) { - shards[i] = ShardStats.readShardStats(in); - } + shards = in.readArray(ShardStats::readShardStats, (size) -> new ShardStats[size]); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeVInt(shards.length); - for (ShardStats shard : shards) { - shard.writeTo(out); - } + out.writeArray(shards); } @Override
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index c503da12317..877db0579a0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
@@ -24,6 +24,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.engine.CommitStats;
@@ -32,7 +33,7 @@ import org.elasticsearch.index.shard.ShardPath; import java.io.IOException; -public class ShardStats implements Streamable, ToXContent { +public class ShardStats implements Streamable, Writeable, ToXContent { private ShardRouting shardRouting; private CommonStats commonStats; @Nullable
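The array variants follow the same pattern: writeArray length-prefixes the array and writes each element through its Writeable contract, which is presumably why ShardStats picks up the Writeable interface in the hunk above, while readArray takes a per-element reader plus an array allocator. A sketch of the round trip, reusing the names from the diff:

    out.writeArray(shards);                                           // vint length prefix, then each element
    ShardStats[] read = in.readArray(ShardStats::readShardStats,      // per-element reader
                                     (size) -> new ShardStats[size]); // array allocator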
diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 98c962b3eec..9f11b9b5a70 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -505,11 +505,7 @@ public abstract class TransportBroadcastByNodeAction [...] super.readFrom(in); - int size = in.readVInt(); - shards = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - shards.add(new ShardRouting(in)); - } + shards = in.readList(ShardRouting::new); nodeId = in.readString(); }
@@ -517,11 +513,7 @@ public abstract class TransportBroadcastByNodeAction [...] - int resultsSize = in.readVInt(); - results = new ArrayList<>(resultsSize); - for (; resultsSize > 0; resultsSize--) { - final ShardOperationResult result = in.readBoolean() ? readShardResult(in) : null; - results.add(result); - } + results = in.readList((stream) -> stream.readBoolean() ? readShardResult(stream) : null); if (in.readBoolean()) { - int failureShards = in.readVInt(); - exceptions = new ArrayList<>(failureShards); - for (int i = 0; i < failureShards; i++) { - exceptions.add(new BroadcastShardOperationFailedException(in)); - } + exceptions = in.readList(BroadcastShardOperationFailedException::new); } else { exceptions = null; }
@@ -594,11 +577,7 @@ public abstract class TransportBroadcastByNodeAction [...]
diff --git a/core/src/main/java/org/elasticsearch/common/FieldMemoryStats.java b/core/src/main/java/org/elasticsearch/common/FieldMemoryStats.java new file mode 100644 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/FieldMemoryStats.java
+/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */
+package org.elasticsearch.common; + +import com.carrotsearch.hppc.ObjectLongHashMap; +import com.carrotsearch.hppc.cursors.ObjectLongCursor; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Objects; +
+/** + * A reusable class to encode field -> memory size mappings + */ +public final class FieldMemoryStats implements Writeable, Iterable<ObjectLongCursor<String>> { + + private final ObjectLongHashMap<String> stats; + + /** + * Creates a new FieldMemoryStats instance + */ + public FieldMemoryStats(ObjectLongHashMap<String> stats) { + this.stats = Objects.requireNonNull(stats, "stats must be non-null"); + assert !stats.containsKey(null); + } + + /** + * Creates a new FieldMemoryStats instance from a stream + */ + public FieldMemoryStats(StreamInput input) throws IOException { + int size = input.readVInt(); + stats = new ObjectLongHashMap<>(size); + for (int i = 0; i < size; i++) { + stats.put(input.readString(), input.readVLong()); + } + } + + /** + * Adds / merges the given field memory stats into this stats instance + */ + public void add(FieldMemoryStats fieldMemoryStats) { + for (ObjectLongCursor<String> entry : fieldMemoryStats.stats) { + stats.addTo(entry.key, entry.value); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(stats.size()); + for (ObjectLongCursor<String> entry : stats) { + out.writeString(entry.key); + out.writeVLong(entry.value); + } + } + + /** + * Generates x-content into the given builder for each of the fields in this stats instance + * @param builder the builder to generate on + * @param key the top level key for this stats object + * @param rawKey the raw byte key for each of the fields byte sizes + * @param readableKey the readable key for each of the fields byte sizes + */ + public void toXContent(XContentBuilder builder, String key, String rawKey, String readableKey) throws IOException { + builder.startObject(key); + for (ObjectLongCursor<String> entry : stats) { + builder.startObject(entry.key); + builder.byteSizeField(rawKey, readableKey, entry.value); + builder.endObject(); + } + builder.endObject(); + } + + /** + * Creates a deep copy of this stats instance + */ + public FieldMemoryStats copy() { + return new FieldMemoryStats(stats.clone()); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FieldMemoryStats that = (FieldMemoryStats) o; + return Objects.equals(stats, that.stats); + } + + @Override + public int hashCode() { + return Objects.hash(stats); + } + + @Override + public Iterator<ObjectLongCursor<String>> iterator() { + return stats.iterator(); + } + + /** + * Returns the fields value in bytes or 0 if it's not present in the stats + */ + public long get(String field) { + return stats.get(field); + } + + /** + * Returns true iff the given field is in the stats + */ + public boolean containsField(String field) { + return stats.containsKey(field); + } +}
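A short usage sketch for the new class (field names and byte counts below are illustrative, not from the patch): it wraps an hppc ObjectLongHashMap keyed by field name, and add()/copy() give the merge-and-snapshot behaviour the stats classes previously hand-rolled:

    ObjectLongHashMap<String> a = new ObjectLongHashMap<>();
    a.put("title", 1024L);                                    // field -> bytes
    ObjectLongHashMap<String> b = new ObjectLongHashMap<>();
    b.put("body", 2048L);
    FieldMemoryStats total = new FieldMemoryStats(a).copy();  // copy() clones the backing map
    total.add(new FieldMemoryStats(b));                       // addTo-style merge of per-field counters
    long titleBytes = total.get("title");                     // 0 when a field is absent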
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 1fdd3e72c83..4fc253cf45d 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -467,16 +467,32 @@ public abstract class StreamOutput extends OutputStream { * @param keyWriter The key writer * @param valueWriter The value writer */ - public <K, V> void writeMapOfLists(final Map<K, List<V>> map, final Writer<K> keyWriter, final Writer<V> valueWriter) + public final <K, V> void writeMapOfLists(final Map<K, List<V>> map, final Writer<K> keyWriter, final Writer<V> valueWriter) throws IOException { - writeVInt(map.size()); - - for (final Map.Entry<K, List<V>> entry : map.entrySet()) { - keyWriter.write(this, entry.getKey()); - writeVInt(entry.getValue().size()); - for (final V value : entry.getValue()) { + writeMap(map, keyWriter, (stream, list) -> { + writeVInt(list.size()); + for (final V value : list) { valueWriter.write(this, value); } + }); + }
+
+     /**
+     * Write a {@link Map} of {@code K}-type keys to {@code V}-type values.
+     * <pre><code>
+     * Map&lt;String, String&gt; map = ...;
+     * out.writeMap(map, StreamOutput::writeString, StreamOutput::writeString);
+     * </code></pre>
+ * + * @param keyWriter The key writer + * @param valueWriter The value writer + */ + public final <K, V> void writeMap(final Map<K, V> map, final Writer<K> keyWriter, final Writer<V> valueWriter) + throws IOException { + writeVInt(map.size()); + for (final Map.Entry<K, V> entry : map.entrySet()) { + keyWriter.write(this, entry.getKey()); + valueWriter.write(this, entry.getValue()); + } + }
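StreamInput has the matching reader: readMap takes one reader per key and value, and the SearchStats and IndexingStats hunks below use exactly that shape. A minimal round trip, assuming a BytesStreamOutput scratch buffer:

    BytesStreamOutput out = new BytesStreamOutput();
    Map<String, Long> sizes = new HashMap<>();
    sizes.put("some_field", 42L);                                             // illustrative entry
    out.writeMap(sizes, StreamOutput::writeString, StreamOutput::writeVLong); // size prefix, then k/v pairs
    Map<String, Long> back = out.bytes().streamInput()
                                .readMap(StreamInput::readString, StreamInput::readVLong);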
diff --git a/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java index 33b61a35138..1eff321b47f 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java
@@ -106,13 +106,6 @@ public class QueryCacheStats implements Streamable, ToXContent { return cacheCount - cacheSize; } - public static QueryCacheStats readQueryCacheStats(StreamInput in) throws IOException { - QueryCacheStats stats = new QueryCacheStats(); - stats.readFrom(in); - return stats; - } - - @Override public void readFrom(StreamInput in) throws IOException { ramBytesUsed = in.readLong();
diff --git a/core/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/core/src/main/java/org/elasticsearch/index/engine/CommitStats.java index 48fb8a80eeb..eb2e35a5a23 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/CommitStats.java +++ b/core/src/main/java/org/elasticsearch/index/engine/CommitStats.java
@@ -49,13 +49,6 @@ public final class CommitStats implements Streamable, ToXContent { } private CommitStats() { - - } - - public static CommitStats readCommitStatsFrom(StreamInput in) throws IOException { - CommitStats commitStats = new CommitStats(); - commitStats.readFrom(in); - return commitStats; } public static CommitStats readOptionalCommitStatsFrom(StreamInput in) throws IOException {
diff --git a/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index 637beebfec8..ed8e150cd6c 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
@@ -286,12 +286,6 @@ public class SegmentsStats implements Streamable, ToXContent { return maxUnsafeAutoIdTimestamp; } - public static SegmentsStats readSegmentsStats(StreamInput in) throws IOException { - SegmentsStats stats = new SegmentsStats(); - stats.readFrom(in); - return stats; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.SEGMENTS);
@@ -391,10 +385,9 @@ public class SegmentsStats implements Streamable, ToXContent { out.writeLong(maxUnsafeAutoIdTimestamp); out.writeVInt(fileSizes.size()); - for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) { - ObjectObjectCursor<String, Long> entry = it.next(); + for (ObjectObjectCursor<String, Long> entry : fileSizes) { out.writeString(entry.key); - out.writeLong(entry.value); + out.writeLong(entry.value.longValue()); } } }
diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java b/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java index 56fe03d4395..6cd2eda5530 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.fielddata; -import com.carrotsearch.hppc.ObjectLongHashMap; +import 
org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,19 +29,25 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Objects; public class FieldDataStats implements Streamable, ToXContent { + private static final String FIELDDATA = "fielddata"; + private static final String MEMORY_SIZE = "memory_size"; + private static final String MEMORY_SIZE_IN_BYTES = "memory_size_in_bytes"; + private static final String EVICTIONS = "evictions"; + private static final String FIELDS = "fields"; long memorySize; long evictions; @Nullable - ObjectLongHashMap fields; + FieldMemoryStats fields; public FieldDataStats() { } - public FieldDataStats(long memorySize, long evictions, @Nullable ObjectLongHashMap fields) { + public FieldDataStats(long memorySize, long evictions, @Nullable FieldMemoryStats fields) { this.memorySize = memorySize; this.evictions = evictions; this.fields = fields; @@ -52,16 +58,9 @@ public class FieldDataStats implements Streamable, ToXContent { this.evictions += stats.evictions; if (stats.fields != null) { if (fields == null) { - fields = stats.fields.clone(); + fields = stats.fields.copy(); } else { - assert !stats.fields.containsKey(null); - final Object[] keys = stats.fields.keys; - final long[] values = stats.fields.values; - for (int i = 0; i < keys.length; i++) { - if (keys[i] != null) { - fields.addTo((String) keys[i], values[i]); - } - } + fields.add(stats.fields); } } } @@ -79,78 +78,48 @@ public class FieldDataStats implements Streamable, ToXContent { } @Nullable - public ObjectLongHashMap getFields() { + public FieldMemoryStats getFields() { return fields; } - public static FieldDataStats readFieldDataStats(StreamInput in) throws IOException { - FieldDataStats stats = new FieldDataStats(); - stats.readFrom(in); - return stats; - } - @Override public void readFrom(StreamInput in) throws IOException { memorySize = in.readVLong(); evictions = in.readVLong(); - if (in.readBoolean()) { - int size = in.readVInt(); - fields = new ObjectLongHashMap<>(size); - for (int i = 0; i < size; i++) { - fields.put(in.readString(), in.readVLong()); - } - } + fields = in.readOptionalWriteable(FieldMemoryStats::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(memorySize); out.writeVLong(evictions); - if (fields == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeVInt(fields.size()); - assert !fields.containsKey(null); - final Object[] keys = fields.keys; - final long[] values = fields.values; - for (int i = 0; i < keys.length; i++) { - if (keys[i] != null) { - out.writeString((String) keys[i]); - out.writeVLong(values[i]); - } - } - } + out.writeOptionalWriteable(fields); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.FIELDDATA); - builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize); - builder.field(Fields.EVICTIONS, getEvictions()); + builder.startObject(FIELDDATA); + builder.byteSizeField(MEMORY_SIZE_IN_BYTES, MEMORY_SIZE, memorySize); + builder.field(EVICTIONS, getEvictions()); if (fields != null) { - builder.startObject(Fields.FIELDS); - assert !fields.containsKey(null); - final Object[] keys = fields.keys; - final long[] values = fields.values; - 
for (int i = 0; i < keys.length; i++) { - if (keys[i] != null) { - builder.startObject((String) keys[i]); - builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, values[i]); - builder.endObject(); - } - } - builder.endObject(); + fields.toXContent(builder, FIELDS, MEMORY_SIZE_IN_BYTES, MEMORY_SIZE); } builder.endObject(); return builder; } - static final class Fields { - static final String FIELDDATA = "fielddata"; - static final String MEMORY_SIZE = "memory_size"; - static final String MEMORY_SIZE_IN_BYTES = "memory_size_in_bytes"; - static final String EVICTIONS = "evictions"; - static final String FIELDS = "fields"; + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FieldDataStats that = (FieldDataStats) o; + return memorySize == that.memorySize && + evictions == that.evictions && + Objects.equals(fields, that.fields); + } + + @Override + public int hashCode() { + return Objects.hash(memorySize, evictions, fields); } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java index d8eaaaf448e..6dd9552b690 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.fielddata; import com.carrotsearch.hppc.ObjectLongHashMap; import org.apache.lucene.util.Accountable; +import org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -45,7 +46,8 @@ public class ShardFieldData implements IndexFieldDataCache.Listener { } } } - return new FieldDataStats(totalMetric.count(), evictionsMetric.count(), fieldTotals); + return new FieldDataStats(totalMetric.count(), evictionsMetric.count(), fieldTotals == null ? 
null : + new FieldMemoryStats(fieldTotals)); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/flush/FlushStats.java b/core/src/main/java/org/elasticsearch/index/flush/FlushStats.java index 600651ad306..ac9a4a5c9a1 100644 --- a/core/src/main/java/org/elasticsearch/index/flush/FlushStats.java +++ b/core/src/main/java/org/elasticsearch/index/flush/FlushStats.java @@ -81,12 +81,6 @@ public class FlushStats implements Streamable, ToXContent { return new TimeValue(totalTimeInMillis); } - public static FlushStats readFlushStats(StreamInput in) throws IOException { - FlushStats flushStats = new FlushStats(); - flushStats.readFrom(in); - return flushStats; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.FLUSH); diff --git a/core/src/main/java/org/elasticsearch/index/get/GetStats.java b/core/src/main/java/org/elasticsearch/index/get/GetStats.java index ed7057d33f0..5a386b85330 100644 --- a/core/src/main/java/org/elasticsearch/index/get/GetStats.java +++ b/core/src/main/java/org/elasticsearch/index/get/GetStats.java @@ -134,12 +134,6 @@ public class GetStats implements Streamable, ToXContent { static final String CURRENT = "current"; } - public static GetStats readGetStats(StreamInput in) throws IOException { - GetStats stats = new GetStats(); - stats.readFrom(in); - return stats; - } - @Override public void readFrom(StreamInput in) throws IOException { existsCount = in.readVLong(); diff --git a/core/src/main/java/org/elasticsearch/index/merge/MergeStats.java b/core/src/main/java/org/elasticsearch/index/merge/MergeStats.java index 845b035623d..b129d8e8db9 100644 --- a/core/src/main/java/org/elasticsearch/index/merge/MergeStats.java +++ b/core/src/main/java/org/elasticsearch/index/merge/MergeStats.java @@ -182,12 +182,6 @@ public class MergeStats implements Streamable, ToXContent { return new ByteSizeValue(currentSizeInBytes); } - public static MergeStats readMergeStats(StreamInput in) throws IOException { - MergeStats stats = new MergeStats(); - stats.readFrom(in); - return stats; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.MERGES); diff --git a/core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java b/core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java index 9b9b4673acc..3a3edd10dcc 100644 --- a/core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java +++ b/core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java @@ -81,12 +81,6 @@ public class RefreshStats implements Streamable, ToXContent { return new TimeValue(totalTimeInMillis); } - public static RefreshStats readRefreshStats(StreamInput in) throws IOException { - RefreshStats refreshStats = new RefreshStats(); - refreshStats.readFrom(in); - return refreshStats; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.REFRESH); diff --git a/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java b/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java index 3959a697fd0..824ca598ae2 100644 --- a/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java +++ b/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.search.stats; +import org.elasticsearch.action.support.ToXContentToBytes; import 
org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -32,7 +33,7 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -public class SearchStats implements Streamable, ToXContent { +public class SearchStats extends ToXContentToBytes implements Streamable { public static class Stats implements Streamable, ToXContent { @@ -338,22 +339,12 @@ public class SearchStats implements Streamable, ToXContent { static final String SUGGEST_CURRENT = "suggest_current"; } - public static SearchStats readSearchStats(StreamInput in) throws IOException { - SearchStats searchStats = new SearchStats(); - searchStats.readFrom(in); - return searchStats; - } - @Override public void readFrom(StreamInput in) throws IOException { totalStats = Stats.readStats(in); openContexts = in.readVLong(); if (in.readBoolean()) { - int size = in.readVInt(); - groupStats = new HashMap<>(size); - for (int i = 0; i < size; i++) { - groupStats.put(in.readString(), Stats.readStats(in)); - } + groupStats = in.readMap(StreamInput::readString, Stats::readStats); } } @@ -365,24 +356,7 @@ public class SearchStats implements Streamable, ToXContent { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeVInt(groupStats.size()); - for (Map.Entry entry : groupStats.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } - } - } - - @Override - public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.string(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; + out.writeMap(groupStats, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); } } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/DocsStats.java b/core/src/main/java/org/elasticsearch/index/shard/DocsStats.java index f8132d557bb..5ee5ac66083 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/DocsStats.java +++ b/core/src/main/java/org/elasticsearch/index/shard/DocsStats.java @@ -57,12 +57,6 @@ public class DocsStats implements Streamable, ToXContent { return this.deleted; } - public static DocsStats readDocStats(StreamInput in) throws IOException { - DocsStats docsStats = new DocsStats(); - docsStats.readFrom(in); - return docsStats; - } - @Override public void readFrom(StreamInput in) throws IOException { count = in.readVLong(); diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java b/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java index ba7eafc1a67..c94062e0657 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexingStats.java @@ -143,11 +143,7 @@ public class IndexingStats implements Streamable, ToXContent { indexCount = in.readVLong(); indexTimeInMillis = in.readVLong(); indexCurrent = in.readVLong(); - - if(in.getVersion().onOrAfter(Version.V_2_1_0)){ - indexFailedCount = in.readVLong(); - } - + indexFailedCount = in.readVLong(); deleteCount = in.readVLong(); deleteTimeInMillis = in.readVLong(); deleteCurrent = in.readVLong(); @@ -161,11 +157,7 @@ public class IndexingStats implements Streamable, ToXContent { out.writeVLong(indexCount); out.writeVLong(indexTimeInMillis); out.writeVLong(indexCurrent); - - if(out.getVersion().onOrAfter(Version.V_2_1_0)) { - 
out.writeVLong(indexFailedCount); - } - + out.writeVLong(indexFailedCount); out.writeVLong(deleteCount); out.writeVLong(deleteTimeInMillis); out.writeVLong(deleteCurrent); @@ -283,21 +275,11 @@ public class IndexingStats implements Streamable, ToXContent { static final String THROTTLED_TIME = "throttle_time"; } - public static IndexingStats readIndexingStats(StreamInput in) throws IOException { - IndexingStats indexingStats = new IndexingStats(); - indexingStats.readFrom(in); - return indexingStats; - } - @Override public void readFrom(StreamInput in) throws IOException { totalStats = Stats.readStats(in); if (in.readBoolean()) { - int size = in.readVInt(); - typeStats = new HashMap<>(size); - for (int i = 0; i < size; i++) { - typeStats.put(in.readString(), Stats.readStats(in)); - } + typeStats = in.readMap(StreamInput::readString, Stats::readStats); } } @@ -308,11 +290,7 @@ public class IndexingStats implements Streamable, ToXContent { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeVInt(typeStats.size()); - for (Map.Entry entry : typeStats.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } + out.writeMap(typeStats, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); } } } diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreStats.java b/core/src/main/java/org/elasticsearch/index/store/StoreStats.java index d5e50513f3a..422508d8237 100644 --- a/core/src/main/java/org/elasticsearch/index/store/StoreStats.java +++ b/core/src/main/java/org/elasticsearch/index/store/StoreStats.java @@ -65,12 +65,6 @@ public class StoreStats implements Streamable, ToXContent { return size(); } - public static StoreStats readStoreStats(StreamInput in) throws IOException { - StoreStats store = new StoreStats(); - store.readFrom(in); - return store; - } - @Override public void readFrom(StreamInput in) throws IOException { sizeInBytes = in.readVLong(); diff --git a/core/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java b/core/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java index 233dbf4f5fe..21dec0f62a0 100644 --- a/core/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java +++ b/core/src/main/java/org/elasticsearch/index/warmer/WarmerStats.java @@ -86,12 +86,6 @@ public class WarmerStats implements Streamable, ToXContent { return new TimeValue(totalTimeInMillis); } - public static WarmerStats readWarmerStats(StreamInput in) throws IOException { - WarmerStats refreshStats = new WarmerStats(); - refreshStats.readFrom(in); - return refreshStats; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.WARMER); diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestStats.java b/core/src/main/java/org/elasticsearch/ingest/IngestStats.java index dee806e0230..add02a5da90 100644 --- a/core/src/main/java/org/elasticsearch/ingest/IngestStats.java +++ b/core/src/main/java/org/elasticsearch/ingest/IngestStats.java @@ -54,7 +54,7 @@ public class IngestStats implements Writeable, ToXContent { @Override public void writeTo(StreamOutput out) throws IOException { totalStats.writeTo(out); - out.writeVLong(statsPerPipeline.size()); + out.writeVInt(statsPerPipeline.size()); for (Map.Entry entry : statsPerPipeline.entrySet()) { out.writeString(entry.getKey()); entry.getValue().writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java 
b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java index e5e1b1b9199..8b5761a7e9a 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionFieldStats.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Terms; import org.apache.lucene.search.suggest.document.CompletionTerms; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.regex.Regex; import java.io.IOException; @@ -64,6 +65,6 @@ public class CompletionFieldStats { throw new ElasticsearchException(ioe); } } - return new CompletionStats(sizeInBytes, completionFields); + return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields)); } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java index efea5915766..c123d46fe45 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionStats.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.suggest.completion; -import com.carrotsearch.hppc.ObjectLongHashMap; +import org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -31,15 +31,19 @@ import java.io.IOException; public class CompletionStats implements Streamable, ToXContent { - private long sizeInBytes; + private static final String COMPLETION = "completion"; + private static final String SIZE_IN_BYTES = "size_in_bytes"; + private static final String SIZE = "size"; + private static final String FIELDS = "fields"; + private long sizeInBytes; @Nullable - private ObjectLongHashMap fields; + private FieldMemoryStats fields; public CompletionStats() { } - public CompletionStats(long size, @Nullable ObjectLongHashMap fields) { + public CompletionStats(long size, @Nullable FieldMemoryStats fields) { this.sizeInBytes = size; this.fields = fields; } @@ -52,98 +56,43 @@ public class CompletionStats implements Streamable, ToXContent { return new ByteSizeValue(sizeInBytes); } - public ObjectLongHashMap getFields() { + public FieldMemoryStats getFields() { return fields; } @Override public void readFrom(StreamInput in) throws IOException { sizeInBytes = in.readVLong(); - if (in.readBoolean()) { - int size = in.readVInt(); - fields = new ObjectLongHashMap<>(size); - for (int i = 0; i < size; i++) { - fields.put(in.readString(), in.readVLong()); - } - } + fields = in.readOptionalWriteable(FieldMemoryStats::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(sizeInBytes); - if (fields == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeVInt(fields.size()); - - assert !fields.containsKey(null); - final Object[] keys = fields.keys; - final long[] values = fields.values; - for (int i = 0; i < keys.length; i++) { - if (keys[i] != null) { - out.writeString((String) keys[i]); - out.writeVLong(values[i]); - } - } - } + out.writeOptionalWriteable(fields); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - 
builder.startObject(Fields.COMPLETION); - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, sizeInBytes); + builder.startObject(COMPLETION); + builder.byteSizeField(SIZE_IN_BYTES, SIZE, sizeInBytes); if (fields != null) { - builder.startObject(Fields.FIELDS); - - assert !fields.containsKey(null); - final Object[] keys = fields.keys; - final long[] values = fields.values; - for (int i = 0; i < keys.length; i++) { - if (keys[i] != null) { - builder.startObject((String) keys[i]); - builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, values[i]); - builder.endObject(); - } - } - builder.endObject(); + fields.toXContent(builder, FIELDS, SIZE_IN_BYTES, SIZE); } builder.endObject(); return builder; } - public static CompletionStats readCompletionStats(StreamInput in) throws IOException { - CompletionStats stats = new CompletionStats(); - stats.readFrom(in); - return stats; - } - - static final class Fields { - static final String COMPLETION = "completion"; - static final String SIZE_IN_BYTES = "size_in_bytes"; - static final String SIZE = "size"; - static final String FIELDS = "fields"; - } - public void add(CompletionStats completion) { if (completion == null) { return; } - sizeInBytes += completion.getSizeInBytes(); - if (completion.fields != null) { if (fields == null) { - fields = completion.fields.clone(); + fields = completion.fields.copy(); } else { - assert !completion.fields.containsKey(null); - final Object[] keys = completion.fields.keys; - final long[] values = completion.fields.values; - for (int i = 0; i < keys.length; i++) { - if (keys[i] != null) { - fields.addTo((String) keys[i], values[i]); - } - } + fields.add(completion.fields); } } } diff --git a/core/src/test/java/org/elasticsearch/common/FieldMemoryStatsTests.java b/core/src/test/java/org/elasticsearch/common/FieldMemoryStatsTests.java new file mode 100644 index 00000000000..74427281894 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/FieldMemoryStatsTests.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common; + +import com.carrotsearch.hppc.ObjectLongHashMap; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class FieldMemoryStatsTests extends ESTestCase { + + public void testSerialize() throws IOException { + FieldMemoryStats stats = randomFieldMemoryStats(); + BytesStreamOutput out = new BytesStreamOutput(); + stats.writeTo(out); + StreamInput input = out.bytes().streamInput(); + FieldMemoryStats read = new FieldMemoryStats(input); + assertEquals(-1, input.read()); + assertEquals(stats, read); + } + + public void testHashCodeEquals() { + FieldMemoryStats stats = randomFieldMemoryStats(); + assertEquals(stats, stats); + assertEquals(stats.hashCode(), stats.hashCode()); + ObjectLongHashMap map1 = new ObjectLongHashMap<>(); + map1.put("bar", 1); + FieldMemoryStats stats1 = new FieldMemoryStats(map1); + ObjectLongHashMap map2 = new ObjectLongHashMap<>(); + map2.put("foo", 2); + FieldMemoryStats stats2 = new FieldMemoryStats(map2); + + ObjectLongHashMap map3 = new ObjectLongHashMap<>(); + map3.put("foo", 2); + map3.put("bar", 1); + FieldMemoryStats stats3 = new FieldMemoryStats(map3); + + ObjectLongHashMap map4 = new ObjectLongHashMap<>(); + map4.put("foo", 2); + map4.put("bar", 1); + FieldMemoryStats stats4 = new FieldMemoryStats(map4); + + assertNotEquals(stats1, stats2); + assertNotEquals(stats1, stats3); + assertNotEquals(stats2, stats3); + assertEquals(stats4, stats3); + + stats1.add(stats2); + assertEquals(stats1, stats3); + assertEquals(stats1, stats4); + assertEquals(stats1.hashCode(), stats3.hashCode()); + } + + public void testAdd() { + ObjectLongHashMap map1 = new ObjectLongHashMap<>(); + map1.put("bar", 1); + FieldMemoryStats stats1 = new FieldMemoryStats(map1); + ObjectLongHashMap map2 = new ObjectLongHashMap<>(); + map2.put("foo", 2); + FieldMemoryStats stats2 = new FieldMemoryStats(map2); + + ObjectLongHashMap map3 = new ObjectLongHashMap<>(); + map3.put("bar", 1); + FieldMemoryStats stats3 = new FieldMemoryStats(map3); + stats3.add(stats1); + + ObjectLongHashMap map4 = new ObjectLongHashMap<>(); + map4.put("foo", 2); + map4.put("bar", 2); + FieldMemoryStats stats4 = new FieldMemoryStats(map4); + assertNotEquals(stats3, stats4); + stats3.add(stats2); + assertEquals(stats3, stats4); + } + + public static FieldMemoryStats randomFieldMemoryStats() { + ObjectLongHashMap map = new ObjectLongHashMap<>(); + int keys = randomIntBetween(1, 1000); + for (int i = 0; i < keys; i++) { + map.put(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomPositiveLong()); + } + return new FieldMemoryStats(map); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index 866a02476e7..d1340af0b22 100644 --- a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -456,6 +456,22 @@ public class BytesStreamsTests extends ESTestCase { out.close(); } + public void testWriteMap() throws IOException { + final int size = randomIntBetween(0, 100); + final Map expected = new HashMap<>(randomIntBetween(0, 100)); + for (int i = 0; i < size; ++i) { + expected.put(randomAsciiOfLength(2), randomAsciiOfLength(5)); + } + + final BytesStreamOutput out = new BytesStreamOutput(); + 
out.writeMap(expected, StreamOutput::writeString, StreamOutput::writeString); + final StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); + final Map loaded = in.readMap(StreamInput::readString, StreamInput::readString); + + assertThat(loaded.size(), equalTo(expected.size())); + assertThat(expected, equalTo(loaded)); + } + public void testWriteMapOfLists() throws IOException { final int size = randomIntBetween(0, 5); final Map> expected = new HashMap<>(size); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataStatsTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataStatsTests.java new file mode 100644 index 00000000000..54881bad884 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataStatsTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.fielddata; + +import org.elasticsearch.common.FieldMemoryStats; +import org.elasticsearch.common.FieldMemoryStatsTests; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class FieldDataStatsTests extends ESTestCase { + + public void testSerialize() throws IOException { + FieldMemoryStats map = randomBoolean() ? null : FieldMemoryStatsTests.randomFieldMemoryStats(); + FieldDataStats stats = new FieldDataStats(randomPositiveLong(), randomPositiveLong(), map == null ? null : + map); + BytesStreamOutput out = new BytesStreamOutput(); + stats.writeTo(out); + FieldDataStats read = new FieldDataStats(); + StreamInput input = out.bytes().streamInput(); + read.readFrom(input); + assertEquals(-1, input.read()); + assertEquals(stats.evictions, read.evictions); + assertEquals(stats.memorySize, read.memorySize); + assertEquals(stats.getFields(), read.getFields()); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/suggest/stats/CompletionsStatsTests.java b/core/src/test/java/org/elasticsearch/index/suggest/stats/CompletionsStatsTests.java new file mode 100644 index 00000000000..71ffffdcbff --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/suggest/stats/CompletionsStatsTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.suggest.stats; + +import org.elasticsearch.common.FieldMemoryStats; +import org.elasticsearch.common.FieldMemoryStatsTests; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.suggest.completion.CompletionStats; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class CompletionsStatsTests extends ESTestCase { + + public void testSerialize() throws IOException { + FieldMemoryStats map = randomBoolean() ? null : FieldMemoryStatsTests.randomFieldMemoryStats(); + CompletionStats stats = new CompletionStats(randomPositiveLong(), map == null ? null : + map); + BytesStreamOutput out = new BytesStreamOutput(); + stats.writeTo(out); + CompletionStats read = new CompletionStats(); + StreamInput input = out.bytes().streamInput(); + read.readFrom(input); + assertEquals(-1, input.read()); + assertEquals(stats.getSizeInBytes(), read.getSizeInBytes()); + assertEquals(stats.getFields(), read.getFields()); + } +} diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index af0b43358c8..3d9e66755eb 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -737,29 +736,29 @@ public class IndexStatsIT extends ESIntegTestCase { stats = builder.setFieldDataFields("bar").execute().actionGet(); assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsKey("bar"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsKey("baz"), is(false)); + assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); stats = builder.setFieldDataFields("bar", "baz").execute().actionGet(); assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsKey("bar"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsKey("baz"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); 
assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); stats = builder.setFieldDataFields("*").execute().actionGet(); assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsKey("bar"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsKey("baz"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(true)); assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0L)); stats = builder.setFieldDataFields("*r").execute().actionGet(); assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsKey("bar"), is(true)); + assertThat(stats.getTotal().fieldData.getFields().containsField("bar"), is(true)); assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0L)); - assertThat(stats.getTotal().fieldData.getFields().containsKey("baz"), is(false)); + assertThat(stats.getTotal().fieldData.getFields().containsField("baz"), is(false)); } @@ -782,29 +781,29 @@ public class IndexStatsIT extends ESIntegTestCase { stats = builder.setCompletionFields("bar.completion").execute().actionGet(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().completion.getFields().containsKey("bar.completion"), is(true)); + assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); - assertThat(stats.getTotal().completion.getFields().containsKey("baz.completion"), is(false)); + assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(false)); stats = builder.setCompletionFields("bar.completion", "baz.completion").execute().actionGet(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().completion.getFields().containsKey("bar.completion"), is(true)); + assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); - assertThat(stats.getTotal().completion.getFields().containsKey("baz.completion"), is(true)); + assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("baz.completion"), greaterThan(0L)); stats = builder.setCompletionFields("*").execute().actionGet(); assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0L)); - assertThat(stats.getTotal().completion.getFields().containsKey("bar.completion"), is(true)); + assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); - assertThat(stats.getTotal().completion.getFields().containsKey("baz.completion"), is(true)); + assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("baz.completion"), greaterThan(0L)); stats = builder.setCompletionFields("*r*").execute().actionGet(); assertThat(stats.getTotal().completion.getSizeInBytes(), 
greaterThan(0L)); - assertThat(stats.getTotal().completion.getFields().containsKey("bar.completion"), is(true)); + assertThat(stats.getTotal().completion.getFields().containsField("bar.completion"), is(true)); assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0L)); - assertThat(stats.getTotal().completion.getFields().containsKey("baz.completion"), is(false)); + assertThat(stats.getTotal().completion.getFields().containsField("baz.completion"), is(false)); } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 74920fb8fc7..a86b63c1a59 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -750,7 +751,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { // regexes IndicesStatsResponse regexFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields("*").get(); - ObjectLongHashMap<String> fields = regexFieldStats.getIndex(INDEX).getPrimaries().completion.getFields(); + FieldMemoryStats fields = regexFieldStats.getIndex(INDEX).getPrimaries().completion.getFields(); long regexSizeInBytes = fields.get(FIELD) + fields.get(otherField); assertThat(regexSizeInBytes, is(totalSizeInBytes)); } From 1f3eb068d54f6752e0dac155f8137b6eabc725bb Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sat, 17 Dec 2016 11:49:57 +0100 Subject: [PATCH 30/41] Add infrastructure to manage network connections outside of Transport/TransportService (#22194) Some expert users, like UnicastZenPing, today establish real connections to nodes during their ping phase that can be used by other parts of the system. Yet, this is potentially dangerous and undesirable unless the nodes have been fully verified and should be connected to, for instance on a cluster state update or when joining a newly elected master. For use-cases like this, this change adds the infrastructure to manually handle connections that are not publicly available on the node, i.e. connections that should not be managed by `Transport`/`TransportService`.
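For reference, the intended calling pattern for such manually managed connections, condensed from the handshake tests in this patch (`transportService`, `remoteNode`, and `timeoutMillis` stand in for caller-supplied values):

    // open a connection that TransportService does not track; the caller
    // owns it and must close it once it goes out of scope
    try (Transport.Connection connection =
             transportService.openConnection(remoteNode, ConnectionProfile.LIGHT_PROFILE)) {
        // verify cluster name and version compatibility before trusting the node
        DiscoveryNode verifiedNode = transportService.handshake(connection, timeoutMillis);
        // the service still does not consider the node connected
        assert transportService.nodeConnected(remoteNode) == false;
    }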
--- .../transport/TransportService.java | 35 +++++++++++++-- .../discovery/zen/UnicastZenPingTests.java | 3 +- .../TransportServiceHandshakeTests.java | 28 +++++++++--- .../test/transport/MockTransportService.java | 44 +++++++++++++++++-- .../AbstractSimpleTransportTestCase.java | 8 ++-- 5 files changed, 102 insertions(+), 16 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index a02b763f2d9..8884177ba63 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -290,6 +290,9 @@ public class TransportService extends AbstractLifecycleComponent { return transport.getLocalAddresses(); } + /** + * Returns true iff the given node is already connected. + */ public boolean nodeConnected(DiscoveryNode node) { return node.equals(localNode) || transport.nodeConnected(node); } @@ -311,6 +314,20 @@ public class TransportService extends AbstractLifecycleComponent { transport.connectToNode(node, connectionProfile); } + /** + * Establishes and returns a new connection to the given node. The connection is NOT maintained by this service, it is the caller's + * responsibility to close the connection once it goes out of scope. + * @param node the node to connect to + * @param profile the connection profile to use + */ + public Transport.Connection openConnection(final DiscoveryNode node, ConnectionProfile profile) throws IOException { + if (node.equals(localNode)) { + return localNodeConnection; + } else { + return transport.openConnection(node, profile); + } + } + /** * Lightly connect to the specified node, returning updated node * information. The handshake will fail if the cluster name on the @@ -337,7 +354,19 @@ public class TransportService extends AbstractLifecycleComponent { return handshakeNode; } - private DiscoveryNode handshake( + /** + * Executes a high-level handshake using the given connection + * and returns the discovery node of the node the connection + * was established with. The handshake will fail if the cluster + * name on the target node mismatches the local cluster name. + * + * @param connection the connection to a specific node + * @param handshakeTimeout handshake timeout + * @return the connected node + * @throws ConnectTransportException if the connection failed + * @throws IllegalStateException if the handshake failed + */ + public DiscoveryNode handshake( final Transport.Connection connection, final long handshakeTimeout) throws ConnectTransportException { final HandshakeResponse response; @@ -465,7 +494,7 @@ public class TransportService extends AbstractLifecycleComponent { } } - final void sendRequest(final Transport.Connection connection, final String action, + public final void sendRequest(final Transport.Connection connection, final String action, final TransportRequest request, final TransportRequestOptions options, TransportResponseHandler handler) { @@ -477,7 +506,7 @@ * Returns either a real transport connection or a local node connection if we are using the local node optimization.
* @throws NodeNotConnectedException if the given node is not connected */ - private Transport.Connection getConnection(DiscoveryNode node) { + public Transport.Connection getConnection(DiscoveryNode node) { if (Objects.requireNonNull(node, "node must be non-null").equals(localNode)) { return localNodeConnection; } else { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index 9886abb900a..de8d1a562e8 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.MockTcpTransport; @@ -571,7 +572,7 @@ public class UnicastZenPingTests extends ESTestCase { final BiFunction supplier) { final Transport transport = supplier.apply(settings, version); final TransportService transportService = - new TransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); + new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); transportService.start(); transportService.acceptIncomingRequests(); final ConcurrentMap counters = ConcurrentCollections.newConcurrentMap(); diff --git a/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index 16735f34efe..fd756f6790e 100644 --- a/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -113,14 +113,24 @@ public class TransportServiceHandshakeTests extends ESTestCase { emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()); + try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode, ConnectionProfile.LIGHT_PROFILE)){ + DiscoveryNode connectedNode = handleA.transportService.handshake(connection, timeout); + assertNotNull(connectedNode); + // the name and version should be updated + assertEquals(connectedNode.getName(), "TS_B"); + assertEquals(connectedNode.getVersion(), handleB.discoveryNode.getVersion()); + assertFalse(handleA.transportService.nodeConnected(discoveryNode)); + } + DiscoveryNode connectedNode = - handleA.transportService.connectToNodeAndHandshake(discoveryNode, timeout); + handleA.transportService.connectToNodeAndHandshake(discoveryNode, timeout); assertNotNull(connectedNode); // the name and version should be updated assertEquals(connectedNode.getName(), "TS_B"); assertEquals(connectedNode.getVersion(), handleB.discoveryNode.getVersion()); assertTrue(handleA.transportService.nodeConnected(discoveryNode)); + } public void testMismatchedClusterName() { @@ -133,8 +143,12 @@ public class TransportServiceHandshakeTests extends ESTestCase { emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()); - IllegalStateException ex = expectThrows(IllegalStateException.class, () -> handleA.transportService.connectToNodeAndHandshake( - 
discoveryNode, timeout)); + IllegalStateException ex = expectThrows(IllegalStateException.class, () -> { + try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode, + ConnectionProfile.LIGHT_PROFILE)) { + handleA.transportService.handshake(connection, timeout); + } + }); assertThat(ex.getMessage(), containsString("handshake failed, mismatched cluster name [Cluster [b]]")); assertFalse(handleA.transportService.nodeConnected(discoveryNode)); } @@ -150,8 +164,12 @@ public class TransportServiceHandshakeTests extends ESTestCase { emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()); - IllegalStateException ex = expectThrows(IllegalStateException.class, () -> handleA.transportService.connectToNodeAndHandshake( - discoveryNode, timeout)); + IllegalStateException ex = expectThrows(IllegalStateException.class, () -> { + try (Transport.Connection connection = handleA.transportService.openConnection(discoveryNode, + ConnectionProfile.LIGHT_PROFILE)) { + handleA.transportService.handshake(connection, timeout); + } + }); assertThat(ex.getMessage(), containsString("handshake failed, incompatible version")); assertFalse(handleA.transportService.nodeConnected(discoveryNode)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index dd05457cec1..a35a1919cb7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -57,11 +57,13 @@ import java.io.IOException; import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.LinkedBlockingDeque; @@ -80,6 +82,7 @@ import java.util.concurrent.atomic.AtomicBoolean; */ public final class MockTransportService extends TransportService { + private final Map> openConnections = new HashMap<>(); public static class TestPlugin extends Plugin { @Override @@ -553,9 +556,7 @@ public final class MockTransportService extends TransportService { } @Override - public void close() { - transport.close(); - } + public void close() { transport.close(); } @Override public Map profileBoundAddresses() { @@ -701,4 +702,41 @@ public final class MockTransportService extends TransportService { } return transport; } + + @Override + public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { + FilteredConnection filteredConnection = new FilteredConnection(super.openConnection(node, profile)) { + final AtomicBoolean closed = new AtomicBoolean(false); + @Override + public void close() throws IOException { + try { + super.close(); + } finally { + if (closed.compareAndSet(false, true)) { + synchronized (openConnections) { + List connections = openConnections.get(node); + boolean remove = connections.remove(this); + assert remove; + if (connections.isEmpty()) { + openConnections.remove(node); + } + } + } + } + + } + }; + synchronized (openConnections) { + List connections = openConnections.computeIfAbsent(node, + (n) -> new CopyOnWriteArrayList<>()); + 
connections.add(filteredConnection); } return filteredConnection; } + + @Override + protected void doClose() { + super.doClose(); + assert openConnections.size() == 0 : "still open connections: " + openConnections; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 542dcfb8b8b..283ae288d29 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1351,8 +1351,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // all is well } - try { - serviceB.connectToNodeAndHandshake(nodeA, 100); + try (Transport.Connection connection = serviceB.openConnection(nodeA, ConnectionProfile.LIGHT_PROFILE)){ + serviceB.handshake(connection, 100); fail("exception should be thrown"); } catch (IllegalStateException e) { // all is well } @@ -1409,8 +1409,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // all is well } - try { - serviceB.connectToNodeAndHandshake(nodeA, 100); + try (Transport.Connection connection = serviceB.openConnection(nodeA, ConnectionProfile.LIGHT_PROFILE)){ + serviceB.handshake(connection, 100); fail("exception should be thrown"); } catch (IllegalStateException e) { // all is well From 58d73bae74fd41e8a588e2ea0ab8b66adf072d9f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 17 Dec 2016 09:20:46 -0500 Subject: [PATCH 31/41] Tighten sequence numbers recovery This commit addresses issues related to recovery and sequence numbers: - A sequence number can be assigned and a Lucene commit created with a maximum sequence number at least as large as that sequence number, yet the operation corresponding to that sequence number can be missing from both the Lucene commit and the translog. This means that upon recovery the local checkpoint will be stuck at or below this missing sequence number. To address this, we force the local checkpoint to the maximum sequence number in the Lucene commit when opening the engine. Note that there can still be gaps in the history in the translog but we do not address those here. - The global checkpoint is transferred to the target shard at the end of peer recovery. - Additionally, we reenable the relocation integration tests. Lastly, this work uncovered some bugs in the assignment of sequence numbers on replica operations: - setting the sequence number on replica write requests was missing, very likely introduced as a result of resolving merge conflicts - operations that arrive out of order on a replica and have a version conflict with a previous operation were never marked as processed Relates #22212
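To make the first point above concrete, the gap-filling step can be sketched as a hypothetical helper (the actual logic lives in the InternalEngine constructor; the `SequenceNumbersService` methods are the ones used in the diffs below):

    // advance the local checkpoint to the maximum sequence number recorded in
    // the Lucene commit, marking the possibly missing operations as completed
    // so that recovery cannot get stuck below a gap in the history
    static void fillSeqNoGaps(SequenceNumbersService seqNoService, long maxSeqNoInCommit) {
        while (seqNoService.getLocalCheckpoint() < maxSeqNoInCommit) {
            seqNoService.markSeqNoAsCompleted(seqNoService.getLocalCheckpoint() + 1);
        }
    }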
--- .../action/bulk/TransportShardBulkAction.java | 14 +- .../action/delete/TransportDeleteAction.java | 1 + .../index/engine/InternalEngine.java | 143 ++++++++---- .../index/seqno/GlobalCheckpointService.java | 8 +- .../recovery/PeerRecoveryTargetService.java | 6 +- .../RecoveryFinalizeRecoveryRequest.java | 20 +- .../recovery/RecoverySourceHandler.java | 2 +- .../indices/recovery/RecoveryTarget.java | 3 +- .../recovery/RecoveryTargetHandler.java | 12 +- .../recovery/RemoteRecoveryTargetHandler.java | 4 +- .../index/engine/InternalEngineTests.java | 209 +++++++++++++++++- .../RecoveryDuringReplicationTests.java | 5 +- .../elasticsearch/recovery/RelocationIT.java | 29 +-- .../elasticsearch/test/ESIntegTestCase.java | 4 +- 14 files changed, 368 insertions(+), 92 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index b1fe096a564..cef89e1ce78 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -50,6 +50,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineClosedException; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; @@ -150,6 +151,7 @@ public class TransportShardBulkAction extends TransportWriteAction seqNoService().getLocalCheckpoint()); assert translog.getGeneration() != null; } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); } @@ -412,7 +423,7 @@ public class InternalEngine extends Engine { @Override public GetResult get(Get get, Function searcherFactory) throws EngineException { - try (ReleasableLock lock = readLock.acquire()) { + try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); if (get.realtime()) { VersionValue versionValue = versionMap.getUnderLock(get.uid()); @@ -434,11 +445,28 @@ public class InternalEngine extends Engine { } } - private boolean checkVersionConflict( - final Operation op, - final long currentVersion, - final long expectedVersion, - final boolean deleted) { + /** + * Checks for version conflicts. If a version conflict exists, the optional return value represents the operation result. Otherwise, if + * no conflicts are found, the optional return value is not present.
+ * + * @param the result type + * @param op the operation + * @param currentVersion the current version + * @param expectedVersion the expected version + * @param deleted {@code true} if the current version is not found or represents a delete + * @param onSuccess if there is a version conflict that can be ignored, the result of the operation + * @param onFailure if there is a version conflict that can not be ignored, the result of the operation + * @return if there is a version conflict, the optional value is present and represents the operation result, otherwise the return value + * is not present + */ + private Optional checkVersionConflict( + final Operation op, + final long currentVersion, + final long expectedVersion, + final boolean deleted, + final Supplier onSuccess, + final Function onFailure) { + final T result; if (op.versionType() == VersionType.FORCE) { if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { // If index was created in 5.0 or later, 'force' is not allowed at all @@ -452,14 +480,22 @@ public class InternalEngine extends Engine { if (op.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) { if (op.origin().isRecovery()) { // version conflict, but okay - return true; + result = onSuccess.get(); } else { // fatal version conflict - throw new VersionConflictEngineException(shardId, op.type(), op.id(), + final VersionConflictEngineException e = + new VersionConflictEngineException( + shardId, + op.type(), + op.id(), op.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted)); + result = onFailure.apply(e); } + + return Optional.of(result); + } else { + return Optional.empty(); } - return false; } private long checkDeletedAndGCed(VersionValue versionValue) { @@ -475,7 +511,7 @@ public class InternalEngine extends Engine { @Override public IndexResult index(Index index) { IndexResult result; - try (ReleasableLock lock = readLock.acquire()) { + try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); if (index.origin().isRecovery()) { // Don't throttle recovery operations @@ -573,7 +609,7 @@ public class InternalEngine extends Engine { assert assertSequenceNumber(index.origin(), index.seqNo()); final Translog.Location location; final long updatedVersion; - IndexResult indexResult = null; + long seqNo = index.seqNo(); try (Releasable ignored = acquireLock(index.uid())) { lastWriteNanos = index.startTime(); /* if we have an autoGeneratedID that comes into the engine we can potentially optimize @@ -638,28 +674,33 @@ public class InternalEngine extends Engine { } } final long expectedVersion = index.version(); - if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) { - // skip index operation because of version conflict on recovery - indexResult = new IndexResult(expectedVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, false); + final Optional checkVersionConflictResult = + checkVersionConflict( + index, + currentVersion, + expectedVersion, + deleted, + () -> new IndexResult(currentVersion, index.seqNo(), false), + e -> new IndexResult(e, currentVersion, index.seqNo())); + + final IndexResult indexResult; + if (checkVersionConflictResult.isPresent()) { + indexResult = checkVersionConflictResult.get(); } else { - final long seqNo; + // no version conflict if (index.origin() == Operation.Origin.PRIMARY) { - seqNo = seqNoService.generateSeqNo(); - } else { - seqNo = index.seqNo(); + seqNo = seqNoService().generateSeqNo(); } + + /** + * 
Update the document's sequence number and primary term; the sequence number here is derived here from either the sequence + * number service if this is on the primary, or the existing document's sequence number if this is on the replica. The + * primary term here has already been set, see IndexShard#prepareIndex where the Engine$Index operation is created. + */ + index.parsedDoc().updateSeqID(seqNo, index.primaryTerm()); updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion); index.parsedDoc().version().setLongValue(updatedVersion); - // Update the document's sequence number and primary term, the - // sequence number here is derived here from either the sequence - // number service if this is on the primary, or the existing - // document's sequence number if this is on the replica. The - // primary term here has already been set, see - // IndexShard.prepareIndex where the Engine.Index operation is - // created - index.parsedDoc().updateSeqID(seqNo, index.primaryTerm()); - if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) { // document does not exists, we can optimize for create, but double check if assertions are running assert assertDocDoesNotExist(index, canOptimizeAddDocument == false); @@ -669,8 +710,8 @@ public class InternalEngine extends Engine { } indexResult = new IndexResult(updatedVersion, seqNo, deleted); location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY - ? translog.add(new Translog.Index(index, indexResult)) - : null; + ? translog.add(new Translog.Index(index, indexResult)) + : null; versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion)); indexResult.setTranslogLocation(location); } @@ -678,8 +719,8 @@ public class InternalEngine extends Engine { indexResult.freeze(); return indexResult; } finally { - if (indexResult != null && indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { - seqNoService.markSeqNoAsCompleted(indexResult.getSeqNo()); + if (seqNo != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + seqNoService().markSeqNoAsCompleted(seqNo); } } @@ -724,7 +765,7 @@ public class InternalEngine extends Engine { @Override public DeleteResult delete(Delete delete) { DeleteResult result; - try (ReleasableLock lock = readLock.acquire()) { + try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments: result = innerDelete(delete); @@ -748,7 +789,7 @@ public class InternalEngine extends Engine { final Translog.Location location; final long updatedVersion; final boolean found; - DeleteResult deleteResult = null; + long seqNo = delete.seqNo(); try (Releasable ignored = acquireLock(delete.uid())) { lastWriteNanos = delete.startTime(); final long currentVersion; @@ -764,32 +805,40 @@ public class InternalEngine extends Engine { } final long expectedVersion = delete.version(); - if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) { - // skip executing delete because of version conflict on recovery - deleteResult = new DeleteResult(expectedVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, true); + + final Optional result = + checkVersionConflict( + delete, + currentVersion, + expectedVersion, + deleted, + () -> new DeleteResult(expectedVersion, delete.seqNo(), true), + e -> new DeleteResult(e, expectedVersion, delete.seqNo())); + + final DeleteResult deleteResult; + if (result.isPresent()) { + deleteResult = result.get(); } else { - final 
long seqNo; if (delete.origin() == Operation.Origin.PRIMARY) { - seqNo = seqNoService.generateSeqNo(); - } else { - seqNo = delete.seqNo(); + seqNo = seqNoService().generateSeqNo(); } + updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion); found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue); deleteResult = new DeleteResult(updatedVersion, seqNo, found); location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY - ? translog.add(new Translog.Delete(delete, deleteResult)) - : null; + ? translog.add(new Translog.Delete(delete, deleteResult)) + : null; versionMap.putUnderLock(delete.uid().bytes(), - new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); + new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis())); deleteResult.setTranslogLocation(location); } deleteResult.setTook(System.nanoTime() - delete.startTime()); deleteResult.freeze(); return deleteResult; } finally { - if (deleteResult != null && deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { - seqNoService.markSeqNoAsCompleted(deleteResult.getSeqNo()); + if (seqNo != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + seqNoService().markSeqNoAsCompleted(seqNo); } } } diff --git a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointService.java b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointService.java index 18d15707e40..e8aa0cdeb89 100644 --- a/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointService.java +++ b/core/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointService.java @@ -149,12 +149,14 @@ public class GlobalCheckpointService extends AbstractIndexShardComponent { * updates the global checkpoint on a replica shard (after it has been updated by the primary). */ synchronized void updateCheckpointOnReplica(long globalCheckpoint) { + /* + * The global checkpoint here is a local knowledge which is updated under the mandate of the primary. It can happen that the primary + * information is lagging compared to a replica (e.g., if a replica is promoted to primary but has stale info relative to other + * replica shards). In these cases, the local knowledge of the global checkpoint could be higher than sync from the lagging primary. + */ if (this.globalCheckpoint <= globalCheckpoint) { this.globalCheckpoint = globalCheckpoint; logger.trace("global checkpoint updated from primary to [{}]", globalCheckpoint); - } else { - throw new IllegalArgumentException("global checkpoint from primary should never decrease. 
current [" + - this.globalCheckpoint + "], got [" + globalCheckpoint + "]"); } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 84f35ebca43..5f59af04b50 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -312,9 +312,9 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde @Override public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception { - try (RecoveriesCollection.RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) - { - recoveryRef.status().finalizeRecovery(); + try (RecoveriesCollection.RecoveryRef recoveryRef = + onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + recoveryRef.status().finalizeRecovery(request.globalCheckpoint()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index dca50f3f816..eaace661077 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -19,8 +19,10 @@ package org.elasticsearch.indices.recovery; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.seqno.SequenceNumbersService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.transport.TransportRequest; @@ -29,15 +31,16 @@ import java.io.IOException; public class RecoveryFinalizeRecoveryRequest extends TransportRequest { private long recoveryId; - private ShardId shardId; + private long globalCheckpoint; public RecoveryFinalizeRecoveryRequest() { } - RecoveryFinalizeRecoveryRequest(long recoveryId, ShardId shardId) { + RecoveryFinalizeRecoveryRequest(final long recoveryId, final ShardId shardId, final long globalCheckpoint) { this.recoveryId = recoveryId; this.shardId = shardId; + this.globalCheckpoint = globalCheckpoint; } public long recoveryId() { @@ -48,11 +51,20 @@ public class RecoveryFinalizeRecoveryRequest extends TransportRequest { return shardId; } + public long globalCheckpoint() { + return globalCheckpoint; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); shardId = ShardId.readShardId(in); + if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + globalCheckpoint = in.readZLong(); + } else { + globalCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO; + } } @Override @@ -60,5 +72,9 @@ public class RecoveryFinalizeRecoveryRequest extends TransportRequest { super.writeTo(out); out.writeLong(recoveryId); shardId.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) { + out.writeZLong(globalCheckpoint); + } } + } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 8b540fcb508..fa1a9a7979a 100644 --- 
a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -391,8 +391,8 @@ public class RecoverySourceHandler { StopWatch stopWatch = new StopWatch().start(); logger.trace("[{}][{}] finalizing recovery to {}", indexName, shardId, request.targetNode()); cancellableThreads.execute(() -> { - recoveryTarget.finalizeRecovery(); shard.markAllocationIdAsInSync(recoveryTarget.getTargetAllocationId()); + recoveryTarget.finalizeRecovery(shard.getGlobalCheckpoint()); }); if (request.isPrimaryRelocation()) { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1ae1d494ca6..67ee1a5ac9a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -333,7 +333,8 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget } @Override - public void finalizeRecovery() { + public void finalizeRecovery(final long globalCheckpoint) { + indexShard().updateGlobalCheckpointOnReplica(globalCheckpoint); final IndexShard indexShard = indexShard(); indexShard.finalizeRecovery(); } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java index 86afa498e62..5cbfb4a53a5 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetHandler.java @@ -39,11 +39,12 @@ public interface RecoveryTargetHandler { void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException; /** - * The finalize request clears unreferenced translog files, refreshes the engine now that - * new segments are available, and enables garbage collection of - * tombstone files. - **/ - void finalizeRecovery(); + * The finalize request refreshes the engine now that new segments are available, enables garbage collection of tombstone files, and + * updates the global checkpoint. + * + * @param globalCheckpoint the global checkpoint on the recovery source + */ + void finalizeRecovery(long globalCheckpoint); /** * Blockingly waits for cluster state with at least clusterStateVersion to be available @@ -82,4 +83,5 @@ public interface RecoveryTargetHandler { * @return the allocation id of the target shard. 
*/ String getTargetAllocationId(); + } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index ef6b7c2f901..5fa1ca22c70 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -86,9 +86,9 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { } @Override - public void finalizeRecovery() { + public void finalizeRecovery(final long globalCheckpoint) { transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FINALIZE, - new RecoveryFinalizeRecoveryRequest(recoveryId, shardId), + new RecoveryFinalizeRecoveryRequest(recoveryId, shardId, globalCheckpoint), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 340ea745aae..22746aaf2a1 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -27,7 +27,6 @@ import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; import org.apache.logging.log4j.core.filter.RegexFilter; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Field; @@ -74,6 +73,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -130,7 +130,6 @@ import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.Before; -import java.io.IOError; import java.io.IOException; import java.io.InputStream; import java.nio.charset.Charset; @@ -153,11 +152,15 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongSupplier; import java.util.function.Supplier; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; +import static org.elasticsearch.index.engine.Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; +import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -319,12 +322,27 @@ public class InternalEngineTests extends ESTestCase { } protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, Supplier 
indexWriterSupplier) throws IOException { + return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterSupplier, null); + } + + protected InternalEngine createEngine( + IndexSettings indexSettings, + Store store, + Path translogPath, + MergePolicy mergePolicy, + Supplier indexWriterSupplier, + Supplier sequenceNumbersServiceSupplier) throws IOException { EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null); InternalEngine internalEngine = new InternalEngine(config) { @Override IndexWriter createWriter(boolean create) throws IOException { return (indexWriterSupplier != null) ? indexWriterSupplier.get() : super.createWriter(create); } + + @Override + public SequenceNumbersService seqNoService() { + return (sequenceNumbersServiceSupplier != null) ? sequenceNumbersServiceSupplier.get() : super.seqNoService(); + } }; if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { internalEngine.recoverFromTranslog(); @@ -2914,6 +2932,193 @@ public class InternalEngineTests extends ESTestCase { searchResult.close(); } + public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws BrokenBarrierException, InterruptedException, IOException { + engine.close(); + final int docs = randomIntBetween(1, 32); + InternalEngine initialEngine = null; + try { + final CountDownLatch latch = new CountDownLatch(1); + final CyclicBarrier barrier = new CyclicBarrier(2); + final AtomicBoolean skip = new AtomicBoolean(); + final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); + final List threads = new ArrayList<>(); + final SequenceNumbersService seqNoService = + new SequenceNumbersService( + shardId, + defaultSettings, + SequenceNumbersService.NO_OPS_PERFORMED, + SequenceNumbersService.NO_OPS_PERFORMED, + SequenceNumbersService.UNASSIGNED_SEQ_NO) { + @Override + public long generateSeqNo() { + final long seqNo = super.generateSeqNo(); + if (skip.get()) { + try { + barrier.await(); + latch.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new RuntimeException(e); + } + } else { + if (expectedLocalCheckpoint.get() + 1 == seqNo) { + expectedLocalCheckpoint.set(seqNo); + } + } + return seqNo; + } + }; + initialEngine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, () -> seqNoService); + final InternalEngine finalInitialEngine = initialEngine; + for (int i = 0; i < docs; i++) { + final String id = Integer.toString(i); + final Term uid = newUid(id); + final ParsedDocument doc = testParsedDocument(id, id, "test", null, testDocumentWithTextField(), SOURCE, null); + + skip.set(randomBoolean()); + final Thread thread = new Thread(() -> finalInitialEngine.index(new Engine.Index(uid, doc))); + thread.start(); + if (skip.get()) { + threads.add(thread); + barrier.await(); + } else { + thread.join(); + } + } + + assertThat(initialEngine.seqNoService().getLocalCheckpoint(), equalTo(expectedLocalCheckpoint.get())); + assertThat(initialEngine.seqNoService().getMaxSeqNo(), equalTo((long) (docs - 1))); + initialEngine.flush(true, true); + + latch.countDown(); + for (final Thread thread : threads) { + thread.join(); + } + } finally { + IOUtils.close(initialEngine); + } + + try (final Engine recoveringEngine = + new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { + assertThat(recoveringEngine.seqNoService().getLocalCheckpoint(), greaterThanOrEqualTo((long) 
(docs - 1))); + } + } + + public void testSequenceNumberAdvancesToMaxSeqNoOnEngineOpenOnReplica() throws IOException { + final long v = Versions.MATCH_ANY; + final VersionType t = VersionType.INTERNAL; + final long ts = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; + final int docs = randomIntBetween(1, 32); + InternalEngine initialEngine = null; + try { + initialEngine = engine; + for (int i = 0; i < docs; i++) { + final String id = Integer.toString(i); + final Term uid = newUid(id); + final ParsedDocument doc = testParsedDocument(id, id, "test", null, testDocumentWithTextField(), SOURCE, null); + // create a gap at sequence number 3 * i + 1 + initialEngine.index(new Engine.Index(uid, doc, 3 * i, 1, v, t, REPLICA, System.nanoTime(), ts, false)); + initialEngine.delete(new Engine.Delete("type", id, uid, 3 * i + 2, 1, v, t, REPLICA, System.nanoTime())); + } + + // bake the commit with the local checkpoint stuck at 0 and gaps all along the way up to the max sequence number + assertThat(initialEngine.seqNoService().getLocalCheckpoint(), equalTo((long) 0)); + assertThat(initialEngine.seqNoService().getMaxSeqNo(), equalTo((long) (3 * (docs - 1) + 2))); + initialEngine.flush(true, true); + + for (int i = 0; i < docs; i++) { + final String id = Integer.toString(i); + final Term uid = newUid(id); + final ParsedDocument doc = testParsedDocument(id, id, "test", null, testDocumentWithTextField(), SOURCE, null); + initialEngine.index(new Engine.Index(uid, doc, 3 * i + 1, 1, v, t, REPLICA, System.nanoTime(), ts, false)); + } + } finally { + IOUtils.close(initialEngine); + } + + try (final Engine recoveringEngine = + new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG))) { + assertThat(recoveringEngine.seqNoService().getLocalCheckpoint(), greaterThanOrEqualTo((long) (3 * (docs - 1) + 2 - 1))); + } + } + + public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOException { + final List operations = new ArrayList<>(); + + final int numberOfOperations = randomIntBetween(16, 32); + final Term uid = newUid("1"); + final Document document = testDocumentWithTextField(); + final AtomicLong sequenceNumber = new AtomicLong(); + final Engine.Operation.Origin origin = randomFrom(LOCAL_TRANSLOG_RECOVERY, PEER_RECOVERY, PRIMARY, REPLICA); + final LongSupplier sequenceNumberSupplier = + origin == PRIMARY ? 
() -> SequenceNumbersService.UNASSIGNED_SEQ_NO : sequenceNumber::getAndIncrement; + document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); + final ParsedDocument doc = testParsedDocument("1", "1", "test", null, document, B_1, null); + for (int i = 0; i < numberOfOperations; i++) { + if (randomBoolean()) { + final Engine.Index index = new Engine.Index( + uid, + doc, + sequenceNumberSupplier.getAsLong(), + 1, + i, + VersionType.EXTERNAL, + origin, + System.nanoTime(), + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, + false); + operations.add(index); + } else { + final Engine.Delete delete = new Engine.Delete( + "test", + "1", + uid, + sequenceNumberSupplier.getAsLong(), + 1, + i, + VersionType.EXTERNAL, + origin, + System.nanoTime()); + operations.add(delete); + } + } + + final boolean exists = operations.get(operations.size() - 1) instanceof Engine.Index; + Randomness.shuffle(operations); + + for (final Engine.Operation operation : operations) { + if (operation instanceof Engine.Index) { + engine.index((Engine.Index) operation); + } else { + engine.delete((Engine.Delete) operation); + } + } + + final long expectedLocalCheckpoint; + if (origin == PRIMARY) { + // we can only advance as far as the number of operations that did not conflict + int count = 0; + + // each time the version increments as we walk the list, that counts as a successful operation + long version = -1; + for (int i = 0; i < numberOfOperations; i++) { + if (operations.get(i).version() >= version) { + count++; + version = operations.get(i).version(); + } + } + + // sequence numbers start at zero, so the expected local checkpoint is the number of successful operations minus one + expectedLocalCheckpoint = count - 1; + } else { + expectedLocalCheckpoint = numberOfOperations - 1; + } + + assertThat(engine.seqNoService().getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); + try (final Engine.GetResult result = engine.get(new Engine.Get(true, uid))) { + assertThat(result.exists(), equalTo(exists)); + } + } + /** * Return a tuple representing the sequence ID for the given {@code Get} * operation. 
The first value in the tuple is the sequence number, the diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index bafe6350a51..93b20633cf1 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -109,13 +109,14 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC } @Override - public void finalizeRecovery() { + public void finalizeRecovery(long globalCheckpoint) { if (hasBlocked() == false) { // it maybe that not ops have been transferred, block now blockIfNeeded(RecoveryState.Stage.TRANSLOG); } blockIfNeeded(RecoveryState.Stage.FINALIZE); - super.finalizeRecovery(); + super.finalizeRecovery(globalCheckpoint); } + } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index f4de4bdea0b..bbd1ad56bd0 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -23,14 +23,12 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.procedures.IntProcedure; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.util.English; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; @@ -64,7 +62,6 @@ import org.elasticsearch.test.MockIndexEventListener; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -98,8 +95,6 @@ import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) @TestLogging("_root:DEBUG,org.elasticsearch.indices.recovery:TRACE,org.elasticsearch.index.shard.service:TRACE") -@LuceneTestCase.AwaitsFix(bugUrl = "primary relocation needs to transfer the global check point. 
otherwise the new primary sends a " + - "an unknown global checkpoint during sync, causing assertions to trigger") public class RelocationIT extends ESIntegTestCase { private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES); @@ -183,15 +178,13 @@ public class RelocationIT extends ESIntegTestCase { clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet(); - assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count again..."); client().admin().indices().prepareRefresh().execute().actionGet(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().totalHits(), equalTo(20L)); } - @TestLogging("action.index:TRACE,action.bulk:TRACE,action.search:TRACE") + @TestLogging("org.elasticsearch.action.index:TRACE,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.search:TRACE") public void testRelocationWhileIndexingRandom() throws Exception { int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4); int numberOfReplicas = randomBoolean() ? 0 : 1; @@ -210,12 +203,12 @@ public class RelocationIT extends ESIntegTestCase { ).get(); - for (int i = 1; i < numberOfNodes; i++) { - logger.info("--> starting [node{}] ...", i + 1); - nodes[i] = internalCluster().startNode(); - if (i != numberOfNodes - 1) { + for (int i = 2; i <= numberOfNodes; i++) { + logger.info("--> starting [node{}] ...", i); + nodes[i - 1] = internalCluster().startNode(); + if (i != numberOfNodes) { ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) - .setWaitForNodes(Integer.toString(i + 1)).setWaitForGreenStatus().execute().actionGet(); + .setWaitForNodes(Integer.toString(i)).setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } } @@ -246,8 +239,6 @@ public class RelocationIT extends ESIntegTestCase { } ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet(); - assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); indexer.pauseIndexing(); logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode); } @@ -261,7 +252,6 @@ public class RelocationIT extends ESIntegTestCase { logger.info("--> searching the index"); boolean ranOnce = false; for (int i = 0; i < 10; i++) { - try { logger.info("--> START search test round {}", i + 1); SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexer.totalIndexedDocs()).storedFields().execute().actionGet().getHits(); ranOnce = true; @@ -283,10 +273,7 @@ public class RelocationIT extends ESIntegTestCase { } assertThat(hits.totalHits(), equalTo(indexer.totalIndexedDocs())); logger.info("--> 
DONE search test round {}", i + 1); - } catch (SearchPhaseExecutionException ex) { - // TODO: the first run fails with this failure, waiting for relocating nodes set to 0 is not enough? - logger.warn("Got exception while searching.", ex); - } + } if (!ranOnce) { fail(); @@ -294,6 +281,7 @@ public class RelocationIT extends ESIntegTestCase { } } + @TestLogging("org.elasticsearch.action.index:TRACE,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.search:TRACE") public void testRelocationWhileRefreshing() throws Exception { int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4); int numberOfReplicas = randomBoolean() ? 0 : 1; @@ -464,6 +452,7 @@ public class RelocationIT extends ESIntegTestCase { } } + @TestLogging("org.elasticsearch.action.index:TRACE,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.search:TRACE") public void testIndexAndRelocateConcurrently() throws ExecutionException, InterruptedException { int halfNodes = randomIntBetween(1, 3); Settings[] nodeSettings = Stream.concat( diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 7ff288f3a6b..cb31ac6028b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1323,7 +1323,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either - * indexes they in a blocking or async fashion. This is very useful to catch problems that relate to internal document + * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document * ids or index segment creations. Some features might have bug when a given document is the first or the last in a * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. @@ -1339,7 +1339,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either - * indexes they in a blocking or async fashion. This is very useful to catch problems that relate to internal document + * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document * ids or index segment creations. Some features might have bug when a given document is the first or the last in a * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. 
From b78f7bc51d07bc73b8d2c41f45595b61bff9edcf Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Sun, 18 Dec 2016 08:05:59 +0100 Subject: [PATCH 32/41] InternalEngine should use global checkpoint when committing the translog relates to #22212 --- .../org/elasticsearch/index/engine/InternalEngine.java | 2 +- .../index/engine/InternalEngineTests.java | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 905b08f2bd3..2957b9ab064 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -188,7 +188,7 @@ public class InternalEngine extends Engine { seqNoService().markSeqNoAsCompleted(seqNoService().getLocalCheckpoint() + 1); } indexWriter = writer; - translog = openTranslog(engineConfig, writer, () -> seqNoService().getLocalCheckpoint()); + translog = openTranslog(engineConfig, writer, () -> seqNoService().getGlobalCheckpoint()); assert translog.getGeneration() != null; } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 22746aaf2a1..523efb6829c 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -154,13 +154,12 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongSupplier; import java.util.function.Supplier; -import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static org.elasticsearch.index.engine.Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY; +import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; -import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -1694,8 +1693,10 @@ public class InternalEngineTests extends ESTestCase { } } - replicaLocalCheckpoint = - rarely() ? 
replicaLocalCheckpoint : randomIntBetween(Math.toIntExact(replicaLocalCheckpoint), Math.toIntExact(primarySeqNo)); + if (randomInt(10) < 3) { + // only update rarely as we do it every doc + replicaLocalCheckpoint = randomIntBetween(Math.toIntExact(replicaLocalCheckpoint), Math.toIntExact(primarySeqNo)); + } initialEngine.seqNoService().updateLocalCheckpointForShard("primary", initialEngine.seqNoService().getLocalCheckpoint()); initialEngine.seqNoService().updateLocalCheckpointForShard("replica", replicaLocalCheckpoint); @@ -1707,6 +1708,7 @@ public class InternalEngineTests extends ESTestCase { } } + logger.info("localcheckpoint {}, global {}", replicaLocalCheckpoint, primarySeqNo); initialEngine.seqNoService().updateGlobalCheckpointOnPrimary(); globalCheckpoint = initialEngine.seqNoService().getGlobalCheckpoint(); From ccfeac8dd5bed28f047b1ce9caf26ab3ae96cc28 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sun, 18 Dec 2016 09:26:53 +0100 Subject: [PATCH 33/41] Remove `doHandshake` test-only settings from TcpTransport (#22241) In #22094 we introduce a test-only setting to simulate transport impls that don't support handshakes. This commit implements the same logic without a setting. --- .../elasticsearch/transport/TcpTransport.java | 38 +++++----------- .../netty4/SimpleNetty4TransportTests.java | 22 +++++++-- .../AbstractSimpleTransportTestCase.java | 45 +++++++++++++------ .../transport/MockTcpTransportTests.java | 17 ++++++- 4 files changed, 77 insertions(+), 45 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index cd23fc9b074..2e8cb4f65ce 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -148,9 +148,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i Setting.byteSizeSetting("transport.tcp.receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, Setting.Property.NodeScope); - // test-setting only - static final Setting CONNECTION_HANDSHAKE = Setting.boolSetting("transport.tcp.handshake", true); - private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); private static final int PING_DATA_SIZE = -1; protected final boolean blockingClient; @@ -161,7 +158,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i protected final ThreadPool threadPool; private final BigArrays bigArrays; protected final NetworkService networkService; - private final boolean doHandshakes; protected volatile TransportServiceAdapter transportServiceAdapter; // node id to actual channel @@ -200,7 +196,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i this.transportName = transportName; this.blockingClient = TCP_BLOCKING_CLIENT.get(settings); defaultConnectionProfile = buildDefaultConnectionProfile(settings); - this.doHandshakes = CONNECTION_HANDSHAKE.get(settings); } static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { @@ -463,21 +458,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i "failed to connect to [{}], cleaning dangling connections", node), e); throw e; } - if (doHandshakes) { // some tests need to disable this - Channel channel = nodeChannels.channel(TransportRequestOptions.Type.PING); - final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ? 
- defaultConnectionProfile.getConnectTimeout(): - connectionProfile.getConnectTimeout(); - final TimeValue handshakeTimeout = connectionProfile.getHandshakeTimeout() == null ? - connectTimeout : connectionProfile.getHandshakeTimeout(); - Version version = executeHandshake(node, channel, handshakeTimeout); - if (version != null) { - // this is a BWC layer, if we talk to a pre 5.2 node then the handshake is not supported - // this will go away in master once it's all ported to 5.2 but for now we keep this to make - // the backport straight forward - nodeChannels = new NodeChannels(nodeChannels, version); - } - } + Channel channel = nodeChannels.channel(TransportRequestOptions.Type.PING); + final TimeValue connectTimeout = connectionProfile.getConnectTimeout() == null ? + defaultConnectionProfile.getConnectTimeout() : + connectionProfile.getConnectTimeout(); + final TimeValue handshakeTimeout = connectionProfile.getHandshakeTimeout() == null ? + connectTimeout : connectionProfile.getHandshakeTimeout(); + Version version = executeHandshake(node, channel, handshakeTimeout); // we acquire a connection lock, so no way there is an existing connection connectedNodes.put(node, nodeChannels); if (logger.isDebugEnabled()) { @@ -1130,7 +1117,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i * @param length the payload length in bytes * @see TcpHeader */ - private BytesReference buildHeader(long requestId, byte status, Version protocolVersion, int length) throws IOException { + final BytesReference buildHeader(long requestId, byte status, Version protocolVersion, int length) throws IOException { try (BytesStreamOutput headerOutput = new BytesStreamOutput(TcpHeader.HEADER_SIZE)) { headerOutput.setVersion(protocolVersion); TcpHeader.writeHeader(headerOutput, requestId, status, protocolVersion, length); @@ -1306,7 +1293,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, status); } else { final TransportResponseHandler handler; - if (TransportStatus.isHandshake(status) && doHandshakes) { + if (TransportStatus.isHandshake(status)) { handler = pendingHandshakes.remove(requestId); } else { TransportResponseHandler theHandler = transportServiceAdapter.onResponseReceived(requestId); @@ -1398,7 +1385,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i transportServiceAdapter.onRequestReceived(requestId, action); TransportChannel transportChannel = null; try { - if (TransportStatus.isHandshake(status) && doHandshakes) { + if (TransportStatus.isHandshake(status)) { final VersionHandshakeResponse response = new VersionHandshakeResponse(getCurrentVersion()); sendResponse(version, channel, response, requestId, HANDSHAKE_ACTION_NAME, TransportResponseOptions.EMPTY, TransportStatus.setHandshake((byte)0)); @@ -1509,8 +1496,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } } - // pkg private for testing - final Version executeHandshake(DiscoveryNode node, Channel channel, TimeValue timeout) throws IOException, InterruptedException { + protected Version executeHandshake(DiscoveryNode node, Channel channel, TimeValue timeout) throws IOException, InterruptedException { numHandshakes.inc(); final long requestId = newRequestId(); final HandshakeResponseHandler handler = new HandshakeResponseHandler(channel); @@ -1520,7 +1506,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i boolean success = 
false; try { if (isOpen(channel) == false) { - // we have to protect ourself here since sendRequestToChannel won't barf if the channel is closed. + // we have to protect ourselves here since sendRequestToChannel won't barf if the channel is closed. // it's weird but to change it will cause a lot of impact on the exception handling code all over the codebase. // yet, if we don't check the state here we might have registered a pending handshake handler but the close // listener calling #onChannelClosed might have already run and we are waiting on the latch below unitl we time out. diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index a7a674007ba..3875cea31ac 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.netty4; +import io.netty.channel.Channel; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -26,6 +27,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; @@ -38,6 +40,7 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; +import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Collections; @@ -49,10 +52,21 @@ import static org.hamcrest.Matchers.containsString; public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase { public static MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, - ClusterSettings clusterSettings) { + ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Transport transport = new Netty4Transport(settings, threadPool, new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { + + @Override + protected Version executeHandshake(DiscoveryNode node, Channel channel, TimeValue timeout) throws IOException, + InterruptedException { + if (doHandshake) { + return super.executeHandshake(node, channel, timeout); + } else { + return version.minimumCompatibilityVersion(); + } + } + @Override protected Version getCurrentVersion() { return version; @@ -63,9 +77,9 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase } @Override - protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings) { + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), "0").build(); -
MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings); + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); transportService.start(); return transportService; } @@ -92,7 +106,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase .build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { - MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings); + MockTransportService transportService = nettyFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true); try { transportService.start(); } finally { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 283ae288d29..540420f35e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -29,15 +29,21 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -48,13 +54,15 @@ import org.junit.After; import org.junit.Before; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; -import java.nio.channels.ClosedChannelException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -70,7 +78,6 @@ import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -94,7 +101,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { protected volatile DiscoveryNode 
nodeB; protected volatile MockTransportService serviceB; - protected abstract MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings); + protected abstract MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake); @Override @Before @@ -149,7 +156,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } private MockTransportService buildService(final String name, final Version version, ClusterSettings clusterSettings, - Settings settings, boolean acceptRequests) { + Settings settings, boolean acceptRequests, boolean doHandshake) { MockTransportService service = build( Settings.builder() .put(settings) @@ -158,7 +165,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .build(), version, - clusterSettings); + clusterSettings, doHandshake); if (acceptRequests) { service.acceptIncomingRequests(); } @@ -166,7 +173,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } private MockTransportService buildService(final String name, final Version version, ClusterSettings clusterSettings) { - return buildService(name, version, clusterSettings, Settings.EMPTY, true); + return buildService(name, version, clusterSettings, Settings.EMPTY, true, true); } @Override @@ -1463,7 +1470,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testBlockingIncomingRequests() throws Exception { try (TransportService service = buildService("TS_TEST", version0, null, - Settings.builder().put(TcpTransport.CONNECTION_HANDSHAKE.getKey(), false).build(), false)) { + Settings.EMPTY, false, false)) { AtomicBoolean requestProcessed = new AtomicBoolean(false); service.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME, (request, channel) -> { @@ -1475,7 +1482,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { new DiscoveryNode("TS_TEST", "TS_TEST", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); serviceA.close(); serviceA = buildService("TS_A", version0, null, - Settings.builder().put(TcpTransport.CONNECTION_HANDSHAKE.getKey(), false).build(), true); + Settings.EMPTY, true, false); serviceA.connectToNode(node); CountDownLatch latch = new CountDownLatch(1); @@ -1583,7 +1590,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .build(), version0, - null); + null, true); DiscoveryNode nodeC = new DiscoveryNode("TS_C", "TS_C", serviceC.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); serviceC.acceptIncomingRequests(); @@ -1786,7 +1793,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { TransportRequestOptions.Type.STATE); // connection with one connection and a large timeout -- should consume the one spot in the backlog queue try (TransportService service = buildService("TS_TPC", Version.CURRENT, null, - Settings.builder().put(TcpTransport.CONNECTION_HANDSHAKE.getKey(), false).build(), true)) { + Settings.EMPTY, true, false)) { service.connectToNode(first, builder.build()); builder.setConnectTimeout(TimeValue.timeValueMillis(1)); final ConnectionProfile profile = builder.build(); @@ -1805,11 +1812,23 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { public void testTcpHandshake() throws IOException, 
InterruptedException { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); TcpTransport originalTransport = (TcpTransport) serviceA.getOriginalTransport(); - try (TransportService service = buildService("TS_TPC", Version.CURRENT, null, - Settings.builder().put(TcpTransport.CONNECTION_HANDSHAKE.getKey(), false).build(), true)) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + + try (MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Settings.EMPTY, Collections.emptyList())){ + @Override + protected String handleRequest(MockChannel mockChannel, String profileName, StreamInput stream, long requestId, + int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) + throws IOException { + return super.handleRequest(mockChannel, profileName, stream, requestId, messageLengthBytes, version, remoteAddress, + (byte)(status & ~(1<<3))); // we flip the isHandshake bit back and act like the handler is not found + } + }) { + transport.transportServiceAdapter(serviceA.new Adapter()); + transport.start(); // this acts like a node that doesn't have support for handshakes DiscoveryNode node = - new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); + new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); ConnectTransportException exception = expectThrows(ConnectTransportException.class, () -> serviceA.connectToNode(node)); assertTrue(exception.getCause() instanceof IllegalStateException); assertEquals("handshake failed", exception.getCause().getMessage()); diff --git a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java index 7ed12d249a5..2cc84c4c0cd 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/MockTcpTransportTests.java @@ -19,22 +19,35 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.transport.MockTransportService; +import java.io.IOException; import java.util.Collections; public class MockTcpTransportTests extends AbstractSimpleTransportTestCase { @Override - protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings) { + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); Transport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(settings,
Collections.emptyList()), version); + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(settings, Collections.emptyList()), version) { + @Override + protected Version executeHandshake(DiscoveryNode node, MockChannel mockChannel, TimeValue timeout) throws IOException, + InterruptedException { + if (doHandshake) { + return super.executeHandshake(node, mockChannel, timeout); + } else { + return version.minimumCompatibilityVersion(); + } + } + }; MockTransportService mockTransportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, clusterSettings); mockTransportService.start(); From 6327e35414c92fe911701b2d7163a330b7370e33 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 19 Dec 2016 09:10:58 +0100 Subject: [PATCH 34/41] Change type of ingest doc meta-data field 'TIMESTAMP' to `Date` (#22234) With this commit we change the data type of the 'TIMESTAMP' meta-data field from a formatted date string to a plain `java.util.Date` instance. The main reason for this change is that our benchmarks have indicated that this contributes significantly to the time spent in the ingest pipeline. The overhead in terms of indexing throughput of the ingest pipeline is about 15% and breaks down roughly as follows: * 5% overhead caused by the conversion from `XContent` -> `Map` * 5% overhead caused by the timestamp formatting * 5% overhead caused by the conversion `Map` -> `XContent` Relates #22074 --- .../elasticsearch/ingest/IngestDocument.java | 10 +++------- .../ingest/IngestDocumentTests.java | 20 +++++++------------ docs/reference/migration/migrate_6_0.asciidoc | 3 +++ .../migration/migrate_6_0/ingest.asciidoc | 6 ++++++ 4 files changed, 19 insertions(+), 20 deletions(-) create mode 100644 docs/reference/migration/migrate_6_0/ingest.asciidoc diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java index edb92b6e837..eaae1a3e881 100644 --- a/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/core/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -27,18 +27,14 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; -import java.text.DateFormat; -import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Date; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.TimeZone; /** * Represents a single document being captured before indexing and holds the source and metadata (like id, type and index). 
@@ -68,9 +64,7 @@ public final class IngestDocument { } this.ingestMetadata = new HashMap<>(); - DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZZ", Locale.ROOT); - df.setTimeZone(TimeZone.getTimeZone("UTC")); - this.ingestMetadata.put(TIMESTAMP, df.format(new Date())); + this.ingestMetadata.put(TIMESTAMP, new Date()); } /** @@ -595,6 +589,8 @@ public final class IngestDocument { value instanceof Long || value instanceof Float || value instanceof Double || value instanceof Boolean) { return value; + } else if (value instanceof Date) { + return ((Date) value).clone(); } else { throw new IllegalArgumentException("unexpected value type [" + value.getClass() + "]"); } diff --git a/core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java b/core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java index e16be95d2e6..1bd5676f474 100644 --- a/core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java @@ -22,21 +22,17 @@ package org.elasticsearch.ingest; import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.text.DateFormat; -import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -48,13 +44,14 @@ import static org.hamcrest.Matchers.sameInstance; public class IngestDocumentTests extends ESTestCase { + private static final Date BOGUS_TIMESTAMP = new Date(0L); private IngestDocument ingestDocument; @Before public void setIngestDocument() { Map document = new HashMap<>(); Map ingestMap = new HashMap<>(); - ingestMap.put("timestamp", "bogus_timestamp"); + ingestMap.put("timestamp", BOGUS_TIMESTAMP); document.put("_ingest", ingestMap); document.put("foo", "bar"); document.put("int", 123); @@ -86,9 +83,9 @@ public class IngestDocumentTests extends ESTestCase { assertThat(ingestDocument.getFieldValue("_index", String.class), equalTo("index")); assertThat(ingestDocument.getFieldValue("_type", String.class), equalTo("type")); assertThat(ingestDocument.getFieldValue("_id", String.class), equalTo("id")); - assertThat(ingestDocument.getFieldValue("_ingest.timestamp", String.class), - both(notNullValue()).and(not(equalTo("bogus_timestamp")))); - assertThat(ingestDocument.getFieldValue("_source._ingest.timestamp", String.class), equalTo("bogus_timestamp")); + assertThat(ingestDocument.getFieldValue("_ingest.timestamp", Date.class), + both(notNullValue()).and(not(equalTo(BOGUS_TIMESTAMP)))); + assertThat(ingestDocument.getFieldValue("_source._ingest.timestamp", Date.class), equalTo(BOGUS_TIMESTAMP)); } public void testGetSourceObject() { @@ -972,11 +969,8 @@ public class IngestDocumentTests extends ESTestCase { long before = System.currentTimeMillis(); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); long after = System.currentTimeMillis(); - String timestampString = (String) ingestDocument.getIngestMetadata().get("timestamp"); - assertThat(timestampString, notNullValue()); - 
assertThat(timestampString, endsWith("+0000")); - DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZZ", Locale.ROOT); - Date timestamp = df.parse(timestampString); + Date timestamp = (Date) ingestDocument.getIngestMetadata().get(IngestDocument.TIMESTAMP); + assertThat(timestamp, notNullValue()); assertThat(timestamp.getTime(), greaterThanOrEqualTo(before)); assertThat(timestamp.getTime(), lessThanOrEqualTo(after)); } diff --git a/docs/reference/migration/migrate_6_0.asciidoc b/docs/reference/migration/migrate_6_0.asciidoc index f1712d29cf9..abc476a7d1b 100644 --- a/docs/reference/migration/migrate_6_0.asciidoc +++ b/docs/reference/migration/migrate_6_0.asciidoc @@ -35,6 +35,7 @@ way to reindex old indices is to use the `reindex` API. * <> * <> * <> +* <> include::migrate_6_0/cat.asciidoc[] @@ -57,3 +58,5 @@ include::migrate_6_0/plugins.asciidoc[] include::migrate_6_0/indices.asciidoc[] include::migrate_6_0/scripting.asciidoc[] + +include::migrate_6_0/ingest.asciidoc[] diff --git a/docs/reference/migration/migrate_6_0/ingest.asciidoc b/docs/reference/migration/migrate_6_0/ingest.asciidoc new file mode 100644 index 00000000000..db2caabe43a --- /dev/null +++ b/docs/reference/migration/migrate_6_0/ingest.asciidoc @@ -0,0 +1,6 @@ +[[breaking_60_ingest_changes]] +=== Ingest changes + +==== Timestamp meta-data field type has changed + +The type of the "timestamp" meta-data field has changed from `java.lang.String` to `java.util.Date`. \ No newline at end of file From 3ce7b119d28e7d1be7284bd804606a65c2d1a383 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 19 Dec 2016 09:29:47 +0100 Subject: [PATCH 35/41] Enable strict duplicate checks for all XContent types (#22225) With this commit we enable the Jackson feature 'STRICT_DUPLICATE_DETECTION' by default for all XContent types (not only JSON). We have also changed the name of the system property to disable this feature from `es.json.strict_duplicate_detection` to the now more appropriate name `es.xcontent.strict_duplicate_detection`. 
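To see the effect of the change in isolation, a standalone sketch (not part of the patch) that exercises the same Jackson STRICT_DUPLICATE_DETECTION feature against the YAML dataformat, one of the newly covered content types; it assumes only jackson-core and jackson-dataformat-yaml on the classpath:

    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;

    public class DuplicateKeyDemo {
        public static void main(String[] args) throws Exception {
            YAMLFactory factory = new YAMLFactory();
            // the same feature the patch now enables for CBOR, JSON, Smile and YAML
            factory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
            try (JsonParser parser = factory.createParser("foo: bar\nfoo: baz")) {
                while (parser.nextToken() != null) {
                    // throws JsonParseException ("Duplicate field 'foo'")
                    // when the second "foo" key is encountered
                }
            }
        }
    }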
Relates elastic/elasticsearch#19614 Relates elastic/elasticsearch#22073 --- .../common/xcontent/XContent.java | 22 +++++++++++++++++ .../common/xcontent/cbor/CborXContent.java | 2 ++ .../common/xcontent/json/JsonXContent.java | 24 +------------------ .../common/xcontent/smile/SmileXContent.java | 2 ++ .../common/xcontent/yaml/YamlXContent.java | 2 ++ .../loader/JsonSettingsLoaderTests.java | 6 ++--- .../loader/YamlSettingsLoaderTests.java | 4 ++++ .../common/xcontent/BaseXContentTestCase.java | 18 ++++++++++++++ .../ConstructingObjectParserTests.java | 4 ++-- .../xcontent/json/JsonXContentTests.java | 10 -------- .../index/query/BoolQueryBuilderTests.java | 5 ++-- .../query/ConstantScoreQueryBuilderTests.java | 5 ++-- .../FunctionScoreQueryBuilderTests.java | 5 ++-- .../aggregations/AggregatorParsingTests.java | 9 +++---- .../phrase/DirectCandidateGeneratorTests.java | 3 ++- .../migration/migrate_6_0/rest.asciidoc | 6 ++--- ...tClientYamlTestFragmentParserTestCase.java | 9 ++++--- .../yaml/parser/DoSectionParserTests.java | 7 ++++++ ...entYamlSuiteRestApiParserFailingTests.java | 9 +++---- 19 files changed, 93 insertions(+), 59 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java index c73f5f19d25..72210f09d9b 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContent.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.bytes.BytesReference; import java.io.IOException; @@ -33,6 +34,27 @@ import java.util.Set; */ public interface XContent { + /* + * NOTE: This comment is only meant for maintainers of the Elasticsearch code base and is intentionally not a Javadoc comment as it + * describes an undocumented system property. + * + * + * Determines whether the XContent parser will always check for duplicate keys. This behavior is enabled by default but + * can be disabled by setting the otherwise undocumented system property "es.xcontent.strict_duplicate_detection" to "false". + * + * Before we enabled this mode, we had custom duplicate checks in various parts of the code base. As the user can still disable this + * mode and fall back to the legacy duplicate checks, we still need to keep the custom duplicate checks around and we also need to keep + * the tests around. + * + * If this fallback via system property is removed one day in the future you can remove all tests that call this method and also remove + * the corresponding custom duplicate check code. + * + */ + static boolean isStrictDuplicateDetectionEnabled() { + // Don't allow duplicate keys in JSON content by default but let the user opt out + return Booleans.parseBooleanExact(System.getProperty("es.xcontent.strict_duplicate_detection", "true")); + } + /** * The type this content handles and produces.
*/ diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java index 4224b5328a7..d79173cfc2b 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.xcontent.cbor; import com.fasterxml.jackson.core.JsonEncoding; import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.cbor.CBORFactory; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; @@ -54,6 +55,7 @@ public class CborXContent implements XContent { cborFactory.configure(CBORFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.cbor.CBORGenerator#close() method cborFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); + cborFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, XContent.isStrictDuplicateDetectionEnabled()); cborXContent = new CborXContent(); } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java index 4657a554099..1b0b351e6ef 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java @@ -23,7 +23,6 @@ import com.fasterxml.jackson.core.JsonEncoding; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.xcontent.XContent; @@ -50,27 +49,6 @@ public class JsonXContent implements XContent { public static final JsonXContent jsonXContent; - /* - * NOTE: This comment is only meant for maintainers of the Elasticsearch code base and is intentionally not a Javadoc comment as it - * describes an undocumented system property. - * - * - * Determines whether the JSON parser will always check for duplicate keys in JSON content. This behavior is enabled by default but - * can be disabled by setting the otherwise undocumented system property "es.json.strict_duplicate_detection" to "false". - * - * Before we've enabled this mode, we had custom duplicate checks in various parts of the code base. As the user can still disable this - * mode and fall back to the legacy duplicate checks, we still need to keep the custom duplicate checks around and we also need to keep - * the tests around. - * - * If this fallback via system property is removed one day in the future you can remove all tests that call this method and also remove - * the corresponding custom duplicate check code. 
- * - */ - public static boolean isStrictDuplicateDetectionEnabled() { - // Don't allow duplicate keys in JSON content by default but let the user opt out - return Booleans.parseBooleanExact(System.getProperty("es.json.strict_duplicate_detection", "true")); - } - static { jsonFactory = new JsonFactory(); jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true); @@ -78,7 +56,7 @@ public class JsonXContent implements XContent { jsonFactory.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); - jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, isStrictDuplicateDetectionEnabled()); + jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, XContent.isStrictDuplicateDetectionEnabled()); jsonXContent = new JsonXContent(); } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java index 94ac9b94356..643326cd82f 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.xcontent.smile; import com.fasterxml.jackson.core.JsonEncoding; import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.smile.SmileFactory; import com.fasterxml.jackson.dataformat.smile.SmileGenerator; import org.elasticsearch.common.bytes.BytesReference; @@ -55,6 +56,7 @@ public class SmileXContent implements XContent { smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... 
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); + smileFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, XContent.isStrictDuplicateDetectionEnabled()); smileXContent = new SmileXContent(); } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java index 54da03118d7..7413f05f583 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent.yaml; import com.fasterxml.jackson.core.JsonEncoding; +import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; @@ -50,6 +51,7 @@ public class YamlXContent implements XContent { static { yamlFactory = new YAMLFactory(); + yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, XContent.isStrictDuplicateDetectionEnabled()); yamlXContent = new YamlXContent(); } diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java index 70439ef8bf9..d917c0d12c0 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java @@ -22,7 +22,7 @@ package org.elasticsearch.common.settings.loader; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.CoreMatchers.containsString; @@ -49,8 +49,8 @@ public class JsonSettingsLoaderTests extends ESTestCase { } public void testDuplicateKeysThrowsException() { - assumeFalse("Test only makes sense if JSON parser doesn't have strict duplicate checks enabled", - JsonXContent.isStrictDuplicateDetectionEnabled()); + assumeFalse("Test only makes sense if XContent parser doesn't have strict duplicate checks enabled", + XContent.isStrictDuplicateDetectionEnabled()); final String json = "{\"foo\":\"bar\",\"foo\":\"baz\"}"; final SettingsException e = expectThrows(SettingsException.class, () -> Settings.builder().loadFromSource(json).build()); assertEquals(e.getCause().getClass(), ElasticsearchParseException.class); diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java index 7c956de8f9a..eeb2df7e1d6 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.settings.loader; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; +import 
org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.test.ESTestCase; import java.nio.charset.StandardCharsets; @@ -68,6 +69,9 @@ public class YamlSettingsLoaderTests extends ESTestCase { } public void testDuplicateKeysThrowsException() { + assumeFalse("Test only makes sense if XContent parser doesn't have strict duplicate checks enabled", + XContent.isStrictDuplicateDetectionEnabled()); + String yaml = "foo: bar\nfoo: baz"; SettingsException e = expectThrows(SettingsException.class, () -> { Settings.builder().loadFromSource(yaml); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index 57e9e0d5216..1c0a92dbd74 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.xcontent; import com.fasterxml.jackson.core.JsonGenerationException; import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParseException; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -66,6 +67,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; public abstract class BaseXContentTestCase extends ESTestCase { @@ -984,6 +986,22 @@ public abstract class BaseXContentTestCase extends ESTestCase { assertThat(e.getMessage(), containsString("Object has already been built and is self-referencing itself")); } + public void testChecksForDuplicates() throws Exception { + assumeTrue("Test only makes sense if XContent parser has strict duplicate checks enabled", + XContent.isStrictDuplicateDetectionEnabled()); + + BytesReference bytes = builder() + .startObject() + .field("key", 1) + .field("key", 2) + .endObject() + .bytes(); + + JsonParseException pex = expectThrows(JsonParseException.class, () -> createParser(xcontentType().xContent(), bytes).map()); + assertThat(pex.getMessage(), startsWith("Duplicate field 'key'")); + } + + private static void expectUnclosedException(ThrowingRunnable runnable) { IllegalStateException e = expectThrows(IllegalStateException.class, runnable); assertThat(e.getMessage(), containsString("Failed to close the XContentBuilder")); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java index 387efdb5e50..6cae59391c5 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java @@ -169,8 +169,8 @@ public class ConstructingObjectParserTests extends ESTestCase { } public void testRepeatedConstructorParam() throws IOException { - assumeFalse("Test only makes sense if JSON parser doesn't have strict duplicate checks enabled", - JsonXContent.isStrictDuplicateDetectionEnabled()); + assumeFalse("Test only makes sense if XContent parser doesn't have strict duplicate checks enabled", + XContent.isStrictDuplicateDetectionEnabled()); XContentParser parser = createParser(JsonXContent.jsonXContent, "{\n" + " \"vegetable\": 1,\n" diff 
--git a/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java index b4549830432..4a79ddb4ec6 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.common.xcontent.json; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonParseException; import org.elasticsearch.common.xcontent.BaseXContentTestCase; import org.elasticsearch.common.xcontent.XContentType; @@ -40,13 +39,4 @@ public class JsonXContentTests extends BaseXContentTestCase { JsonGenerator generator = new JsonFactory().createGenerator(os); doTestBigInteger(generator, os); } - - public void testChecksForDuplicates() throws Exception { - assumeTrue("Test only makes sense if JSON parser doesn't have strict duplicate checks enabled", - JsonXContent.isStrictDuplicateDetectionEnabled()); - - JsonParseException pex = expectThrows(JsonParseException.class, - () -> XContentType.JSON.xContent().createParser("{ \"key\": 1, \"key\": 2 }").map()); - assertEquals("Duplicate field 'key'", pex.getMessage()); - } } diff --git a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index 8e04ec7f553..13854ffc1e2 100644 --- a/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -340,8 +341,8 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase Date: Mon, 19 Dec 2016 09:54:27 +0100 Subject: [PATCH 36/41] The `_all` default mapper is not completely configured. (#22236) In some cases, it might happen that the `_all` field gets a field type that is not totally configured, and in particular lacks analyzers. This is due to the fact that `AllFieldMapper.TypeParser.getDefault` uses `Defaults.FIELD_TYPE` as a default field type, which does not have any analyzers configured since it does not know about the default analyzers. 
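The shape of the fix is the same for each metadata mapper in the diff below; condensed here for orientation, using the AllFieldMapper hunk as the model (all types are Elasticsearch internals from this code base, so this is a reading aid rather than standalone code):

    @Override
    public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
        final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
        if (fieldType != null) {
            // this index already configured the meta field, reuse it as before
            return new AllFieldMapper(indexSettings, fieldType);
        }
        // no existing type: run the regular TypeParser so defaults, analyzers
        // included, are resolved from the parser context instead of the bare
        // Defaults.FIELD_TYPE
        return parse(NAME, Collections.emptyMap(), context)
                .build(new BuilderContext(indexSettings, new ContentPath(1)));
    }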
--- .../index/mapper/AllFieldMapper.java | 15 +++++++++++---- .../index/mapper/DocumentMapper.java | 3 ++- .../index/mapper/FieldNamesFieldMapper.java | 15 +++++++++++---- .../elasticsearch/index/mapper/IdFieldMapper.java | 4 +++- .../index/mapper/IndexFieldMapper.java | 4 ++-- .../index/mapper/MetadataFieldMapper.java | 5 ++--- .../index/mapper/ParentFieldMapper.java | 5 +++-- .../index/mapper/RoutingFieldMapper.java | 15 +++++++++++---- .../index/mapper/SeqNoFieldMapper.java | 4 +++- .../index/mapper/SourceFieldMapper.java | 6 +++--- .../index/mapper/TypeFieldMapper.java | 5 +++-- .../index/mapper/UidFieldMapper.java | 3 ++- .../index/mapper/VersionFieldMapper.java | 3 ++- .../index/mapper/AllFieldMapperTests.java | 11 +++++++++++ .../index/mapper/ExternalMetadataMapper.java | 4 +++- .../index/mapper/FieldNamesFieldMapperTests.java | 3 ++- .../elasticsearch/indices/IndicesModuleTests.java | 4 +--- .../index/mapper/size/SizeFieldMapper.java | 3 ++- 18 files changed, 77 insertions(+), 35 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java index 49b8fb085d6..a0f7ad57d04 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityService; import java.io.IOException; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -100,7 +101,7 @@ public class AllFieldMapper extends MetadataFieldMapper { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, + public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer()); @@ -141,8 +142,14 @@ public class AllFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { - return new AllFieldMapper(indexSettings, fieldType); + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); + if (fieldType != null) { + return new AllFieldMapper(indexSettings, fieldType); + } else { + return parse(NAME, Collections.emptyMap(), context) + .build(new BuilderContext(indexSettings, new ContentPath(1))); + } } } @@ -179,7 +186,7 @@ public class AllFieldMapper extends MetadataFieldMapper { private EnabledAttributeMapper enabledState; private AllFieldMapper(Settings indexSettings, MappedFieldType existing) { - this(existing == null ? 
Defaults.FIELD_TYPE.clone() : existing.clone(), Defaults.ENABLED, indexSettings); + this(existing.clone(), Defaults.ENABLED, indexSettings); } private AllFieldMapper(MappedFieldType fieldType, EnabledAttributeMapper enabled, Settings indexSettings) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index fbe82a70bd7..fdc45530b99 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -74,7 +74,8 @@ public class DocumentMapper implements ToXContent { final MetadataFieldMapper metadataMapper; if (existingMetadataMapper == null) { final TypeParser parser = entry.getValue(); - metadataMapper = parser.getDefault(indexSettings, mapperService.fullName(name), builder.name()); + metadataMapper = parser.getDefault(mapperService.fullName(name), + mapperService.documentMapperParser().parserContext(builder.name())); } else { metadataMapper = existingMetadataMapper; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 1b4a97a4660..764586562d2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -98,7 +99,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { @@ -114,8 +115,14 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { - return new FieldNamesFieldMapper(indexSettings, fieldType); + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); + if (fieldType != null) { + return new FieldNamesFieldMapper(indexSettings, fieldType); + } else { + return parse(NAME, Collections.emptyMap(), context) + .build(new BuilderContext(indexSettings, new ContentPath(1))); + } } } @@ -183,7 +190,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { } private FieldNamesFieldMapper(Settings indexSettings, MappedFieldType existing) { - this(existing == null ? 
Defaults.FIELD_TYPE.clone() : existing.clone(), indexSettings); + this(existing.clone(), indexSettings); } private FieldNamesFieldMapper(MappedFieldType fieldType, Settings indexSettings) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java index 1b208421a8e..4f74278014b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.Mapper.TypeParser.ParserContext; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -79,7 +80,8 @@ public class IdFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new IdFieldMapper(indexSettings, fieldType); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index 72a7244976d..164f18075a4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; @@ -85,7 +84,8 @@ public class IndexFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new IndexFieldMapper(indexSettings, fieldType); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 07a4b3b9a51..ec84631e041 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -39,14 +39,13 @@ public abstract class MetadataFieldMapper extends FieldMapper { * Get the default {@link MetadataFieldMapper} to use, if nothing had to be parsed. 
- * @param indexSettings the index-level settings - * @param fieldType the existing field type for this meta mapper on the current index - * or null if this is the first type being introduced - * @param typeName the name of the type that this mapper will be used on + * @param fieldType null if this is the first root mapper on this index, the existing + * fieldType for this index otherwise + * @param parserContext context that may be useful to build the field like analyzers */ // TODO: remove the fieldType parameter which is only used for bw compat with pre-2.0 // since settings could be modified - MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName); + MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext parserContext); } public abstract static class Builder extends FieldMapper.Builder { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java index 677da0e5f4e..d3f9eb22b72 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.mapper; -import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; @@ -131,7 +130,9 @@ public class ParentFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); + final String typeName = context.type(); KeywordFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0))); MappedFieldType childJoinFieldType = new ParentFieldType(Defaults.FIELD_TYPE, typeName); childJoinFieldType.setName(ParentFieldMapper.NAME); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index fe2575b9345..8640bfaa35b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -78,7 +79,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); @@ -93,8 +94,14 @@ public class RoutingFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, 
MappedFieldType fieldType, String typeName) { - return new RoutingFieldMapper(indexSettings, fieldType); + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); + if (fieldType != null) { + return new RoutingFieldMapper(indexSettings, fieldType); + } else { + return parse(NAME, Collections.emptyMap(), context) + .build(new BuilderContext(indexSettings, new ContentPath(1))); + } } } @@ -121,7 +128,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { private boolean required; private RoutingFieldMapper(Settings indexSettings, MappedFieldType existing) { - this(existing == null ? Defaults.FIELD_TYPE.clone() : existing.clone(), Defaults.REQUIRED, indexSettings); + this(existing.clone(), Defaults.REQUIRED, indexSettings); } private RoutingFieldMapper(MappedFieldType fieldType, boolean required, Settings indexSettings) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index 5820519af7f..e38a2aab840 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.Mapper.TypeParser.ParserContext; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; @@ -136,7 +137,8 @@ public class SeqNoFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new SeqNoFieldMapper(indexSettings); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index b52d1262796..efddec90669 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.document.Field; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; @@ -109,7 +108,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { @@ -144,7 +143,8 @@ public class SourceFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String 
typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new SourceFieldMapper(indexSettings); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index 551208c797e..67775283d5a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -76,12 +76,13 @@ public class TypeFieldMapper extends MetadataFieldMapper { public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { throw new MapperParsingException(NAME + " is not configurable"); } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new TypeFieldMapper(indexSettings, fieldType); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java index c0515b18bcc..bb98f44ed1c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java @@ -69,7 +69,8 @@ public class UidFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new UidFieldMapper(indexSettings, fieldType); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index fb686d7781b..ce3b2c4b8e8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -62,7 +62,8 @@ public class VersionFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new VersionFieldMapper(indexSettings); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java index 091aa6003c9..d7def1db8cf 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import 
org.elasticsearch.common.lucene.all.AllTermQuery; import org.elasticsearch.common.lucene.all.AllTokenStream; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -476,4 +477,14 @@ public class AllFieldMapperTests extends ESSingleNodeTestCase { e.getMessage().contains("Field [_all] is a metadata field and cannot be added inside a document")); } } + + public void testAllDefaults() { + // We used to have a bug where the default mapping had null analyzers because + // it was not fully constructed and in particular was lacking analyzers + IndexService index = createIndex("index", Settings.EMPTY, "type"); + AllFieldMapper all = index.mapperService().documentMapper("type").allFieldMapper(); + assertNotNull(all.fieldType().indexAnalyzer()); + assertNotNull(all.fieldType().searchAnalyzer()); + assertNotNull(all.fieldType().searchQuoteAnalyzer()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java index 234e4fa312b..9f815a7fdea 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalMetadataMapper.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.Mapper.TypeParser.ParserContext; import java.io.IOException; import java.util.Collections; @@ -104,7 +105,8 @@ public class ExternalMetadataMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new ExternalMetadataMapper(indexSettings); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index 8292970d38c..7de66511f59 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -166,7 +166,8 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new DummyMetadataFieldMapper(indexSettings); } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index 04b60b80c0c..298bb57c499 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -25,8 +25,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import 
org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -56,7 +54,7 @@ public class IndicesModuleTests extends ESTestCase { return null; } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { return null; } } diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index a12d5be1fde..c2a7e79ff2b 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -111,7 +111,8 @@ public class SizeFieldMapper extends MetadataFieldMapper { } @Override - public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) { + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); return new SizeFieldMapper(indexSettings, fieldType); } } From 1ed2e18ded46a4743e43c1b70b18ca359422e3a1 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 19 Dec 2016 09:55:13 +0100 Subject: [PATCH 37/41] Fix MapperService.allEnabled(). (#22227) It returns whether the last merged mapping has `_all` enabled rather than whether any of the types has `_all` enabled. --- .../index/mapper/MapperService.java | 6 +++-- .../index/mapper/MapperServiceTests.java | 27 +++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index d848ce15331..1e3f96fbe2c 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -153,7 +153,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } /** - * Returns true if the "_all" field is enabled for the type + * Returns true if the "_all" field is enabled on any type. 
*/ public boolean allEnabled() { return this.allEnabled; } @@ -377,7 +377,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable { this.hasNested = hasNested; this.fullPathObjectMappers = fullPathObjectMappers; this.parentTypes = parentTypes; - this.allEnabled = mapper.allFieldMapper().enabled(); + // this is only correct because types cannot be removed and we do not + // allow disabling an existing _all field + this.allEnabled |= mapper.allFieldMapper().enabled(); assert assertSerialization(newMapper); assert assertMappersShareSameFieldType(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 87afdedf89d..b32339b2357 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -215,4 +215,31 @@ public class MapperServiceTests extends ESSingleNodeTestCase { indexService.mapperService().merge("type3", normsDisabledMapping, MergeReason.MAPPING_UPDATE, true); assertNotSame(indexService.mapperService().documentMapper("type1"), documentMapper); } + + public void testAllEnabled() throws Exception { + IndexService indexService = createIndex("test"); + assertFalse(indexService.mapperService().allEnabled()); + + CompressedXContent enabledAll = new CompressedXContent(XContentFactory.jsonBuilder().startObject() + .startObject("_all") + .field("enabled", true) + .endObject().endObject().bytes()); + + CompressedXContent disabledAll = new CompressedXContent(XContentFactory.jsonBuilder().startObject() + .startObject("_all") + .field("enabled", false) + .endObject().endObject().bytes()); + + indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, enabledAll, + MergeReason.MAPPING_UPDATE, random().nextBoolean()); + assertFalse(indexService.mapperService().allEnabled()); // _default_ does not count + + indexService.mapperService().merge("some_type", enabledAll, + MergeReason.MAPPING_UPDATE, random().nextBoolean()); + assertTrue(indexService.mapperService().allEnabled()); + + indexService.mapperService().merge("other_type", disabledAll, + MergeReason.MAPPING_UPDATE, random().nextBoolean()); + assertTrue(indexService.mapperService().allEnabled()); // this returns true if any of the types has _all enabled + } } From e38f06cdc658efb74bb0e004047a67b96870a34c Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 19 Dec 2016 10:02:11 +0100 Subject: [PATCH 38/41] Update Gradle shadow plugin for microbenchmarks to 1.2.4 --- benchmarks/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 3b8b92328e1..7546e6d7e1c 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -24,7 +24,7 @@ buildscript { } } dependencies { - classpath 'com.github.jengelman.gradle.plugins:shadow:1.2.3' + classpath 'com.github.jengelman.gradle.plugins:shadow:1.2.4' } } From b2aaeb56f3d00c934cf85cab27ef649350c6e8ad Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 19 Dec 2016 10:02:28 +0100 Subject: [PATCH 39/41] Update JMH to 1.17.3 --- benchmarks/build.gradle | 5 ++--- buildSrc/version.properties | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 7546e6d7e1c..36732215d43 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -44,9 +44,8 @@ task test(type: Test, overwrite: true)
dependencies { compile("org.elasticsearch:elasticsearch:${version}") { - // JMH ships with the conflicting version 4.6 (JMH will not update this dependency as it is Java 6 compatible and joptsimple is one - // of the most recent compatible version). This prevents us from using jopt-simple in benchmarks (which should be ok) but allows us - // to invoke the JMH uberjar as usual. + // JMH ships with the conflicting version 4.6. This prevents us from using jopt-simple in benchmarks (which should be ok) but allows + // us to invoke the JMH uberjar as usual. exclude group: 'net.sf.jopt-simple', module: 'jopt-simple' } compile "org.openjdk.jmh:jmh-core:$versions.jmh" diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 146aafceb7f..44835f7227c 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -21,4 +21,4 @@ commonscodec = 1.10 hamcrest = 1.3 securemock = 1.2 # benchmark dependencies -jmh = 1.15 +jmh = 1.17.3 From 655a95a2bb8ddd13989f88c443297c6574ea6400 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 19 Dec 2016 10:06:12 +0100 Subject: [PATCH 40/41] Cache results of geoip lookups (#22231) With this commit, we introduce a cache to the geoip ingest processor. The cache is enabled by default and caches the 1000 most recent items. The cache size is controlled by the setting `ingest.geoip.cache_size`. Closes #22074 --- docs/plugins/ingest-geoip.asciidoc | 11 ++++ .../ingest/geoip/GeoIpCache.java | 46 +++++++++++++++++ .../ingest/geoip/IngestGeoIpPlugin.java | 27 ++++++++-- .../ingest/geoip/GeoIpCacheTests.java | 51 +++++++++++++++++++ .../geoip/GeoIpProcessorFactoryTests.java | 6 ++- 5 files changed, 137 insertions(+), 4 deletions(-) create mode 100644 plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java create mode 100644 plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java diff --git a/docs/plugins/ingest-geoip.asciidoc b/docs/plugins/ingest-geoip.asciidoc index 0481ad40ab6..95e7a0442a4 100644 --- a/docs/plugins/ingest-geoip.asciidoc +++ b/docs/plugins/ingest-geoip.asciidoc @@ -203,3 +203,14 @@ Which returns: } -------------------------------------------------- // TESTRESPONSE + +[[ingest-geoip-settings]] +===== Node Settings + +The geoip processor supports the following setting: + +`ingest.geoip.cache_size`:: + + The maximum number of results that should be cached. Defaults to `1000`. + +Note that this setting is a node setting and applies to all geoip processors, i.e. there is one cache for all defined geoip processors. \ No newline at end of file diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java new file mode 100644 index 00000000000..83a3374b504 --- /dev/null +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.ingest.geoip; + +import com.fasterxml.jackson.databind.JsonNode; +import com.maxmind.db.NodeCache; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; + +final class GeoIpCache implements NodeCache { + private final Cache cache; + + GeoIpCache(long maxSize) { + this.cache = CacheBuilder.builder().setMaximumWeight(maxSize).build(); + } + + @Override + public JsonNode get(int key, Loader loader) throws IOException { + try { + return cache.computeIfAbsent(key, loader::load); + } catch (ExecutionException e) { + Throwable cause = e.getCause() != null ? e.getCause() : e; + throw new ElasticsearchException(cause); + } + } +} diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 6d5af71aa5b..4e5cc5c0237 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -26,38 +26,57 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.PathMatcher; import java.nio.file.StandardOpenOption; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.stream.Stream; import java.util.zip.GZIPInputStream; +import com.maxmind.db.NoCache; +import com.maxmind.db.NodeCache; import com.maxmind.geoip2.DatabaseReader; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.ingest.Processor; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable { + public static final Setting CACHE_SIZE = + Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); private Map databaseReaders; + @Override + public List> getSettings() { + return Arrays.asList(CACHE_SIZE); + } + @Override public Map getProcessors(Processor.Parameters parameters) { if (databaseReaders != null) { throw new IllegalStateException("getProcessors called twice for geoip plugin!!"); } Path geoIpConfigDirectory = parameters.env.configFile().resolve("ingest-geoip"); + NodeCache cache; + long cacheSize = CACHE_SIZE.get(parameters.env.settings()); + if (cacheSize > 0) { + cache = new GeoIpCache(cacheSize); + } else { + cache = NoCache.getInstance(); + } try { - databaseReaders = loadDatabaseReaders(geoIpConfigDirectory); + databaseReaders = loadDatabaseReaders(geoIpConfigDirectory, cache); } catch (IOException e) { throw new RuntimeException(e); } return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders)); } - static Map loadDatabaseReaders(Path geoIpConfigDirectory) throws IOException { + 
static Map loadDatabaseReaders(Path geoIpConfigDirectory, NodeCache cache) throws IOException { if (Files.exists(geoIpConfigDirectory) == false && Files.isDirectory(geoIpConfigDirectory)) { throw new IllegalStateException("the geoip directory [" + geoIpConfigDirectory + "] containing databases doesn't exist"); } @@ -71,7 +90,8 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable Path databasePath = iterator.next(); if (Files.isRegularFile(databasePath) && pathMatcher.matches(databasePath)) { try (InputStream inputStream = new GZIPInputStream(Files.newInputStream(databasePath, StandardOpenOption.READ))) { - databaseReaders.put(databasePath.getFileName().toString(), new DatabaseReader.Builder(inputStream).build()); + databaseReaders.put(databasePath.getFileName().toString(), + new DatabaseReader.Builder(inputStream).withCache(cache).build()); } } } @@ -85,4 +105,5 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, Closeable IOUtils.close(databaseReaders.values()); } } + } diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java new file mode 100644 index 00000000000..71cab99115f --- /dev/null +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.ingest.geoip; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.IntNode; +import com.maxmind.db.NodeCache; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.test.ESTestCase; + +public class GeoIpCacheTests extends ESTestCase { + public void testCachesAndEvictsResults() throws Exception { + GeoIpCache cache = new GeoIpCache(1); + final NodeCache.Loader loader = key -> new IntNode(key); + + JsonNode jsonNode1 = cache.get(1, loader); + assertSame(jsonNode1, cache.get(1, loader)); + + // evict old key by adding another value + cache.get(2, loader); + + assertNotSame(jsonNode1, cache.get(1, loader)); + } + + public void testThrowsElasticsearchException() throws Exception { + GeoIpCache cache = new GeoIpCache(1); + NodeCache.Loader loader = (int key) -> { + throw new IllegalArgumentException("Illegal key"); + }; + ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> cache.get(1, loader)); + assertTrue("Expected cause to be of type IllegalArgumentException but was [" + ex.getCause().getClass() + "]", + ex.getCause() instanceof IllegalArgumentException); + assertEquals("Illegal key", ex.getCause().getMessage()); + } +} diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index ec4db09cd96..162137b5f3c 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -20,6 +20,8 @@ package org.elasticsearch.ingest.geoip; import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import com.maxmind.db.NoCache; +import com.maxmind.db.NodeCache; import com.maxmind.geoip2.DatabaseReader; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Randomness; @@ -57,7 +59,9 @@ public class GeoIpProcessorFactoryTests extends ESTestCase { geoIpConfigDir.resolve("GeoLite2-City.mmdb.gz")); Files.copy(new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb.gz")), geoIpConfigDir.resolve("GeoLite2-Country.mmdb.gz")); - databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir); + + NodeCache cache = randomFrom(NoCache.getInstance(), new GeoIpCache(randomPositiveLong())); + databaseReaders = IngestGeoIpPlugin.loadDatabaseReaders(geoIpConfigDir, cache); } @AfterClass From f96769f97b6caa6d394f04805062cf4273ad5747 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Mon, 19 Dec 2016 10:08:58 +0100 Subject: [PATCH 41/41] Update painless-syntax.asciidoc Fix asciidoc syntax --- docs/reference/modules/scripting/painless-syntax.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/modules/scripting/painless-syntax.asciidoc b/docs/reference/modules/scripting/painless-syntax.asciidoc index 10d6d501412..e3a6ed24bc0 100644 --- a/docs/reference/modules/scripting/painless-syntax.asciidoc +++ b/docs/reference/modules/scripting/painless-syntax.asciidoc @@ -185,9 +185,9 @@ doesn't have a `foo.keyword` field but is the length of that field if it does. Lastly, `?:` is lazy so the right hand side is not evaluated at all if the left hand side isn't null. 
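For illustration (not part of this patch), a minimal Painless sketch of the lazy evaluation just described; `fallback` and `params.name` are hypothetical names used only for this example:

String fallback() { return "anonymous"; }  // hypothetical helper; only invoked when the left-hand side is null
def name = params.name ?: fallback();      // fallback() is never evaluated when params.name is non-null
return name;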
-NOTE: Unlike Groovy, Painless' `?:` operator only coalesces `null`, not `false` +NOTE: Unlike Groovy, Painless' ++?:++ operator only coalesces `null`, not `false` or http://groovy-lang.org/semantics.html#Groovy-Truth[falsy] values. Strictly -speaking Painless' `?:` is more like Kotlin's `?:` than Groovy's `?:`. +speaking Painless' ++?:++ is more like Kotlin's ++?:++ than Groovy's ++?:++. NOTE: The result of `?.` and `?:` can't be assigned to primitives. So `int[] someArray = null; int l = someArray?.length` and