From 9b710e8fdd9ceb81532cffbd022a682259715bf9 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 4 Oct 2016 23:24:29 +0200 Subject: [PATCH 01/13] Make getter for bulk shard requests items visible (#20743) --- .../java/org/elasticsearch/action/bulk/BulkShardRequest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index ffc2407b8a4..b9d7f876dc1 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -45,7 +45,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest { setRefreshPolicy(refreshPolicy); } - BulkItemRequest[] items() { + public BulkItemRequest[] items() { return items; } From 7d1e3377b862aebb5b3e5a7253de222d3da3f694 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 4 Oct 2016 19:47:47 -0400 Subject: [PATCH 02/13] Clarify wording for the strict REST params message This commit changes the strict REST parameters message to say that unconsumed parameters are unrecognized rather than unused. Additionally, the test is beefed up to include two unused parameters. Relates #20745 --- .../java/org/elasticsearch/rest/BaseRestHandler.java | 8 +++++++- .../org/elasticsearch/rest/BaseRestHandlerTests.java | 11 +++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 10f5359803b..fd5ff846529 100644 --- a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -30,6 +30,7 @@ import org.elasticsearch.plugins.ActionPlugin; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Set; import java.util.stream.Collectors; @@ -63,7 +64,12 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH // validate the non-response params if (!unconsumedParams.isEmpty()) { - throw new IllegalArgumentException("request [" + request.path() + "] contains unused params: " + unconsumedParams.toString()); + final String message = String.format( + Locale.ROOT, + "request [%s] contains unrecognized parameters: %s", + request.path(), + unconsumedParams.toString()); + throw new IllegalArgumentException(message); } // execute the action diff --git a/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index 6019ab8aacf..fc5af192b74 100644 --- a/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -33,6 +33,7 @@ import java.util.HashMap; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.core.AnyOf.anyOf; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; import static org.mockito.Mockito.mock; @@ -51,12 +52,18 @@ public class BaseRestHandlerTests extends ESTestCase { final HashMap params = new HashMap<>(); params.put("consumed", randomAsciiOfLength(8)); - params.put("unconsumed", randomAsciiOfLength(8)); + params.put("unconsumed-first", randomAsciiOfLength(8)); + params.put("unconsumed-second", randomAsciiOfLength(8)); 
RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class))); - assertThat(e, hasToString(containsString("request [/] contains unused params: [unconsumed]"))); + assertThat( + e, + // we can not rely on ordering of the unconsumed parameters here + anyOf( + hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-first, unconsumed-second]")), + hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-second, unconsumed-first]")))); assertFalse(executed.get()); }

From 9a83ded55399c9e5af9de93f52125984e7e9c7cd Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 4 Oct 2016 20:45:07 -0400 Subject: [PATCH 03/13] Add did you mean to strict REST params

This commit adds a did you mean feature to the strict REST params error message. This works by comparing any unconsumed parameters to all of the consumed parameters, comparing the Levenshtein distance between those parameters, and taking any consumed parameters that are close to an unconsumed parameter as candidates for the did you mean.

* Fix pluralization in strict REST params message

This commit fixes the pluralization in the strict REST parameters error message so that the word "parameter" is not unconditionally written as "parameters" even when there is only one unrecognized parameter.

* Strengthen strict REST params did you mean test

This commit adds an unconsumed parameter that is too far from every consumed parameter to have any candidate suggestions.

Relates #20747 --- .../elasticsearch/rest/BaseRestHandler.java | 44 +++++++++++-- .../org/elasticsearch/rest/RestRequest.java | 11 +++- .../admin/indices/RestAnalyzeAction.java | 5 +- .../rest/BaseRestHandlerTests.java | 62 +++++++++++++++++-- 4 files changed, 108 insertions(+), 14 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index fd5ff846529..3bc91dc48e2 100644 --- a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -19,8 +19,11 @@ package org.elasticsearch.rest; +import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -28,10 +31,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.ActionPlugin; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; import java.util.stream.Collectors; /** @@ -59,16 +65,44 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH final RestChannelConsumer action = prepareRequest(request, client); // validate unconsumed params, but we must exclude params used to format the response - final List unconsumedParams = - request.unconsumedParams().stream().filter(p ->
!responseParams().contains(p)).collect(Collectors.toList()); + // use a sorted set so the unconsumed parameters appear in a reliable sorted order + final SortedSet unconsumedParams = + request.unconsumedParams().stream().filter(p -> !responseParams().contains(p)).collect(Collectors.toCollection(TreeSet::new)); // validate the non-response params if (!unconsumedParams.isEmpty()) { - final String message = String.format( + String message = String.format( Locale.ROOT, - "request [%s] contains unrecognized parameters: %s", + "request [%s] contains unrecognized parameter%s: ", request.path(), - unconsumedParams.toString()); + unconsumedParams.size() > 1 ? "s" : ""); + boolean first = true; + for (final String unconsumedParam : unconsumedParams) { + final LevensteinDistance ld = new LevensteinDistance(); + final List> scoredParams = new ArrayList<>(); + for (String consumedParam : request.consumedParams()) { + final float distance = ld.getDistance(unconsumedParam, consumedParam); + if (distance > 0.5f) { + scoredParams.add(new Tuple<>(distance, consumedParam)); + } + } + CollectionUtil.timSort(scoredParams, (a, b) -> { + // sort by distance in reverse order, then parameter name for equal distances + int compare = a.v1().compareTo(b.v1()); + if (compare != 0) return -compare; + else return a.v2().compareTo(b.v2()); + }); + if (first == false) { + message += ", "; + } + message += "[" + unconsumedParam + "]"; + final List keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList()); + if (keys.isEmpty() == false) { + message += " -> did you mean " + (keys.size() == 1 ? "[" + keys.get(0) + "]": "any of " + keys.toString()) + "?"; + } + first = false; + } + throw new IllegalArgumentException(message); } diff --git a/core/src/main/java/org/elasticsearch/rest/RestRequest.java b/core/src/main/java/org/elasticsearch/rest/RestRequest.java index 5960fc8979e..3f0f32fff37 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/core/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import java.net.SocketAddress; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -129,6 +128,16 @@ public abstract class RestRequest implements ToXContent.Params { return params; } + /** + * Returns a list of parameters that have been consumed. This method returns a copy, callers + * are free to modify the returned list. + * + * @return the list of currently consumed parameters. + */ + List consumedParams() { + return consumedParams.stream().collect(Collectors.toList()); + } + /** * Returns a list of parameters that have not yet been consumed. This method returns a copy, * callers are free to modify the returned list. 
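For readers tracing the did-you-mean logic above, here is a minimal standalone sketch of the same scoring idea, using the Lucene `LevensteinDistance` class that the handler relies on; the `suggest` helper, the candidate list, and the 0.5 cutoff are illustrative stand-ins rather than the handler's actual wiring.

```java
import org.apache.lucene.search.spell.LevensteinDistance;

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class DidYouMeanSketch {

    // Returns the candidates scoring above the cutoff, best match first.
    static List<String> suggest(String unknown, List<String> candidates) {
        final LevensteinDistance ld = new LevensteinDistance();
        return candidates.stream()
                // getDistance returns a similarity score in [0, 1]; higher means closer
                .filter(candidate -> ld.getDistance(unknown, candidate) > 0.5f)
                .sorted(Comparator.comparing((String c) -> ld.getDistance(unknown, c)).reversed())
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // prints [tokenizer]: the only candidate close enough to the misspelling
        System.out.println(suggest("tokenzier", Arrays.asList("analyzer", "field", "tokenizer")));
    }
}
```

The production code keeps the scored `(distance, name)` pairs in a `Tuple` and sorts them with `CollectionUtil.timSort` instead, but the similarity measure and the 0.5f cutoff are the same.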
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java index 4b3035e08b5..247df1a380e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java @@ -73,8 +73,9 @@ public class RestAnalyzeAction extends BaseRestHandler { analyzeRequest.text(texts); analyzeRequest.analyzer(request.param("analyzer")); analyzeRequest.field(request.param("field")); - if (request.hasParam("tokenizer")) { - analyzeRequest.tokenizer(request.param("tokenizer")); + final String tokenizer = request.param("tokenizer"); + if (tokenizer != null) { + analyzeRequest.tokenizer(tokenizer); } for (String filter : request.paramAsStringArray("filter", Strings.EMPTY_ARRAY)) { analyzeRequest.addTokenFilter(filter); diff --git a/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index fc5af192b74..8f9dd445369 100644 --- a/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -33,14 +33,34 @@ import java.util.HashMap; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import static org.hamcrest.core.AnyOf.anyOf; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; import static org.mockito.Mockito.mock; public class BaseRestHandlerTests extends ESTestCase { - public void testUnconsumedParameters() throws Exception { + public void testOneUnconsumedParameters() throws Exception { + final AtomicBoolean executed = new AtomicBoolean(); + BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) { + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + request.param("consumed"); + return channel -> executed.set(true); + } + }; + + final HashMap params = new HashMap<>(); + params.put("consumed", randomAsciiOfLength(8)); + params.put("unconsumed", randomAsciiOfLength(8)); + RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class))); + assertThat(e, hasToString(containsString("request [/] contains unrecognized parameter: [unconsumed]"))); + assertFalse(executed.get()); + } + + public void testMultipleUnconsumedParameters() throws Exception { final AtomicBoolean executed = new AtomicBoolean(); BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) { @Override @@ -56,14 +76,44 @@ public class BaseRestHandlerTests extends ESTestCase { params.put("unconsumed-second", randomAsciiOfLength(8)); RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class))); + assertThat(e, hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-first], [unconsumed-second]"))); + assertFalse(executed.get()); + } + + public void 
testUnconsumedParametersDidYouMean() throws Exception { + final AtomicBoolean executed = new AtomicBoolean(); + BaseRestHandler handler = new BaseRestHandler(Settings.EMPTY) { + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + request.param("consumed"); + request.param("field"); + request.param("tokenizer"); + request.param("very_close_to_parameter_1"); + request.param("very_close_to_parameter_2"); + return channel -> executed.set(true); + } + }; + + final HashMap params = new HashMap<>(); + params.put("consumed", randomAsciiOfLength(8)); + params.put("flied", randomAsciiOfLength(8)); + params.put("tokenzier", randomAsciiOfLength(8)); + params.put("very_close_to_parametre", randomAsciiOfLength(8)); + params.put("very_far_from_every_consumed_parameter", randomAsciiOfLength(8)); + RestRequest request = new FakeRestRequest.Builder().withParams(params).build(); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mock(NodeClient.class))); assertThat( e, - // we can not rely on ordering of the unconsumed parameters here - anyOf( - hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-first, unconsumed-second]")), - hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-second, unconsumed-first]")))); + hasToString(containsString( + "request [/] contains unrecognized parameters: " + + "[flied] -> did you mean [field]?, " + + "[tokenzier] -> did you mean [tokenizer]?, " + + "[very_close_to_parametre] -> did you mean any of [very_close_to_parameter_1, very_close_to_parameter_2]?, " + + "[very_far_from_every_consumed_parameter]"))); assertFalse(executed.get()); } From 6a5630f901985cb669b3d5b33467202505546825 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 30 Sep 2016 15:24:26 +0200 Subject: [PATCH 04/13] ingest: Upgrade geoip2 dependency Closes #20563 --- plugins/ingest-geoip/build.gradle | 31 +++++++++++-------- .../licenses/geoip2-2.7.0.jar.sha1 | 1 - .../licenses/geoip2-2.8.0.jar.sha1 | 1 + .../jackson-annotations-2.7.1.jar.sha1 | 1 - .../jackson-annotations-2.8.2.jar.sha1 | 1 + .../licenses/jackson-databind-2.7.1.jar.sha1 | 1 - .../licenses/jackson-databind-2.8.2.jar.sha1 | 1 + 7 files changed, 21 insertions(+), 16 deletions(-) delete mode 100644 plugins/ingest-geoip/licenses/geoip2-2.7.0.jar.sha1 create mode 100644 plugins/ingest-geoip/licenses/geoip2-2.8.0.jar.sha1 delete mode 100644 plugins/ingest-geoip/licenses/jackson-annotations-2.7.1.jar.sha1 create mode 100644 plugins/ingest-geoip/licenses/jackson-annotations-2.8.2.jar.sha1 delete mode 100644 plugins/ingest-geoip/licenses/jackson-databind-2.7.1.jar.sha1 create mode 100644 plugins/ingest-geoip/licenses/jackson-databind-2.8.2.jar.sha1 diff --git a/plugins/ingest-geoip/build.gradle b/plugins/ingest-geoip/build.gradle index d9e90a61d40..e74a27844db 100644 --- a/plugins/ingest-geoip/build.gradle +++ b/plugins/ingest-geoip/build.gradle @@ -23,10 +23,10 @@ esplugin { } dependencies { - compile ('com.maxmind.geoip2:geoip2:2.7.0') + compile ('com.maxmind.geoip2:geoip2:2.8.0') // geoip2 dependencies: - compile('com.fasterxml.jackson.core:jackson-annotations:2.7.1') - compile('com.fasterxml.jackson.core:jackson-databind:2.7.1') + compile('com.fasterxml.jackson.core:jackson-annotations:2.8.2') + compile('com.fasterxml.jackson.core:jackson-databind:2.8.2') 
compile('com.maxmind.db:maxmind-db:1.2.1') testCompile 'org.elasticsearch:geolite2-databases:20160608' @@ -50,14 +50,19 @@ bundlePlugin { } thirdPartyAudit.excludes = [ - // geoip WebServiceClient needs Google http client, but we're not using WebServiceClient: - 'com.google.api.client.http.HttpTransport', - 'com.google.api.client.http.GenericUrl', - 'com.google.api.client.http.HttpResponse', - 'com.google.api.client.http.HttpRequestFactory', - 'com.google.api.client.http.HttpRequest', - 'com.google.api.client.http.HttpHeaders', - 'com.google.api.client.http.HttpResponseException', - 'com.google.api.client.http.javanet.NetHttpTransport', - 'com.google.api.client.http.javanet.NetHttpTransport', + // geoip WebServiceClient needs apache http client, but we're not using WebServiceClient: + 'org.apache.http.HttpEntity', + 'org.apache.http.HttpHost', + 'org.apache.http.HttpResponse', + 'org.apache.http.StatusLine', + 'org.apache.http.auth.UsernamePasswordCredentials', + 'org.apache.http.client.config.RequestConfig$Builder', + 'org.apache.http.client.config.RequestConfig', + 'org.apache.http.client.methods.CloseableHttpResponse', + 'org.apache.http.client.methods.HttpGet', + 'org.apache.http.client.utils.URIBuilder', + 'org.apache.http.impl.auth.BasicScheme', + 'org.apache.http.impl.client.CloseableHttpClient', + 'org.apache.http.impl.client.HttpClientBuilder', + 'org.apache.http.util.EntityUtils' ] diff --git a/plugins/ingest-geoip/licenses/geoip2-2.7.0.jar.sha1 b/plugins/ingest-geoip/licenses/geoip2-2.7.0.jar.sha1 deleted file mode 100644 index 2015e311d60..00000000000 --- a/plugins/ingest-geoip/licenses/geoip2-2.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2010d922191f5801939b462a5703ab79a7829626 \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/geoip2-2.8.0.jar.sha1 b/plugins/ingest-geoip/licenses/geoip2-2.8.0.jar.sha1 new file mode 100644 index 00000000000..c6036686601 --- /dev/null +++ b/plugins/ingest-geoip/licenses/geoip2-2.8.0.jar.sha1 @@ -0,0 +1 @@ +46226778ec32b776e80f282c5bf65b88d36cc0a0 \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/jackson-annotations-2.7.1.jar.sha1 b/plugins/ingest-geoip/licenses/jackson-annotations-2.7.1.jar.sha1 deleted file mode 100644 index 69b45742d84..00000000000 --- a/plugins/ingest-geoip/licenses/jackson-annotations-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b93f301823b79033fcbe873779b3d84f9730fc1 \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/jackson-annotations-2.8.2.jar.sha1 b/plugins/ingest-geoip/licenses/jackson-annotations-2.8.2.jar.sha1 new file mode 100644 index 00000000000..c3b701dbb86 --- /dev/null +++ b/plugins/ingest-geoip/licenses/jackson-annotations-2.8.2.jar.sha1 @@ -0,0 +1 @@ +a38d544583e90cf163b2e45e4a57f5c54de670d3 \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/jackson-databind-2.7.1.jar.sha1 b/plugins/ingest-geoip/licenses/jackson-databind-2.7.1.jar.sha1 deleted file mode 100644 index d9b4ca6a79b..00000000000 --- a/plugins/ingest-geoip/licenses/jackson-databind-2.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14d88822bca655de7aa6ed3e4c498d115505710a \ No newline at end of file diff --git a/plugins/ingest-geoip/licenses/jackson-databind-2.8.2.jar.sha1 b/plugins/ingest-geoip/licenses/jackson-databind-2.8.2.jar.sha1 new file mode 100644 index 00000000000..7e90b5f8e97 --- /dev/null +++ b/plugins/ingest-geoip/licenses/jackson-databind-2.8.2.jar.sha1 @@ -0,0 +1 @@ +1f12816593c1422be957471c98a80bfbace60fa2 \ No newline at end of file From 
e168b3b66b38f35987b9f362a4f084d8f0cba638 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 4 Oct 2016 19:53:28 +0100 Subject: [PATCH 05/13] Fix date_range aggregation to not cache if now is used Before this change the processing of the ranges in the date range (and other range type) aggregations was done when the Aggregator was created. This meant that the SearchContext did not know that now had been used in a range until after the decision to cache was made. This change moves the processing of the ranges to the aggregation builders so that the search context is made aware that now has been used before it decides if the request should be cached --- .../range/AbstractRangeAggregatorFactory.java | 6 +-- .../bucket/range/AbstractRangeBuilder.java | 38 +++++++++++++++++ .../bucket/range/RangeAggregationBuilder.java | 2 + .../bucket/range/RangeAggregator.java | 41 +++---------------- .../bucket/range/RangeAggregatorFactory.java | 3 +- .../date/DateRangeAggregationBuilder.java | 3 ++ .../date/DateRangeAggregatorFactory.java | 3 +- .../GeoDistanceAggregationBuilder.java | 1 + .../GeoDistanceRangeAggregatorFactory.java | 6 +-- .../indices/IndicesRequestCacheIT.java | 30 ++++++++++++-- 10 files changed, 85 insertions(+), 48 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index f4103d87fbd..f67bec631bc 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -40,10 +40,10 @@ public class AbstractRangeAggregatorFactory { private final InternalRange.Factory rangeFactory; - private final List ranges; + private final R[] ranges; private final boolean keyed; - public AbstractRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, List ranges, boolean keyed, + public AbstractRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, R[] ranges, boolean keyed, InternalRange.Factory rangeFactory, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, context, parent, subFactoriesBuilder, metaData); @@ -55,7 +55,7 @@ public class AbstractRangeAggregatorFactory pipelineAggregators, Map metaData) throws IOException { - return new Unmapped(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); + return new Unmapped<>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java index 13d10bd0a0c..0692b0ed304 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java @@ -19,13 +19,17 @@ package org.elasticsearch.search.aggregations.bucket.range; +import org.apache.lucene.util.InPlaceMergeSorter; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; +import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import java.io.IOException; import java.util.ArrayList; @@ -55,6 +59,40 @@ public abstract class AbstractRangeBuilder config) { + Range[] ranges = new Range[this.ranges.size()]; + for (int i = 0; i < ranges.length; i++) { + ranges[i] = this.ranges.get(i).process(config.format(), context.searchContext()); + } + sortRanges(ranges); + return ranges; + } + + private static void sortRanges(final Range[] ranges) { + new InPlaceMergeSorter() { + + @Override + protected void swap(int i, int j) { + final Range tmp = ranges[i]; + ranges[i] = ranges[j]; + ranges[j] = tmp; + } + + @Override + protected int compare(int i, int j) { + int cmp = Double.compare(ranges[i].from, ranges[j].from); + if (cmp == 0) { + cmp = Double.compare(ranges[i].to, ranges[j].to); + } + return cmp; + } + }.sort(0, ranges.length); + } + @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(ranges.size()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java index c815ae9d3cf..73a4d86819a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java @@ -114,6 +114,8 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + // We need to call processRanges here so they are parsed before we make the decision of whether to cache the request + Range[] ranges = processRanges(context, config); return new RangeAggregatorFactory(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index c83e2d2c721..27d826a644a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.range; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.util.InPlaceMergeSorter; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.io.stream.StreamInput; @@ -210,7 +209,7 @@ public class RangeAggregator extends BucketsAggregator { final double[] maxTo; public RangeAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, DocValueFormat format, - InternalRange.Factory rangeFactory, List ranges, boolean keyed, AggregationContext aggregationContext, + InternalRange.Factory rangeFactory, Range[] 
ranges, boolean keyed, AggregationContext aggregationContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); @@ -220,11 +219,7 @@ public class RangeAggregator extends BucketsAggregator { this.keyed = keyed; this.rangeFactory = rangeFactory; - this.ranges = new Range[ranges.size()]; - for (int i = 0; i < this.ranges.length; i++) { - this.ranges[i] = ranges.get(i).process(format, context.searchContext()); - } - sortRanges(this.ranges); + this.ranges = ranges; maxTo = new double[this.ranges.length]; maxTo[0] = this.ranges[0].to; @@ -337,45 +332,21 @@ public class RangeAggregator extends BucketsAggregator { return rangeFactory.create(name, buckets, format, keyed, pipelineAggregators(), metaData()); } - private static void sortRanges(final Range[] ranges) { - new InPlaceMergeSorter() { - - @Override - protected void swap(int i, int j) { - final Range tmp = ranges[i]; - ranges[i] = ranges[j]; - ranges[j] = tmp; - } - - @Override - protected int compare(int i, int j) { - int cmp = Double.compare(ranges[i].from, ranges[j].from); - if (cmp == 0) { - cmp = Double.compare(ranges[i].to, ranges[j].to); - } - return cmp; - } - }.sort(0, ranges.length); - } - public static class Unmapped extends NonCollectingAggregator { - private final List ranges; + private final R[] ranges; private final boolean keyed; private final InternalRange.Factory factory; private final DocValueFormat format; @SuppressWarnings("unchecked") - public Unmapped(String name, List ranges, boolean keyed, DocValueFormat format, + public Unmapped(String name, R[] ranges, boolean keyed, DocValueFormat format, AggregationContext context, Aggregator parent, InternalRange.Factory factory, List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); - this.ranges = new ArrayList<>(); - for (R range : ranges) { - this.ranges.add((R) range.process(format, context.searchContext())); - } + this.ranges = ranges; this.keyed = keyed; this.format = format; this.factory = factory; @@ -384,7 +355,7 @@ public class RangeAggregator extends BucketsAggregator { @Override public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); - List buckets = new ArrayList<>(ranges.size()); + List buckets = new ArrayList<>(ranges.length); for (RangeAggregator.Range range : ranges) { buckets.add(factory.createBucket(range.key, range.from, range.to, 0, subAggs, keyed, format)); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java index 5dec4c40c45..b3297401457 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java @@ -29,12 +29,11 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; -import java.util.List; import java.util.Map; public class RangeAggregatorFactory extends AbstractRangeAggregatorFactory { - public RangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, List ranges, boolean keyed, + public RangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, Range[] ranges, 
boolean keyed, Factory rangeFactory, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java index a75b071569c..c8a8e16640b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java @@ -259,6 +259,9 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + // We need to call processRanges here so they are parsed and we know whether `now` has been used before we make + // the decision of whether to cache the request + Range[] ranges = processRanges(context, config); return new DateRangeAggregatorFactory(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorFactory.java index d3bb7ac6238..d5d16123ec3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregatorFactory.java @@ -30,12 +30,11 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; -import java.util.List; import java.util.Map; public class DateRangeAggregatorFactory extends AbstractRangeAggregatorFactory { - public DateRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, List ranges, boolean keyed, + public DateRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, Range[] ranges, boolean keyed, Factory rangeFactory, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java index 4a4cab2affa..583bc83feb4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java @@ -215,6 +215,7 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde protected ValuesSourceAggregatorFactory innerBuild(AggregationContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + Range[] ranges = this.ranges.toArray(new Range[this.range().size()]); return new GeoDistanceRangeAggregatorFactory(name, 
type, config, origin, ranges, unit, distanceType, keyed, context, parent, subFactoriesBuilder, metaData); }

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java index 32c3592a8fc..62aa18b168a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceRangeAggregatorFactory.java @@ -51,13 +51,13 @@ public class GeoDistanceRangeAggregatorFactory private final InternalRange.Factory rangeFactory = InternalGeoDistance.FACTORY; private final GeoPoint origin; - private final List ranges; + private final Range[] ranges; private final DistanceUnit unit; private final GeoDistance distanceType; private final boolean keyed; public GeoDistanceRangeAggregatorFactory(String name, Type type, ValuesSourceConfig config, GeoPoint origin, - List ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, AggregationContext context, + Range[] ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, type, config, context, parent, subFactoriesBuilder, metaData); this.origin = origin; @@ -70,7 +70,7 @@ public class GeoDistanceRangeAggregatorFactory @Override protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { - return new Unmapped(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); + return new Unmapped<>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); } @Override diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 7b9f2dbb7cf..078bf499ff4 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -34,6 +34,8 @@ import org.joda.time.chrono.ISOChronology; import java.util.List; import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; +import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; @@ -406,11 +408,33 @@ public class IndicesRequestCacheIT extends ESIntegTestCase { assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), equalTo(0L)); - // If size > 1 and cache flag is set on the request we should cache - final SearchResponse r4 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1) - .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).get(); + // If the request has an aggregation containing now we should not cache + final SearchResponse r4 =
client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(filter("foo", QueryBuilders.rangeQuery("s").from("now-10y").to("now"))).get(); assertSearchResponse(r4); assertThat(r4.getHits().getTotalHits(), equalTo(7L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + // If the request has an aggregation containing now we should not cache + final SearchResponse r5 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0) + .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-20").lte("2016-03-26")) + .addAggregation(dateRange("foo").field("s").addRange("now-10y", "now")).get(); + assertSearchResponse(r5); + assertThat(r5.getHits().getTotalHits(), equalTo(7L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), + equalTo(0L)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), + equalTo(0L)); + + // If size > 1 and cache flag is set on the request we should cache + final SearchResponse r6 = client().prepareSearch("index").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1) + .setRequestCache(true).setQuery(QueryBuilders.rangeQuery("s").gte("2016-03-21").lte("2016-03-27")).get(); + assertSearchResponse(r6); + assertThat(r6.getHits().getTotalHits(), equalTo(7L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), equalTo(0L)); assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(),

From 6da0f0dcc097d71552226cf858e2e459a8f7b33a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 5 Oct 2016 08:10:27 -0700 Subject: [PATCH 06/13] Build: Fix plugin poms to have correct artifact id (#20764)

We already override the name in plugin pom files to be that configured for the plugin, but we also need to explicitly set the artifactId.

--- .../org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy | 1 + 1 file changed, 1 insertion(+)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 0bb97aa84db..d5295519ad2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -231,6 +231,7 @@ public class PluginBuildPlugin extends BuildPlugin { * ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to * maven local work, since we publish to maven central externally.
*/ zipReal(MavenPublication) { + artifactId = project.pluginProperties.extension.name pom.withXml { XmlProvider xml -> Node root = xml.asNode() root.appendNode('name', project.pluginProperties.extension.name) From f895abcf40e6c4623bc89502c1fe02f97f467471 Mon Sep 17 00:00:00 2001 From: Anatolii Stepaniuk Date: Wed, 5 Oct 2016 15:54:32 +0300 Subject: [PATCH 07/13] Fix grammar issues in some docs This commit fixes some grammar issues in various docs. Closes #20751 Closes #20752 Closes #20754 Closes #20755 --- docs/reference/mapping/types/geo-shape.asciidoc | 2 +- docs/reference/query-dsl/geo-bounding-box-query.asciidoc | 2 +- docs/reference/query-dsl/geo-distance-query.asciidoc | 2 +- docs/reference/query-dsl/joining-queries.asciidoc | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 1f0c76e1b93..4fe185fe463 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -175,7 +175,7 @@ this into a tree_levels setting of 26. ===== Performance considerations Elasticsearch uses the paths in the prefix tree as terms in the index -and in queries. The higher the levels is (and thus the precision), the +and in queries. The higher the level is (and thus the precision), the more terms are generated. Of course, calculating the terms, keeping them in memory, and storing them on disk all have a price. Especially with higher tree levels, indices can become extremely large even with a modest diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 40debb57105..1307d0184f1 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -92,7 +92,7 @@ Default is `memory`. ==== Accepted Formats In much the same way the geo_point type can accept different -representation of the geo point, the filter can accept it as well: +representations of the geo point, the filter can accept it as well: [float] ===== Lat Lon As Properties diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index c6496eac39d..4591ccf5c9e 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -69,7 +69,7 @@ GET /my_locations/location/_search ==== Accepted Formats In much the same way the `geo_point` type can accept different -representation of the geo point, the filter can accept it as well: +representations of the geo point, the filter can accept it as well: [float] ===== Lat Lon As Properties diff --git a/docs/reference/query-dsl/joining-queries.asciidoc b/docs/reference/query-dsl/joining-queries.asciidoc index cfbbf5360b6..6a467fe539a 100644 --- a/docs/reference/query-dsl/joining-queries.asciidoc +++ b/docs/reference/query-dsl/joining-queries.asciidoc @@ -7,7 +7,7 @@ which are designed to scale horizontally. <>:: -Documents may contains fields of type <>. These +Documents may contain fields of type <>. These fields are used to index arrays of objects, where each object can be queried (with the `nested` query) as an independent document. 
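Since the corrected sentence above describes how `nested` fields are queried, a short sketch of issuing such a query from Java may help; the `comments` path, the field name, and the match text are hypothetical, and the builder call assumes the 5.x-era `QueryBuilders` API.

```java
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class NestedQuerySketch {
    public static void main(String[] args) {
        // Each object under the nested "comments" field is matched as an
        // independent document; ScoreMode picks how the child scores combine.
        QueryBuilder query = QueryBuilders.nestedQuery(
                "comments",
                QueryBuilders.matchQuery("comments.text", "elasticsearch"),
                ScoreMode.Avg);
        System.out.println(query); // renders the query as JSON
    }
}
```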
From 41d6529d062bcfd53a10d2c9f3c537d0c9fe1500 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 5 Oct 2016 11:16:40 -0400 Subject: [PATCH 08/13] CONSOLEify scroll docs

This causes the snippets to be tested during the build and gives helpful links to the reader to open the docs in console or copy them as curl commands.

Relates to #18160

--- docs/build.gradle | 1 - docs/reference/search/request/scroll.asciidoc | 55 ++++++++++--------- 2 files changed, 30 insertions(+), 26 deletions(-)

diff --git a/docs/build.gradle b/docs/build.gradle index 2d550e1f191..44b467dee9e 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -171,7 +171,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/search/request/highlighting.asciidoc', 'reference/search/request/inner-hits.asciidoc', 'reference/search/request/rescore.asciidoc', - 'reference/search/request/scroll.asciidoc', 'reference/search/search-template.asciidoc', 'reference/search/suggesters/completion-suggest.asciidoc', ] diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index d924a56b652..bee8e158175 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -38,7 +38,7 @@ should keep the ``search context'' alive (see <<scroll-search-context>>), eg `?s [source,js] -------------------------------------------------- -curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d ' +POST /twitter/tweet/_search?scroll=1m { "query": { "match" : { @@ -46,8 +46,9 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d ' } } } -' -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The result from the above request includes a `_scroll_id`, which should be passed to the `scroll` API in order to retrieve the next batch of results. [source,js] -------------------------------------------------- -curl -XGET <1> 'localhost:9200/_search/scroll' <2> -d' +POST <1> /_search/scroll <2> { "scroll" : "1m", <3> - "scroll_id" : "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1" <4> + "scroll_id" : "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==" <4> } -' -------------------------------------------------- +// CONSOLE +// TEST[continued s/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==/$body._scroll_id/] <1> `GET` or `POST` can be used. <2> The URL should not include the `index` or `type` name -- these @@ -73,14 +75,6 @@ curl -XGET <1> 'localhost:9200/_search/scroll' <2> -d' Each call to the `scroll` API returns the next batch of results until there are no more results left to return, ie the `hits` array is empty. -For backwards compatibility, `scroll_id` and `scroll` can be passed in the query string. -And the `scroll_id` can be passed in the request body - -[source,js] --------------------------------------------------- -curl -XGET 'localhost:9200/_search/scroll?scroll=1m' -d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1' --------------------------------------------------- IMPORTANT: The initial search request and each subsequent scroll request returns a new `_scroll_id` -- only the most recent `_scroll_id` should be used.
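As a companion to the snippets being converted here, a scroll loop driven from Java typically looks like the sketch below; it assumes the 5.x transport `Client` API, and the index name and page size are illustrative.

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;

public class ScrollSketch {

    // Drains every hit of the hypothetical "twitter" index, then frees the context.
    static void scrollAll(Client client) {
        SearchResponse response = client.prepareSearch("twitter")
                .setScroll(TimeValue.timeValueMinutes(1))
                .setSize(100)
                .get();
        while (response.getHits().getHits().length > 0) {
            // ... process response.getHits() here ...
            // always hand back the most recent _scroll_id, as the note above stresses
            response = client.prepareSearchScroll(response.getScrollId())
                    .setScroll(TimeValue.timeValueMinutes(1))
                    .get();
        }
        // release the search context as soon as the scroll is done
        client.prepareClearScroll().addScrollId(response.getScrollId()).get();
    }
}
```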
@@ -94,14 +88,15 @@ order, this is the most efficient option: [source,js] -------------------------------------------------- -curl -XGET 'localhost:9200/_search?scroll=1m' -d ' +GET /_search?scroll=1m { "sort": [ "_doc" ] } -' -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [[scroll-search-context]] ==== Keeping the search context alive @@ -130,8 +125,9 @@ You can check how many search contexts are open with the [source,js] --------------------------------------- -curl -XGET localhost:9200/_nodes/stats/indices/search?pretty +GET /_nodes/stats/indices/search --------------------------------------- +// CONSOLE ==== Clear scroll API @@ -143,37 +139,46 @@ cleared as soon as the scroll is not being used anymore using the [source,js] --------------------------------------- -curl -XDELETE localhost:9200/_search/scroll -d ' +DELETE /_search/scroll { - "scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1"] -}' + "scroll_id" : ["DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ=="] +} --------------------------------------- +// CONSOLE +// TEST[catch:missing] Multiple scroll IDs can be passed as array: [source,js] --------------------------------------- -curl -XDELETE localhost:9200/_search/scroll -d ' +DELETE /_search/scroll { - "scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1", "aGVuRmV0Y2g7NTsxOnkxaDZ"] -}' + "scroll_id" : [ + "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", + "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB" + ] +} --------------------------------------- +// CONSOLE +// TEST[catch:missing] All search contexts can be cleared with the `_all` parameter: [source,js] --------------------------------------- -curl -XDELETE localhost:9200/_search/scroll/_all +DELETE /_search/scroll/_all --------------------------------------- +// CONSOLE The `scroll_id` can also be passed as a query string parameter or in the request body. Multiple scroll IDs can be passed as comma separated values: [source,js] --------------------------------------- -curl -XDELETE localhost:9200/_search/scroll \ - -d 'c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1,aGVuRmV0Y2g7NTsxOnkxaDZ' +DELETE /_search/scroll/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==,DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB --------------------------------------- +// CONSOLE +// TEST[catch:missing] [[sliced-scroll]] ==== Sliced Scroll From 8c4108d8868e4ee05601b2af9cb70483e9a36b5f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 5 Oct 2016 11:26:19 -0400 Subject: [PATCH 09/13] Add response params to REST params did you mean This commit adds the response params as candidates for the did you mean suggestions for strict REST params handling. 
Relates #20753 --- .../java/org/elasticsearch/rest/BaseRestHandler.java | 12 ++++++++---- .../org/elasticsearch/rest/BaseRestHandlerTests.java | 7 +++++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 3bc91dc48e2..3bb6c6773b9 100644 --- a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -33,6 +33,7 @@ import org.elasticsearch.plugins.ActionPlugin; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; @@ -70,7 +71,7 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH request.unconsumedParams().stream().filter(p -> !responseParams().contains(p)).collect(Collectors.toCollection(TreeSet::new)); // validate the non-response params - if (!unconsumedParams.isEmpty()) { + if (unconsumedParams.isEmpty() == false) { String message = String.format( Locale.ROOT, "request [%s] contains unrecognized parameter%s: ", @@ -80,10 +81,13 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH for (final String unconsumedParam : unconsumedParams) { final LevensteinDistance ld = new LevensteinDistance(); final List> scoredParams = new ArrayList<>(); - for (String consumedParam : request.consumedParams()) { - final float distance = ld.getDistance(unconsumedParam, consumedParam); + final Set candidateParams = new HashSet<>(); + candidateParams.addAll(request.consumedParams()); + candidateParams.addAll(responseParams()); + for (final String candidateParam : candidateParams) { + final float distance = ld.getDistance(unconsumedParam, candidateParam); if (distance > 0.5f) { - scoredParams.add(new Tuple<>(distance, consumedParam)); + scoredParams.add(new Tuple<>(distance, candidateParam)); } } CollectionUtil.timSort(scoredParams, (a, b) -> { diff --git a/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index 8f9dd445369..ef67006c204 100644 --- a/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -94,11 +94,17 @@ public class BaseRestHandlerTests extends ESTestCase { request.param("very_close_to_parameter_2"); return channel -> executed.set(true); } + + @Override + protected Set responseParams() { + return Collections.singleton("response_param"); + } }; final HashMap params = new HashMap<>(); params.put("consumed", randomAsciiOfLength(8)); params.put("flied", randomAsciiOfLength(8)); + params.put("respones_param", randomAsciiOfLength(8)); params.put("tokenzier", randomAsciiOfLength(8)); params.put("very_close_to_parametre", randomAsciiOfLength(8)); params.put("very_far_from_every_consumed_parameter", randomAsciiOfLength(8)); @@ -111,6 +117,7 @@ public class BaseRestHandlerTests extends ESTestCase { hasToString(containsString( "request [/] contains unrecognized parameters: " + "[flied] -> did you mean [field]?, " + + "[respones_param] -> did you mean [response_param]?, " + "[tokenzier] -> did you mean [tokenizer]?, " + "[very_close_to_parametre] -> did you mean any of [very_close_to_parameter_1, very_close_to_parameter_2]?, " + "[very_far_from_every_consumed_parameter]"))); From 02dd1ba61a6bdc2163eaee327a6938f8517c7f41 Mon 
Sep 17 00:00:00 2001 From: Dimitrios Liappis Date: Wed, 5 Oct 2016 17:42:25 +0200 Subject: [PATCH 10/13] [vagrant] packaging tests: add parametrized retries for dnf install (#20749)

* Add parametrized retries for dnf install

Given that dnf doesn't do retries, installation of openjdk can sometimes be affected by checksum or network issues with mirrors offered by metalink. Allow setting the number of retries through the parameter `install_command_retries`.

* Insert delay between package install retries

Fedora's metalink occasionally returns broken mirrors. Pausing for a few seconds between retries increases the chance of receiving a different list of mirrors from metalink and success with package installation.

--- Vagrantfile | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/Vagrantfile b/Vagrantfile index 761ef20628d..8ade7c8dd30 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -156,6 +156,7 @@ def dnf_common(config) update_command: "dnf check-update", update_tracking_file: "/var/cache/dnf/last_update", install_command: "dnf install -y", + install_command_retries: 5, java_package: "java-1.8.0-openjdk-devel") if Vagrant.has_plugin?("vagrant-cachier") # Autodetect doesn't work.... @@ -205,6 +206,7 @@ def provision(config, update_command: 'required', update_tracking_file: 'required', install_command: 'required', + install_command_retries: 0, java_package: 'required', extra: '') # Vagrant runs ruby 2.0.0, which doesn't have required named parameters.... @@ -215,9 +217,27 @@ def provision(config, config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL set -e set -o pipefail + + # Retry the install command up to $2 times if it fails + retry_installcommand() { + n=0 + while true; do + #{install_command} $1 && break + let n=n+1 + if [ $n -ge $2 ]; then + echo "==> Exhausted retries to install $1" + return 1 + fi + echo "==> Retrying installing $1, attempt $((n+1))" + # Add a small delay to increase chance of metalink providing updated list of mirrors + sleep 5 + done + } + installed() { command -v $1 2>&1 >/dev/null } + install() { # Only apt-get update if we haven't in the last day if [ ! -f #{update_tracking_file} ] || [ "x$(find #{update_tracking_file} -mtime +0)" == "x#{update_tracking_file}" ]; then @@ -226,8 +246,14 @@ def provision(config, touch #{update_tracking_file} fi echo "==> Installing $1" - #{install_command} $1 + if [ #{install_command_retries} -eq 0 ] + then + #{install_command} $1 + else + retry_installcommand $1 #{install_command_retries} + fi } + ensure() { installed $1 || install $1 } From ba88d9af57c1d26f3abb30c106c406477ee87af8 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Wed, 5 Oct 2016 09:47:20 -0700 Subject: [PATCH 11/13] Remove all date 'now' methods from the Painless whitelist to ensure Painless scripts are pure functions.
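As a rough illustration of the purity argument in plain Java rather than Painless: the first method below depends only on its inputs and is safe to cache, while the second hides a wall-clock read and can return different results for identical calls (both method names are made up for this example).

```java
import java.time.Instant;

public class PuritySketch {

    // Pure: the result is fully determined by the arguments, so it is
    // reproducible and cache-friendly.
    static long ageInDays(long docTimestampMillis, long nowMillis) {
        return (nowMillis - docTimestampMillis) / 86_400_000L;
    }

    // Impure: two identical calls can disagree because of the hidden clock
    // read; this is the behavior the whitelist change rules out for scripts.
    static long ageInDaysImpure(long docTimestampMillis) {
        return (Instant.now().toEpochMilli() - docTimestampMillis) / 86_400_000L;
    }
}
```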
--- .../painless/java.time.chrono.txt | 20 ------------------- .../org/elasticsearch/painless/java.time.txt | 20 ------------------- 2 files changed, 40 deletions(-) diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt index 7374efea04b..286141e2921 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.chrono.txt @@ -82,8 +82,6 @@ class Chronology -> java.time.chrono.Chronology extends Comparable { ChronoLocalDate date(Era,int,int,int) ChronoLocalDate date(int,int,int) ChronoLocalDate dateEpochDay(long) - ChronoLocalDate dateNow() - ChronoLocalDate dateNow(ZoneId) ChronoLocalDate dateYearDay(Era,int,int) ChronoLocalDate dateYearDay(int,int) boolean equals(Object) @@ -171,8 +169,6 @@ class HijrahChronology -> java.time.chrono.HijrahChronology extends AbstractChro HijrahDate date(int,int,int) HijrahDate date(Era,int,int,int) HijrahDate dateEpochDay(long) - HijrahDate dateNow() - HijrahDate dateNow(ZoneId) HijrahDate dateYearDay(int,int) HijrahDate dateYearDay(Era,int,int) HijrahEra eraOf(int) @@ -185,8 +181,6 @@ class HijrahDate -> java.time.chrono.HijrahDate extends ChronoLocalDate,Temporal HijrahEra getEra() HijrahDate minus(TemporalAmount) HijrahDate minus(long,TemporalUnit) - HijrahDate now() - HijrahDate now(ZoneId) HijrahDate of(int,int,int) HijrahDate plus(TemporalAmount) HijrahDate plus(long,TemporalUnit) @@ -201,8 +195,6 @@ class IsoChronology -> java.time.chrono.IsoChronology extends AbstractChronology LocalDate date(int,int,int) LocalDate date(Era,int,int,int) LocalDate dateEpochDay(long) - LocalDate dateNow() - LocalDate dateNow(ZoneId) LocalDate dateYearDay(int,int) LocalDate dateYearDay(Era,int,int) IsoEra eraOf(int) @@ -219,8 +211,6 @@ class JapaneseChronology -> java.time.chrono.JapaneseChronology extends Abstract JapaneseDate date(int,int,int) JapaneseDate date(Era,int,int,int) JapaneseDate dateEpochDay(long) - JapaneseDate dateNow() - JapaneseDate dateNow(ZoneId) JapaneseDate dateYearDay(int,int) JapaneseDate dateYearDay(Era,int,int) JapaneseEra eraOf(int) @@ -228,8 +218,6 @@ class JapaneseChronology -> java.time.chrono.JapaneseChronology extends Abstract } class JapaneseDate -> java.time.chrono.JapaneseDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { - JapaneseDate now() - JapaneseDate now(ZoneId) JapaneseDate of(int,int,int) JapaneseDate from(TemporalAccessor) JapaneseChronology getChronology() @@ -259,8 +247,6 @@ class MinguoChronology -> java.time.chrono.MinguoChronology extends AbstractChro MinguoDate date(int,int,int) MinguoDate date(Era,int,int,int) MinguoDate dateEpochDay(long) - MinguoDate dateNow() - MinguoDate dateNow(ZoneId) MinguoDate dateYearDay(int,int) MinguoDate dateYearDay(Era,int,int) MinguoEra eraOf(int) @@ -268,8 +254,6 @@ class MinguoChronology -> java.time.chrono.MinguoChronology extends AbstractChro } class MinguoDate -> java.time.chrono.MinguoDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { - MinguoDate now() - MinguoDate now(ZoneId) MinguoDate of(int,int,int) MinguoDate from(TemporalAccessor) MinguoChronology getChronology() @@ -288,8 +272,6 @@ class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology extends ThaiBuddhistDate date(int,int,int) ThaiBuddhistDate date(Era,int,int,int) 
ThaiBuddhistDate dateEpochDay(long) - ThaiBuddhistDate dateNow() - ThaiBuddhistDate dateNow(ZoneId) ThaiBuddhistDate dateYearDay(int,int) ThaiBuddhistDate dateYearDay(Era,int,int) ThaiBuddhistEra eraOf(int) @@ -297,8 +279,6 @@ class ThaiBuddhistChronology -> java.time.chrono.ThaiBuddhistChronology extends } class ThaiBuddhistDate -> java.time.chrono.ThaiBuddhistDate extends ChronoLocalDate,Temporal,TemporalAccessor,TemporalAdjuster,Comparable,Object { - ThaiBuddhistDate now() - ThaiBuddhistDate now(ZoneId) ThaiBuddhistDate of(int,int,int) ThaiBuddhistDate from(TemporalAccessor) ThaiBuddhistChronology getChronology() diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt index 4481004fdf6..4c99be712ad 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.time.txt @@ -103,8 +103,6 @@ class Instant -> java.time.Instant extends Comparable,Temporal,TemporalAccessor, Instant minusMillis(long) Instant minusNanos(long) Instant minusSeconds(long) - Instant now() - Instant now(Clock) Instant ofEpochSecond(long) Instant ofEpochSecond(long,long) Instant ofEpochMilli(long) @@ -143,8 +141,6 @@ class LocalDate -> java.time.LocalDate extends ChronoLocalDate,Temporal,Temporal LocalDate minusMonths(long) LocalDate minusWeeks(long) LocalDate minusDays(long) - LocalDate now() - LocalDate now(ZoneId) LocalDate of(int,int,int) LocalDate ofYearDay(int,int) LocalDate ofEpochDay(long) @@ -191,8 +187,6 @@ class LocalDateTime -> java.time.LocalDateTime extends ChronoLocalDateTime,Tempo LocalDateTime minusSeconds(long) LocalDateTime minusWeeks(long) LocalDateTime minusYears(long) - LocalDateTime now() - LocalDateTime now(ZoneId) LocalDateTime of(LocalDate,LocalTime) LocalDateTime of(int,int,int,int,int) LocalDateTime of(int,int,int,int,int,int) @@ -246,8 +240,6 @@ class LocalTime -> java.time.LocalTime extends Temporal,TemporalAccessor,Tempora LocalTime minusMinutes(long) LocalTime minusNanos(long) LocalTime minusSeconds(long) - LocalTime now() - LocalTime now(ZoneId) LocalTime of(int,int) LocalTime of(int,int,int) LocalTime of(int,int,int,int) @@ -283,8 +275,6 @@ class MonthDay -> java.time.MonthDay extends TemporalAccessor,TemporalAdjuster,C boolean isAfter(MonthDay) boolean isBefore(MonthDay) boolean isValidYear(int) - MonthDay now() - MonthDay now(ZoneId) MonthDay of(int,int) MonthDay parse(CharSequence) MonthDay parse(CharSequence,DateTimeFormatter) @@ -325,8 +315,6 @@ class OffsetDateTime -> java.time.OffsetDateTime extends Temporal,TemporalAccess OffsetDateTime minusMinutes(long) OffsetDateTime minusSeconds(long) OffsetDateTime minusNanos(long) - OffsetDateTime now() - OffsetDateTime now(ZoneId) OffsetDateTime of(LocalDate,LocalTime,ZoneOffset) OffsetDateTime of(LocalDateTime,ZoneOffset) OffsetDateTime of(int,int,int,int,int,int,int,ZoneOffset) @@ -380,8 +368,6 @@ class OffsetTime -> java.time.OffsetTime extends Temporal,TemporalAccessor,Tempo boolean isAfter(OffsetTime) boolean isBefore(OffsetTime) boolean isEqual(OffsetTime) - OffsetTime now() - OffsetTime now(ZoneId) OffsetTime of(LocalTime,ZoneOffset) OffsetTime of(int,int,int,int,ZoneOffset) OffsetTime ofInstant(Instant,ZoneId) @@ -460,8 +446,6 @@ class Year -> java.time.Year extends Temporal,TemporalAccessor,TemporalAdjuster, Year minus(TemporalAmount) Year minus(long,TemporalUnit) Year minusYears(long) - Year 
now() - Year now(ZoneId) Year of(int) Year parse(CharSequence) Year parse(CharSequence,DateTimeFormatter) @@ -491,8 +475,6 @@ class YearMonth -> java.time.YearMonth extends Temporal,TemporalAccessor,Tempora YearMonth minus(long,TemporalUnit) YearMonth minusYears(long) YearMonth minusMonths(long) - YearMonth now() - YearMonth now(ZoneId) YearMonth of(int,int) YearMonth parse(CharSequence) YearMonth parse(CharSequence,DateTimeFormatter) @@ -530,8 +512,6 @@ class ZonedDateTime -> java.time.ZonedDateTime extends ChronoZonedDateTime,Tempo ZonedDateTime minusMinutes(long) ZonedDateTime minusSeconds(long) ZonedDateTime minusNanos(long) - ZonedDateTime now() - ZonedDateTime now(ZoneId) ZonedDateTime of(LocalDate,LocalTime,ZoneId) ZonedDateTime of(LocalDateTime,ZoneId) ZonedDateTime of(int,int,int,int,int,int,int,ZoneId) From 15950b71b8ccec44e7d64b38a801573730d77b1b Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Wed, 5 Oct 2016 14:23:25 -0400 Subject: [PATCH 12/13] BalancedShardsAllocator code improvements (#20746) This commit improves the logic flow of BalancedShardsAllocator in preparation for separating out components of this class to be used in the cluster allocation explain APIs. In particular, this commit: 1. Adds a minimum value (0.0) for the index/shard balance factor settings. 2. Makes the Balancer data structures immutable and pre-calculated at construction time. 3. Removes difficult-to-follow labeled blocks / GOTOs. 4. Improves the logic for skipping over the same replica set when one of the replicas received a NO decision. 5. Separates the decision-making logic for a single shard from the logic to iterate over all unassigned shards. --- .../allocator/BalancedShardsAllocator.java | 245 ++++++++++-------- 1 file changed, 134 insertions(+), 111 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 50c2d28cf01..e35df67ae62 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -73,9 +73,9 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { public static final Setting INDEX_BALANCE_FACTOR_SETTING = - Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, Property.Dynamic, Property.NodeScope); + Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, 0.0f, Property.Dynamic, Property.NodeScope); public static final Setting SHARD_BALANCE_FACTOR_SETTING = - Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, Property.Dynamic, Property.NodeScope); + Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, 0.0f, Property.Dynamic, Property.NodeScope); public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, Property.Dynamic, Property.NodeScope); @@ -210,7 +210,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards */ public static class Balancer { private final Logger logger; - private final Map nodes = new HashMap<>(); + private final Map nodes; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; private final WeightFunction weight; @@ 
-218,6 +218,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards private final float threshold; private final MetaData metaData; private final float avgShardsPerNode; + private final NodeSorter sorter; public Balancer(Logger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) { this.logger = logger; @@ -227,7 +228,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards this.routingNodes = allocation.routingNodes(); this.metaData = allocation.metaData(); avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size(); - buildModelFromAssigned(); + nodes = Collections.unmodifiableMap(buildModelFromAssigned()); + sorter = newNodeSorter(); } /** @@ -304,11 +306,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } public Map weighShard(ShardRouting shard) { - final NodeSorter sorter = newNodeSorter(); final ModelNode[] modelNodes = sorter.modelNodes; final float[] weights = sorter.weights; - buildWeightOrderedIndices(sorter); + buildWeightOrderedIndices(); Map nodes = new HashMap<>(modelNodes.length); float currentNodeWeight = 0.0f; for (int i = 0; i < modelNodes.length; i++) { @@ -332,20 +333,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * weight of the maximum node and the minimum node according to the * {@link WeightFunction}. This weight is calculated per index to * distribute shards evenly per index. The balancer tries to relocate - shards only if the delta exceeds the threshold. If the default case + shards only if the delta exceeds the threshold. In the default case the threshold is set to 1.0 to enforce gaining relocation * only, or in other words relocations that move the weight delta closer * to 0.0 */ private void balanceByWeights() { - final NodeSorter sorter = newNodeSorter(); final AllocationDeciders deciders = allocation.deciders(); final ModelNode[] modelNodes = sorter.modelNodes; final float[] weights = sorter.weights; - for (String index : buildWeightOrderedIndices(sorter)) { + for (String index : buildWeightOrderedIndices()) { IndexMetaData indexMetaData = metaData.index(index); - // find nodes that have a shard of this index or where shards of this index are allowed to stay + // find nodes that have a shard of this index or where shards of this index are allowed to be allocated to, // move these nodes to the front of modelNodes so that we can only balance based on these nodes int relevantNodes = 0; for (int i = 0; i < modelNodes.length; i++) { @@ -440,14 +440,14 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * allocations on added nodes from one index when the weight parameters * for global balance overrule the index balance at an intermediate * state. For example this can happen if we have 3 nodes and 3 indices - * with 3 shards and 1 shard. At the first stage all three nodes hold - 2 shard for each index. now we add another node and the first index - is balanced moving 3 two of the nodes over to the new node since it + * with 3 primary and 1 replica shards. At the first stage all three nodes hold + 2 shards for each index. Now we add another node and the first index + is balanced moving three shards from two of the nodes over to the new node since it has no shards yet and global balance for the node is way below average. To re-balance we need to move shards back eventually likely to the nodes we relocated them from. 
*/ - private String[] buildWeightOrderedIndices(NodeSorter sorter) { + private String[] buildWeightOrderedIndices() { final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class); final float[] deltas = new float[indices.length]; for (int i = 0; i < deltas.length; i++) { @@ -501,7 +501,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are // offloading the shards. - final NodeSorter sorter = newNodeSorter(); for (Iterator it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext(); ) { ShardRouting shardRouting = it.next(); // we can only move started shards... @@ -511,7 +510,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards RoutingNode routingNode = sourceNode.getRoutingNode(); Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation); if (decision.type() == Decision.Type.NO) { - moveShard(sorter, shardRouting, sourceNode, routingNode); + moveShard(shardRouting, sourceNode, routingNode); } } } @@ -520,7 +519,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards /** * Move started shard to the minimal eligible node with respect to the weight function */ - private void moveShard(NodeSorter sorter, ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) { + private void moveShard(ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) { logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node()); sorter.reset(shardRouting.getIndexName()); /* @@ -557,7 +556,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * on the target node which we respect during the allocation / balancing * process. In short, this method recreates the status-quo in the cluster. 
*/ - private void buildModelFromAssigned() { + private Map buildModelFromAssigned() { + Map nodes = new HashMap<>(); for (RoutingNode rn : routingNodes) { ModelNode node = new ModelNode(rn); nodes.put(rn.nodeId(), node); @@ -572,6 +572,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } } } + return nodes; } /** @@ -626,91 +627,37 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards do { for (int i = 0; i < primaryLength; i++) { ShardRouting shard = primary[i]; - if (!shard.primary()) { - final Decision decision = deciders.canAllocate(shard, allocation); - if (decision.type() == Type.NO) { - UnassignedInfo.AllocationStatus allocationStatus = UnassignedInfo.AllocationStatus.fromDecision(decision); - unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); - while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) { - unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes()); - } - continue; - } else { + Tuple allocationDecision = allocateUnassignedShard(shard, throttledNodes); + final Decision decision = allocationDecision.v1(); + final ModelNode minNode = allocationDecision.v2(); + + if (decision.type() == Type.YES) { + if (logger.isTraceEnabled()) { + logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); + } + + final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); + shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); + minNode.addShard(shard); + if (!shard.primary()) { + // copy over the same replica shards to the secondary array so they will get allocated + // in a subsequent iteration, allowing replicas of other shards to be allocated first while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) { secondary[secondaryLength++] = primary[++i]; } } - } - assert !shard.assignedToNode() : shard; - /* find an node with minimal weight we can allocate on*/ - float minWeight = Float.POSITIVE_INFINITY; - ModelNode minNode = null; - Decision decision = null; - if (throttledNodes.size() < nodes.size()) { - /* Don't iterate over an identity hashset here the - * iteration order is different for each run and makes testing hard */ - for (ModelNode node : nodes.values()) { - if (throttledNodes.contains(node)) { - continue; - } - if (!node.containsShard(shard)) { - // simulate weight if we would add shard to node - float currentWeight = weight.weightShardAdded(this, node, shard.getIndexName()); - /* - * Unless the operation is not providing any gains we - * don't check deciders - */ - if (currentWeight <= minWeight) { - Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(), allocation); - NOUPDATE: - if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) { - if (currentWeight == minWeight) { - /* we have an equal weight tie breaking: - * 1. if one decision is YES prefer it - * 2. prefer the node that holds the primary for this index with the next id in the ring ie. - * for the 3 shards 2 replica case we try to build up: - * 1 2 0 - * 2 0 1 - * 0 1 2 - * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater - * than the id of the shard we need to assign. This works find when new indices are created since - * primaries are added first and we only add one shard set a time in this algorithm. 
- */ - if (currentDecision.type() == decision.type()) { - final int repId = shard.id(); - final int nodeHigh = node.highestPrimary(shard.index().getName()); - final int minNodeHigh = minNode.highestPrimary(shard.getIndexName()); - if ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) && (nodeHigh < minNodeHigh)) - || (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)) { - // nothing to set here; the minNode, minWeight, and decision get set below - } else { - break NOUPDATE; - } - } else if (currentDecision.type() != Type.YES) { - break NOUPDATE; - } - } - minNode = node; - minWeight = currentWeight; - decision = currentDecision; - } - } - } + } else { + // did *not* receive a YES decision + if (logger.isTraceEnabled()) { + logger.trace("No eligible node found to assign shard [{}] decision [{}]", shard, decision.type()); } - } - assert (decision == null) == (minNode == null); - if (minNode != null) { - final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - if (decision.type() == Type.YES) { - if (logger.isTraceEnabled()) { - logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); - } - shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); - minNode.addShard(shard); - continue; // don't add to ignoreUnassigned - } else { + if (minNode != null) { + // throttle decision scenario + assert decision.type() == Type.THROTTLE; + final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); final RoutingNode node = minNode.getRoutingNode(); final Decision.Type nodeLevelDecision = deciders.canAllocate(node, allocation).type(); @@ -721,21 +668,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards assert nodeLevelDecision == Type.NO; throttledNodes.add(minNode); } + } else { + assert decision.type() == Type.NO; + if (logger.isTraceEnabled()) { + logger.trace("No Node found to assign shard [{}]", shard); + } } - if (logger.isTraceEnabled()) { - logger.trace("No eligible node found to assign shard [{}] decision [{}]", shard, decision.type()); - } - } else if (logger.isTraceEnabled()) { - logger.trace("No Node found to assign shard [{}]", shard); - } - assert decision == null || decision.type() == Type.THROTTLE; - UnassignedInfo.AllocationStatus allocationStatus = - decision == null ? 
UnassignedInfo.AllocationStatus.DECIDERS_NO : - UnassignedInfo.AllocationStatus.fromDecision(decision); - unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); - if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas - while(secondaryLength > 0 && comparator.compare(shard, secondary[secondaryLength-1]) == 0) { - unassigned.ignoreShard(secondary[--secondaryLength], allocationStatus, allocation.changes()); + + UnassignedInfo.AllocationStatus allocationStatus = UnassignedInfo.AllocationStatus.fromDecision(decision); + unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); + if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas + while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) { + unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes()); + } } } } @@ -748,6 +693,84 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards // clear everything we have either added it or moved to ignoreUnassigned } + /** + * Make a decision for allocating an unassigned shard. This method returns two values in a tuple: the + * first value is the {@link Decision} taken to allocate the unassigned shard, the second value is the + * {@link ModelNode} representing the node that the shard should be assigned to. If the decision returned + * is of type {@link Type#NO}, then the assigned node will be null. + */ + private Tuple allocateUnassignedShard(final ShardRouting shard, final Set throttledNodes) { + assert !shard.assignedToNode() : "not an unassigned shard: " + shard; + if (allocation.deciders().canAllocate(shard, allocation).type() == Type.NO) { + // NO decision for allocating the shard, irrespective of any particular node, so exit early + return Tuple.tuple(Decision.NO, null); + } + + /* find a node with minimal weight we can allocate on */ + float minWeight = Float.POSITIVE_INFINITY; + ModelNode minNode = null; + Decision decision = null; + if (throttledNodes.size() < nodes.size()) { + /* Don't iterate over an identity hashset here; the + * iteration order is different for each run and makes testing hard */ + for (ModelNode node : nodes.values()) { + if (throttledNodes.contains(node)) { + continue; + } + if (!node.containsShard(shard)) { + // simulate weight if we would add shard to node + float currentWeight = weight.weightShardAdded(this, node, shard.getIndexName()); + /* + * We don't check the deciders unless the operation + * provides a gain + */ + if (currentWeight <= minWeight) { + Decision currentDecision = allocation.deciders().canAllocate(shard, node.getRoutingNode(), allocation); + if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) { + final boolean updateMinNode; + if (currentWeight == minWeight) { + /* we have an equal weight tie-breaking: + * 1. if one decision is YES prefer it + * 2. prefer the node that holds the primary for this index with the next id in the ring, i.e. + * for the 3 shards 2 replica case we try to build up: + * 1 2 0 + * 2 0 1 + * 0 1 2 + * such that if we need to tie-break we try to prefer the node holding a shard with the minimal id greater + * than the id of the shard we need to assign. This works fine when new indices are created since + * primaries are added first and we only add one shard set at a time in this algorithm. 
*/ + if (currentDecision.type() == decision.type()) { + final int repId = shard.id(); + final int nodeHigh = node.highestPrimary(shard.index().getName()); + final int minNodeHigh = minNode.highestPrimary(shard.getIndexName()); + updateMinNode = ((((nodeHigh > repId && minNodeHigh > repId) + || (nodeHigh < repId && minNodeHigh < repId)) + && (nodeHigh < minNodeHigh)) + || (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)); + } else { + updateMinNode = currentDecision.type() == Type.YES; + } + } else { + updateMinNode = true; + } + if (updateMinNode) { + minNode = node; + minWeight = currentWeight; + decision = currentDecision; + } + } + } + } + } + if (decision == null) { + // decision was not set and a node was not assigned, so treat it as a NO decision + decision = Decision.NO; + } + return Tuple.tuple(decision, minNode); + } + /** * Tries to find a relocation from the max node to the minimal node for an arbitrary shard of the given index on the * balance model. Iff this method returns a true the relocation has already been executed on the From 134b1f9b4dbb91f492d7a34d2977251d8803850c Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 5 Oct 2016 21:40:27 +0200 Subject: [PATCH 13/13] Prevent thread suspension when inside SecurityManager (#20770) LongGCDisruption suspends and resumes node threads but respects several `unsafe` class name patterns where it's unsafe to suspend. For instance, log4j uses a global lock, so we can't suspend a thread that is currently calling into log4j. The same is true for the security manager: like log4j, it is a shared resource between the test and the node that is _suspended_. This change adds `java.lang.SecurityManager` to the unsafe patterns. This prevents the test framework from deadlocking if a node's thread is suspended while it's calling into the security manager, which uses synchronized maps etc. --- .../org/elasticsearch/test/disruption/LongGCDisruption.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java index 944ddb9b05f..6985d2dcf17 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java @@ -39,7 +39,9 @@ public class LongGCDisruption extends SingleNodeDisruption { private static final Pattern[] unsafeClasses = new Pattern[]{ // logging has shared JVM locks - we may suspend a thread and block other nodes from doing their thing - Pattern.compile("logging\\.log4j") + Pattern.compile("logging\\.log4j"), + // security manager is shared across all nodes AND it uses synced hashmaps internally + Pattern.compile("java\\.lang\\.SecurityManager") }; protected final String disruptedNode;
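For reference, the suspend-safety check described above can be sketched as follows. This is a simplified, hypothetical reconstruction of the idea (the class and method names are invented for illustration), not the actual LongGCDisruption source: before suspending a node thread, scan its current stack trace and back off if any frame is inside a class matching one of the unsafe patterns.

    import java.util.regex.Pattern;

    public class SuspendSafetyCheck {
        // Classes holding JVM-wide locks shared between the test runner and the
        // "suspended" node; pausing a thread inside one of these can deadlock.
        private static final Pattern[] UNSAFE_CLASSES = new Pattern[] {
            Pattern.compile("logging\\.log4j"),
            Pattern.compile("java\\.lang\\.SecurityManager")
        };

        // Returns true if any frame of the thread's current stack trace is in an
        // unsafe class; the disruption should then back off and retry later
        // rather than suspend the thread mid-call.
        static boolean stackIsUnsafe(Thread thread) {
            for (StackTraceElement frame : thread.getStackTrace()) {
                for (Pattern unsafe : UNSAFE_CLASSES) {
                    if (unsafe.matcher(frame.getClassName()).find()) {
                        return true;
                    }
                }
            }
            return false;
        }

        public static void main(String[] args) {
            // The current thread is not inside log4j or the SecurityManager here,
            // so this prints false.
            System.out.println(stackIsUnsafe(Thread.currentThread()));
        }
    }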