From 41e9ed13d6bfdbf12f4ffff3f758ba727b57e8de Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Thu, 24 Nov 2016 15:29:50 +0100
Subject: [PATCH 1/8] [TEST] Fix AbstractBytesReferenceTestCase#testSlice to
 not assert on offset

---
 .../common/bytes/AbstractBytesReferenceTestCase.java | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java
index aae6522da12..f3fd0e2f9c0 100644
--- a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java
@@ -74,9 +74,14 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase {
         int sliceLength = Math.max(0, length - sliceOffset - 1);
         BytesReference slice = pbr.slice(sliceOffset, sliceLength);
         assertEquals(sliceLength, slice.length());
+        for (int i = 0; i < sliceLength; i++) {
+            assertEquals(pbr.get(i + sliceOffset), slice.get(i));
+        }
         BytesRef singlePageOrNull = getSinglePageOrNull(slice);
         if (singlePageOrNull != null) {
-            assertEquals(sliceOffset, singlePageOrNull.offset);
+            // we can't assert on the offset since, if the slice is shorter than the reference,
+            // the offset can be anywhere
+            assertEquals(sliceLength, singlePageOrNull.length);
         }
     }

From 0871073f9b676c00e310ebb1728f593a3c08d6e0 Mon Sep 17 00:00:00 2001
From: Adrin Jalali
Date: Thu, 24 Nov 2016 16:05:41 +0100
Subject: [PATCH 2/8] clarification on geo distance sorting (#21779)

* clarification on geo distance sorting

* applying the suggested change
---
 docs/reference/search/request/sort.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc
index 5a7c3a51f01..431f649aa08 100644
--- a/docs/reference/search/request/sort.asciidoc
+++ b/docs/reference/search/request/sort.asciidoc
@@ -213,7 +213,7 @@ then Elasticsearch will handle it as if there was a mapping of type
 [[geo-sorting]]
 ==== Geo Distance Sorting

-Allow to sort by `_geo_distance`. Here is an example:
+Allow to sort by `_geo_distance`. Here is an example, assuming `pin.location` is a field of type `geo_point`:

 [source,js]
 --------------------------------------------------
@@ -243,7 +243,7 @@ GET /_search
     How to compute the distance. Can either be `sloppy_arc` (default), `arc`
     (slightly more precise but significantly slower) or `plane` (faster, but
     inaccurate on long distances and close to the poles).

-`sort_mode`::
+`mode`::

     What to do in case a field has several geo points. By default, the
     shortest distance is taken into account when sorting in ascending order
     and the

From aa60e5cc07ef3d87fd7fc9adc3fc481785256924 Mon Sep 17 00:00:00 2001
From: markharwood
Date: Thu, 17 Nov 2016 15:32:59 +0000
Subject: [PATCH 3/8] Aggregations - support for partitioning the set of terms
 used in aggregations, so that multiple requests can be made without trying
 to compute everything in one request.
Closes #21487
---
 .../bucket/terms/TermsAggregatorFactory.java |   5 +-
 .../bucket/terms/support/IncludeExclude.java | 153 +++++++++++++++++-
 .../aggregations/bucket/DoubleTermsIT.java   |  40 +++++
 .../aggregations/bucket/LongTermsIT.java     |  45 ++++++
 .../aggregations/bucket/StringTermsIT.java   |  43 +++++
 .../bucket/terms-aggregation.asciidoc        |  68 +++++++-
 6 files changed, 348 insertions(+), 6 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
index c01377d9761..3a7053d26d2 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
@@ -131,7 +131,10 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory 0) { valids = new LongHashSet(numValids); }
@@ -96,6 +118,13 @@ public class IncludeExclude implements Writeable, ToXContent {
         public abstract boolean accept(BytesRef value);
     }

+    class PartitionedStringFilter extends StringFilter {
+        @Override
+        public boolean accept(BytesRef value) {
+            return Math.floorMod(value.hashCode(), incNumPartitions) == incZeroBasedPartition;
+        }
+    }
+
     static class AutomatonBackedStringFilter extends StringFilter {

         private final ByteRunAutomaton runAutomaton;

@@ -138,6 +167,25 @@ public class IncludeExclude implements Writeable, ToXContent {

     }

+    class PartitionedOrdinalsFilter extends OrdinalsFilter {
+
+        @Override
+        public LongBitSet acceptedGlobalOrdinals(RandomAccessOrds globalOrdinals) throws IOException {
+            final long numOrds = globalOrdinals.getValueCount();
+            final LongBitSet acceptedGlobalOrdinals = new LongBitSet(numOrds);
+            final TermsEnum termEnum = globalOrdinals.termsEnum();
+
+            BytesRef term = termEnum.next();
+            while (term != null) {
+                if (Math.floorMod(term.hashCode(), incNumPartitions) == incZeroBasedPartition) {
+                    acceptedGlobalOrdinals.set(termEnum.ord());
+                }
+                term = termEnum.next();
+            }
+            return acceptedGlobalOrdinals;
+        }
+    }
+
     static class AutomatonBackedOrdinalsFilter extends OrdinalsFilter {

         private final CompiledAutomaton compiled;

@@ -205,6 +253,8 @@ public class IncludeExclude implements Writeable, ToXContent {

     private final RegExp include, exclude;
     private final SortedSet<BytesRef> includeValues, excludeValues;
+    private final int incZeroBasedPartition;
+    private final int incNumPartitions;

     /**
      * @param include   The regular expression pattern for the terms to be included
@@ -218,6 +268,8 @@ public class IncludeExclude implements Writeable, ToXContent {
         this.exclude = exclude;
         this.includeValues = null;
         this.excludeValues = null;
+        this.incZeroBasedPartition = 0;
+        this.incNumPartitions = 0;
     }

     public IncludeExclude(String include, String exclude) {
@@ -234,6 +286,8 @@ public class IncludeExclude implements Writeable, ToXContent {
         }
         this.include = null;
         this.exclude = null;
+        this.incZeroBasedPartition = 0;
+        this.incNumPartitions = 0;
         this.includeValues = includeValues;
         this.excludeValues = excludeValues;
     }
@@ -250,6 +304,21 @@ public class IncludeExclude implements Writeable, ToXContent {
         this(convertToBytesRefSet(includeValues), convertToBytesRefSet(excludeValues));
     }

+    public IncludeExclude(int partition, int numPartitions) {
+        if (partition < 0 || partition >= numPartitions) {
+            throw new IllegalArgumentException("Partition must be >= 0 and < numPartitions, which is " + numPartitions);
+        }
this.incZeroBasedPartition = partition; + this.incNumPartitions = numPartitions; + this.include = null; + this.exclude = null; + this.includeValues = null; + this.excludeValues = null; + + } + + + /** * Read from a stream. */ @@ -257,6 +326,8 @@ public class IncludeExclude implements Writeable, ToXContent { if (in.readBoolean()) { includeValues = null; excludeValues = null; + incZeroBasedPartition = 0; + incNumPartitions = 0; String includeString = in.readOptionalString(); include = includeString == null ? null : new RegExp(includeString); String excludeString = in.readOptionalString(); @@ -283,6 +354,13 @@ public class IncludeExclude implements Writeable, ToXContent { } else { excludeValues = null; } + if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) { + incNumPartitions = in.readVInt(); + incZeroBasedPartition = in.readVInt(); + } else { + incNumPartitions = 0; + incZeroBasedPartition = 0; + } } @Override @@ -309,6 +387,10 @@ public class IncludeExclude implements Writeable, ToXContent { out.writeBytesRef(value); } } + if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) { + out.writeVInt(incNumPartitions); + out.writeVInt(incZeroBasedPartition); + } } } @@ -436,11 +518,26 @@ public class IncludeExclude implements Writeable, ToXContent { if (token == XContentParser.Token.START_OBJECT) { if (parseFieldMatcher.match(currentFieldName, INCLUDE_FIELD)) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + + // This "include":{"pattern":"foo.*"} syntax is undocumented since 2.0 + // Regexes should be "include":"foo.*" if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { if (parseFieldMatcher.match(currentFieldName, PATTERN_FIELD)) { otherOptions.put(INCLUDE_FIELD, parser.text()); + } else { + throw new ElasticsearchParseException( + "Unknown string parameter in Include/Exclude clause: " + currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + if (parseFieldMatcher.match(currentFieldName, NUM_PARTITIONS_FIELD)) { + otherOptions.put(NUM_PARTITIONS_FIELD, parser.intValue()); + } else if (parseFieldMatcher.match(currentFieldName, PARTITION_FIELD)) { + otherOptions.put(INCLUDE_FIELD, parser.intValue()); + } else { + throw new ElasticsearchParseException( + "Unknown numeric parameter in Include/Exclude clause: " + currentFieldName); } } } @@ -480,15 +577,43 @@ public class IncludeExclude implements Writeable, ToXContent { public IncludeExclude createIncludeExclude(Map otherOptions) { Object includeObject = otherOptions.get(INCLUDE_FIELD); String include = null; + int partition = -1; + int numPartitions = -1; SortedSet includeValues = null; if (includeObject != null) { if (includeObject instanceof String) { include = (String) includeObject; } else if (includeObject instanceof SortedSet) { includeValues = (SortedSet) includeObject; + } else if (includeObject instanceof Integer) { + partition = (Integer) includeObject; + Object numPartitionsObject = otherOptions.get(NUM_PARTITIONS_FIELD); + if (numPartitionsObject instanceof Integer) { + numPartitions = (Integer) numPartitionsObject; + if (numPartitions < 2) { + throw new IllegalArgumentException(NUM_PARTITIONS_FIELD.getPreferredName() + " must be >1"); + } + if (partition < 0 || partition >= numPartitions) { + throw new IllegalArgumentException( + PARTITION_FIELD.getPreferredName() + " must be >=0 and <" + numPartitions); + } + } else { + if (numPartitionsObject == null) { + throw new 
IllegalArgumentException(NUM_PARTITIONS_FIELD.getPreferredName() + " parameter is missing");
+                            }
+                            throw new IllegalArgumentException(NUM_PARTITIONS_FIELD.getPreferredName() + " value must be an integer");
+                        }
                     }
                 }
                 Object excludeObject = otherOptions.get(EXCLUDE_FIELD);
+                if (numPartitions > 0) {
+                    if (excludeObject != null) {
+                        throw new IllegalArgumentException("Partitioned Include cannot be used in combination with excludes");
+                    }
+                    return new IncludeExclude(partition, numPartitions);
+                }
+
                 String exclude = null;
                 SortedSet<BytesRef> excludeValues = null;
                 if (excludeObject != null) {
@@ -517,6 +642,10 @@ public class IncludeExclude implements Writeable, ToXContent {
         return include != null || exclude != null;
     }

+    public boolean isPartitionBased() {
+        return incNumPartitions > 0;
+    }
+
     private Automaton toAutomaton() {
         Automaton a = null;
         if (include != null) {
@@ -538,6 +667,9 @@ public class IncludeExclude implements Writeable, ToXContent {
         if (isRegexBased()) {
             return new AutomatonBackedStringFilter(toAutomaton());
         }
+        if (isPartitionBased()) {
+            return new PartitionedStringFilter();
+        }
         return new TermListBackedStringFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));
     }

@@ -559,13 +691,22 @@ public class IncludeExclude implements Writeable, ToXContent {
         if (isRegexBased()) {
             return new AutomatonBackedOrdinalsFilter(toAutomaton());
         }
+        if (isPartitionBased()) {
+            return new PartitionedOrdinalsFilter();
+        }
+
         return new TermListBackedOrdinalsFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format));
     }

     public LongFilter convertToLongFilter(DocValueFormat format) {
+
+        if (isPartitionBased()) {
+            return new PartitionedLongFilter();
+        }
+
         int numValids = includeValues == null ? 0 : includeValues.size();
         int numInvalids = excludeValues == null ? 0 : excludeValues.size();
-        LongFilter result = new LongFilter(numValids, numInvalids);
+        SetBackedLongFilter result = new SetBackedLongFilter(numValids, numInvalids);
         if (includeValues != null) {
             for (BytesRef val : includeValues) {
                 result.addAccept(format.parseLong(val.utf8ToString(), false, null));
             }
@@ -580,9 +721,13 @@
     }

     public LongFilter convertToDoubleFilter() {
+        if (isPartitionBased()) {
+            return new PartitionedLongFilter();
+        }
+
         int numValids = includeValues == null ? 0 : includeValues.size();
         int numInvalids = excludeValues == null ?
0 : excludeValues.size(); - LongFilter result = new LongFilter(numValids, numInvalids); + SetBackedLongFilter result = new SetBackedLongFilter(numValids, numInvalids); if (includeValues != null) { for (BytesRef val : includeValues) { double dval = Double.parseDouble(val.utf8ToString()); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 1dc9943e8a3..ef477553bac 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.avg.Avg; import org.elasticsearch.search.aggregations.metrics.max.Max; @@ -48,10 +49,12 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -359,6 +362,43 @@ public class DoubleTermsIT extends AbstractTermsTestCase { assertThat(bucket.getDocCount(), equalTo(1L)); } } + + public void testSingleValueFieldWithPartitionedFiltering() throws Exception { + runTestFieldWithPartitionedFiltering(SINGLE_VALUED_FIELD_NAME); + } + + public void testMultiValueFieldWithPartitionedFiltering() throws Exception { + runTestFieldWithPartitionedFiltering(MULTI_VALUED_FIELD_NAME); + } + + private void runTestFieldWithPartitionedFiltering(String field) throws Exception { + // Find total number of unique terms + SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") + .addAggregation(terms("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))) + .execute().actionGet(); + assertSearchResponse(allResponse); + Terms terms = allResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + int expectedCardinality = terms.getBuckets().size(); + + // Gather terms using partitioned aggregations + final int numPartitions = randomIntBetween(2, 4); + Set foundTerms = new HashSet<>(); + for (int partition = 0; partition < numPartitions; partition++) { + SearchResponse response = client().prepareSearch("idx").setTypes("type").addAggregation(terms("terms").field(field) + .includeExclude(new IncludeExclude(partition, numPartitions)).collectMode(randomFrom(SubAggCollectionMode.values()))) + .execute().actionGet(); + assertSearchResponse(response); + terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + for (Bucket bucket : terms.getBuckets()) { + assertTrue(foundTerms.add(bucket.getKeyAsNumber())); + } + } + assertEquals(expectedCardinality, foundTerms.size()); + } public void testSingleValueFieldOrderedByTermAsc() throws Exception { 
SearchResponse response = client().prepareSearch("idx").setTypes("type") diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 1739d09a054..35905f91a91 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.avg.Avg; import org.elasticsearch.search.aggregations.metrics.max.Max; @@ -47,10 +48,12 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -326,6 +329,48 @@ public class LongTermsIT extends AbstractTermsTestCase { assertThat(bucket.getDocCount(), equalTo(1L)); } } + + + + public void testSingleValueFieldWithPartitionedFiltering() throws Exception { + runTestFieldWithPartitionedFiltering(SINGLE_VALUED_FIELD_NAME); + } + + public void testMultiValueFieldWithPartitionedFiltering() throws Exception { + runTestFieldWithPartitionedFiltering(MULTI_VALUED_FIELD_NAME); + } + + private void runTestFieldWithPartitionedFiltering(String field) throws Exception { + // Find total number of unique terms + SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") + .addAggregation(terms("terms").field(field).collectMode(randomFrom(SubAggCollectionMode.values()))).execute().actionGet(); + assertSearchResponse(allResponse); + Terms terms = allResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + int expectedCardinality = terms.getBuckets().size(); + + // Gather terms using partitioned aggregations + final int numPartitions = randomIntBetween(2, 4); + Set foundTerms = new HashSet<>(); + for (int partition = 0; partition < numPartitions; partition++) { + SearchResponse response = client().prepareSearch("idx").setTypes("type") + .addAggregation( + terms("terms").field(field).includeExclude(new IncludeExclude(partition, numPartitions)) + .collectMode(randomFrom(SubAggCollectionMode.values()))) + .execute().actionGet(); + assertSearchResponse(response); + terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + for (Bucket bucket : terms.getBuckets()) { + assertFalse(foundTerms.contains(bucket.getKeyAsNumber())); + foundTerms.add(bucket.getKeyAsNumber()); + } + } + assertEquals(expectedCardinality, foundTerms.size()); + } + public void testSingleValueFieldWithMaxSize() throws Exception { SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type") diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java 
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java index 3d5d13bf04a..46af395c476 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.search.aggregations.bucket; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -37,6 +39,7 @@ import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.metrics.avg.Avg; @@ -54,10 +57,12 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -455,6 +460,44 @@ public class StringTermsIT extends AbstractTermsTestCase { } } + + public void testSingleValueFieldWithPartitionedFiltering() throws Exception { + runTestFieldWithPartitionedFiltering(SINGLE_VALUED_FIELD_NAME); + } + + public void testMultiValueFieldWithPartitionedFiltering() throws Exception { + runTestFieldWithPartitionedFiltering(MULTI_VALUED_FIELD_NAME); + } + + private void runTestFieldWithPartitionedFiltering(String field) throws Exception { + // Find total number of unique terms + SearchResponse allResponse = client().prepareSearch("idx").setTypes("type") + .addAggregation(terms("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))) + .execute().actionGet(); + assertSearchResponse(allResponse); + Terms terms = allResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + int expectedCardinality = terms.getBuckets().size(); + + // Gather terms using partitioned aggregations + final int numPartitions = randomIntBetween(2, 4); + Set foundTerms = new HashSet<>(); + for (int partition = 0; partition < numPartitions; partition++) { + SearchResponse response = client().prepareSearch("idx").setTypes("type").addAggregation(terms("terms").field(field) + .includeExclude(new IncludeExclude(partition, numPartitions)).collectMode(randomFrom(SubAggCollectionMode.values()))) + .execute().actionGet(); + assertSearchResponse(response); + terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + for (Bucket bucket : terms.getBuckets()) { + assertTrue(foundTerms.add(bucket.getKeyAsString())); + } + } + assertEquals(expectedCardinality, foundTerms.size()); + } + public void testSingleValueFieldWithMaxSize() throws Exception { SearchResponse response = client() diff --git 
a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
index fb3baca0967..180bcad1d0b 100644
--- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
@@ -514,7 +514,10 @@ TIP: for indexed scripts replace the `file` parameter with an `id` parameter.
 ==== Filtering Values

 It is possible to filter the values for which buckets will be created. This can be done using the `include` and
-`exclude` parameters which are based on regular expression strings or arrays of exact values.
+`exclude` parameters which are based on regular expression strings or arrays of exact values. Additionally,
+`include` clauses can filter using `partition` expressions.
+
+===== Filtering Values with regular expressions

 [source,js]
 --------------------------------------------------
@@ -538,6 +541,8 @@ both are defined, the `exclude` has precedence, meaning, the `include` is evalua

 The syntax is the same as <>.

+===== Filtering Values with exact values
+
 For matching based on exact values the `include` and `exclude` parameters can simply take an array of
 strings that represent the terms as they are found in the index:

@@ -561,6 +566,67 @@ strings that represent the terms as they are found in the index:
 }
 --------------------------------------------------

+===== Filtering Values with partitions
+
+Sometimes there are too many unique terms to process in a single request/response pair, so
+it can be useful to break the analysis up into multiple requests.
+This can be achieved by grouping the field's values into a number of partitions at query-time and processing
+only one partition in each request.
+Consider this request, which is looking for accounts that have not logged any access recently:
+
+[source,js]
+--------------------------------------------------
+{
+   "size": 0,
+   "aggs": {
+      "expired_sessions": {
+         "terms": {
+            "field": "account_id",
+            "include": {
+               "partition": 0,
+               "num_partitions": 20
+            },
+            "size": 10000,
+            "order": {
+               "last_access": "asc"
+            }
+         },
+         "aggs": {
+            "last_access": {
+               "max": {
+                  "field": "access_date"
+               }
+            }
+         }
+      }
+   }
+}
+--------------------------------------------------
+
+This request is finding the last logged access date for a subset of customer accounts because we
+might want to expire some customer accounts who haven't been seen for a long while.
+The `num_partitions` setting has requested that the unique account_ids are organized evenly into twenty
+partitions (0 to 19), and the `partition` setting in this request filters to only consider account_ids falling
+into partition 0. Subsequent requests should ask for partitions 1, then 2, and so on, to complete the expired-account analysis.
+
+Note that the `size` setting for the number of results returned needs to be tuned with the `num_partitions`.
+For this particular account-expiration example, the process for balancing values for `size` and `num_partitions` would be as follows:
+
+1. Use the `cardinality` aggregation to estimate the total number of unique account_id values
+2. Pick a value for `num_partitions` to break the number from 1) up into more manageable chunks
+3. Pick a `size` value for the number of responses we want from each partition
+4. Run a test request
+
+If we have a circuit-breaker error, we are trying to do too much in one request and must increase `num_partitions`.
+If the request was successful but the last account ID in the date-sorted test response was still an account we might want to
+expire, then we may be missing accounts of interest and have set our numbers too low. We must either
+
+* increase the `size` parameter to return more results per partition (could be heavy on memory) or
+* increase the `num_partitions` to consider fewer accounts per request (could increase overall processing time as we need to make more requests)
+
+Ultimately this is a balancing act between managing the Elasticsearch resources required to process a single request and the volume
+of requests that the client application must issue to complete a task.
+
 ==== Multi-field terms aggregation

 The `terms` aggregation does not support collecting terms from multiple fields

From 9b8e3c68788563ac8851281e0597cf92f425bab7 Mon Sep 17 00:00:00 2001
From: Clinton Gormley
Date: Thu, 24 Nov 2016 16:41:05 +0100
Subject: [PATCH 4/8] Add docs for the batch mode of plugin installation (#21700)

* Add docs for the batch mode of plugin installation

Closes https://github.com/elastic/elasticsearch/issues/21490

* Updated to clarify that all permissions are granted automatically in batch mode
---
 docs/plugins/plugin-script.asciidoc | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc
index d09ccc34905..3234b6ae226 100644
--- a/docs/plugins/plugin-script.asciidoc
+++ b/docs/plugins/plugin-script.asciidoc
@@ -126,6 +126,24 @@ return the following exit codes:
 `74`:: IO error
 `70`:: any other error

+[float]
+=== Batch mode
+
+Certain plugins require more privileges than those provided by default in core
+Elasticsearch. These plugins will list the required privileges and ask the
+user for confirmation before continuing with installation.
+
+When running the plugin install script from another program (e.g. install
+automation scripts), the plugin script should detect that it is not being
+called from the console and skip the confirmation response, automatically
+granting all requested permissions. If console detection fails, then batch
+mode can be forced by specifying `-b` or `--batch` as follows:
+
+[source,shell]
+-----------------------------------
+sudo bin/elasticsearch-plugin install --batch [pluginname]
+-----------------------------------
+
 [float]
 === Custom config directory

From 3bb9317ca276b5e735175a79b12e26449dff2c55 Mon Sep 17 00:00:00 2001
From: Adrin Jalali
Date: Thu, 24 Nov 2016 16:46:45 +0100
Subject: [PATCH 5/8] clarify ambiguous sentence. (#21734)

---
 docs/reference/docs/update-by-query.asciidoc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc
index 69c22921225..4b88cad7246 100644
--- a/docs/reference/docs/update-by-query.asciidoc
+++ b/docs/reference/docs/update-by-query.asciidoc
@@ -53,9 +53,9 @@ version number, documents with version equal to zero cannot be updated using
 All update and query failures cause the `_update_by_query` to abort and are
 returned in the `failures` of the response. The updates that have been
 performed still stick. In other words, the process is not rolled back, only
-aborted. While the first failure causes the abort all failures that are
-returned by the failing bulk request are returned in the `failures` element so
-it's possible for there to be quite a few.
+aborted. While the first failure causes the abort, all failures that are
+returned by the failing bulk request are returned in the `failures` element; therefore
+it's possible for there to be quite a few failed entities.

 If you want to simply count version conflicts, and not cause the `_update_by_query`
 to abort, you can set `conflicts=proceed` on the url or `"conflicts": "proceed"`

From 953928b2c5734b4e274040cf46aecd3b31c399eb Mon Sep 17 00:00:00 2001
From: Adrin Jalali
Date: Thu, 24 Nov 2016 17:11:10 +0100
Subject: [PATCH 6/8] typo fix (it self -> itself) (#21781)

* typo fix.

* apply "stored field value"

* replaced "whereas" with "on the contrary"
---
 docs/reference/search/request/stored-fields.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/search/request/stored-fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc
index da9a0830621..f09c332553c 100644
--- a/docs/reference/search/request/stored-fields.asciidoc
+++ b/docs/reference/search/request/stored-fields.asciidoc
@@ -40,7 +40,7 @@ GET /_search
 If the requested fields are not stored (`store` mapping set to `false`), they will be ignored.

-Field values fetched from the document it self are always returned as an array. Metadata fields like `_routing` and
+Stored field values fetched from the document itself are always returned as an array. On the contrary, metadata fields like `_routing` and
 `_parent` fields are never returned as an array.

 Also only leaf fields can be returned via the `field` option. So object fields can't be returned and such requests

From 72ef6fa0d76984de2a860fb112f10b1ed65c241b Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Thu, 24 Nov 2016 21:43:58 +0100
Subject: [PATCH 7/8] Handle spaces in `action.auto_create_index` gracefully (#21790)

Today, if a comma-separated list is passed to action.auto_create_index,
leading and trailing whitespace is not trimmed, but since the values are
index expressions, whitespace should be removed for convenience.
Closes #21449
---
 .../action/support/AutoCreateIndex.java       |  3 ++-
 .../action/support/AutoCreateIndexTests.java  | 23 +++++++++++++++++++
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java
index 5c9152b4751..a9a5afed9f3 100644
--- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java
+++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java
@@ -111,9 +111,10 @@ public final class AutoCreateIndex {
             try {
                 String[] patterns = Strings.commaDelimitedListToStringArray(value);
                 for (String pattern : patterns) {
-                    if (pattern == null || pattern.length() == 0) {
+                    if (pattern == null || pattern.trim().length() == 0) {
                         throw new IllegalArgumentException("Can't parse [" + value + "] for setting [action.auto_create_index] must be either [true, false, or a comma separated list of index patterns]");
                     }
+                    pattern = pattern.trim();
                     Tuple<String, Boolean> expression;
                     if (pattern.startsWith("-")) {
                         if (pattern.length() == 1) {
diff --git a/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java b/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java
index 8a45ca47535..19342e5e604 100644
--- a/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/AutoCreateIndexTests.java
@@ -25,11 +25,16 @@
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.test.ESTestCase;

+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 import static org.hamcrest.CoreMatchers.equalTo;

 public class AutoCreateIndexTests extends ESTestCase {
@@ -57,6 +62,24 @@
         }
     }

+    public void testHandleSpaces() { // see #21449
+        Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(),
+                randomFrom(".marvel-, .security, .watches, .triggered_watches, .watcher-history-",
+                        ".marvel-,.security,.watches,.triggered_watches,.watcher-history-")).build();
+        AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);
+        List<Tuple<String, Boolean>> expressions = autoCreateIndex.getAutoCreate().getExpressions();
+        Map<String, Boolean> map = new HashMap<>();
+        for (Tuple<String, Boolean> t : expressions) {
+            map.put(t.v1(), t.v2());
+        }
+        assertTrue(map.get(".marvel-"));
+        assertTrue(map.get(".security"));
+        assertTrue(map.get(".watches"));
+        assertTrue(map.get(".triggered_watches"));
+        assertTrue(map.get(".watcher-history-"));
+        assertEquals(5, map.size());
+    }
+
     public void testAutoCreationDisabled() {
         Settings settings = Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false).build();
         AutoCreateIndex autoCreateIndex = newAutoCreateIndex(settings);

From 9809760eb0658d9918c6f4aa5b15606c6e857fb3 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Thu, 24 Nov 2016 21:53:04 +0100
Subject: [PATCH 8/8] Fix settings diff generation for affix, list and group settings (#21788)

Group, List and Affix settings generate a bogus diff that turns the actual diff into a string
containing a JSON structure, for instance:

```
"action" : {
  "search" : {
    "remote" : {
      "" : "{\"my_remote_cluster\":\"[::1]:60378\"}"
    }
  }
}
```

which makes reading the setting impossible. This happens, for instance, if a group or affix
setting is rendered via `_cluster/settings?include_defaults=true`.
This change fixes the issue as well as several minor issues with affix settings that were
not accepted as valid settings today.
---
 .../settings/AbstractScopedSettings.java      | 16 +++--
 .../common/settings/Setting.java              | 65 +++++++++++++++++--
 .../common/settings/ScopedSettingsTests.java  | 36 ++++++++--
 .../common/settings/SettingTests.java         | 16 ++---
 .../azure/storage/AzureStorageSettings.java   | 13 ++--
 .../test/cluster.put_settings/10_basic.yaml   | 13 ++++
 6 files changed, 122 insertions(+), 37 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
index b993cef9290..3622623987b 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
@@ -57,6 +57,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
     private final Setting.Property scope;
     private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$");
     private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$");
+    private static final Pattern AFFIX_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+(?:[*][.])+[-\\w]+$");

     protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Property scope) {
         super(settings);
@@ -86,7 +87,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
     }

     protected void validateSettingKey(Setting setting) {
-        if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) {
+        if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())
+            || isValidAffixKey(setting.getKey())) == false) {
             throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]");
         }
     }
@@ -111,6 +113,10 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
         return GROUP_KEY_PATTERN.matcher(key).matches();
     }

+    private static boolean isValidAffixKey(String key) {
+        return AFFIX_KEY_PATTERN.matcher(key).matches();
+    }
+
     public Setting.Property getScope() {
         return this.scope;
     }
@@ -372,14 +378,10 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
     public Settings diff(Settings source, Settings defaultSettings) {
         Settings.Builder builder = Settings.builder();
         for (Setting<?> setting : keySettings.values()) {
-            if (setting.exists(source) == false) {
-                builder.put(setting.getKey(), setting.getRaw(defaultSettings));
-            }
+            setting.diff(builder, source, defaultSettings);
         }
         for (Setting<?> setting : complexMatchers.values()) {
-            if (setting.exists(source) == false) {
-                builder.put(setting.getKey(), setting.getRaw(defaultSettings));
-            }
+            setting.diff(builder, source, defaultSettings);
         }
         return builder.build();
     }
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
index a96b47762d5..22c74afee7c 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -311,6 +311,19 @@ public class Setting<T> extends ToXContentToBytes {
         }
     }

+    /**
+     * Add this setting to the builder if it doesn't exist in the source settings.
+     * The value added to the builder is taken from the given default settings object.
+     * @param builder the settings builder to fill the diff into
+     * @param source the source settings object to diff
+     * @param defaultSettings the default settings object to diff against
+     */
+    public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) {
+        if (exists(source) == false) {
+            builder.put(getKey(), getRaw(defaultSettings));
+        }
+    }
+
     /**
      * Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned
      * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value.
@@ -649,6 +662,9 @@ public class Setting<T> extends ToXContentToBytes {
     public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue,
                                                    Function<String, T> singleValueParser, Property... properties) {
+        if (defaultStringValue.apply(Settings.EMPTY) == null) {
+            throw new IllegalArgumentException("default value function must not return null");
+        }
         Function<String, List<T>> parser = (s) ->
             parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList());

@@ -670,6 +686,18 @@ public class Setting<T> extends ToXContentToBytes {
                 boolean exists = super.exists(settings);
                 return exists || settings.get(getKey() + ".0") != null;
             }
+
+            @Override
+            public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) {
+                if (exists(source) == false) {
+                    String[] asArray = defaultSettings.getAsArray(getKey(), null);
+                    if (asArray == null) {
+                        builder.putArray(getKey(), defaultStringValue.apply(defaultSettings));
+                    } else {
+                        builder.putArray(getKey(), asArray);
+                    }
+                }
+            }
         };
     }

@@ -747,6 +775,17 @@ public class Setting<T> extends ToXContentToBytes {
             return false;
         }

+        @Override
+        public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) {
+            Map<String, String> leftGroup = get(source).getAsMap();
+            Settings defaultGroup = get(defaultSettings);
+            for (Map.Entry<String, String> entry : defaultGroup.getAsMap().entrySet()) {
+                if (leftGroup.containsKey(entry.getKey()) == false) {
+                    builder.put(getKey() + entry.getKey(), entry.getValue());
+                }
+            }
+        }
+
         @Override
         public AbstractScopedSettings.SettingUpdater<Settings> newUpdater(Consumer<Settings> consumer, Logger logger,
                 Consumer<Settings> validator) {
@@ -856,14 +895,14 @@ public class Setting<T> extends ToXContentToBytes {
      * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, affix key settings don't support updaters
      * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater.
      */
-    public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, Function<Settings, String> defaultValue,
+    public static <T> Setting<T> affixKeySetting(String prefix, String suffix, Function<Settings, String> defaultValue,
                                                  Function<String, T> parser, Property... properties) {
-        return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, properties);
+        return affixKeySetting(AffixKey.withAffix(prefix, suffix), defaultValue, parser, properties);
     }

-    public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, String defaultValue, Function<String, T> parser,
+    public static <T> Setting<T> affixKeySetting(String prefix, String suffix, String defaultValue, Function<String, T> parser,
                                                  Property...
properties) { - return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties); + return affixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties); } public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, @@ -888,6 +927,15 @@ public class Setting extends ToXContentToBytes { throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't."); } } + + @Override + public void diff(Settings.Builder builder, Settings source, Settings defaultSettings) { + for (Map.Entry entry : defaultSettings.getAsMap().entrySet()) { + if (match(entry.getKey())) { + getConcreteSetting(entry.getKey()).diff(builder, source, defaultSettings); + } + } + } }; } @@ -960,7 +1008,7 @@ public class Setting extends ToXContentToBytes { return new AffixKey(prefix, null); } - public static AffixKey withAdfix(String prefix, String suffix) { + public static AffixKey withAffix(String prefix, String suffix) { return new AffixKey(prefix, suffix); } @@ -970,6 +1018,9 @@ public class Setting extends ToXContentToBytes { public AffixKey(String prefix, String suffix) { assert prefix != null || suffix != null: "Either prefix or suffix must be non-null"; this.prefix = prefix; + if (prefix.endsWith(".") == false) { + throw new IllegalArgumentException("prefix must end with a '.'"); + } this.suffix = suffix; } @@ -1005,9 +1056,9 @@ public class Setting extends ToXContentToBytes { sb.append(prefix); } if (suffix != null) { - sb.append("*"); + sb.append('*'); + sb.append('.'); sb.append(suffix); - sb.append("."); } return sb.toString(); } diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 99126dcccd4..851ea26a19d 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -213,20 +213,44 @@ public class ScopedSettingsTests extends ESTestCase { public void testDiff() throws IOException { Setting fooBarBaz = Setting.intSetting("foo.bar.baz", 1, Property.NodeScope); Setting fooBar = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting someGroup = Setting.groupSetting("some.group.", Property.Dynamic, Property.NodeScope); + Setting someAffix = Setting.affixKeySetting("some.prefix.", "somekey", "true", Boolean::parseBoolean, Property.NodeScope); Setting> foorBarQuux = Setting.listSetting("foo.bar.quux", Arrays.asList("a", "b", "c"), Function.identity(), Property.NodeScope); - ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(fooBar, fooBarBaz, foorBarQuux))); + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(fooBar, fooBarBaz, foorBarQuux, + someGroup, someAffix))); Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); - assertThat(diff.getAsMap().size(), equalTo(2)); + assertEquals(4, diff.getAsMap().size()); // 4 since foo.bar.quux has 3 values essentially assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(1)); - assertThat(diff.get("foo.bar.quux", null), equalTo("[\"a\",\"b\",\"c\"]")); + assertArrayEquals(diff.getAsArray("foo.bar.quux", null), new String[] {"a", "b", "c"}); diff = settings.diff( Settings.builder().put("foo.bar", 5).build(), - Settings.builder().put("foo.bar.baz", 17).put("foo.bar.quux", "d,e,f").build()); - 
assertThat(diff.getAsMap().size(), equalTo(2)); + Settings.builder().put("foo.bar.baz", 17).putArray("foo.bar.quux", "d", "e", "f").build()); + assertEquals(4, diff.getAsMap().size()); // 4 since foo.bar.quux has 3 values essentially assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(17)); - assertThat(diff.get("foo.bar.quux", null), equalTo("[\"d\",\"e\",\"f\"]")); + assertArrayEquals(diff.getAsArray("foo.bar.quux", null), new String[] {"d", "e", "f"}); + + diff = settings.diff( + Settings.builder().put("some.group.foo", 5).build(), + Settings.builder().put("some.group.foobar", 17, "some.group.foo", 25).build()); + assertEquals(6, diff.getAsMap().size()); // 6 since foo.bar.quux has 3 values essentially + assertThat(diff.getAsInt("some.group.foobar", null), equalTo(17)); + assertNull(diff.get("some.group.foo")); + assertArrayEquals(diff.getAsArray("foo.bar.quux", null), new String[] {"a", "b", "c"}); + assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(1)); + assertThat(diff.getAsInt("foo.bar", null), equalTo(1)); + + diff = settings.diff( + Settings.builder().put("some.prefix.foo.somekey", 5).build(), + Settings.builder().put("some.prefix.foobar.somekey", 17, + "some.prefix.foo.somekey", 18).build()); + assertEquals(6, diff.getAsMap().size()); // 6 since foo.bar.quux has 3 values essentially + assertThat(diff.getAsInt("some.prefix.foobar.somekey", null), equalTo(17)); + assertNull(diff.get("some.prefix.foo.somekey")); + assertArrayEquals(diff.getAsArray("foo.bar.quux", null), new String[] {"a", "b", "c"}); + assertThat(diff.getAsInt("foo.bar.baz", null), equalTo(1)); + assertThat(diff.getAsInt("foo.bar", null), equalTo(1)); } public void testUpdateTracer() { diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 6ec9093536e..2bd5dea3c10 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -442,9 +442,9 @@ public class SettingTests extends ESTestCase { } } - public void testAdfixKeySetting() { + public void testAffixKeySetting() { Setting setting = - Setting.adfixKeySetting("foo", "enable", "false", Boolean::parseBoolean, Property.NodeScope); + Setting.affixKeySetting("foo.", "enable", "false", Boolean::parseBoolean, Property.NodeScope); assertTrue(setting.hasComplexMatcher()); assertTrue(setting.match("foo.bar.enable")); assertTrue(setting.match("foo.baz.enable")); @@ -456,12 +456,12 @@ public class SettingTests extends ESTestCase { assertTrue(concreteSetting.get(Settings.builder().put("foo.bar.enable", "true").build())); assertFalse(concreteSetting.get(Settings.builder().put("foo.baz.enable", "true").build())); - try { - setting.getConcreteSetting("foo"); - fail(); - } catch (IllegalArgumentException ex) { - assertEquals("key [foo] must match [foo*enable.] 
but didn't.", ex.getMessage()); - } + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> setting.getConcreteSetting("foo")); + assertEquals("key [foo] must match [foo.*.enable] but didn't.", exc.getMessage()); + + exc = expectThrows(IllegalArgumentException.class, () -> Setting.affixKeySetting("foo", "enable", "false", + Boolean::parseBoolean, Property.NodeScope)); + assertEquals("prefix must end with a '.'", exc.getMessage()); } public void testMinMaxInt() { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java index 6d1ed0c1049..9d67eea628b 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java @@ -34,12 +34,7 @@ import java.util.Map; import java.util.function.Function; public final class AzureStorageSettings { - private static final String TIMEOUT_SUFFIX = "timeout"; - private static final String ACCOUNT_SUFFIX = "account"; - private static final String KEY_SUFFIX = "key"; - private static final String DEFAULT_SUFFIX = "default"; - - private static final Setting.AffixKey TIMEOUT_KEY = Setting.AffixKey.withAdfix(Storage.PREFIX, TIMEOUT_SUFFIX); + private static final Setting.AffixKey TIMEOUT_KEY = Setting.AffixKey.withAffix(Storage.PREFIX, "timeout"); private static final Setting TIMEOUT_SETTING = Setting.affixKeySetting( TIMEOUT_KEY, @@ -47,11 +42,11 @@ public final class AzureStorageSettings { (s) -> Setting.parseTimeValue(s, TimeValue.timeValueSeconds(-1), TIMEOUT_KEY.toString()), Setting.Property.NodeScope); private static final Setting ACCOUNT_SETTING = - Setting.adfixKeySetting(Storage.PREFIX, ACCOUNT_SUFFIX, "", Function.identity(), Setting.Property.NodeScope); + Setting.affixKeySetting(Storage.PREFIX, "account", "", Function.identity(), Setting.Property.NodeScope); private static final Setting KEY_SETTING = - Setting.adfixKeySetting(Storage.PREFIX, KEY_SUFFIX, "", Function.identity(), Setting.Property.NodeScope); + Setting.affixKeySetting(Storage.PREFIX, "key", "", Function.identity(), Setting.Property.NodeScope); private static final Setting DEFAULT_SETTING = - Setting.adfixKeySetting(Storage.PREFIX, DEFAULT_SUFFIX, "false", Boolean::valueOf, Setting.Property.NodeScope); + Setting.affixKeySetting(Storage.PREFIX, "default", "false", Boolean::valueOf, Setting.Property.NodeScope); private final String name; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yaml index 41552f217be..5031c977ccd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yaml @@ -61,3 +61,16 @@ - match: {persistent: {}} +--- +"Test get a default settings": + + - skip: + version: " - 5.99.99" # this can't be bumped to 5.0.2 until snapshots are published + reason: Fetching default group setting was buggy until 5.0.3 + + - do: + cluster.get_settings: + include_defaults: true + + - match: {defaults.node.attr.testattr: "test"} +