From 534caa89275e4bb16ce35d1ed398cb9db83041f4 Mon Sep 17 00:00:00 2001
From: Tal Levy
Date: Tue, 22 Mar 2016 14:28:24 -0700
Subject: [PATCH 01/12] Handle regex parsing errors in Gsub and Grok
 Processors

Currently, both Gsub and Grok parse regex strings during Pipeline
creation. Thrown parsing exceptions were leaking out; this commit wraps
those exceptions in ElasticsearchParseExceptions.
---
 .../ingest/processor/GsubProcessor.java       | 17 ++++++++---
 .../processor/GsubProcessorFactoryTests.java  | 14 +++++++++
 .../ingest/grok/GrokProcessor.java            |  9 +++++-
 .../grok/GrokProcessorFactoryTests.java       | 29 +++++++++++++++++++
 4 files changed, 64 insertions(+), 5 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/GsubProcessor.java b/core/src/main/java/org/elasticsearch/ingest/processor/GsubProcessor.java
index 6e73c92070b..d986bf522e5 100644
--- a/core/src/main/java/org/elasticsearch/ingest/processor/GsubProcessor.java
+++ b/core/src/main/java/org/elasticsearch/ingest/processor/GsubProcessor.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.ingest.processor;
 
+import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ingest.core.AbstractProcessor;
 import org.elasticsearch.ingest.core.AbstractProcessorFactory;
 import org.elasticsearch.ingest.core.IngestDocument;
@@ -28,6 +29,9 @@ import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static org.elasticsearch.ingest.core.ConfigurationUtils.newConfigurationException;
+import static org.elasticsearch.ingest.core.ConfigurationUtils.readStringProperty;
+
 /**
  * Processor that allows to search for patterns in field content and replace them with corresponding string replacement.
  * Support fields of string type only, throws exception if a field is of a different type.
@@ -79,10 +83,15 @@ public final class GsubProcessor extends AbstractProcessor {
     public static final class Factory extends AbstractProcessorFactory<GsubProcessor> {
         @Override
         public GsubProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
-            String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
-            String pattern = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "pattern");
-            String replacement = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "replacement");
-            Pattern searchPattern = Pattern.compile(pattern);
+            String field = readStringProperty(TYPE, processorTag, config, "field");
+            String pattern = readStringProperty(TYPE, processorTag, config, "pattern");
+            String replacement = readStringProperty(TYPE, processorTag, config, "replacement");
+            Pattern searchPattern;
+            try {
+                searchPattern = Pattern.compile(pattern);
+            } catch (Exception e) {
+                throw newConfigurationException(TYPE, processorTag, "pattern", "Invalid regex pattern. " + e.getMessage());
+            }
             return new GsubProcessor(processorTag, field, searchPattern, replacement);
         }
     }

diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/GsubProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/GsubProcessorFactoryTests.java
index 2440ff68408..628a81223be 100644
--- a/core/src/test/java/org/elasticsearch/ingest/processor/GsubProcessorFactoryTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/processor/GsubProcessorFactoryTests.java
@@ -84,4 +84,18 @@ public class GsubProcessorFactoryTests extends ESTestCase {
             assertThat(e.getMessage(), equalTo("[replacement] required property is missing"));
         }
     }
+
+    public void testCreateInvalidPattern() throws Exception {
+        GsubProcessor.Factory factory = new GsubProcessor.Factory();
+        Map<String, Object> config = new HashMap<>();
+        config.put("field", "field1");
+        config.put("pattern", "[");
+        config.put("replacement", "-");
+        try {
+            factory.create(config);
+            fail("factory create should have failed");
+        } catch(ElasticsearchParseException e) {
+            assertThat(e.getMessage(), equalTo("[pattern] Invalid regex pattern. Unclosed character class near index 0\n[\n^"));
+        }
+    }
 }

diff --git a/modules/ingest-grok/src/main/java/org/elasticsearch/ingest/grok/GrokProcessor.java b/modules/ingest-grok/src/main/java/org/elasticsearch/ingest/grok/GrokProcessor.java
index b4755d61c56..9237821baba 100644
--- a/modules/ingest-grok/src/main/java/org/elasticsearch/ingest/grok/GrokProcessor.java
+++ b/modules/ingest-grok/src/main/java/org/elasticsearch/ingest/grok/GrokProcessor.java
@@ -27,6 +27,8 @@ import org.elasticsearch.ingest.core.IngestDocument;
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.elasticsearch.ingest.core.ConfigurationUtils.newConfigurationException;
+
 public final class GrokProcessor extends AbstractProcessor {
 
     public static final String TYPE = "grok";
@@ -82,7 +84,12 @@
                 patternBank.putAll(customPatternBank);
             }
 
-            Grok grok = new Grok(patternBank, matchPattern);
+            Grok grok;
+            try {
+                grok = new Grok(patternBank, matchPattern);
+            } catch (Exception e) {
+                throw newConfigurationException(TYPE, processorTag, "pattern", "Invalid regex pattern. " + e.getMessage());
+            }
             return new GrokProcessor(processorTag, grok, matchField);
         }

diff --git a/modules/ingest-grok/src/test/java/org/elasticsearch/ingest/grok/GrokProcessorFactoryTests.java b/modules/ingest-grok/src/test/java/org/elasticsearch/ingest/grok/GrokProcessorFactoryTests.java
index db98090af39..3880d389c52 100644
--- a/modules/ingest-grok/src/test/java/org/elasticsearch/ingest/grok/GrokProcessorFactoryTests.java
+++ b/modules/ingest-grok/src/test/java/org/elasticsearch/ingest/grok/GrokProcessorFactoryTests.java
@@ -84,4 +84,33 @@ public class GrokProcessorFactoryTests extends ESTestCase {
         assertThat(processor.getGrok(), notNullValue());
         assertThat(processor.getGrok().match("foo!"), equalTo(true));
     }
+
+    public void testCreateWithInvalidPattern() throws Exception {
+        GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap());
+        Map<String, Object> config = new HashMap<>();
+        config.put("field", "_field");
+        config.put("pattern", "[");
+        try {
+            factory.create(config);
+            fail("should fail");
+        } catch (ElasticsearchParseException e) {
+            assertThat(e.getMessage(), equalTo("[pattern] Invalid regex pattern. 
premature end of char-class"));
+        }
+
+    }
+
+    public void testCreateWithInvalidPatternDefinition() throws Exception {
+        GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap());
+        Map<String, Object> config = new HashMap<>();
+        config.put("field", "_field");
+        config.put("pattern", "%{MY_PATTERN:name}!");
+        config.put("pattern_definitions", Collections.singletonMap("MY_PATTERN", "["));
+        try {
+            factory.create(config);
+            fail("should fail");
+        } catch (ElasticsearchParseException e) {
+            assertThat(e.getMessage(), equalTo("[pattern] Invalid regex pattern. premature end of char-class"));
+        }
+
+    }
 }

From ff3fd99074e4a616d172ab4cb0580872f6cecbbe Mon Sep 17 00:00:00 2001
From: Colin Goodheart-Smithe
Date: Tue, 29 Mar 2016 11:48:05 +0100
Subject: [PATCH 02/12] Prevents exception being raised when ordering by an
 aggregation which wasn't collected

If a terms aggregation was ordered by a metric nested in a single bucket
aggregator which did not collect any documents (e.g. a filters
aggregation which did not match in that term bucket) an
ArrayIndexOutOfBoundsException would be thrown when the ordering code
tried to retrieve the value for the metric. This commit fixes all
numeric metric aggregators so they return their default value when a
bucket ordinal is requested which was not collected.

Closes #17225
---
 .../metrics/avg/AvgAggregator.java            |  5 ++-
 .../metrics/max/MaxAggregator.java            |  5 ++-
 .../metrics/min/MinAggregator.java            |  5 ++-
 .../metrics/stats/StatsAggregator.java        | 21 ++++---
 .../extended/ExtendedStatsAggregator.java     | 35 ++++++++-----
 .../metrics/sum/SumAggregator.java            |  5 ++-
 .../search/aggregations/metrics/AvgIT.java    | 36 +++++++++++++++
 .../search/aggregations/metrics/SumIT.java    | 36 +++++++++++++++
 .../messy/tests/ExtendedStatsTests.java       | 45 +++++++++++++++++++
 .../messy/tests/HDRPercentileRanksTests.java  | 36 +++++++++++++++
 .../messy/tests/HDRPercentilesTests.java      | 38 ++++++++++++++++
 .../elasticsearch/messy/tests/MaxTests.java   | 38 +++++++++++++++-
 .../elasticsearch/messy/tests/MinTests.java   | 37 +++++++++++++++
 .../elasticsearch/messy/tests/StatsTests.java | 40 +++++++++++++++++
 .../tests/TDigestPercentileRanksTests.java    | 37 +++++++++++++++
 .../messy/tests/TDigestPercentilesTests.java  | 39 ++++++++++++++++
 .../metrics/AbstractNumericTestCase.java      |  2 +
 17 files changed, 440 insertions(+), 20 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java
index 34312cbb696..11417b1c615 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java
@@ -94,7 +94,10 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue {
 
     @Override
     public double metric(long owningBucketOrd) {
-        return valuesSource == null ? 
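The bounds check being introduced in each of these metric aggregators is needed because per-bucket state is allocated lazily while documents are collected, so a bucket ordinal that never reached the collector can lie past the end of the backing arrays. A rough collect-side sketch of why that holds (paraphrased for illustration from the aggregator's collector, not part of this patch):

    // Per-bucket state only grows when a bucket actually collects a doc:
    @Override
    public void collect(int doc, long bucket) throws IOException {
        counts = bigArrays.grow(counts, bucket + 1);
        sums = bigArrays.grow(sums, bucket + 1);
        // ... accumulate this document's values into counts/sums ...
    }
    // A bucket that never collected anything was never grown for, so
    // owningBucketOrd >= sums.size() and metric() must return a default.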
Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd); + if (valuesSource == null || owningBucketOrd >= sums.size()) { + return Double.NaN; + } + return sums.get(owningBucketOrd) / counts.get(owningBucketOrd); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java index d207fdd53ae..2862dc2cad2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java @@ -96,7 +96,10 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { @Override public double metric(long owningBucketOrd) { - return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd); + if (valuesSource == null || owningBucketOrd >= maxes.size()) { + return Double.NEGATIVE_INFINITY; + } + return maxes.get(owningBucketOrd); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java index 80aacff9312..f0a6c8f3a31 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java @@ -95,7 +95,10 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { @Override public double metric(long owningBucketOrd) { - return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd); + if (valuesSource == null || owningBucketOrd >= mins.size()) { + return Double.POSITIVE_INFINITY; + } + return mins.get(owningBucketOrd); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java index e94edc0f140..3a497d70df0 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java @@ -128,12 +128,23 @@ public class StatsAggregator extends NumericMetricsAggregator.MultiValue { @Override public double metric(String name, long owningBucketOrd) { + if (valuesSource == null || owningBucketOrd >= counts.size()) { + switch(InternalStats.Metrics.resolve(name)) { + case count: return 0; + case sum: return 0; + case min: return Double.POSITIVE_INFINITY; + case max: return Double.NEGATIVE_INFINITY; + case avg: return Double.NaN; + default: + throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); + } + } switch(InternalStats.Metrics.resolve(name)) { - case count: return valuesSource == null ? 0 : counts.get(owningBucketOrd); - case sum: return valuesSource == null ? 0 : sums.get(owningBucketOrd); - case min: return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd); - case max: return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd); - case avg: return valuesSource == null ? 
Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd); + case count: return counts.get(owningBucketOrd); + case sum: return sums.get(owningBucketOrd); + case min: return mins.get(owningBucketOrd); + case max: return maxes.get(owningBucketOrd); + case avg: return sums.get(owningBucketOrd) / counts.get(owningBucketOrd); default: throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java index 2dfab325127..104cd36367b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java @@ -30,6 +30,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -140,20 +141,34 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue @Override public double metric(String name, long owningBucketOrd) { + if (valuesSource == null || owningBucketOrd >= counts.size()) { + switch(InternalExtendedStats.Metrics.resolve(name)) { + case count: return 0; + case sum: return 0; + case min: return Double.POSITIVE_INFINITY; + case max: return Double.NEGATIVE_INFINITY; + case avg: return Double.NaN; + case sum_of_squares: return 0; + case variance: return Double.NaN; + case std_deviation: return Double.NaN; + case std_upper: return Double.NaN; + case std_lower: return Double.NaN; + default: + throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); + } + } switch(InternalExtendedStats.Metrics.resolve(name)) { - case count: return valuesSource == null ? 0 : counts.get(owningBucketOrd); - case sum: return valuesSource == null ? 0 : sums.get(owningBucketOrd); - case min: return valuesSource == null ? Double.POSITIVE_INFINITY : mins.get(owningBucketOrd); - case max: return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd); - case avg: return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd); - case sum_of_squares: return valuesSource == null ? 0 : sumOfSqrs.get(owningBucketOrd); - case variance: return valuesSource == null ? Double.NaN : variance(owningBucketOrd); - case std_deviation: return valuesSource == null ? 
Double.NaN : Math.sqrt(variance(owningBucketOrd)); + case count: return counts.get(owningBucketOrd); + case sum: return sums.get(owningBucketOrd); + case min: return mins.get(owningBucketOrd); + case max: return maxes.get(owningBucketOrd); + case avg: return sums.get(owningBucketOrd) / counts.get(owningBucketOrd); + case sum_of_squares: return sumOfSqrs.get(owningBucketOrd); + case variance: return variance(owningBucketOrd); + case std_deviation: return Math.sqrt(variance(owningBucketOrd)); case std_upper: - if (valuesSource == null) { return Double.NaN; } return (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) + (Math.sqrt(variance(owningBucketOrd)) * this.sigma); case std_lower: - if (valuesSource == null) { return Double.NaN; } return (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) - (Math.sqrt(variance(owningBucketOrd)) * this.sigma); default: throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java index 29dcaf47dda..79f6ad4e564 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java @@ -87,7 +87,10 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { @Override public double metric(long owningBucketOrd) { - return valuesSource == null ? 0 : sums.get(owningBucketOrd); + if (valuesSource == null || owningBucketOrd >= sums.size()) { + return 0.0; + } + return sums.get(owningBucketOrd); } @Override diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java index d7e873616e9..a8187578d71 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java @@ -31,8 +31,11 @@ import org.elasticsearch.script.ScriptEngineService; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.SearchScript; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.avg.Avg; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -47,9 +50,12 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -317,6 +323,36 @@ public class AvgIT extends AbstractNumericTestCase { assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20)); } + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>avg", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(avg("avg").field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Avg avg = filter.getAggregations().get("avg"); + assertThat(avg, notNullValue()); + assertThat(avg.value(), equalTo(Double.NaN)); + + } + } + /** * Mock plugin for the {@link ExtractFieldScriptEngine} */ diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index adafc55fade..462356d4108 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -31,8 +31,11 @@ import org.elasticsearch.script.ScriptEngineService; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.SearchScript; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -47,9 +50,12 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -312,6 +318,36 @@ public class SumIT extends AbstractNumericTestCase { assertThat(sum.getValue(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 
10 + 11 + 11 + 12)); } + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>sum", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Sum sum = filter.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.value(), equalTo(0.0)); + + } + } + /** * Mock plugin for the {@link ExtractFieldScriptEngine} */ diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java index 4642d4662c9..c38e8f2a979 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ExtendedStatsTests.java @@ -24,20 +24,26 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.missing.Missing; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; +import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats.Bounds; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.missing; @@ -538,6 +544,45 @@ public class ExtendedStatsTests extends AbstractNumericTestCase { } } + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + 
.addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>extendedStats.avg", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + ExtendedStats extendedStats = filter.getAggregations().get("extendedStats"); + assertThat(extendedStats, notNullValue()); + assertThat(extendedStats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(extendedStats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(extendedStats.getAvg(), equalTo(Double.NaN)); + assertThat(extendedStats.getSum(), equalTo(0.0)); + assertThat(extendedStats.getCount(), equalTo(0L)); + assertThat(extendedStats.getStdDeviation(), equalTo(Double.NaN)); + assertThat(extendedStats.getSumOfSquares(), equalTo(0.0)); + assertThat(extendedStats.getVariance(), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.LOWER), equalTo(Double.NaN)); + assertThat(extendedStats.getStdDeviationBound(Bounds.UPPER), equalTo(Double.NaN)); + + } + } private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception { ShardSearchFailure[] failures = response.getShardFailures(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HDRPercentileRanksTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HDRPercentileRanksTests.java index 4fe11c67b83..21fb5be6b9d 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HDRPercentileRanksTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HDRPercentileRanksTests.java @@ -24,9 +24,11 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; @@ -41,9 +43,12 @@ import java.util.Map; import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static 
org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -463,4 +468,35 @@ public class HDRPercentileRanksTests extends AbstractNumericTestCase { } } + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>ranks.99", true))) + .subAggregation(filter("filter", termQuery("value", 100)) + .subAggregation(percentileRanks("ranks").method(PercentilesMethod.HDR).values(99).field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + PercentileRanks ranks = filter.getAggregations().get("ranks"); + assertThat(ranks, notNullValue()); + assertThat(ranks.percent(99), equalTo(Double.NaN)); + + } + } + } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HDRPercentilesTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HDRPercentilesTests.java index aec3c43edcf..9c21928798a 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HDRPercentilesTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HDRPercentilesTests.java @@ -25,9 +25,11 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; @@ -41,9 +43,12 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; +import static 
org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; @@ -443,4 +448,37 @@ public class HDRPercentilesTests extends AbstractNumericTestCase { } } + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>percentiles.99", true))) + .subAggregation(filter("filter", termQuery("value", 100)) + .subAggregation(percentiles("percentiles").method(PercentilesMethod.HDR).field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Percentiles percentiles = filter.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.percentile(99), equalTo(Double.NaN)); + + } + } + } \ No newline at end of file diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MaxTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MaxTests.java index ee20202a894..22af6dd486e 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MaxTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MaxTests.java @@ -23,20 +23,26 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.max.Max; - import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.max; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.notNullValue; @@ -293,4 +299,34 @@ public class MaxTests extends AbstractNumericTestCase { assertThat(max.getName(), equalTo("max")); assertThat(max.getValue(), equalTo(11.0)); } + + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>max", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(max("max").field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Max max = filter.getAggregations().get("max"); + assertThat(max, notNullValue()); + assertThat(max.value(), equalTo(Double.NEGATIVE_INFINITY)); + + } + } } \ No newline at end of file diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinTests.java index 36bf5c25911..f61aad9b137 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinTests.java @@ -23,20 +23,27 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.min.Min; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.min; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -305,4 +312,34 @@ public class MinTests extends AbstractNumericTestCase { assertThat(min.getName(), equalTo("min")); assertThat(min.getValue(), equalTo(1.0)); } + + @Override + public void 
testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>min", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(min("min").field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Min min = filter.getAggregations().get("min"); + assertThat(min, notNullValue()); + assertThat(min.value(), equalTo(Double.POSITIVE_INFINITY)); + + } + } } \ No newline at end of file diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java index b06d3395b2b..9af5e086a34 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/StatsTests.java @@ -24,20 +24,27 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.stats.Stats; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -399,6 +406,39 @@ public class StatsTests extends AbstractNumericTestCase { assertThat(stats.getCount(), equalTo(20L)); } + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(terms("terms").field("value").order(Order.compound(Order.aggregation("filter>stats.avg", true))) + .subAggregation(filter("filter", 
termQuery("value", 100)).subAggregation(stats("stats").field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Stats stats = filter.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(stats.getAvg(), equalTo(Double.NaN)); + assertThat(stats.getSum(), equalTo(0.0)); + assertThat(stats.getCount(), equalTo(0L)); + + } + } private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception { ShardSearchFailure[] failures = response.getShardFailures(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java index 540d0cfe5eb..2e59b798297 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentileRanksTests.java @@ -25,13 +25,16 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; import java.util.Arrays; import java.util.Collection; @@ -41,9 +44,12 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; 
@@ -425,4 +431,35 @@ public class TDigestPercentileRanksTests extends AbstractNumericTestCase { } } + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation(terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>ranks.99", true))) + .subAggregation(filter("filter", termQuery("value", 100)) + .subAggregation(percentileRanks("ranks").method(PercentilesMethod.TDIGEST).values(99).field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + PercentileRanks ranks = filter.getAggregations().get("ranks"); + assertThat(ranks, notNullValue()); + assertThat(ranks.percent(99), equalTo(Double.NaN)); + + } + } + } \ No newline at end of file diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java index 1bf0b00657d..69d3c281ca8 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/TDigestPercentilesTests.java @@ -25,13 +25,17 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.groovy.GroovyPlugin; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.AbstractNumericTestCase; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregatorBuilder; +import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod; + import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -40,9 +44,12 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -407,4 +414,36 @@ public class TDigestPercentilesTests extends AbstractNumericTestCase { previous = p99; } } + + @Override + public void testOrderByEmptyAggregation() throws Exception { + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value").order(Terms.Order.compound(Terms.Order.aggregation("filter>percentiles.99", true))) + .subAggregation(filter("filter", termQuery("value", 100)) + .subAggregation(percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value")))) + .get(); + + assertHitCount(searchResponse, 10); + + Terms terms = searchResponse.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + List buckets = terms.getBuckets(); + assertThat(buckets, notNullValue()); + assertThat(buckets.size(), equalTo(10)); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); + assertThat(bucket.getDocCount(), equalTo(1L)); + Filter filter = bucket.getAggregations().get("filter"); + assertThat(filter, notNullValue()); + assertThat(filter.getDocCount(), equalTo(0L)); + Percentiles percentiles = filter.getAggregations().get("percentiles"); + assertThat(percentiles, notNullValue()); + assertThat(percentiles.percentile(99), equalTo(Double.NaN)); + + } + } } \ No newline at end of file diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java index ece26be8239..703119a7a14 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java @@ -97,4 +97,6 @@ public abstract class AbstractNumericTestCase extends ESIntegTestCase { public abstract void testScriptMultiValued() throws Exception; public abstract void testScriptMultiValuedWithParams() throws Exception; + + public abstract void testOrderByEmptyAggregation() throws Exception; } \ No newline at end of file From 52daed0732bfe37a103a3a2965363de46fcdd5e9 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Tue, 29 Mar 2016 14:56:22 +0200 Subject: [PATCH 03/12] Update-by-query rest tests: fixed bad yaml and deleted a client-dependent test --- .../test/update_by_query/10_basic.yaml | 9 ++++++--- .../test/update_by_query/20_validation.yaml | 14 -------------- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml index 083cb44d01f..224ea3b87f4 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml @@ -20,7 +20,8 @@ - match: {noops: 0} - match: {throttled_millis: 0} - gte: { took: 0 } - - is_false: created # Update by query can't create + # Update by query can't create + - is_false: created - is_false: task --- @@ -70,7 +71,8 @@ body: { "text": "test" } - do: indices.refresh: {} - - do: # Creates a new version for reindex to 
miss on scan. + # Creates a new version for reindex to miss on scan. + - do: index: index: test type: foo @@ -111,7 +113,8 @@ body: { "text": "test" } - do: indices.refresh: {} - - do: # Creates a new version for reindex to miss on scan. + # Creates a new version for reindex to miss on scan. + - do: index: index: test type: foo diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml index 175ec65903a..dc8fd4e7e5a 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml @@ -12,20 +12,6 @@ index: test conflicts: cat ---- -"invalid scroll_size fails": - - do: - index: - index: test - type: test - id: 1 - body: { "text": "test" } - - do: - catch: /Failed to parse int parameter \[scroll_size\] with value \[cat\]/ - update_by_query: - index: test - scroll_size: cat - --- "invalid size fails": - do: From 3087d2b88227e1bd14643f34a6c3dc2d4701bf03 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Tue, 29 Mar 2016 14:39:15 +0200 Subject: [PATCH 04/12] Fixed bad YAML in reindex REST test: 50_routing.yaml --- .../test/resources/rest-api-spec/test/reindex/50_routing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/50_routing.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/50_routing.yaml index 9a6b7245c4f..5c72aff84c0 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/50_routing.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/50_routing.yaml @@ -35,7 +35,7 @@ type: test id: 1 body: { "company": "cat" } - routing: + routing: null - do: indices.refresh: {} From 798e4281fa9391083a602b4e5bbbf90213bd5108 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Tue, 29 Mar 2016 15:06:10 +0200 Subject: [PATCH 05/12] Added experimental annotation to the update-by-query and reindex docs --- docs/reference/docs/reindex.asciidoc | 2 ++ docs/reference/docs/update-by-query.asciidoc | 2 ++ 2 files changed, 4 insertions(+) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index fba45983521..a825a8eca44 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1,6 +1,8 @@ [[docs-reindex]] == Reindex API +experimental[The reindex API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] + The most basic form of `_reindex` just copies documents from one index to another. This will copy documents from the `twitter` index into the `new_twitter` index: diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index eb6d80415fc..f7ddf1d087c 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -1,6 +1,8 @@ [[docs-update-by-query]] == Update By Query API +experimental[The update-by-query API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] + The simplest usage of `_update_by_query` just performs an update on every document in the index without changing the source. 
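As an illustration of that simplest form, here is a hedged sketch using the Java helpers the reindex module registers (`UpdateByQueryAction` and its request builder; treat the exact builder methods as assumptions of this sketch rather than documented API):

    // Touch every document in "twitter": each doc is reindexed onto
    // itself, bumping its version, while the source stays unchanged.
    UpdateByQueryRequestBuilder updateByQuery =
            UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
    updateByQuery.source("twitter")
            .abortOnVersionConflict(false); // roughly ?conflicts=proceed
    updateByQuery.get();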
This is useful to <> or some other online From 60793a848eab9838c47043a68cc0c54d101c06f7 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 29 Mar 2016 16:21:25 +0200 Subject: [PATCH 06/12] test: make sure we don't flush during indexing the percolator queries --- .../index/percolator/PercolatorQueryCacheTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/index/percolator/PercolatorQueryCacheTests.java b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorQueryCacheTests.java index 3f3d90bafbd..e10a63bca2c 100644 --- a/core/src/test/java/org/elasticsearch/index/percolator/PercolatorQueryCacheTests.java +++ b/core/src/test/java/org/elasticsearch/index/percolator/PercolatorQueryCacheTests.java @@ -125,6 +125,8 @@ public class PercolatorQueryCacheTests extends ESTestCase { IndexWriter indexWriter = new IndexWriter( directory, newIndexWriterConfig(new MockAnalyzer(random())) + .setMergePolicy(NoMergePolicy.INSTANCE) + .setMaxBufferedDocs(16) ); boolean legacyFormat = randomBoolean(); From 9d37f459b567cfd7282e91c5847076eb843836e9 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 24 Mar 2016 10:01:20 +0100 Subject: [PATCH 07/12] percolator: Make explain use the two phase iterator So that we don't eveluate percolator queries that don't match. Closes #17314 --- .../index/query/PercolatorQuery.java | 7 ++-- .../index/query/PercolatorQueryTests.java | 35 ++++++++++++++++++- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/query/PercolatorQuery.java b/core/src/main/java/org/elasticsearch/index/query/PercolatorQuery.java index 0b22b17f65e..f4199e55223 100644 --- a/core/src/main/java/org/elasticsearch/index/query/PercolatorQuery.java +++ b/core/src/main/java/org/elasticsearch/index/query/PercolatorQuery.java @@ -135,9 +135,12 @@ public final class PercolatorQuery extends Query implements Accountable { public Explanation explain(LeafReaderContext leafReaderContext, int docId) throws IOException { Scorer scorer = scorer(leafReaderContext); if (scorer != null) { - int result = scorer.iterator().advance(docId); + TwoPhaseIterator twoPhaseIterator = scorer.twoPhaseIterator(); + int result = twoPhaseIterator.approximation().advance(docId); if (result == docId) { - return Explanation.match(scorer.score(), "PercolatorQuery"); + if (twoPhaseIterator.matches()) { + return Explanation.match(scorer.score(), "PercolatorQuery"); + } } } return Explanation.noMatch("PercolatorQuery"); diff --git a/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java b/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java index cbcfba55b97..61cf0f4803e 100644 --- a/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java @@ -36,6 +36,7 @@ import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.PhraseQuery; @@ -60,6 +61,7 @@ import java.util.HashMap; import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class PercolatorQueryTests extends ESTestCase { @@ -147,10 +149,38 @@ public class 
PercolatorQueryTests extends ESTestCase { assertThat(topDocs.totalHits, equalTo(5)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); + Explanation explanation = shardSearcher.explain(builder.build(), 0); + assertThat(explanation.isMatch(), is(true)); + assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score)); + + explanation = shardSearcher.explain(builder.build(), 1); + assertThat(explanation.isMatch(), is(false)); + assertThat(topDocs.scoreDocs[1].doc, equalTo(2)); + explanation = shardSearcher.explain(builder.build(), 2); + assertThat(explanation.isMatch(), is(true)); + assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[1].score)); + assertThat(topDocs.scoreDocs[2].doc, equalTo(3)); + explanation = shardSearcher.explain(builder.build(), 3); + assertThat(explanation.isMatch(), is(true)); + assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score)); + + explanation = shardSearcher.explain(builder.build(), 4); + assertThat(explanation.isMatch(), is(false)); + assertThat(topDocs.scoreDocs[3].doc, equalTo(5)); + explanation = shardSearcher.explain(builder.build(), 5); + assertThat(explanation.isMatch(), is(true)); + assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[3].score)); + + explanation = shardSearcher.explain(builder.build(), 6); + assertThat(explanation.isMatch(), is(false)); + assertThat(topDocs.scoreDocs[4].doc, equalTo(7)); + explanation = shardSearcher.explain(builder.build(), 7); + assertThat(explanation.isMatch(), is(true)); + assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[4].score)); } public void testDuel() throws Exception { @@ -236,11 +266,14 @@ public class PercolatorQueryTests extends ESTestCase { new MatchAllDocsQuery() ); TopDocs topDocs2 = shardSearcher.search(builder2.build(), 10); - assertThat(topDocs1.totalHits, equalTo(topDocs2.totalHits)); assertThat(topDocs1.scoreDocs.length, equalTo(topDocs2.scoreDocs.length)); for (int j = 0; j < topDocs1.scoreDocs.length; j++) { assertThat(topDocs1.scoreDocs[j].doc, equalTo(topDocs2.scoreDocs[j].doc)); + assertThat(topDocs1.scoreDocs[j].score, equalTo(topDocs2.scoreDocs[j].score)); + Explanation explain1 = shardSearcher.explain(builder1.build(), topDocs1.scoreDocs[j].doc); + Explanation explain2 = shardSearcher.explain(builder2.build(), topDocs2.scoreDocs[j].doc); + assertThat(explain1.toHtml(), equalTo(explain2.toHtml())); } } From 2064fe3985a7dd01b5ec73d9f55073ab788d1c27 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 22 Mar 2016 16:46:54 -0700 Subject: [PATCH 08/12] add type conversion support to ConvertProcessor --- .../ingest/processor/ConvertProcessor.java | 30 ++++- .../ConvertProcessorFactoryTests.java | 18 ++- .../processor/ConvertProcessorTests.java | 103 +++++++++++++++--- docs/reference/ingest/ingest-node.asciidoc | 15 ++- 4 files changed, 145 insertions(+), 21 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/ConvertProcessor.java b/core/src/main/java/org/elasticsearch/ingest/processor/ConvertProcessor.java index b6274ff83e5..1a6ce94e3db 100644 --- a/core/src/main/java/org/elasticsearch/ingest/processor/ConvertProcessor.java +++ b/core/src/main/java/org/elasticsearch/ingest/processor/ConvertProcessor.java @@ -73,6 +73,23 @@ public final class ConvertProcessor extends AbstractProcessor { public Object convert(Object value) { return value.toString(); } + }, AUTO { + @Override + public Object convert(Object value) { + if (!(value instanceof String)) { + return value; + } 
+ try { + return BOOLEAN.convert(value); + } catch (IllegalArgumentException e) { } + try { + return INTEGER.convert(value); + } catch (IllegalArgumentException e) {} + try { + return FLOAT.convert(value); + } catch (IllegalArgumentException e) {} + return value; + } }; @Override @@ -94,11 +111,13 @@ public final class ConvertProcessor extends AbstractProcessor { public static final String TYPE = "convert"; private final String field; + private final String targetField; private final Type convertType; - ConvertProcessor(String tag, String field, Type convertType) { + ConvertProcessor(String tag, String field, String targetField, Type convertType) { super(tag); this.field = field; + this.targetField = targetField; this.convertType = convertType; } @@ -106,6 +125,10 @@ public final class ConvertProcessor extends AbstractProcessor { return field; } + String getTargetField() { + return targetField; + } + Type getConvertType() { return convertType; } @@ -128,7 +151,7 @@ public final class ConvertProcessor extends AbstractProcessor { } else { newValue = convertType.convert(oldValue); } - document.setFieldValue(field, newValue); + document.setFieldValue(targetField, newValue); } @Override @@ -141,8 +164,9 @@ public final class ConvertProcessor extends AbstractProcessor { public ConvertProcessor doCreate(String processorTag, Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); String typeProperty = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "type"); + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", field); Type convertType = Type.fromString(processorTag, "type", typeProperty); - return new ConvertProcessor(processorTag, field, convertType); + return new ConvertProcessor(processorTag, field, targetField, convertType); } } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorFactoryTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorFactoryTests.java index 831e87436ba..f54f04c0cf8 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorFactoryTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorFactoryTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.ingest.processor; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.core.AbstractProcessorFactory; -import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -44,6 +43,7 @@ public class ConvertProcessorFactoryTests extends ESTestCase { ConvertProcessor convertProcessor = factory.create(config); assertThat(convertProcessor.getTag(), equalTo(processorTag)); assertThat(convertProcessor.getField(), equalTo("field1")); + assertThat(convertProcessor.getTargetField(), equalTo("field1")); assertThat(convertProcessor.getConvertType(), equalTo(type)); } @@ -88,4 +88,20 @@ public class ConvertProcessorFactoryTests extends ESTestCase { assertThat(e.getMessage(), Matchers.equalTo("[type] required property is missing")); } } + + public void testCreateWithExplicitTargetField() throws Exception { + ConvertProcessor.Factory factory = new ConvertProcessor.Factory(); + Map config = new HashMap<>(); + ConvertProcessor.Type type = randomFrom(ConvertProcessor.Type.values()); + config.put("field", "field1"); + config.put("target_field", "field2"); + config.put("type", type.toString()); + String 
processorTag = randomAsciiOfLength(10); + config.put(AbstractProcessorFactory.TAG_KEY, processorTag); + ConvertProcessor convertProcessor = factory.create(config); + assertThat(convertProcessor.getTag(), equalTo(processorTag)); + assertThat(convertProcessor.getField(), equalTo("field1")); + assertThat(convertProcessor.getTargetField(), equalTo("field2")); + assertThat(convertProcessor.getConvertType(), equalTo(type)); + } } diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorTests.java index 1350ebab601..936875cf73a 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorTests.java @@ -34,6 +34,7 @@ import java.util.Map; import static org.elasticsearch.ingest.processor.ConvertProcessor.Type; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; public class ConvertProcessorTests extends ESTestCase { @@ -41,7 +42,7 @@ public class ConvertProcessorTests extends ESTestCase { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); int randomInt = randomInt(); String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomInt); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.INTEGER); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(randomInt)); } @@ -57,7 +58,7 @@ public class ConvertProcessorTests extends ESTestCase { expectedList.add(randomInt); } String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.INTEGER); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList)); } @@ -68,7 +69,7 @@ public class ConvertProcessorTests extends ESTestCase { String value = "string-" + randomAsciiOfLengthBetween(1, 10); ingestDocument.setFieldValue(fieldName, value); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.INTEGER); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.INTEGER); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -84,7 +85,7 @@ public class ConvertProcessorTests extends ESTestCase { String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomFloat); expectedResult.put(fieldName, randomFloat); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.FLOAT); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(fieldName, Float.class), equalTo(randomFloat)); } @@ -100,7 +101,7 @@ public class ConvertProcessorTests extends ESTestCase { expectedList.add(randomFloat); } String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), 
fieldName, Type.FLOAT); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList)); } @@ -111,7 +112,7 @@ public class ConvertProcessorTests extends ESTestCase { String value = "string-" + randomAsciiOfLengthBetween(1, 10); ingestDocument.setFieldValue(fieldName, value); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.FLOAT); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.FLOAT); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -129,7 +130,7 @@ public class ConvertProcessorTests extends ESTestCase { } String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, booleanString); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.BOOLEAN); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(fieldName, Boolean.class), equalTo(randomBoolean)); } @@ -149,7 +150,7 @@ public class ConvertProcessorTests extends ESTestCase { expectedList.add(randomBoolean); } String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.BOOLEAN); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList)); } @@ -166,7 +167,7 @@ public class ConvertProcessorTests extends ESTestCase { } ingestDocument.setFieldValue(fieldName, fieldValue); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.BOOLEAN); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.BOOLEAN); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -200,7 +201,7 @@ public class ConvertProcessorTests extends ESTestCase { } String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.STRING); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.STRING); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(expectedFieldValue)); } @@ -236,7 +237,7 @@ public class ConvertProcessorTests extends ESTestCase { expectedList.add(randomValueString); } String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, Type.STRING); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, Type.STRING); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(expectedList)); } @@ -245,7 +246,7 @@ public class ConvertProcessorTests extends ESTestCase { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); Type type = randomFrom(Type.values()); - Processor processor = new 
ConvertProcessor(randomAsciiOfLength(10), fieldName, type); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, fieldName, type); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -257,7 +258,7 @@ public class ConvertProcessorTests extends ESTestCase { public void testConvertNullField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", null)); Type type = randomFrom(Type.values()); - Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", type); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", type); try { processor.execute(ingestDocument); fail("processor execute should have failed"); @@ -265,4 +266,80 @@ public class ConvertProcessorTests extends ESTestCase { assertThat(e.getMessage(), equalTo("Field [field] is null, cannot be converted to type [" + type + "]")); } } + + public void testAutoConvertNotString() throws Exception { + Object randomValue; + switch(randomIntBetween(0, 2)) { + case 0: + float randomFloat = randomFloat(); + randomValue = randomFloat; + break; + case 1: + int randomInt = randomInt(); + randomValue = randomInt; + break; + case 2: + boolean randomBoolean = randomBoolean(); + randomValue = randomBoolean; + break; + default: + throw new UnsupportedOperationException(); + } + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomValue)); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO); + processor.execute(ingestDocument); + Object convertedValue = ingestDocument.getFieldValue("field", Object.class); + assertThat(convertedValue, sameInstance(randomValue)); + } + + public void testAutoConvertStringNotMatched() throws Exception { + String value = "notAnIntFloatOrBool"; + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", value)); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO); + processor.execute(ingestDocument); + Object convertedValue = ingestDocument.getFieldValue("field", Object.class); + assertThat(convertedValue, sameInstance(value)); + } + + public void testAutoConvertMatchBoolean() throws Exception { + boolean randomBoolean = randomBoolean(); + String booleanString = Boolean.toString(randomBoolean); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", booleanString)); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO); + processor.execute(ingestDocument); + Object convertedValue = ingestDocument.getFieldValue("field", Object.class); + assertThat(convertedValue, equalTo(randomBoolean)); + } + + public void testAutoConvertMatchInteger() throws Exception { + int randomInt = randomInt(); + String randomString = Integer.toString(randomInt); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString)); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO); + processor.execute(ingestDocument); + Object convertedValue = ingestDocument.getFieldValue("field", Object.class); + assertThat(convertedValue, equalTo(randomInt)); + } + + public void testAutoConvertMatchFloat() throws Exception { + float 
randomFloat = randomFloat(); + String randomString = Float.toString(randomFloat); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString)); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO); + processor.execute(ingestDocument); + Object convertedValue = ingestDocument.getFieldValue("field", Object.class); + assertThat(convertedValue, equalTo(randomFloat)); + } + + public void testTargetField() throws Exception { + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + int randomInt = randomInt(); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, String.valueOf(randomInt)); + String targetField = fieldName + randomAsciiOfLength(5); + Processor processor = new ConvertProcessor(randomAsciiOfLength(10), fieldName, targetField, Type.INTEGER); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(String.valueOf(randomInt))); + assertThat(ingestDocument.getFieldValue(targetField, Integer.class), equalTo(randomInt)); + + } } diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 10b640dbaf1..146e033736a 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -668,18 +668,25 @@ Accepts a single value or an array of values. Converts an existing field's value to a different type, such as converting a string to an integer. If the field value is an array, all members will be converted. -The supported types include: `integer`, `float`, `string`, and `boolean`. +The supported types include: `integer`, `float`, `string`, `boolean`, and `auto`. Specifying `boolean` will set the field to true if its string value is equal to `true` (ignore case), to false if its string value is equal to `false` (ignore case), or it will throw an exception otherwise. +Specifying `auto` will attempt to convert the string-valued `field` into the closest non-string type. +For example, a field whose value is `"true"` will be converted to its respective boolean type: `true`. And +a value of `"242.15"` will "automatically" be converted to `242.15` of type `float`. If a provided field cannot +be appropriately converted, the Convert Processor will still process successfully and leave the field value as-is. In +such a case, `target_field` will still be updated with the unconverted field value. 
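As an aside for readers of this hunk: stripped of the enum plumbing, the `auto` cascade above behaves like the standalone sketch below (plain Java; the method name `autoConvert` is ours, for illustration only — the committed code instead delegates to the BOOLEAN, INTEGER, and FLOAT enum constants, whose failures all surface as IllegalArgumentException).

[source,java]
--------------------------------------------------
// Illustrative sketch of the `auto` conversion cascade, not the committed
// code: try boolean, then integer, then float, and fall back to returning
// the original string when nothing matches.
public static Object autoConvert(Object value) {
    if (!(value instanceof String)) {
        return value; // non-strings pass through untouched
    }
    String s = (String) value;
    if (s.equalsIgnoreCase("true")) {
        return true;
    }
    if (s.equalsIgnoreCase("false")) {
        return false;
    }
    try {
        return Integer.parseInt(s);
    } catch (NumberFormatException e) {
        // not an integer, try the next type
    }
    try {
        return Float.parseFloat(s);
    } catch (NumberFormatException e) {
        // not a float either
    }
    return s; // nothing matched: leave the value as-is
}
--------------------------------------------------

The ordering is deliberate: integer conversion is attempted before float, so a value like "3" becomes an integer rather than 3.0.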
+ [[convert-options]] .Convert Options [options="header"] |====== -| Name | Required | Default | Description -| `field` | yes | - | The field whose value is to be converted -| `type` | yes | - | The type to convert the existing value to +| Name | Required | Default | Description +| `field` | yes | - | The field whose value is to be converted +| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place +| `type` | yes | - | The type to convert the existing value to |====== [source,js] From 833fc8420fb792aab5ef5310d5787d621509d6d0 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Tue, 29 Mar 2016 08:19:15 -0700 Subject: [PATCH 09/12] split long line in ConvertProcessorTests --- .../elasticsearch/ingest/processor/ConvertProcessorTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorTests.java index 936875cf73a..0efacb145bd 100644 --- a/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/processor/ConvertProcessorTests.java @@ -304,7 +304,8 @@ public class ConvertProcessorTests extends ESTestCase { public void testAutoConvertMatchBoolean() throws Exception { boolean randomBoolean = randomBoolean(); String booleanString = Boolean.toString(randomBoolean); - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", booleanString)); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), + Collections.singletonMap("field", booleanString)); Processor processor = new ConvertProcessor(randomAsciiOfLength(10), "field", "field", Type.AUTO); processor.execute(ingestDocument); Object convertedValue = ingestDocument.getFieldValue("field", Object.class); From 48b4f086e07e1d00a7f662f04fe4099a27e0c1c2 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Sun, 27 Mar 2016 10:03:12 +0200 Subject: [PATCH 10/12] Replication operation that try to perform the primary phase on a replica should be retried In extreme cases a local primary shard can be replaced with a replica while a replication request is in flight and the primary action is applied to the shard (via `acquirePrimaryOperationLock()). #17044 changed the exception used in that method to something that isn't recognized as `TransportActions.isShardNotAvailableException`, causing the operation to fail immediately instead of retrying. This commit fixes this by check the primary flag before acquiring the lock. This is safe to do as an IndexShard will never be demoted once a primary. 
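In sketch form, the fix amounts to the guard below. This is a hypothetical rendering rather than the literal hunk (which follows in abbreviated form); the helper name `acquirePrimaryLockOrRetry` is invented for illustration, while RetryOnPrimaryException and acquirePrimaryOperationLock are the real names referenced by this commit.

[source,java]
--------------------------------------------------
// Hypothetical rendering of the fix described above: re-check the primary
// flag before taking the primary operation lock. Checking first is safe
// because an IndexShard is never demoted once it has become a primary.
void acquirePrimaryLockOrRetry(IndexShard indexShard, ShardId shardId) {
    if (indexShard.routingEntry().primary() == false) {
        // Recognized as a retryable failure, so the reroute phase waits for
        // a new cluster state and retries instead of failing the request.
        throw new RetryOnPrimaryException(shardId, "shard is no longer the primary");
    }
    indexShard.acquirePrimaryOperationLock();
}
--------------------------------------------------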
Closes #17358 --- .../TransportReplicationAction.java | 11 ++- .../elasticsearch/index/shard/IndexShard.java | 1 - .../TransportReplicationActionTests.java | 99 +++++++++++++++---- 3 files changed, 87 insertions(+), 24 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index d70e271fa26..c1106a709fe 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -549,8 +549,9 @@ public abstract class TransportReplicationAction(); reroutePhase = action.new ReroutePhase(task, new Request().timeout("5ms"), listener); @@ -181,7 +183,7 @@ public class TransportReplicationActionTests extends ESTestCase { assertPhase(task, "waiting_for_retry"); block = ClusterBlocks.builder() - .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block)); assertListenerThrows("primary phase should fail operation when moving from a retryable block to a non-retryable one", listener, ClusterBlockException.class); assertIndexShardUninitialized(); @@ -196,7 +198,7 @@ public class TransportReplicationActionTests extends ESTestCase { final ShardId shardId = new ShardId(index, "_na_", 0); // no replicas in oder to skip the replication part setState(clusterService, state(index, true, - randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); + randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); ReplicationTask task = maybeTask(); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); @@ -221,7 +223,7 @@ public class TransportReplicationActionTests extends ESTestCase { final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); final List capturedRequests = - transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); + transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); assertThat(capturedRequests.get(0).action, equalTo("testAction[p]")); @@ -234,7 +236,7 @@ public class TransportReplicationActionTests extends ESTestCase { * before the relocation target, there is a time span where relocation source believes active primary to be on * relocation target and relocation target believes active primary to be on relocation source. This results in replication * requests being sent back and forth. - * + *
+     *
* This test checks that replication request is not routed back from relocation target to relocation source in case of * stale index routing table on relocation target. */ @@ -271,7 +273,7 @@ public class TransportReplicationActionTests extends ESTestCase { IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); final List capturedRequests = - transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); + transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); assertThat(capturedRequests.get(0).action, equalTo("testAction[p]")); @@ -282,7 +284,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; // no replicas in oder to skip the replication part setState(clusterService, state(index, true, - randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); + randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Request request = new Request(new ShardId("unknown_index", "_na_", 0)).timeout("1ms"); PlainActionFuture listener = new PlainActionFuture<>(); @@ -299,6 +301,61 @@ public class TransportReplicationActionTests extends ESTestCase { assertListenerThrows("must throw shard not found exception", listener, ShardNotFoundException.class); } + public void testStalePrimaryShardOnReroute() throws InterruptedException { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + // no replicas in order to skip the replication part + setState(clusterService, stateWithActivePrimary(index, true, randomInt(3))); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Request request = new Request(shardId); + boolean timeout = randomBoolean(); + if (timeout) { + request.timeout("0s"); + } else { + request.timeout("1h"); + } + PlainActionFuture listener = new PlainActionFuture<>(); + ReplicationTask task = maybeTask(); + + TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener); + reroutePhase.run(); + CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); + assertThat(capturedRequests, arrayWithSize(1)); + assertThat(capturedRequests[0].action, equalTo("testAction[p]")); + assertPhase(task, "waiting_on_primary"); + transport.handleRemoteError(capturedRequests[0].requestId, randomRetryPrimaryException(shardId)); + + + if (timeout) { + // we always try at least one more time on timeout + assertThat(listener.isDone(), equalTo(false)); + capturedRequests = transport.getCapturedRequestsAndClear(); + assertThat(capturedRequests, arrayWithSize(1)); + assertThat(capturedRequests[0].action, equalTo("testAction[p]")); + assertPhase(task, "waiting_on_primary"); + transport.handleRemoteError(capturedRequests[0].requestId, randomRetryPrimaryException(shardId)); + assertListenerThrows("must throw index not found exception", listener, ElasticsearchException.class); + assertPhase(task, "failed"); + } else { + assertThat(listener.isDone(), equalTo(false)); + // generate a CS change + setState(clusterService, clusterService.state()); + capturedRequests = transport.getCapturedRequestsAndClear(); + assertThat(capturedRequests, 
arrayWithSize(1)); + assertThat(capturedRequests[0].action, equalTo("testAction[p]")); + } + } + + private ElasticsearchException randomRetryPrimaryException(ShardId shardId) { + return randomFrom( + new ShardNotFoundException(shardId), + new IndexNotFoundException(shardId.getIndex()), + new IndexShardClosedException(shardId), + new EngineClosedException(shardId), + new TransportReplicationAction.RetryOnPrimaryException(shardId, "hello") + ); + } + public void testRoutePhaseExecutesRequest() { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -449,7 +506,7 @@ public class TransportReplicationActionTests extends ESTestCase { PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); TransportReplicationAction.PrimaryPhase primaryPhase = actionWithRelocatingReplicasAfterPrimaryOp.new PrimaryPhase( - task, request, createTransportChannel(listener)); + task, request, createTransportChannel(listener)); primaryPhase.run(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); ShardRouting relocatingReplicaShard = stateWithRelocatingReplica.getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0); @@ -485,7 +542,7 @@ public class TransportReplicationActionTests extends ESTestCase { PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); TransportReplicationAction.PrimaryPhase primaryPhase = actionWithDeletedIndexAfterPrimaryOp.new PrimaryPhase( - task, request, createTransportChannel(listener)); + task, request, createTransportChannel(listener)); primaryPhase.run(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); assertThat("replication phase should be skipped if index gets deleted after primary operation", transport.capturedRequestsByTargetNode().size(), equalTo(0)); @@ -529,8 +586,8 @@ public class TransportReplicationActionTests extends ESTestCase { setState(clusterService, state(index, true, ShardRoutingState.STARTED, replicaStates)); logger.debug("using consistency level of [{}], assigned shards [{}], total shards [{}]. expecting op to [{}]. using state: \n{}", - request.consistencyLevel(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, passesWriteConsistency ? "succeed" : "retry", - clusterService.state().prettyPrint()); + request.consistencyLevel(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, passesWriteConsistency ? 
"succeed" : "retry", + clusterService.state().prettyPrint()); final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); PlainActionFuture listener = new PlainActionFuture<>(); @@ -646,7 +703,7 @@ public class TransportReplicationActionTests extends ESTestCase { TransportChannel channel = createTransportChannel(listener, error::set); TransportReplicationAction.ReplicationPhase replicationPhase = - action.new ReplicationPhase(task, request, new Response(), request.shardId(), channel, reference); + action.new ReplicationPhase(task, request, new Response(), request.shardId(), channel, reference); assertThat(replicationPhase.totalShards(), equalTo(totalShards)); assertThat(replicationPhase.pending(), equalTo(assignedReplicas)); @@ -656,7 +713,7 @@ public class TransportReplicationActionTests extends ESTestCase { HashMap nodesSentTo = new HashMap<>(); boolean executeOnReplica = - action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings()); + action.shouldExecuteReplication(clusterService.state().getMetaData().index(shardId.getIndex()).getSettings()); for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) { // no duplicate requests Request replicationRequest = (Request) capturedRequest.request; @@ -819,7 +876,7 @@ public class TransportReplicationActionTests extends ESTestCase { final ShardId shardId = new ShardId(index, "_na_", 0); // one replica to make sure replication is attempted setState(clusterService, state(index, true, - ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); ShardRouting primaryShard = clusterService.state().routingTable().shardRoutingTable(shardId).primaryShard(); indexShardRouting.set(primaryShard); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); @@ -856,7 +913,7 @@ public class TransportReplicationActionTests extends ESTestCase { public void testReplicasCounter() throws Exception { final ShardId shardId = new ShardId("test", "_na_", 0); setState(clusterService, state(shardId.getIndexName(), true, - ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); final ReplicationTask task = maybeTask(); @@ -895,7 +952,7 @@ public class TransportReplicationActionTests extends ESTestCase { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); setState(clusterService, state(index, true, - ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); Request request = new Request(shardId).timeout("100ms"); PlainActionFuture listener = new PlainActionFuture<>(); @@ -915,7 +972,7 @@ public class TransportReplicationActionTests extends ESTestCase { final ShardId shardId = new ShardId(index, "_na_", 0); boolean localPrimary = true; setState(clusterService, state(index, localPrimary, - ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); Action action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) 
{ @Override protected void resolveRequest(MetaData metaData, String concreteIndex, Request request) { @@ -967,7 +1024,7 @@ public class TransportReplicationActionTests extends ESTestCase { // publish a new cluster state boolean localPrimaryOnRetry = randomBoolean(); setState(clusterService, state(index, localPrimaryOnRetry, - ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); CapturingTransport.CapturedRequest[] primaryRetry = transport.getCapturedRequestsAndClear(); // the request should be retried @@ -1083,8 +1140,8 @@ public class TransportReplicationActionTests extends ESTestCase { ClusterService clusterService, ThreadPool threadPool) { super(settings, actionName, transportService, clusterService, null, threadPool, - new ShardStateAction(settings, clusterService, transportService, null, null, threadPool), - new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new, Request::new, ThreadPool.Names.SAME); + new ShardStateAction(settings, clusterService, transportService, null, null, threadPool), + new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new, Request::new, ThreadPool.Names.SAME); } @Override From c356b30cff3d80b912c1ac519e02f41ce921875c Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Tue, 29 Mar 2016 09:29:21 -0400 Subject: [PATCH 11/12] Update task management docs to reflect the latest changes in the interface Brings docs in line with new list task syntax and adds task cancellation API docs. --- docs/reference/cluster.asciidoc | 2 +- docs/reference/cluster/nodes-task.asciidoc | 49 --------- docs/reference/cluster/pending.asciidoc | 5 + docs/reference/cluster/tasks.asciidoc | 103 +++++++++++++++++++ docs/reference/docs/reindex.asciidoc | 40 +++---- docs/reference/docs/update-by-query.asciidoc | 42 ++++---- 6 files changed, 152 insertions(+), 89 deletions(-) delete mode 100644 docs/reference/cluster/nodes-task.asciidoc create mode 100644 docs/reference/cluster/tasks.asciidoc diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index a42371b1054..7955239fc5d 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -45,7 +45,7 @@ include::cluster/nodes-stats.asciidoc[] include::cluster/nodes-info.asciidoc[] -include::cluster/nodes-task.asciidoc[] +include::cluster/tasks.asciidoc[] include::cluster/nodes-hot-threads.asciidoc[] diff --git a/docs/reference/cluster/nodes-task.asciidoc b/docs/reference/cluster/nodes-task.asciidoc deleted file mode 100644 index 85a8e758699..00000000000 --- a/docs/reference/cluster/nodes-task.asciidoc +++ /dev/null @@ -1,49 +0,0 @@ -[[nodes-task]] -== Nodes Task API - -The nodes task management API retrieves information about the tasks currently -executing on one or more nodes in the cluster. - -[source,js] --------------------------------------------------- -GET /_tasks <1> -GET /_tasks/nodeId1,nodeId2 <2> -GET /_tasks/nodeId1,nodeId2/cluster:* <3> --------------------------------------------------- -// AUTOSENSE - -<1> Retrieves all tasks currently running on all nodes in the cluster. -<2> Retrieves all tasks running on nodes `nodeId1` and `nodeId2`. See <> for more info about how to select individual nodes. -<3> Retrieves all cluster-related tasks running on nodes `nodeId1` and `nodeId2`. 
- -The result will look similar to the following: - -[source,js] --------------------------------------------------- -{ - "nodes": { - "fDlEl7PrQi6F-awHZ3aaDw": { - "name": "Gazer", - "transport_address": "127.0.0.1:9300", - "host": "127.0.0.1", - "ip": "127.0.0.1:9300", - "tasks": [ - { - "node": "fDlEl7PrQi6F-awHZ3aaDw", - "id": 105, - "type": "transport", - "action": "cluster:monitor/nodes/tasks" - }, - { - "node": "fDlEl7PrQi6F-awHZ3aaDw", - "id": 106, - "type": "direct", - "action": "cluster:monitor/nodes/tasks[n]", - "parent_node": "fDlEl7PrQi6F-awHZ3aaDw", - "parent_id": 105 - } - ] - } - } -} --------------------------------------------------- diff --git a/docs/reference/cluster/pending.asciidoc b/docs/reference/cluster/pending.asciidoc index 4997a035ba2..84315073129 100644 --- a/docs/reference/cluster/pending.asciidoc +++ b/docs/reference/cluster/pending.asciidoc @@ -5,6 +5,11 @@ The pending cluster tasks API returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet been executed. +NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the +<> which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create +index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task +might be reported by both task api and pending cluster tasks API. + [source,js] -------------------------------------------------- $ curl -XGET 'http://localhost:9200/_cluster/pending_tasks' diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc new file mode 100644 index 00000000000..bb03231fc83 --- /dev/null +++ b/docs/reference/cluster/tasks.asciidoc @@ -0,0 +1,103 @@ +[[tasks]] +== Task Management API + +experimental[The Task Management API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] + +[float] +=== Current Tasks Information + +The task management API allows to retrieve information about the tasks currently +executing on one or more nodes in the cluster. + +[source,js] +-------------------------------------------------- +GET /_tasks <1> +GET /_tasks?nodes=nodeId1,nodeId2 <2> +GET /_tasks?nodes=nodeId1,nodeId2&actions=cluster:* <3> +-------------------------------------------------- +// AUTOSENSE + +<1> Retrieves all tasks currently running on all nodes in the cluster. +<2> Retrieves all tasks running on nodes `nodeId1` and `nodeId2`. See <> for more info about how to select individual nodes. +<3> Retrieves all cluster-related tasks running on nodes `nodeId1` and `nodeId2`. 
+ +The result will look similar to the following: + +[source,js] +-------------------------------------------------- +{ + "nodes" : { + "oTUltX4IQMOUUVeiohTt8A" : { + "name" : "Tamara Rahn", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1:9300", + "tasks" : { + "oTUltX4IQMOUUVeiohTt8A:124" : { + "node" : "oTUltX4IQMOUUVeiohTt8A", + "id" : 124, + "type" : "direct", + "action" : "cluster:monitor/tasks/lists[n]", + "start_time_in_millis" : 1458585884904, + "running_time_in_nanos" : 47402, + "parent_task_id" : "oTUltX4IQMOUUVeiohTt8A:123" + }, + "oTUltX4IQMOUUVeiohTt8A:123" : { + "node" : "oTUltX4IQMOUUVeiohTt8A", + "id" : 123, + "type" : "transport", + "action" : "cluster:monitor/tasks/lists", + "start_time_in_millis" : 1458585884904, + "running_time_in_nanos" : 236042 + } + } + } + } +} + +-------------------------------------------------- + +It is also possible to retrieve information for a particular task, or all children of a particular +tasks using the following two commands: + +[source,js] +-------------------------------------------------- +GET /_tasks/taskId1 +GET /_tasks?parent_task_id=parentTaskId1 +-------------------------------------------------- +// AUTOSENSE + +The task API can be also used to wait for completion of a particular task. The following call will +block for 10 seconds or until the task with id `oTUltX4IQMOUUVeiohTt8A:12345` is completed. + +[source,js] +-------------------------------------------------- +GET /_tasks/oTUltX4IQMOUUVeiohTt8A:12345?wait_for_completion=true&timeout=10s +-------------------------------------------------- +// AUTOSENSE + + +[float] +=== Task Cancellation + +If a long-running task supports cancellation, it can be cancelled by the following command: + +[source,js] +-------------------------------------------------- +POST /_tasks/taskId1/_cancel +-------------------------------------------------- +// AUTOSENSE + +The task cancellation command supports the same task selection parameters as the list tasks command, so multiple tasks +can be cancelled at the same time. For example, the following command will cancel all reindex tasks running on the +nodes `nodeId1` and `nodeId2`. + +[source,js] +-------------------------------------------------- +POST /_tasks/_cancel?node_id=nodeId1,nodeId2&actions=*reindex +-------------------------------------------------- +// AUTOSENSE + + + + diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index a825a8eca44..5d57d4a9073 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -390,7 +390,7 @@ from aborting the operation. 
=== Works with the Task API While Reindex is running you can fetch their status using the -<>: +<>: [source,js] -------------------------------------------------- @@ -413,24 +413,26 @@ The responses looks like: "testattr" : "test", "portsfile" : "true" }, - "tasks" : [ { - "node" : "r1A2WoRbTwKZ516z6NEs5A", - "id" : 36619, - "type" : "transport", - "action" : "indices:data/write/reindex", - "status" : { <1> - "total" : 6154, - "updated" : 3500, - "created" : 0, - "deleted" : 0, - "batches" : 36, - "version_conflicts" : 0, - "noops" : 0, - "retries": 0, - "throttled_millis": 0 - }, - "description" : "" - } ] + "tasks" : { + "r1A2WoRbTwKZ516z6NEs5A:36619" : { + "node" : "r1A2WoRbTwKZ516z6NEs5A", + "id" : 36619, + "type" : "transport", + "action" : "indices:data/write/reindex", + "status" : { <1> + "total" : 6154, + "updated" : 3500, + "created" : 0, + "deleted" : 0, + "batches" : 36, + "version_conflicts" : 0, + "noops" : 0, + "retries": 0, + "throttled_millis": 0 + }, + "description" : "" + } + } } } } diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index f7ddf1d087c..84bd61f3e9b 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -235,11 +235,11 @@ from aborting the operation. === Works with the Task API While Update By Query is running you can fetch their status using the -<>: +<>: [source,js] -------------------------------------------------- -POST /_tasks/?pretty&detailed=true&action=byquery +POST /_tasks/?pretty&detailed=true&action=*byquery -------------------------------------------------- // AUTOSENSE @@ -258,24 +258,26 @@ The responses looks like: "testattr" : "test", "portsfile" : "true" }, - "tasks" : [ { - "node" : "r1A2WoRbTwKZ516z6NEs5A", - "id" : 36619, - "type" : "transport", - "action" : "indices:data/write/update/byquery", - "status" : { <1> - "total" : 6154, - "updated" : 3500, - "created" : 0, - "deleted" : 0, - "batches" : 36, - "version_conflicts" : 0, - "noops" : 0, - "retries": 0, - "throttled_millis": 0 - }, - "description" : "" - } ] + "tasks" : { + "r1A2WoRbTwKZ516z6NEs5A:36619" : { + "node" : "r1A2WoRbTwKZ516z6NEs5A", + "id" : 36619, + "type" : "transport", + "action" : "indices:data/write/update/byquery", + "status" : { <1> + "total" : 6154, + "updated" : 3500, + "created" : 0, + "deleted" : 0, + "batches" : 36, + "version_conflicts" : 0, + "noops" : 0, + "retries": 0, + "throttled_millis": 0 + }, + "description" : "" + } + } } } } From 361adcf3870457e87d3fc94e9c799ffea2382ec8 Mon Sep 17 00:00:00 2001 From: Yanjun Huang Date: Sat, 26 Mar 2016 23:37:42 -0700 Subject: [PATCH 12/12] Add limit to total number of fields in mapping. #17357 This is to prevent mapping explosion when dynamic keys such as UUID are used as field names. index.mapping.total_fields.limit specifies the total number of fields an index can have. An exception will be thrown when the limit is reached. The default limit is 1000. Value 0 means no limit. 
This setting is runtime adjustable Closes #11443 --- .../common/settings/IndexScopedSettings.java | 1 + .../index/mapper/MapperService.java | 12 ++++++++- .../cluster/SimpleClusterStateIT.java | 5 +++- .../index/mapper/MapperServiceTests.java | 26 +++++++++++++++++++ .../mapping/dynamic/field-mapping.asciidoc | 6 +++++ 5 files changed, 48 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index fb498283d7b..37a071a7cf3 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -128,6 +128,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING, MapperService.INDEX_MAPPER_DYNAMIC_SETTING, MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING, + MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING, BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING, diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 73b94e60b46..f46586ccb1a 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -84,6 +84,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public static final String DEFAULT_MAPPING = "_default_"; public static final Setting INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING = + Setting.longSetting("index.mapping.total_fields.limit", 1000L, 0, Property.Dynamic, Property.IndexScope); public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true; public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope); @@ -289,6 +291,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { // deserializing cluster state that was sent by the master node, // this check will be skipped. 
checkNestedFieldsLimit(fullPathObjectMappers); + checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size()); } Set parentTypes = this.parentTypes; @@ -403,11 +406,18 @@ public class MapperService extends AbstractIndexComponent implements Closeable { actualNestedFields++; } } - if (allowedNestedFields >= 0 && actualNestedFields > allowedNestedFields) { + if (actualNestedFields > allowedNestedFields) { throw new IllegalArgumentException("Limit of nested fields [" + allowedNestedFields + "] in index [" + index().getName() + "] has been exceeded"); } } + private void checkTotalFieldsLimit(long totalMappers) { + long allowedTotalFields = indexSettings.getValue(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING); + if (allowedTotalFields < totalMappers) { + throw new IllegalArgumentException("Limit of total fields [" + allowedTotalFields + "] in index [" + index().getName() + "] has been exceeded"); + } + } + public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException { String defaultMappingSource; if (PercolatorFieldMapper.TYPE_NAME.equals(mappingType)) { diff --git a/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 6916cfc4e31..da5f3149d6c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.CollectionAssertions; import org.junit.Before; @@ -145,7 +146,9 @@ public class SimpleClusterStateIT extends ESIntegTestCase { int numberOfShards = scaledRandomIntBetween(1, cluster().numDataNodes()); // if the create index is ack'ed, then all nodes have successfully processed the cluster state assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards, + IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0, + MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE) .addMapping("type", mapping) .setTimeout("60s").get()); ensureGreen(); // wait for green state, so its both green, and there are no more pending events diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 58244ce7ad0..bf7f8e0b5cc 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -30,15 +30,21 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.MapperService.MergeReason; import 
org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Rule; import org.junit.rules.ExpectedException; +import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.function.Function; import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.containsString; @@ -135,4 +141,24 @@ public class MapperServiceTests extends ESSingleNodeTestCase { assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING)); } + public void testTotalFieldsExceedsLimit() throws Throwable { + Function mapping = type -> { + try { + return XContentFactory.jsonBuilder().startObject().startObject(type).startObject("properties") + .startObject("field1").field("type", "string") + .endObject().endObject().endObject().endObject().string(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }; + createIndex("test1").mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); + //set total number of fields to 1 to trigger an exception + try { + createIndex("test2", Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1).build()) + .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_UPDATE, false); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Limit of total fields [1] in index [test2] has been exceeded")); + } + } } diff --git a/docs/reference/mapping/dynamic/field-mapping.asciidoc b/docs/reference/mapping/dynamic/field-mapping.asciidoc index f8612958f9c..238052a9739 100644 --- a/docs/reference/mapping/dynamic/field-mapping.asciidoc +++ b/docs/reference/mapping/dynamic/field-mapping.asciidoc @@ -30,6 +30,12 @@ detected. All other datatypes must be mapped explicitly. Besides the options listed below, dynamic field mapping rules can be further customised with <>. +[[total-fields-limit]] +==== Total fields limit + +To avoid mapping explosion, Index has a default limit of 1000 total number of fields. +The default setting can be updated with `index.mapping.total_fields.limit`. + [[date-detection]] ==== Date detection
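
A closing note on the total fields limit introduced in this last patch: the guard itself reduces to a handful of lines. The sketch below mirrors checkTotalFieldsLimit from the MapperService diff above, except that the setting value and mapper count are passed in explicitly here rather than read from the index settings.

[source,java]
--------------------------------------------------
// Standalone mirror of the checkTotalFieldsLimit guard added to MapperService.
// totalMappers is the sum of object mappers and field mappers; the ceiling
// comes from the "index.mapping.total_fields.limit" setting (default 1000).
static void checkTotalFieldsLimit(long totalMappers, long allowedTotalFields, String indexName) {
    if (allowedTotalFields < totalMappers) {
        throw new IllegalArgumentException("Limit of total fields [" + allowedTotalFields
                + "] in index [" + indexName + "] has been exceeded");
    }
}
--------------------------------------------------

Note that the diff also drops the old `allowedNestedFields >= 0` short-circuit from the nested fields check, so both limits are now enforced with the same plain comparison.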