From e2949d7df1c785ea62f7c68ca565a9f5b9047fa9 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 11 Feb 2015 12:31:02 +0000 Subject: [PATCH 001/236] make InternalAggregation.reduce(ReduceContext) use template pattern sub-classes of InternalAggregation now implement doReduce(ReduceContext) that is called from InternalAggregation.reduce(ReduceContext) which is now final --- .../search/aggregations/InternalAggregation.java | 7 +++++-- .../search/aggregations/InternalAggregations.java | 2 +- .../bucket/InternalSingleBucketAggregation.java | 2 +- .../aggregations/bucket/filters/InternalFilters.java | 2 +- .../aggregations/bucket/geogrid/InternalGeoHashGrid.java | 2 +- .../aggregations/bucket/histogram/InternalHistogram.java | 2 +- .../search/aggregations/bucket/range/InternalRange.java | 2 +- .../bucket/significant/InternalSignificantTerms.java | 2 +- .../bucket/significant/UnmappedSignificantTerms.java | 4 ++-- .../search/aggregations/bucket/terms/InternalTerms.java | 2 +- .../search/aggregations/bucket/terms/UnmappedTerms.java | 4 ++-- .../search/aggregations/metrics/avg/InternalAvg.java | 2 +- .../metrics/cardinality/InternalCardinality.java | 2 +- .../aggregations/metrics/geobounds/InternalGeoBounds.java | 2 +- .../search/aggregations/metrics/max/InternalMax.java | 2 +- .../search/aggregations/metrics/min/InternalMin.java | 2 +- .../metrics/percentiles/AbstractInternalPercentiles.java | 2 +- .../metrics/scripted/InternalScriptedMetric.java | 2 +- .../search/aggregations/metrics/stats/InternalStats.java | 2 +- .../metrics/stats/extended/InternalExtendedStats.java | 4 ++-- .../search/aggregations/metrics/sum/InternalSum.java | 2 +- .../aggregations/metrics/tophits/InternalTopHits.java | 2 +- .../metrics/valuecount/InternalValueCount.java | 2 +- 23 files changed, 30 insertions(+), 27 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 6dcee411e92..456b1b391b6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -146,7 +145,11 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St * try reusing an existing get instance (typically the first in the given list) to save on redundant object * construction. */ - public abstract InternalAggregation reduce(ReduceContext reduceContext); + public final InternalAggregation reduce(ReduceContext reduceContext) { + return doReduce(reduceContext); + } + + public abstract InternalAggregation doReduce(ReduceContext reduceContext); public Object getProperty(String path) { AggregationPath aggPath = AggregationPath.parse(path); diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index 6a33c0312af..ec4625e2387 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -165,7 +165,7 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl for (Map.Entry> entry : aggByName.entrySet()) { List aggregations = entry.getValue(); InternalAggregation first = aggregations.get(0); // the list can't be empty as it's created on demand - reducedAggregations.add(first.reduce(new InternalAggregation.ReduceContext(aggregations, context.bigArrays(), context.scriptService()))); + reducedAggregations.add(first.doReduce(new InternalAggregation.ReduceContext(aggregations, context.bigArrays(), context.scriptService()))); } return new InternalAggregations(reducedAggregations); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java index 31d105d5ead..2bccb4234e3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java @@ -69,7 +69,7 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio protected abstract InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations); @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); long docCount = 0L; List subAggregationsList = new ArrayList<>(aggregations.size()); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java index 2642c99a2de..505547487a6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java @@ -191,7 +191,7 @@ public class InternalFilters extends InternalMultiBucketAggregation implements F } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); List> bucketsList = null; for (InternalAggregation aggregation : aggregations) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index 0d09f05694d..c30935c5c3d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -188,7 +188,7 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen } @Override - public InternalGeoHashGrid reduce(ReduceContext reduceContext) { + public InternalGeoHashGrid doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); LongObjectPagedHashMap> buckets = null; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index c7909442016..544f06998ce 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -412,7 +412,7 @@ public class InternalHistogram extends Inter } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List reducedBuckets = reduceBuckets(reduceContext); // adding empty buckets if needed diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index d436e139287..0bb00d03122 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -258,7 +258,7 @@ public class InternalRange extends InternalMulti } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); @SuppressWarnings("unchecked") List[] rangeList = new List[ranges.size()]; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java index 199daecf5da..53949937bbb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java @@ -156,7 +156,7 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggreg } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); long globalSubsetSize = 0; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java index c457c1331b8..bb812741913 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java @@ -68,10 +68,10 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms { } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { for (InternalAggregation aggregation : reduceContext.aggregations()) { if (!(aggregation instanceof UnmappedSignificantTerms)) { - return aggregation.reduce(reduceContext); + return aggregation.doReduce(reduceContext); } } return this; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index b8b45e20ce7..a6ca9d4400c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -160,7 +160,7 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); Multimap buckets = ArrayListMultimap.create(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index a515596868e..1fffb9508a8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -81,10 +81,10 @@ public class UnmappedTerms extends InternalTerms { } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { for (InternalAggregation agg : reduceContext.aggregations()) { if (!(agg instanceof UnmappedTerms)) { - return agg.reduce(reduceContext); + return agg.doReduce(reduceContext); } } return this; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java index dcdecbe3b67..8c795a55332 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java @@ -78,7 +78,7 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i } @Override - public InternalAvg reduce(ReduceContext reduceContext) { + public InternalAvg doReduce(ReduceContext reduceContext) { long count = 0; double sum = 0; for (InternalAggregation aggregation : reduceContext.aggregations()) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java index 2fd964e5f1f..c8341135fb4 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java @@ -99,7 +99,7 @@ public final class InternalCardinality extends InternalNumericMetricsAggregation } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); InternalCardinality reduced = null; for (InternalAggregation aggregation : aggregations) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java index cdda6597c14..eb6a61c960d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java @@ -73,7 +73,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { double top = Double.NEGATIVE_INFINITY; double bottom = Double.POSITIVE_INFINITY; double posLeft = Double.POSITIVE_INFINITY; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java index 90486f3b620..7cae1444c63 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java @@ -76,7 +76,7 @@ public class InternalMax extends InternalNumericMetricsAggregation.SingleValue i } @Override - public InternalMax reduce(ReduceContext reduceContext) { + public InternalMax doReduce(ReduceContext reduceContext) { double max = Double.NEGATIVE_INFINITY; for (InternalAggregation aggregation : reduceContext.aggregations()) { max = Math.max(max, ((InternalMax) aggregation).max); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java index 554152e486c..0974314826c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java @@ -77,7 +77,7 @@ public class InternalMin extends InternalNumericMetricsAggregation.SingleValue i } @Override - public InternalMin reduce(ReduceContext reduceContext) { + public InternalMin doReduce(ReduceContext reduceContext) { double min = Double.POSITIVE_INFINITY; for (InternalAggregation aggregation : reduceContext.aggregations()) { min = Math.min(min, ((InternalMin) aggregation).min); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java index 67f33934bf6..19d056e00cd 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java @@ -60,7 +60,7 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega public abstract double value(double key); @Override - public AbstractInternalPercentiles reduce(ReduceContext reduceContext) { + public AbstractInternalPercentiles doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); TDigestState merged = null; for (InternalAggregation aggregation : aggregations) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index f204f8d5478..c7176e0e1e1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -81,7 +81,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregationObjects = new ArrayList<>(); for (InternalAggregation aggregation : reduceContext.aggregations()) { InternalScriptedMetric mapReduceAggregation = (InternalScriptedMetric) aggregation; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java index 86bda11cd8e..7186fee979c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java @@ -148,7 +148,7 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue } @Override - public InternalStats reduce(ReduceContext reduceContext) { + public InternalStats doReduce(ReduceContext reduceContext) { long count = 0; double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java index 9a700690530..9f88bf4f429 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java @@ -143,13 +143,13 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStat } @Override - public InternalExtendedStats reduce(ReduceContext reduceContext) { + public InternalExtendedStats doReduce(ReduceContext reduceContext) { double sumOfSqrs = 0; for (InternalAggregation aggregation : reduceContext.aggregations()) { InternalExtendedStats stats = (InternalExtendedStats) aggregation; sumOfSqrs += stats.getSumOfSquares(); } - final InternalStats stats = super.reduce(reduceContext); + final InternalStats stats = super.doReduce(reduceContext); return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma, valueFormatter, getMetaData()); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java index b16663db26a..f653c082c79 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java @@ -76,7 +76,7 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue i } @Override - public InternalSum reduce(ReduceContext reduceContext) { + public InternalSum doReduce(ReduceContext reduceContext) { double sum = 0; for (InternalAggregation aggregation : reduceContext.aggregations()) { sum += ((InternalSum) aggregation).sum; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java index e4fad4ef692..8c5eafa2961 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java @@ -91,7 +91,7 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); TopDocs[] shardDocs = new TopDocs[aggregations.size()]; InternalSearchHits[] shardHits = new InternalSearchHits[aggregations.size()]; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java index 062e88fce5f..b8b675c2eee 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java @@ -76,7 +76,7 @@ public class InternalValueCount extends InternalNumericMetricsAggregation.Single } @Override - public InternalAggregation reduce(ReduceContext reduceContext) { + public InternalAggregation doReduce(ReduceContext reduceContext) { long valueCount = 0; for (InternalAggregation aggregation : reduceContext.aggregations()) { valueCount += ((InternalValueCount) aggregation).value; From c60bb4d73bd2ff67247291a58a44eaa50269cd51 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 11 Feb 2015 16:19:48 +0000 Subject: [PATCH 002/236] Adds reducers list to InternalAggregation.reduce() The list of reducers is fed through from the AggregatorFactory --- .../search/aggregations/Aggregator.java | 2 +- .../aggregations/AggregatorFactory.java | 29 +++-- .../aggregations/InternalAggregation.java | 16 ++- .../InternalMultiBucketAggregation.java | 5 +- .../aggregations/NonCollectingAggregator.java | 12 +- .../bucket/BucketsAggregator.java | 14 +- .../InternalSingleBucketAggregation.java | 5 +- .../bucket/SingleBucketAggregator.java | 7 +- .../bucket/children/InternalChildren.java | 9 +- .../children/ParentToChildrenAggregator.java | 25 ++-- .../bucket/filter/FilterAggregator.java | 23 ++-- .../bucket/filter/InternalFilter.java | 8 +- .../bucket/filters/FiltersAggregator.java | 26 ++-- .../bucket/filters/InternalFilters.java | 7 +- .../bucket/geogrid/GeoHashGridAggregator.java | 12 +- .../bucket/geogrid/GeoHashGridParser.java | 17 ++- .../bucket/geogrid/InternalGeoHashGrid.java | 8 +- .../bucket/global/GlobalAggregator.java | 17 ++- .../bucket/global/InternalGlobal.java | 8 +- .../bucket/histogram/HistogramAggregator.java | 23 ++-- .../histogram/InternalDateHistogram.java | 7 +- .../bucket/histogram/InternalHistogram.java | 13 +- .../bucket/missing/InternalMissing.java | 8 +- .../bucket/missing/MissingAggregator.java | 22 ++-- .../bucket/nested/InternalNested.java | 9 +- .../bucket/nested/InternalReverseNested.java | 9 +- .../bucket/nested/NestedAggregator.java | 105 ++++++++------- .../nested/ReverseNestedAggregator.java | 63 +++++---- .../bucket/range/InternalRange.java | 13 +- .../bucket/range/RangeAggregator.java | 123 +++++++++--------- .../bucket/range/date/InternalDateRange.java | 11 +- .../range/geodistance/GeoDistanceParser.java | 13 +- .../geodistance/InternalGeoDistance.java | 11 +- .../bucket/range/ipv4/InternalIPv4Range.java | 11 +- ...balOrdinalsSignificantTermsAggregator.java | 43 +++--- .../significant/InternalSignificantTerms.java | 6 +- .../significant/SignificantLongTerms.java | 10 +- .../SignificantLongTermsAggregator.java | 17 ++- .../significant/SignificantStringTerms.java | 10 +- .../SignificantStringTermsAggregator.java | 16 ++- .../SignificantTermsAggregatorFactory.java | 40 ++++-- .../significant/UnmappedSignificantTerms.java | 6 +- .../terms/AbstractStringTermsAggregator.java | 15 ++- .../bucket/terms/DoubleTerms.java | 14 +- .../bucket/terms/DoubleTermsAggregator.java | 13 +- .../GlobalOrdinalsStringTermsAggregator.java | 20 +-- .../bucket/terms/InternalTerms.java | 11 +- .../aggregations/bucket/terms/LongTerms.java | 14 +- .../bucket/terms/LongTermsAggregator.java | 64 +++++---- .../bucket/terms/StringTerms.java | 14 +- .../bucket/terms/StringTermsAggregator.java | 13 +- .../bucket/terms/TermsAggregator.java | 6 +- .../bucket/terms/TermsAggregatorFactory.java | 45 ++++--- .../bucket/terms/UnmappedTerms.java | 9 +- .../metrics/InternalMetricsAggregation.java | 6 +- .../InternalNumericMetricsAggregation.java | 13 +- .../metrics/MetricsAggregator.java | 7 +- .../metrics/NumericMetricsAggregator.java | 17 ++- .../metrics/avg/AvgAggregator.java | 37 +++--- .../aggregations/metrics/avg/InternalAvg.java | 9 +- .../cardinality/CardinalityAggregator.java | 10 +- .../CardinalityAggregatorFactory.java | 13 +- .../cardinality/InternalCardinality.java | 8 +- .../geobounds/GeoBoundsAggregator.java | 22 ++-- .../metrics/geobounds/InternalGeoBounds.java | 8 +- .../aggregations/metrics/max/InternalMax.java | 8 +- .../metrics/max/MaxAggregator.java | 35 ++--- .../aggregations/metrics/min/InternalMin.java | 8 +- .../metrics/min/MinAggregator.java | 35 ++--- .../AbstractInternalPercentiles.java | 9 +- .../AbstractPercentilesAggregator.java | 7 +- .../percentiles/InternalPercentileRanks.java | 10 +- .../percentiles/InternalPercentiles.java | 10 +- .../PercentileRanksAggregator.java | 24 ++-- .../percentiles/PercentilesAggregator.java | 21 ++- .../scripted/InternalScriptedMetric.java | 11 +- .../scripted/ScriptedMetricAggregator.java | 16 ++- .../metrics/stats/InternalStats.java | 7 +- .../metrics/stats/StatsAggegator.java | 59 +++++---- .../extended/ExtendedStatsAggregator.java | 30 +++-- .../stats/extended/InternalExtendedStats.java | 10 +- .../aggregations/metrics/sum/InternalSum.java | 8 +- .../metrics/sum/SumAggregator.java | 37 +++--- .../metrics/tophits/TopHitsAggregator.java | 17 ++- .../valuecount/InternalValueCount.java | 9 +- .../valuecount/ValueCountAggregator.java | 27 ++-- .../search/aggregations/reducers/Reducer.java | 63 +++++++++ .../aggregations/reducers/ReducerFactory.java | 88 +++++++++++++ .../ValuesSourceAggregatorFactory.java | 21 ++- .../SignificanceHeuristicTests.java | 24 +++- 90 files changed, 1181 insertions(+), 640 deletions(-) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java index fd9519499a8..bce1f9bc196 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java @@ -105,7 +105,7 @@ public abstract class Aggregator extends BucketCollector implements Releasable { * Build an empty aggregation. */ public abstract InternalAggregation buildEmptyAggregation(); - + /** Aggregation mode for sub aggregations. */ public enum SubAggCollectionMode { diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 256700bada5..f49a328fd16 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -23,10 +23,13 @@ import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; import java.io.IOException; +import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -38,6 +41,7 @@ public abstract class AggregatorFactory { protected String type; protected AggregatorFactory parent; protected AggregatorFactories factories = AggregatorFactories.EMPTY; + protected List reducers = Collections.emptyList(); protected Map metaData; /** @@ -79,7 +83,8 @@ public abstract class AggregatorFactory { return parent; } - protected abstract Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException; + protected abstract Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException; /** * Creates the aggregator @@ -92,7 +97,7 @@ public abstract class AggregatorFactory { * @return The created aggregator */ public final Aggregator create(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket) throws IOException { - return createInternal(context, parent, collectsFromSingleBucket, this.metaData); + return createInternal(context, parent, collectsFromSingleBucket, this.reducers, this.metaData); } public void doValidate() { @@ -102,16 +107,18 @@ public abstract class AggregatorFactory { this.metaData = metaData; } + + public void setReducers(List reducers) { + this.reducers = reducers; + } + + /** * Utility method. Given an {@link AggregatorFactory} that creates {@link Aggregator}s that only know how * to collect bucket 0, this returns an aggregator that can collect any bucket. */ protected static Aggregator asMultiBucketAggregator(final AggregatorFactory factory, final AggregationContext context, final Aggregator parent) throws IOException { - final Aggregator first = factory.create(context, parent, true); - final BigArrays bigArrays = context.bigArrays(); - return new Aggregator() { - - ObjectArray aggregators; + final Aggregator first = factory.create(context, parent, truegator> aggregators; ObjectArray collectors; { @@ -187,9 +194,9 @@ public abstract class AggregatorFactory { LeafBucketCollector collector = collectors.get(bucket); if (collector == null) { Aggregator aggregator = aggregators.get(bucket); - if (aggregator == null) { - aggregator = factory.create(context, parent, true); - aggregator.preCollection(); + if (aggregator == null) { + aggregator = factory.create(context, parent, true); + aggregator.preCollection(); aggregators.set(bucket, aggregator); } collector = aggregator.getLeafCollector(ctx); @@ -197,7 +204,7 @@ public abstract class AggregatorFactory { collectors.set(bucket, collector); } collector.collect(doc, 0); - } + } }; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 456b1b391b6..828a1a7ee0f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; @@ -116,6 +117,8 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St protected Map metaData; + private List reducers; + /** Constructs an un initialized addAggregation (used for serialization) **/ protected InternalAggregation() {} @@ -124,8 +127,9 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St * * @param name The name of the get. */ - protected InternalAggregation(String name, Map metaData) { + protected InternalAggregation(String name, List reducers, Map metaData) { this.name = name; + this.reducers = reducers; this.metaData = metaData; } @@ -146,7 +150,11 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St * construction. */ public final InternalAggregation reduce(ReduceContext reduceContext) { - return doReduce(reduceContext); + InternalAggregation aggResult = doReduce(reduceContext); + for (Reducer reducer : reducers) { + aggResult = reducer.reduce(aggResult, reduceContext); + } + return aggResult; } public abstract InternalAggregation doReduce(ReduceContext reduceContext); @@ -180,6 +188,10 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St return metaData; } + public List reducers() { + return reducers; + } + @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(name); diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index bd6d8d2728c..ebd2637ac56 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.util.List; import java.util.Map; @@ -30,8 +31,8 @@ public abstract class InternalMultiBucketAggregation extends InternalAggregation public InternalMultiBucketAggregation() { } - public InternalMultiBucketAggregation(String name, Map metaData) { - super(name, metaData); + public InternalMultiBucketAggregation(String name, List reducers, Map metaData) { + super(name, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java index 33c4215e27a..9b64c647b38 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java @@ -20,9 +20,11 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -31,12 +33,14 @@ import java.util.Map; */ public abstract class NonCollectingAggregator extends AggregatorBase { - protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent, AggregatorFactories subFactories, Map metaData) throws IOException { - super(name, subFactories, context, parent, metaData); + protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent, AggregatorFactories subFactories, + List reducers, Map metaData) throws IOException { + super(name, subFactories, context, parent, reducers, metaData); } - protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - this(name, context, parent, AggregatorFactories.EMPTY, metaData); + protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + this(name, context, parent, AggregatorFactories.EMPTY, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index e4d0260cf93..b7c8fe7ccfc 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorBase; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -31,6 +32,7 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; /** @@ -42,9 +44,9 @@ public abstract class BucketsAggregator extends AggregatorBase { private IntArray docCounts; public BucketsAggregator(String name, AggregatorFactories factories, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, factories, context, parent, metaData); - bigArrays = context.bigArrays(); + AggregationContext context, Aggregator parent, + List reducers, Map metaData) throws IOException { + super(name, factories, context, parent, reducers, metaData); docCounts = bigArrays.newIntArray(1, true); } @@ -110,11 +112,11 @@ public abstract class BucketsAggregator extends AggregatorBase { */ protected final InternalAggregations bucketAggregations(long bucket) throws IOException { final InternalAggregation[] aggregations = new InternalAggregation[subAggregators.length]; - for (int i = 0; i < subAggregators.length; i++) { + for (int i = 0; i < subAggregators.length; i++) { aggregations[i] = subAggregators[i].buildAggregation(bucket); - } + } return new InternalAggregations(Arrays.asList(aggregations)); - } + } /** * Utility method to build empty aggregations of the sub aggregators. diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java index 2bccb4234e3..95157da9e77 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.ArrayList; @@ -47,8 +48,8 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio * @param docCount The document count in the single bucket. * @param aggregations The already built sub-aggregations that are associated with the bucket. */ - protected InternalSingleBucketAggregation(String name, long docCount, InternalAggregations aggregations, Map metaData) { - super(name, metaData); + protected InternalSingleBucketAggregation(String name, long docCount, InternalAggregations aggregations, List reducers, Map metaData) { + super(name, reducers, metaData); this.docCount = docCount; this.aggregations = aggregations; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java index d8b884a88e4..202f02c4a22 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java @@ -20,9 +20,11 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -31,8 +33,9 @@ import java.util.Map; public abstract class SingleBucketAggregator extends BucketsAggregator { protected SingleBucketAggregator(String name, AggregatorFactories factories, - AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + AggregationContext aggregationContext, Aggregator parent, + List reducers, Map metaData) throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/InternalChildren.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/InternalChildren.java index 427637b9da7..cfac7f834bc 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/InternalChildren.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/InternalChildren.java @@ -23,8 +23,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,8 +51,9 @@ public class InternalChildren extends InternalSingleBucketAggregation implements public InternalChildren() { } - public InternalChildren(String name, long docCount, InternalAggregations aggregations, Map metaData) { - super(name, docCount, aggregations, metaData); + public InternalChildren(String name, long docCount, InternalAggregations aggregations, List reducers, + Map metaData) { + super(name, docCount, aggregations, reducers, metaData); } @Override @@ -60,6 +63,6 @@ public class InternalChildren extends InternalSingleBucketAggregation implements @Override protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { - return new InternalChildren(name, docCount, subAggregations, getMetaData()); + return new InternalChildren(name, docCount, subAggregations, reducers(), getMetaData()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index b4769f05baf..eefbd853444 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -36,6 +36,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -70,8 +71,9 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext, Aggregator parent, String parentType, Filter childFilter, Filter parentFilter, - ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, long maxOrd, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, + long maxOrd, List reducers, Map metaData) throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); this.parentType = parentType; // these two filters are cached in the parser this.childFilter = childFilter; @@ -84,12 +86,13 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - return new InternalChildren(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalChildren(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalChildren(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalChildren(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } @Override @@ -199,21 +202,25 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new NonCollectingAggregator(name, aggregationContext, parent, metaData) { + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new NonCollectingAggregator(name, aggregationContext, parent, reducers, metaData) { @Override public InternalAggregation buildEmptyAggregation() { - return new InternalChildren(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalChildren(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } }; } @Override - protected Aggregator doCreateInternal(ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, + AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, List reducers, + Map metaData) throws IOException { long maxOrd = valuesSource.globalMaxOrd(aggregationContext.searchContext().searcher(), parentType); - return new ParentToChildrenAggregator(name, factories, aggregationContext, parent, parentType, childFilter, parentFilter, valuesSource, maxOrd, metaData); + return new ParentToChildrenAggregator(name, factories, aggregationContext, parent, parentType, childFilter, parentFilter, + valuesSource, maxOrd, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java index d5b15dba1ca..da728f1ee04 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lucene.docset.DocIdSets; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -29,9 +30,11 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -45,9 +48,9 @@ public class FilterAggregator extends SingleBucketAggregator { org.apache.lucene.search.Filter filter, AggregatorFactories factories, AggregationContext aggregationContext, - Aggregator parent, + Aggregator parent, List reducers, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + super(name, factories, aggregationContext, parent, reducers, metaData); this.filter = filter; } @@ -58,23 +61,24 @@ public class FilterAggregator extends SingleBucketAggregator { // no need to provide deleted docs to the filter final Bits bits = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.getDocIdSet(ctx, null)); return new LeafBucketCollectorBase(sub, null) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { - if (bits.get(doc)) { + if (bits.get(doc)) { collectBucket(sub, doc, bucket); } - } + } }; } @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - return new InternalFilter(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalFilter(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalFilter(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalFilter(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } public static class Factory extends AggregatorFactory { @@ -87,8 +91,9 @@ public class FilterAggregator extends SingleBucketAggregator { } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new FilterAggregator(name, filter, factories, context, parent, metaData); + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { + return new FilterAggregator(name, filter, factories, context, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java index c3d84b9fe51..0429ea20a59 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java @@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -48,8 +50,8 @@ public class InternalFilter extends InternalSingleBucketAggregation implements F InternalFilter() {} // for serialization - InternalFilter(String name, long docCount, InternalAggregations subAggregations, Map metaData) { - super(name, docCount, subAggregations, metaData); + InternalFilter(String name, long docCount, InternalAggregations subAggregations, List reducers, Map metaData) { + super(name, docCount, subAggregations, reducers, metaData); } @Override @@ -59,6 +61,6 @@ public class InternalFilter extends InternalSingleBucketAggregation implements F @Override protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { - return new InternalFilter(name, docCount, subAggregations, getMetaData()); + return new InternalFilter(name, docCount, subAggregations, reducers(), getMetaData()); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java index b97a5442ced..931ead734fb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lucene.docset.DocIdSets; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -33,6 +34,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; @@ -59,8 +61,9 @@ public class FiltersAggregator extends BucketsAggregator { private final boolean keyed; public FiltersAggregator(String name, AggregatorFactories factories, List filters, boolean keyed, AggregationContext aggregationContext, - Aggregator parent, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); this.keyed = keyed; this.filters = filters.toArray(new KeyedFilter[filters.size()]); } @@ -73,16 +76,16 @@ public class FiltersAggregator extends BucketsAggregator { final Bits[] bits = new Bits[filters.length]; for (int i = 0; i < filters.length; ++i) { bits[i] = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].filter.getDocIdSet(ctx, null)); - } + } return new LeafBucketCollectorBase(sub, null) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { - for (int i = 0; i < bits.length; i++) { - if (bits[i].get(doc)) { + for (int i = 0; i < bits.length; i++) { + if (bits[i].get(doc)) { collectBucket(sub, doc, bucketOrd(bucket, i)); } - } } + } }; } @@ -95,7 +98,7 @@ public class FiltersAggregator extends BucketsAggregator { InternalFilters.Bucket bucket = new InternalFilters.Bucket(filter.key, bucketDocCount(bucketOrd), bucketAggregations(bucketOrd), keyed); buckets.add(bucket); } - return new InternalFilters(name, buckets, keyed, metaData()); + return new InternalFilters(name, buckets, keyed, reducers(), metaData()); } @Override @@ -106,7 +109,7 @@ public class FiltersAggregator extends BucketsAggregator { InternalFilters.Bucket bucket = new InternalFilters.Bucket(filters[i].key, 0, subAggs, keyed); buckets.add(bucket); } - return new InternalFilters(name, buckets, keyed, metaData()); + return new InternalFilters(name, buckets, keyed, reducers(), metaData()); } final long bucketOrd(long owningBucketOrdinal, int filterOrd) { @@ -125,8 +128,9 @@ public class FiltersAggregator extends BucketsAggregator { } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new FiltersAggregator(name, factories, filters, keyed, context, parent, metaData); + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { + return new FiltersAggregator(name, factories, filters, keyed, context, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java index 505547487a6..1e4c882ef5f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.ArrayList; @@ -163,8 +164,8 @@ public class InternalFilters extends InternalMultiBucketAggregation implements F public InternalFilters() {} // for serialization - public InternalFilters(String name, List buckets, boolean keyed, Map metaData) { - super(name, metaData); + public InternalFilters(String name, List buckets, boolean keyed, List reducers, Map metaData) { + super(name, reducers, metaData); this.buckets = buckets; this.keyed = keyed; } @@ -211,7 +212,7 @@ public class InternalFilters extends InternalMultiBucketAggregation implements F } } - InternalFilters reduced = new InternalFilters(name, new ArrayList(bucketsList.size()), keyed, getMetaData()); + InternalFilters reduced = new InternalFilters(name, new ArrayList(bucketsList.size()), keyed, reducers(), getMetaData()); for (List sameRangeList : bucketsList) { reduced.buckets.add((sameRangeList.get(0)).reduce(sameRangeList, reduceContext)); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 7e9f4682207..c2c646f5702 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -28,12 +28,14 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -49,8 +51,10 @@ public class GeoHashGridAggregator extends BucketsAggregator { private final LongHash bucketOrds; public GeoHashGridAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, - int requiredSize, int shardSize, AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + int requiredSize, + int shardSize, AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.requiredSize = requiredSize; this.shardSize = shardSize; @@ -126,12 +130,12 @@ public class GeoHashGridAggregator extends BucketsAggregator { bucket.aggregations = bucketAggregations(bucket.bucketOrd); list[i] = bucket; } - return new InternalGeoHashGrid(name, requiredSize, Arrays.asList(list), metaData()); + return new InternalGeoHashGrid(name, requiredSize, Arrays.asList(list), reducers(), metaData()); } @Override public InternalGeoHashGrid buildEmptyAggregation() { - return new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), metaData()); + return new InternalGeoHashGrid(name, requiredSize, Collections. emptyList(), reducers(), metaData()); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java index e1ce0a38c13..24b6d490c9f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java @@ -34,6 +34,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.BucketUtils; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -43,6 +44,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -123,9 +125,11 @@ public class GeoHashGridParser implements Aggregator.Parser { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), metaData); - return new NonCollectingAggregator(name, aggregationContext, parent, metaData) { + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, + Collections. emptyList(), reducers, metaData); + return new NonCollectingAggregator(name, aggregationContext, parent, reducers, metaData) { public InternalAggregation buildEmptyAggregation() { return aggregation; } @@ -133,12 +137,15 @@ public class GeoHashGridParser implements Aggregator.Parser { } @Override - protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, + Aggregator parent, boolean collectsFromSingleBucket, List reducers, Map metaData) + throws IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, aggregationContext, parent); } ValuesSource.Numeric cellIdSource = new CellIdSource(valuesSource, precision); - return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, aggregationContext, parent, metaData); + return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, aggregationContext, parent, reducers, + metaData); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index c30935c5c3d..83428f8c209 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -32,6 +32,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.ArrayList; @@ -170,8 +171,9 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen InternalGeoHashGrid() { } // for serialization - public InternalGeoHashGrid(String name, int requiredSize, Collection buckets, Map metaData) { - super(name, metaData); + public InternalGeoHashGrid(String name, int requiredSize, Collection buckets, List reducers, + Map metaData) { + super(name, reducers, metaData); this.requiredSize = requiredSize; this.buckets = buckets; } @@ -218,7 +220,7 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen for (int i = ordered.size() - 1; i >= 0; i--) { list[i] = ordered.pop(); } - return new InternalGeoHashGrid(getName(), requiredSize, Arrays.asList(list), getMetaData()); + return new InternalGeoHashGrid(getName(), requiredSize, Arrays.asList(list), reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java index 7862eade5d6..edecdd749dd 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java @@ -28,9 +28,11 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -38,8 +40,9 @@ import java.util.Map; */ public class GlobalAggregator extends SingleBucketAggregator { - public GlobalAggregator(String name, AggregatorFactories subFactories, AggregationContext aggregationContext, Map metaData) throws IOException { - super(name, subFactories, aggregationContext, null, metaData); + public GlobalAggregator(String name, AggregatorFactories subFactories, AggregationContext aggregationContext, List reducers, + Map metaData) throws IOException { + super(name, subFactories, aggregationContext, null, reducers, metaData); } @Override @@ -50,14 +53,15 @@ public class GlobalAggregator extends SingleBucketAggregator { public void collect(int doc, long bucket) throws IOException { assert bucket == 0 : "global aggregator can only be a top level aggregator"; collectBucket(sub, doc, bucket); - } + } }; } @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { assert owningBucketOrdinal == 0 : "global aggregator can only be a top level aggregator"; - return new InternalGlobal(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalGlobal(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override @@ -72,7 +76,8 @@ public class GlobalAggregator extends SingleBucketAggregator { } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { if (parent != null) { throw new AggregationExecutionException("Aggregation [" + parent.name() + "] cannot have a global " + "sub-aggregation [" + name + "]. Global aggregations can only be defined as top level aggregations"); @@ -80,7 +85,7 @@ public class GlobalAggregator extends SingleBucketAggregator { if (collectsFromSingleBucket == false) { throw new ElasticsearchIllegalStateException(); } - return new GlobalAggregator(name, factories, context, metaData); + return new GlobalAggregator(name, factories, context, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java index 6e317f26952..157d2c5c7f9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java @@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,8 +51,8 @@ public class InternalGlobal extends InternalSingleBucketAggregation implements G InternalGlobal() {} // for serialization - InternalGlobal(String name, long docCount, InternalAggregations aggregations, Map metaData) { - super(name, docCount, aggregations, metaData); + InternalGlobal(String name, long docCount, InternalAggregations aggregations, List reducers, Map metaData) { + super(name, docCount, aggregations, reducers, metaData); } @Override @@ -60,6 +62,6 @@ public class InternalGlobal extends InternalSingleBucketAggregation implements G @Override protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { - return new InternalGlobal(name, docCount, subAggregations, getMetaData()); + return new InternalGlobal(name, docCount, subAggregations, reducers(), getMetaData()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index a39a488a615..0a6a8bce732 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -62,9 +63,10 @@ public class HistogramAggregator extends BucketsAggregator { boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, InternalHistogram.Factory histogramFactory, - AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { + AggregationContext aggregationContext, + Aggregator parent, List reducers, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + super(name, factories, aggregationContext, parent, reducers, metaData); this.rounding = rounding; this.order = order; this.keyed = keyed; @@ -130,13 +132,14 @@ public class HistogramAggregator extends BucketsAggregator { // value source will be null for unmapped fields InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return histogramFactory.create(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, metaData()); + return histogramFactory.create(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return histogramFactory.create(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, metaData()); + return histogramFactory.create(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, reducers(), + metaData()); } @Override @@ -167,12 +170,15 @@ public class HistogramAggregator extends BucketsAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, null, null, config.formatter(), histogramFactory, aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, null, null, config.formatter(), + histogramFactory, aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, aggregationContext, parent); } @@ -185,7 +191,8 @@ public class HistogramAggregator extends BucketsAggregator { extendedBounds.processAndValidate(name, aggregationContext.searchContext(), config.parser()); roundedBounds = extendedBounds.round(rounding); } - return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource, config.formatter(), histogramFactory, aggregationContext, parent, metaData); + return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource, + config.formatter(), histogramFactory, aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 63cab59ad6b..9f9ad81c953 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -75,8 +76,10 @@ public class InternalDateHistogram { @Override public InternalHistogram create(String name, List buckets, InternalOrder order, - long minDocCount, EmptyBucketInfo emptyBucketInfo, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, metaData); + long minDocCount, + EmptyBucketInfo emptyBucketInfo, @Nullable ValueFormatter formatter, boolean keyed, List reducers, + Map metaData) { + return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 544f06998ce..a33cdb49b3c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -233,8 +234,9 @@ public class InternalHistogram extends Inter } public InternalHistogram create(String name, List buckets, InternalOrder order, long minDocCount, - EmptyBucketInfo emptyBucketInfo, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, metaData); + EmptyBucketInfo emptyBucketInfo, @Nullable ValueFormatter formatter, boolean keyed, List reducers, + Map metaData) { + return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, reducers, metaData); } public B createBucket(long key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { @@ -259,8 +261,8 @@ public class InternalHistogram extends Inter InternalHistogram(String name, List buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo, - @Nullable ValueFormatter formatter, boolean keyed, Factory factory, Map metaData) { - super(name, metaData); + @Nullable ValueFormatter formatter, boolean keyed, Factory factory, List reducers, Map metaData) { + super(name, reducers, metaData); this.buckets = buckets; this.order = order; assert (minDocCount == 0) == (emptyBucketInfo != null); @@ -432,7 +434,8 @@ public class InternalHistogram extends Inter CollectionUtil.introSort(reducedBuckets, order.comparator()); } - return getFactory().create(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, formatter, keyed, getMetaData()); + return getFactory().create(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, formatter, keyed, reducers(), + getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java index d314e44e901..0245f117835 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java @@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -50,8 +52,8 @@ public class InternalMissing extends InternalSingleBucketAggregation implements InternalMissing() { } - InternalMissing(String name, long docCount, InternalAggregations aggregations, Map metaData) { - super(name, docCount, aggregations, metaData); + InternalMissing(String name, long docCount, InternalAggregations aggregations, List reducers, Map metaData) { + super(name, docCount, aggregations, reducers, metaData); } @Override @@ -61,6 +63,6 @@ public class InternalMissing extends InternalSingleBucketAggregation implements @Override protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { - return new InternalMissing(name, docCount, subAggregations, getMetaData()); + return new InternalMissing(name, docCount, subAggregations, reducers(), getMetaData()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java index 1b65bde9904..eb81c6a5ec1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java @@ -26,12 +26,14 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -42,8 +44,9 @@ public class MissingAggregator extends SingleBucketAggregator { private final ValuesSource valuesSource; public MissingAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, - AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + AggregationContext aggregationContext, + Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; } @@ -69,12 +72,13 @@ public class MissingAggregator extends SingleBucketAggregator { @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - return new InternalMissing(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalMissing(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalMissing(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalMissing(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory { @@ -84,13 +88,15 @@ public class MissingAggregator extends SingleBucketAggregator { } @Override - protected MissingAggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new MissingAggregator(name, factories, null, aggregationContext, parent, metaData); + protected MissingAggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new MissingAggregator(name, factories, null, aggregationContext, parent, reducers, metaData); } @Override - protected MissingAggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new MissingAggregator(name, factories, valuesSource, aggregationContext, parent, metaData); + protected MissingAggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new MissingAggregator(name, factories, valuesSource, aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java index 8b434a3fd24..86ad26edab3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java @@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,8 +51,9 @@ public class InternalNested extends InternalSingleBucketAggregation implements N public InternalNested() { } - public InternalNested(String name, long docCount, InternalAggregations aggregations, Map metaData) { - super(name, docCount, aggregations, metaData); + public InternalNested(String name, long docCount, InternalAggregations aggregations, List reducers, + Map metaData) { + super(name, docCount, aggregations, reducers, metaData); } @Override @@ -60,6 +63,6 @@ public class InternalNested extends InternalSingleBucketAggregation implements N @Override protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { - return new InternalNested(name, docCount, subAggregations, getMetaData()); + return new InternalNested(name, docCount, subAggregations, reducers(), getMetaData()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNested.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNested.java index eec7345d317..6dfaad42b03 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNested.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNested.java @@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,8 +51,9 @@ public class InternalReverseNested extends InternalSingleBucketAggregation imple public InternalReverseNested() { } - public InternalReverseNested(String name, long docCount, InternalAggregations aggregations, Map metaData) { - super(name, docCount, aggregations, metaData); + public InternalReverseNested(String name, long docCount, InternalAggregations aggregations, List reducers, + Map metaData) { + super(name, docCount, aggregations, reducers, metaData); } @Override @@ -60,6 +63,6 @@ public class InternalReverseNested extends InternalSingleBucketAggregation imple @Override protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { - return new InternalReverseNested(name, docCount, subAggregations, getMetaData()); + return new InternalReverseNested(name, docCount, subAggregations, reducers(), getMetaData()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 3fa459525f2..459802f62a3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -39,9 +39,11 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -55,8 +57,8 @@ public class NestedAggregator extends SingleBucketAggregator { private DocIdSetIterator childDocs; private BitSet parentDocs; - public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, Map metaData, FilterCachingPolicy filterCachingPolicy) throws IOException { - super(name, factories, aggregationContext, parentAggregator, metaData); + public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, List reducers, Map metaData, FilterCachingPolicy filterCachingPolicy) throws IOException { + super(name, factories, aggregationContext, parentAggregator, reducers, metaData); childFilter = aggregationContext.searchContext().filterCache().cache(objectMapper.nestedTypeFilter(), null, filterCachingPolicy); } @@ -64,68 +66,69 @@ public class NestedAggregator extends SingleBucketAggregator { public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { // Reset parentFilter, so we resolve the parentDocs for each new segment being searched this.parentFilter = null; - // In ES if parent is deleted, then also the children are deleted. Therefore acceptedDocs can also null here. + // In ES if parent is deleted, then also the children are deleted. Therefore acceptedDocs can also null here. DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, null); - if (DocIdSets.isEmpty(childDocIdSet)) { - childDocs = null; - } else { - childDocs = childDocIdSet.iterator(); - } + if (DocIdSets.isEmpty(childDocIdSet)) { + childDocs = null; + } else { + childDocs = childDocIdSet.iterator(); + } return new LeafBucketCollectorBase(sub, null) { - @Override + @Override public void collect(int parentDoc, long bucket) throws IOException { - // here we translate the parent doc to a list of its nested docs, and then call super.collect for evey one of them so they'll be collected + // here we translate the parent doc to a list of its nested docs, and then call super.collect for evey one of them so they'll be collected - // if parentDoc is 0 then this means that this parent doesn't have child docs (b/c these appear always before the parent doc), so we can skip: - if (parentDoc == 0 || childDocs == null) { - return; - } - if (parentFilter == null) { - // The aggs are instantiated in reverse, first the most inner nested aggs and lastly the top level aggs - // So at the time a nested 'nested' aggs is parsed its closest parent nested aggs hasn't been constructed. - // So the trick is to set at the last moment just before needed and we can use its child filter as the - // parent filter. + // if parentDoc is 0 then this means that this parent doesn't have child docs (b/c these appear always before the parent doc), so we can skip: + if (parentDoc == 0 || childDocs == null) { + return; + } + if (parentFilter == null) { + // The aggs are instantiated in reverse, first the most inner nested aggs and lastly the top level aggs + // So at the time a nested 'nested' aggs is parsed its closest parent nested aggs hasn't been constructed. + // So the trick is to set at the last moment just before needed and we can use its child filter as the + // parent filter. - // Additional NOTE: Before this logic was performed in the setNextReader(...) method, but the the assumption - // that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during - // aggs execution + // Additional NOTE: Before this logic was performed in the setNextReader(...) method, but the the assumption + // that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during + // aggs execution Filter parentFilterNotCached = findClosestNestedPath(parent()); - if (parentFilterNotCached == null) { - parentFilterNotCached = NonNestedDocsFilter.INSTANCE; - } - parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(parentFilterNotCached); + if (parentFilterNotCached == null) { + parentFilterNotCached = NonNestedDocsFilter.INSTANCE; + } + parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(parentFilterNotCached); BitDocIdSet parentSet = parentFilter.getDocIdSet(ctx); - if (DocIdSets.isEmpty(parentSet)) { - // There are no parentDocs in the segment, so return and set childDocs to null, so we exit early for future invocations. - childDocs = null; - return; - } else { - parentDocs = parentSet.bits(); - } - } + if (DocIdSets.isEmpty(parentSet)) { + // There are no parentDocs in the segment, so return and set childDocs to null, so we exit early for future invocations. + childDocs = null; + return; + } else { + parentDocs = parentSet.bits(); + } + } - final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); - int childDocId = childDocs.docID(); - if (childDocId <= prevParentDoc) { - childDocId = childDocs.advance(prevParentDoc + 1); - } + final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); + int childDocId = childDocs.docID(); + if (childDocId <= prevParentDoc) { + childDocId = childDocs.advance(prevParentDoc + 1); + } - for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) { + for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) { collectBucket(sub, childDocId, bucket); } - } + } }; } @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalNested(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalNested(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } private static Filter findClosestNestedPath(Aggregator parent) { @@ -151,33 +154,35 @@ public class NestedAggregator extends SingleBucketAggregator { } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } MapperService.SmartNameObjectMapper mapper = context.searchContext().smartNameObjectMapper(path); if (mapper == null) { - return new Unmapped(name, context, parent, metaData); + return new Unmapped(name, context, parent, reducers, metaData); } ObjectMapper objectMapper = mapper.mapper(); if (objectMapper == null) { - return new Unmapped(name, context, parent, metaData); + return new Unmapped(name, context, parent, reducers, metaData); } if (!objectMapper.nested().isNested()) { throw new AggregationExecutionException("[nested] nested path [" + path + "] is not nested"); } - return new NestedAggregator(name, factories, objectMapper, context, parent, metaData, filterCachingPolicy); + return new NestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData, filterCachingPolicy); } private final static class Unmapped extends NonCollectingAggregator { - public Unmapped(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + public Unmapped(String name, AggregationContext context, Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, context, parent, reducers, metaData); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalNested(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalNested(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 4dbeec5898f..b64abf55b10 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -40,9 +40,11 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -52,8 +54,10 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { private final BitDocIdSetFilter parentFilter; - public ReverseNestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + public ReverseNestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, + AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); if (objectMapper == null) { parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE); } else { @@ -64,33 +68,33 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { @Override protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - // In ES if parent is deleted, then also the children are deleted, so the child docs this agg receives - // must belong to parent docs that is alive. For this reason acceptedDocs can be null here. + // In ES if parent is deleted, then also the children are deleted, so the child docs this agg receives + // must belong to parent docs that is alive. For this reason acceptedDocs can be null here. BitDocIdSet docIdSet = parentFilter.getDocIdSet(ctx); final BitSet parentDocs; - if (DocIdSets.isEmpty(docIdSet)) { + if (DocIdSets.isEmpty(docIdSet)) { return LeafBucketCollector.NO_OP_COLLECTOR; - } else { - parentDocs = docIdSet.bits(); - } + } else { + parentDocs = docIdSet.bits(); + } final LongIntOpenHashMap bucketOrdToLastCollectedParentDoc = new LongIntOpenHashMap(32); return new LeafBucketCollectorBase(sub, null) { - @Override + @Override public void collect(int childDoc, long bucket) throws IOException { - // fast forward to retrieve the parentDoc this childDoc belongs to - final int parentDoc = parentDocs.nextSetBit(childDoc); - assert childDoc <= parentDoc && parentDoc != DocIdSetIterator.NO_MORE_DOCS; + // fast forward to retrieve the parentDoc this childDoc belongs to + final int parentDoc = parentDocs.nextSetBit(childDoc); + assert childDoc <= parentDoc && parentDoc != DocIdSetIterator.NO_MORE_DOCS; if (bucketOrdToLastCollectedParentDoc.containsKey(bucket)) { - int lastCollectedParentDoc = bucketOrdToLastCollectedParentDoc.lget(); - if (parentDoc > lastCollectedParentDoc) { + int lastCollectedParentDoc = bucketOrdToLastCollectedParentDoc.lget(); + if (parentDoc > lastCollectedParentDoc) { collectBucket(sub, parentDoc, bucket); - bucketOrdToLastCollectedParentDoc.lset(parentDoc); - } - } else { + bucketOrdToLastCollectedParentDoc.lset(parentDoc); + } + } else { collectBucket(sub, parentDoc, bucket); bucketOrdToLastCollectedParentDoc.put(bucket, parentDoc); - } - } + } + } }; } @@ -105,12 +109,13 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - return new InternalReverseNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalReverseNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalReverseNested(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalReverseNested(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } Filter getParentFilter() { @@ -127,7 +132,8 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { // Early validation NestedAggregator closestNestedAggregator = findClosestNestedAggregator(parent); if (closestNestedAggregator == null) { @@ -138,11 +144,11 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { if (path != null) { MapperService.SmartNameObjectMapper mapper = context.searchContext().smartNameObjectMapper(path); if (mapper == null) { - return new Unmapped(name, context, parent, metaData); + return new Unmapped(name, context, parent, reducers, metaData); } objectMapper = mapper.mapper(); if (objectMapper == null) { - return new Unmapped(name, context, parent, metaData); + return new Unmapped(name, context, parent, reducers, metaData); } if (!objectMapper.nested().isNested()) { throw new AggregationExecutionException("[reverse_nested] nested path [" + path + "] is not nested"); @@ -150,18 +156,19 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { } else { objectMapper = null; } - return new ReverseNestedAggregator(name, factories, objectMapper, context, parent, metaData); + return new ReverseNestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData); } private final static class Unmapped extends NonCollectingAggregator { - public Unmapped(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + public Unmapped(String name, AggregationContext context, Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, context, parent, reducers, metaData); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalReverseNested(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalReverseNested(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index 0bb00d03122..59277b9a42f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -219,8 +220,9 @@ public class InternalRange extends InternalMulti return TYPE.name(); } - public R create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - return (R) new InternalRange<>(name, ranges, formatter, keyed, metaData); + public R create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, List reducers, + Map metaData) { + return (R) new InternalRange<>(name, ranges, formatter, keyed, reducers, metaData); } @@ -236,8 +238,9 @@ public class InternalRange extends InternalMulti public InternalRange() {} // for serialization - public InternalRange(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - super(name, metaData); + public InternalRange(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, List reducers, + Map metaData) { + super(name, reducers, metaData); this.ranges = ranges; this.formatter = formatter; this.keyed = keyed; @@ -277,7 +280,7 @@ public class InternalRange extends InternalMulti for (int i = 0; i < this.ranges.size(); ++i) { ranges.add((B) rangeList[i].get(0).reduce(rangeList[i], reduceContext)); } - return getFactory().create(name, ranges, formatter, keyed, getMetaData()); + return getFactory().create(name, ranges, formatter, keyed, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 47011b8dc49..14fe9ddd3bc 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -104,10 +105,10 @@ public class RangeAggregator extends BucketsAggregator { List ranges, boolean keyed, AggregationContext aggregationContext, - Aggregator parent, + Aggregator parent, List reducers, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + super(name, factories, aggregationContext, parent, reducers, metaData); assert valuesSource != null; this.valuesSource = valuesSource; this.formatter = format != null ? format.formatter() : null; @@ -139,64 +140,64 @@ public class RangeAggregator extends BucketsAggregator { final LeafBucketCollector sub) throws IOException { final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { - values.setDocument(doc); - final int valuesCount = values.count(); - for (int i = 0, lo = 0; i < valuesCount; ++i) { - final double value = values.valueAt(i); + values.setDocument(doc); + final int valuesCount = values.count(); + for (int i = 0, lo = 0; i < valuesCount; ++i) { + final double value = values.valueAt(i); lo = collect(doc, value, bucket, lo); - } + } + } + + private int collect(int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException { + int lo = lowBound, hi = ranges.length - 1; // all candidates are between these indexes + int mid = (lo + hi) >>> 1; + while (lo <= hi) { + if (value < ranges[mid].from) { + hi = mid - 1; + } else if (value >= maxTo[mid]) { + lo = mid + 1; + } else { + break; } + mid = (lo + hi) >>> 1; + } + if (lo > hi) return lo; // no potential candidate - private int collect(int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException { - int lo = lowBound, hi = ranges.length - 1; // all candidates are between these indexes - int mid = (lo + hi) >>> 1; - while (lo <= hi) { - if (value < ranges[mid].from) { - hi = mid - 1; - } else if (value >= maxTo[mid]) { - lo = mid + 1; - } else { - break; - } - mid = (lo + hi) >>> 1; - } - if (lo > hi) return lo; // no potential candidate + // binary search the lower bound + int startLo = lo, startHi = mid; + while (startLo <= startHi) { + final int startMid = (startLo + startHi) >>> 1; + if (value >= maxTo[startMid]) { + startLo = startMid + 1; + } else { + startHi = startMid - 1; + } + } - // binary search the lower bound - int startLo = lo, startHi = mid; - while (startLo <= startHi) { - final int startMid = (startLo + startHi) >>> 1; - if (value >= maxTo[startMid]) { - startLo = startMid + 1; - } else { - startHi = startMid - 1; - } - } + // binary search the upper bound + int endLo = mid, endHi = hi; + while (endLo <= endHi) { + final int endMid = (endLo + endHi) >>> 1; + if (value < ranges[endMid].from) { + endHi = endMid - 1; + } else { + endLo = endMid + 1; + } + } - // binary search the upper bound - int endLo = mid, endHi = hi; - while (endLo <= endHi) { - final int endMid = (endLo + endHi) >>> 1; - if (value < ranges[endMid].from) { - endHi = endMid - 1; - } else { - endLo = endMid + 1; - } - } + assert startLo == lowBound || value >= maxTo[startLo - 1]; + assert endHi == ranges.length - 1 || value < ranges[endHi + 1].from; - assert startLo == lowBound || value >= maxTo[startLo - 1]; - assert endHi == ranges.length - 1 || value < ranges[endHi + 1].from; - - for (int i = startLo; i <= endHi; ++i) { - if (ranges[i].matches(value)) { + for (int i = startLo; i <= endHi; ++i) { + if (ranges[i].matches(value)) { collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, i)); - } - } - - return endHi + 1; } + } + + return endHi + 1; + } }; } @@ -215,7 +216,7 @@ public class RangeAggregator extends BucketsAggregator { buckets.add(bucket); } // value source can be null in the case of unmapped fields - return rangeFactory.create(name, buckets, formatter, keyed, metaData()); + return rangeFactory.create(name, buckets, formatter, keyed, reducers(), metaData()); } @Override @@ -229,7 +230,7 @@ public class RangeAggregator extends BucketsAggregator { buckets.add(bucket); } // value source can be null in the case of unmapped fields - return rangeFactory.create(name, buckets, formatter, keyed, metaData()); + return rangeFactory.create(name, buckets, formatter, keyed, reducers(), metaData()); } private static final void sortRanges(final Range[] ranges) { @@ -266,10 +267,10 @@ public class RangeAggregator extends BucketsAggregator { ValueFormat format, AggregationContext context, Aggregator parent, - InternalRange.Factory factory, + InternalRange.Factory factory, List reducers, Map metaData) throws IOException { - super(name, context, parent, metaData); + super(name, context, parent, reducers, metaData); this.ranges = ranges; ValueParser parser = format != null ? format.parser() : ValueParser.RAW; for (Range range : this.ranges) { @@ -287,7 +288,7 @@ public class RangeAggregator extends BucketsAggregator { for (RangeAggregator.Range range : ranges) { buckets.add(factory.createBucket(range.key, range.from, range.to, 0, subAggs, keyed, formatter)); } - return factory.create(name, buckets, formatter, keyed, metaData()); + return factory.create(name, buckets, formatter, keyed, reducers(), metaData()); } } @@ -305,13 +306,15 @@ public class RangeAggregator extends BucketsAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new Unmapped(name, ranges, keyed, config.format(), aggregationContext, parent, rangeFactory, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new Unmapped(name, ranges, keyed, config.format(), aggregationContext, parent, rangeFactory, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new RangeAggregator(name, factories, valuesSource, config.format(), rangeFactory, ranges, keyed, aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new RangeAggregator(name, factories, valuesSource, config.format(), rangeFactory, ranges, keyed, aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java index 785df76e824..b679a6bc3d5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -120,8 +121,9 @@ public class InternalDateRange extends InternalRange { } @Override - public InternalDateRange create(String name, List ranges, ValueFormatter formatter, boolean keyed, Map metaData) { - return new InternalDateRange(name, ranges, formatter, keyed, metaData); + public InternalDateRange create(String name, List ranges, ValueFormatter formatter, boolean keyed, + List reducers, Map metaData) { + return new InternalDateRange(name, ranges, formatter, keyed, reducers, metaData); } @Override @@ -132,8 +134,9 @@ public class InternalDateRange extends InternalRange { InternalDateRange() {} // for serialization - InternalDateRange(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - super(name, ranges, formatter, keyed, metaData); + InternalDateRange(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, + List reducers, Map metaData) { + super(name, ranges, formatter, keyed, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java index 713b94595f5..fdaabb075cd 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Unmapped; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.GeoPointParser; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -179,14 +180,18 @@ public class GeoDistanceParser implements Aggregator.Parser { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new Unmapped(name, ranges, keyed, null, aggregationContext, parent, rangeFactory, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new Unmapped(name, ranges, keyed, null, aggregationContext, parent, rangeFactory, reducers, metaData); } @Override - protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, + Aggregator parent, boolean collectsFromSingleBucket, List reducers, Map metaData) + throws IOException { DistanceSource distanceSource = new DistanceSource(valuesSource, distanceType, origin, unit); - return new RangeAggregator(name, factories, distanceSource, null, rangeFactory, ranges, keyed, aggregationContext, parent, metaData); + return new RangeAggregator(name, factories, distanceSource, null, rangeFactory, ranges, keyed, aggregationContext, parent, + reducers, metaData); } private static class DistanceSource extends ValuesSource.Numeric { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java index da2c41d5233..0fef2e2ba00 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; @@ -108,8 +109,9 @@ public class InternalGeoDistance extends InternalRange ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - return new InternalGeoDistance(name, ranges, formatter, keyed, metaData); + public InternalGeoDistance create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, + List reducers, Map metaData) { + return new InternalGeoDistance(name, ranges, formatter, keyed, reducers, metaData); } @Override @@ -120,8 +122,9 @@ public class InternalGeoDistance extends InternalRange ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - super(name, ranges, formatter, keyed, metaData); + public InternalGeoDistance(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, List reducers, + Map metaData) { + super(name, ranges, formatter, keyed, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java index 9b608aa42d4..be2f8e52f8f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; @@ -117,8 +118,9 @@ public class InternalIPv4Range extends InternalRange { } @Override - public InternalIPv4Range create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - return new InternalIPv4Range(name, ranges, keyed, metaData); + public InternalIPv4Range create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, + List reducers, Map metaData) { + return new InternalIPv4Range(name, ranges, keyed, reducers, metaData); } @Override @@ -129,8 +131,9 @@ public class InternalIPv4Range extends InternalRange { public InternalIPv4Range() {} // for serialization - public InternalIPv4Range(String name, List ranges, boolean keyed, Map metaData) { - super(name, ranges, ValueFormatter.IPv4, keyed, metaData); + public InternalIPv4Range(String name, List ranges, boolean keyed, List reducers, + Map metaData) { + super(name, ranges, ValueFormatter.IPv4, keyed, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java index fc8e5e4b7f7..c7e260faf63 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.ContextIndexSearcher; @@ -36,6 +37,7 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -49,9 +51,10 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri public GlobalOrdinalsSignificantTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, - SignificantTermsAggregatorFactory termsAggFactory, Map metaData) throws IOException { + SignificantTermsAggregatorFactory termsAggFactory, List reducers, Map metaData) throws IOException { - super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent, SubAggCollectionMode.DEPTH_FIRST, false, metaData); + super(name, factories, valuesSource, maxOrd, null, bucketCountThresholds, includeExclude, aggregationContext, parent, + SubAggCollectionMode.DEPTH_FIRST, false, reducers, metaData); this.termsAggFactory = termsAggFactory; } @@ -62,8 +65,8 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri @Override public void collect(int doc, long bucket) throws IOException { super.collect(doc, bucket); - numCollectedDocs++; - } + numCollectedDocs++; + } }; } @@ -124,7 +127,9 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri list[i] = bucket; } - return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), metaData()); + return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), reducers(), + metaData()); } @Override @@ -133,7 +138,9 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri ContextIndexSearcher searcher = context.searchContext().searcher(); IndexReader topReader = searcher.getIndexReader(); int supersetSize = topReader.numDocs(); - return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Collections.emptyList(), metaData()); + return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), + Collections. emptyList(), reducers(), metaData()); } @Override @@ -145,8 +152,8 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri private final LongHash bucketOrds; - public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, Map metaData) throws IOException { - super(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggFactory, metaData); + public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, List reducers, Map metaData) throws IOException { + super(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggFactory, reducers, metaData); bucketOrds = new LongHash(1, aggregationContext.bigArrays()); } @@ -157,20 +164,20 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri @Override public void collect(int doc, long bucket) throws IOException { assert bucket == 0; - numCollectedDocs++; - globalOrds.setDocument(doc); - final int numOrds = globalOrds.cardinality(); - for (int i = 0; i < numOrds; i++) { - final long globalOrd = globalOrds.ordAt(i); - long bucketOrd = bucketOrds.add(globalOrd); - if (bucketOrd < 0) { - bucketOrd = -1 - bucketOrd; + numCollectedDocs++; + globalOrds.setDocument(doc); + final int numOrds = globalOrds.cardinality(); + for (int i = 0; i < numOrds; i++) { + final long globalOrd = globalOrds.ordAt(i); + long bucketOrd = bucketOrds.add(globalOrd); + if (bucketOrd < 0) { + bucketOrd = -1 - bucketOrd; collectExistingBucket(sub, doc, bucketOrd); - } else { + } else { collectBucket(sub, doc, bucketOrd); } - } } + } }; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java index 53949937bbb..91ad85364e7 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java @@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.util.ArrayList; import java.util.Arrays; @@ -122,8 +123,9 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggreg } } - protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets, Map metaData) { - super(name, metaData); + protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount, + SignificanceHeuristic significanceHeuristic, List buckets, List reducers, Map metaData) { + super(name, reducers, metaData); this.requiredSize = requiredSize; this.minDocCount = minDocCount; this.buckets = buckets; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java index c4f97942ef2..bfb0b70458b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java @@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -159,9 +160,11 @@ public class SignificantLongTerms extends InternalSignificantTerms { } // for serialization public SignificantLongTerms(long subsetSize, long supersetSize, String name, @Nullable ValueFormatter formatter, - int requiredSize, long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets, Map metaData) { + int requiredSize, + long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets, + List reducers, Map metaData) { - super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, metaData); + super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, reducers, metaData); this.formatter = formatter; } @@ -173,7 +176,8 @@ public class SignificantLongTerms extends InternalSignificantTerms { @Override InternalSignificantTerms newAggregation(long subsetSize, long supersetSize, List buckets) { - return new SignificantLongTerms(subsetSize, supersetSize, getName(), formatter, requiredSize, minDocCount, significanceHeuristic, buckets, getMetaData()); + return new SignificantLongTerms(subsetSize, supersetSize, getName(), formatter, requiredSize, minDocCount, significanceHeuristic, + buckets, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java index 0b8d5813721..f67c533956c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java @@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.bucket.terms.LongTermsAggregator; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.format.ValueFormat; @@ -36,6 +37,7 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -45,9 +47,12 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator { public SignificantLongTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @Nullable ValueFormat format, BucketCountThresholds bucketCountThresholds, - AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, IncludeExclude.LongFilter includeExclude, Map metaData) throws IOException { + AggregationContext aggregationContext, + Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, IncludeExclude.LongFilter includeExclude, + List reducers, Map metaData) throws IOException { - super(name, factories, valuesSource, format, null, bucketCountThresholds, aggregationContext, parent, SubAggCollectionMode.DEPTH_FIRST, false, includeExclude, metaData); + super(name, factories, valuesSource, format, null, bucketCountThresholds, aggregationContext, parent, + SubAggCollectionMode.DEPTH_FIRST, false, includeExclude, reducers, metaData); this.termsAggFactory = termsAggFactory; } @@ -102,7 +107,9 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator { bucket.aggregations = bucketAggregations(bucket.bucketOrd); list[i] = bucket; } - return new SignificantLongTerms(subsetSize, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), metaData()); + return new SignificantLongTerms(subsetSize, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), reducers(), + metaData()); } @Override @@ -111,7 +118,9 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator { ContextIndexSearcher searcher = context.searchContext().searcher(); IndexReader topReader = searcher.getIndexReader(); int supersetSize = topReader.numDocs(); - return new SignificantLongTerms(0, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Collections.emptyList(), metaData()); + return new SignificantLongTerms(0, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), + Collections. emptyList(), reducers(), metaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java index ff4d5c94e05..295fadd41b9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.ArrayList; @@ -152,8 +153,10 @@ public class SignificantStringTerms extends InternalSignificantTerms { SignificantStringTerms() {} // for serialization public SignificantStringTerms(long subsetSize, long supersetSize, String name, int requiredSize, - long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets, Map metaData) { - super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, metaData); + long minDocCount, + SignificanceHeuristic significanceHeuristic, List buckets, List reducers, + Map metaData) { + super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, reducers, metaData); } @Override @@ -164,7 +167,8 @@ public class SignificantStringTerms extends InternalSignificantTerms { @Override InternalSignificantTerms newAggregation(long subsetSize, long supersetSize, List buckets) { - return new SignificantStringTerms(subsetSize, supersetSize, getName(), requiredSize, minDocCount, significanceHeuristic, buckets, getMetaData()); + return new SignificantStringTerms(subsetSize, supersetSize, getName(), requiredSize, minDocCount, significanceHeuristic, buckets, + reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java index fb65fd7d6f8..2638e82c607 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java @@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.bucket.terms.StringTermsAggregator; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.ContextIndexSearcher; @@ -35,6 +36,7 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -48,9 +50,11 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator { public SignificantStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, - SignificantTermsAggregatorFactory termsAggFactory, Map metaData) throws IOException { + SignificantTermsAggregatorFactory termsAggFactory, List reducers, Map metaData) + throws IOException { - super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent, SubAggCollectionMode.DEPTH_FIRST, false, metaData); + super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent, + SubAggCollectionMode.DEPTH_FIRST, false, reducers, metaData); this.termsAggFactory = termsAggFactory; } @@ -107,7 +111,9 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator { list[i] = bucket; } - return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), metaData()); + return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), reducers(), + metaData()); } @Override @@ -116,7 +122,9 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator { ContextIndexSearcher searcher = context.searchContext().searcher(); IndexReader topReader = searcher.getIndexReader(); int supersetSize = topReader.numDocs(); - return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Collections.emptyList(), metaData()); + return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), + Collections. emptyList(), reducers(), metaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index 7536bd05b69..7b85a76b21f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -46,6 +47,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -64,8 +66,10 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac @Override Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, - AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, Map metaData) throws IOException { - return new SignificantStringTermsAggregator(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggregatorFactory, metaData); + AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, + List reducers, Map metaData) throws IOException { + return new SignificantStringTermsAggregator(name, factories, valuesSource, bucketCountThresholds, includeExclude, + aggregationContext, parent, termsAggregatorFactory, reducers, metaData); } }, @@ -74,10 +78,11 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac @Override Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, - AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, Map metaData) throws IOException { + AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, + List reducers, Map metaData) throws IOException { ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) valuesSource; IndexSearcher indexSearcher = aggregationContext.searchContext().searcher(); - return new GlobalOrdinalsSignificantTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggregatorFactory, metaData); + return new GlobalOrdinalsSignificantTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggregatorFactory, reducers, metaData); } }, @@ -86,8 +91,11 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac @Override Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, - AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, Map metaData) throws IOException { - return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggregatorFactory, metaData); + AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, + List reducers, Map metaData) throws IOException { + return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories, + (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, includeExclude, + aggregationContext, parent, termsAggregatorFactory, reducers, metaData); } }; @@ -108,7 +116,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac abstract Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, - AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, Map metaData) throws IOException; + AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, + List reducers, Map metaData) throws IOException; @Override public String toString() { @@ -145,9 +154,11 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - final InternalAggregation aggregation = new UnmappedSignificantTerms(name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), metaData); - return new NonCollectingAggregator(name, aggregationContext, parent, metaData) { + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + final InternalAggregation aggregation = new UnmappedSignificantTerms(name, bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), reducers, metaData); + return new NonCollectingAggregator(name, aggregationContext, parent, reducers, metaData) { @Override public InternalAggregation buildEmptyAggregation() { return aggregation; @@ -156,7 +167,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } @Override - protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, aggregationContext, parent); } @@ -179,7 +191,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } } assert execution != null; - return execution.create(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, this, metaData); + return execution.create(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, this, + reducers, metaData); } @@ -197,7 +210,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac if (includeExclude != null) { longFilter = includeExclude.convertToLongFilter(); } - return new SignificantLongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(), bucketCountThresholds, aggregationContext, parent, this, longFilter, metaData); + return new SignificantLongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(), + bucketCountThresholds, aggregationContext, parent, this, longFilter, reducers, metaData); } throw new AggregationExecutionException("sigfnificant_terms aggregation cannot be applied to field [" + config.fieldContext().field() + diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java index bb812741913..f382237dacf 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java @@ -24,9 +24,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; -import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -56,10 +56,10 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms { UnmappedSignificantTerms() {} // for serialization - public UnmappedSignificantTerms(String name, int requiredSize, long minDocCount, Map metaData) { + public UnmappedSignificantTerms(String name, int requiredSize, long minDocCount, List reducers, Map metaData) { //We pass zero for index/subset sizes because for the purpose of significant term analysis // we assume an unmapped index's size is irrelevant to the proceedings. - super(0, 0, name, requiredSize, minDocCount, JLHScore.INSTANCE, BUCKETS, metaData); + super(0, 0, name, requiredSize, minDocCount, JLHScore.INSTANCE, BUCKETS, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java index e87821e4e38..363895c5a39 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java @@ -22,27 +22,30 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Map; abstract class AbstractStringTermsAggregator extends TermsAggregator { protected final boolean showTermDocCountError; - public AbstractStringTermsAggregator(String name, AggregatorFactories factories, - AggregationContext context, Aggregator parent, - Terms.Order order, BucketCountThresholds bucketCountThresholds, - SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map metaData) throws IOException { - super(name, factories, context, parent, bucketCountThresholds, order, subAggCollectMode, metaData); + public AbstractStringTermsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, + Terms.Order order, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode subAggCollectMode, + boolean showTermDocCountError, List reducers, Map metaData) throws IOException { + super(name, factories, context, parent, bucketCountThresholds, order, subAggCollectMode, reducers, metaData); this.showTermDocCountError = showTermDocCountError; } @Override public InternalAggregation buildEmptyAggregation() { - return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Collections.emptyList(), showTermDocCountError, 0, 0, metaData()); + return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), + bucketCountThresholds.getMinDocCount(), Collections. emptyList(), showTermDocCountError, 0, 0, + reducers(), metaData()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index c004f6e1e90..0e6ca403407 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -156,8 +157,11 @@ public class DoubleTerms extends InternalTerms { DoubleTerms() {} // for serialization - public DoubleTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData) { - super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData); + public DoubleTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, + long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, + List reducers, Map metaData) { + super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers, + metaData); this.formatter = formatter; } @@ -167,8 +171,10 @@ public class DoubleTerms extends InternalTerms { } @Override - protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData) { - return new DoubleTerms(name, order, formatter, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData); + protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, + long docCountError, long otherDocCount, List reducers, Map metaData) { + return new DoubleTerms(name, order, formatter, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, + otherDocCount, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java index e71be14dc5b..ea98734b94e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -33,6 +34,7 @@ import org.elasticsearch.search.aggregations.support.format.ValueFormat; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; /** @@ -41,8 +43,11 @@ import java.util.Map; public class DoubleTermsAggregator extends LongTermsAggregator { public DoubleTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @Nullable ValueFormat format, - Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, Map metaData) throws IOException { - super(name, factories, valuesSource, format, order, bucketCountThresholds, aggregationContext, parent, collectionMode, showTermDocCountError, longFilter, metaData); + Terms.Order order, BucketCountThresholds bucketCountThresholds, + AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, + IncludeExclude.LongFilter longFilter, List reducers, Map metaData) throws IOException { + super(name, factories, valuesSource, format, order, bucketCountThresholds, aggregationContext, parent, collectionMode, + showTermDocCountError, longFilter, reducers, metaData); } @Override @@ -73,7 +78,9 @@ public class DoubleTermsAggregator extends LongTermsAggregator { for (int i = 0; i < buckets.length; ++i) { buckets[i] = convertToDouble(buckets[i]); } - return new DoubleTerms(terms.getName(), terms.order, terms.formatter, terms.requiredSize, terms.shardSize, terms.minDocCount, Arrays.asList(buckets), terms.showTermDocCountError, terms.docCountError, terms.otherDocCount, terms.getMetaData()); + return new DoubleTerms(terms.getName(), terms.order, terms.formatter, terms.requiredSize, terms.shardSize, terms.minDocCount, + Arrays.asList(buckets), terms.showTermDocCountError, terms.docCountError, terms.otherDocCount, terms.reducers(), + terms.getMetaData()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 5b0ad6082b8..bff09e07e4a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -44,11 +44,13 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; /** @@ -71,8 +73,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, Terms.Order order, BucketCountThresholds bucketCountThresholds, - IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, metaData); + IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List reducers, Map metaData) throws IOException { + super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, reducers, reducers, metaData); this.valuesSource = valuesSource; this.includeExclude = includeExclude; } @@ -196,7 +198,9 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr bucket.docCountError = 0; } - return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, metaData()); + return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), + bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, reducers(), + metaData()); } /** @@ -261,8 +265,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, Terms.Order order, BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, AggregationContext aggregationContext, - Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, Map metaData) throws IOException { - super(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, collectionMode, showTermDocCountError, metaData); + Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List reducers, Map metaData) throws IOException { + super(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, collectionMode, showTermDocCountError, reducers, metaData); bucketOrds = new LongHash(1, aggregationContext.bigArrays()); } @@ -329,8 +333,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private RandomAccessOrds segmentOrds; public LowCardinality(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, - Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, Map metaData) throws IOException { - super(name, factories, valuesSource, order, bucketCountThresholds, null, aggregationContext, parent, collectionMode, showTermDocCountError, metaData); + Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List reducers, Map metaData) throws IOException { + super(name, factories, valuesSource, order, bucketCountThresholds, null, aggregationContext, parent, collectionMode, showTermDocCountError, reducers, metaData); assert factories == null || factories.count() == 0; this.segmentDocCounts = context.bigArrays().newIntArray(1, true); } @@ -409,7 +413,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr } final long ord = i - 1; // remember we do +1 when counting final long globalOrd = mapping == null ? ord : mapping.getGlobalOrd(ord); - incrementBucketDocCount(globalOrd, inc); + incrementBucketDocCount(globalOrd, inc); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index a6ca9d4400c..75b82d4778c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.util.ArrayList; @@ -121,8 +122,9 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple protected InternalTerms() {} // for serialization - protected InternalTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData) { - super(name, metaData); + protected InternalTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, List buckets, + boolean showTermDocCountError, long docCountError, long otherDocCount, List reducers, Map metaData) { + super(name, reducers, metaData); this.order = order; this.requiredSize = requiredSize; this.shardSize = shardSize; @@ -220,9 +222,10 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple } else { docCountError = aggregations.size() == 1 ? 0 : sumDocCountError; } - return newAggregation(name, Arrays.asList(list), showTermDocCountError, docCountError, otherDocCount, getMetaData()); + return newAggregation(name, Arrays.asList(list), showTermDocCountError, docCountError, otherDocCount, reducers(), getMetaData()); } - protected abstract InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData); + protected abstract InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, + long otherDocCount, List reducers, Map metaData); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 1a7c2b4d0ee..b8edad21dd9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -155,8 +156,11 @@ public class LongTerms extends InternalTerms { LongTerms() {} // for serialization - public LongTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData) { - super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData); + public LongTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, long minDocCount, + List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, + List reducers, Map metaData) { + super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers, + metaData); this.formatter = formatter; } @@ -166,8 +170,10 @@ public class LongTerms extends InternalTerms { } @Override - protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData) { - return new LongTerms(name, order, formatter, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData); + protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, + long docCountError, long otherDocCount, List reducers, Map metaData) { + return new LongTerms(name, order, formatter, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, + otherDocCount, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java index a570b06360f..ef1150f1d7e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude.LongFilter; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.format.ValueFormat; @@ -39,6 +40,7 @@ import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -53,15 +55,17 @@ public class LongTermsAggregator extends TermsAggregator { private LongFilter longFilter; public LongTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @Nullable ValueFormat format, - Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, bucketCountThresholds, order, subAggCollectMode, metaData); + Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, + SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, + List reducers, Map metaData) throws IOException { + super(name, factories, aggregationContext, parent, bucketCountThresholds, order, subAggCollectMode, reducers, metaData); this.valuesSource = valuesSource; this.showTermDocCountError = showTermDocCountError; this.formatter = format != null ? format.formatter() : null; this.longFilter = longFilter; bucketOrds = new LongHash(1, aggregationContext.bigArrays()); } - + @Override public boolean needsScores() { return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); @@ -76,30 +80,30 @@ public class LongTermsAggregator extends TermsAggregator { final LeafBucketCollector sub) throws IOException { final SortedNumericDocValues values = getValues(valuesSource, ctx); return new LeafBucketCollectorBase(sub, values) { - @Override - public void collect(int doc, long owningBucketOrdinal) throws IOException { - assert owningBucketOrdinal == 0; - values.setDocument(doc); - final int valuesCount = values.count(); + @Override + public void collect(int doc, long owningBucketOrdinal) throws IOException { + assert owningBucketOrdinal == 0; + values.setDocument(doc); + final int valuesCount = values.count(); - long previous = Long.MAX_VALUE; - for (int i = 0; i < valuesCount; ++i) { - final long val = values.valueAt(i); - if (previous != val || i == 0) { - if ((longFilter == null) || (longFilter.accept(val))) { - long bucketOrdinal = bucketOrds.add(val); - if (bucketOrdinal < 0) { // already seen - bucketOrdinal = - 1 - bucketOrdinal; + long previous = Long.MAX_VALUE; + for (int i = 0; i < valuesCount; ++i) { + final long val = values.valueAt(i); + if (previous != val || i == 0) { + if ((longFilter == null) || (longFilter.accept(val))) { + long bucketOrdinal = bucketOrds.add(val); + if (bucketOrdinal < 0) { // already seen + bucketOrdinal = - 1 - bucketOrdinal; collectExistingBucket(sub, doc, bucketOrdinal); - } else { + } else { collectBucket(sub, doc, bucketOrdinal); - } - } - - previous = val; - } + } } + + previous = val; } + } + } }; } @@ -148,7 +152,7 @@ public class LongTermsAggregator extends TermsAggregator { list[i] = bucket; otherDocCount -= bucket.docCount; } - + runDeferredCollections(survivingBucketOrds); //Now build the aggs @@ -156,14 +160,18 @@ public class LongTermsAggregator extends TermsAggregator { list[i].aggregations = bucketAggregations(list[i].bucketOrd); list[i].docCountError = 0; } - - return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, metaData()); + + return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), + bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, reducers(), + metaData()); } - - + + @Override public InternalAggregation buildEmptyAggregation() { - return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Collections.emptyList(), showTermDocCountError, 0, 0, metaData()); + return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), + bucketCountThresholds.getMinDocCount(), Collections. emptyList(), showTermDocCountError, 0, 0, + reducers(), metaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index 7caec199df3..ef9ec91e80c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.ArrayList; @@ -150,8 +151,11 @@ public class StringTerms extends InternalTerms { StringTerms() {} // for serialization - public StringTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData) { - super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData); + public StringTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, + List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, + List reducers, Map metaData) { + super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers, + metaData); } @Override @@ -160,8 +164,10 @@ public class StringTerms extends InternalTerms { } @Override - protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData) { - return new StringTerms(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData); + protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, + long docCountError, long otherDocCount, List reducers, Map metaData) { + return new StringTerms(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, + otherDocCount, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java index 9d731a25529..4d5310b4c19 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java @@ -31,11 +31,13 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; /** @@ -49,9 +51,12 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator { public StringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, Terms.Order order, BucketCountThresholds bucketCountThresholds, - IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, Map metaData) throws IOException { + IncludeExclude includeExclude, AggregationContext aggregationContext, + Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List reducers, + Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, metaData); + super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, reducers, + metaData); this.valuesSource = valuesSource; this.includeExclude = includeExclude; bucketOrds = new BytesRefHash(1, aggregationContext.bigArrays()); @@ -158,7 +163,9 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator { bucket.docCountError = 0; } - return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, metaData()); + return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), + bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, reducers(), + metaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java index ef254bb0594..4c4ad7ee31c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -28,11 +28,13 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.bucket.terms.InternalOrder.Aggregation; import org.elasticsearch.search.aggregations.bucket.terms.InternalOrder.CompoundOrder; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -135,8 +137,8 @@ public abstract class TermsAggregator extends BucketsAggregator { protected final Set aggsUsedForSorting = new HashSet<>(); protected final SubAggCollectionMode collectMode; - public TermsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, BucketCountThresholds bucketCountThresholds, Terms.Order order, SubAggCollectionMode collectMode, Map metaData) throws IOException { - super(name, factories, context, parent, metaData); + public TermsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, BucketCountThresholds bucketCountThresholds, Terms.Order order, SubAggCollectionMode collectMode, List reducers, Map metaData) throws IOException { + super(name, factories, context, parent, reducers, metaData); this.bucketCountThresholds = bucketCountThresholds; this.order = InternalOrder.validate(order, this); this.collectMode = collectMode; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 6fbbd306411..a9cb4ea19cb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -1,4 +1,4 @@ -/* +List metaData) throws IOException { - return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData); + AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, + boolean showTermDocCountError, List reducers, Map metaData) throws IOException { + return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, + aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); } @Override @@ -64,8 +68,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory metaData) throws IOException { - return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData); + AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List reducers, Map metaData) throws IOException { + return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); } @Override @@ -79,8 +83,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory metaData) throws IOException { - return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData); + AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List reducers, Map metaData) throws IOException { + return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); } @Override @@ -93,11 +97,12 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory metaData) throws IOException { + AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, + boolean showTermDocCountError, List reducers, Map metaData) throws IOException { if (includeExclude != null || factories.count() > 0) { - return GLOBAL_ORDINALS.create(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData); + return GLOBAL_ORDINALS.create(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); } - return new GlobalOrdinalsStringTermsAggregator.LowCardinality(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData); + return new GlobalOrdinalsStringTermsAggregator.LowCardinality(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData); } @Override @@ -124,7 +129,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory metaData) throws IOException; + SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List reducers, Map metaData) throws IOException; abstract boolean needsGlobalOrdinals(); @@ -152,9 +157,11 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory metaData) throws IOException { - final InternalAggregation aggregation = new UnmappedTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), metaData); - return new NonCollectingAggregator(name, aggregationContext, parent, factories, metaData) { + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + final InternalAggregation aggregation = new UnmappedTerms(name, order, bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), reducers, metaData); + return new NonCollectingAggregator(name, aggregationContext, parent, factories, reducers, metaData) { { // even in the case of an unmapped aggregator, validate the order InternalOrder.validate(order, this); @@ -167,7 +174,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, aggregationContext, parent); } @@ -217,7 +225,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory metaData) { - super(name, order, requiredSize, shardSize, minDocCount, BUCKETS, false, 0, 0, metaData); + public UnmappedTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, List reducers, + Map metaData) { + super(name, order, requiredSize, shardSize, minDocCount, BUCKETS, false, 0, 0, reducers, metaData); } @Override @@ -91,7 +93,8 @@ public class UnmappedTerms extends InternalTerms { } @Override - protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map metaData) { + protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, + long otherDocCount, List reducers, Map metaData) { throw new UnsupportedOperationException("How did you get there?"); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java index e3a9476e56a..8facf4c1ae5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java @@ -20,14 +20,16 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import java.util.List; import java.util.Map; public abstract class InternalMetricsAggregation extends InternalAggregation { protected InternalMetricsAggregation() {} // for serialization - protected InternalMetricsAggregation(String name, Map metaData) { - super(name, metaData); + protected InternalMetricsAggregation(String name, List reducers, Map metaData) { + super(name, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java index 0c301e30bde..e9323615fc3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.util.List; @@ -35,8 +36,8 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA protected SingleValue() {} - protected SingleValue(String name, Map metaData) { - super(name, metaData); + protected SingleValue(String name, List reducers, Map metaData) { + super(name, reducers, metaData); } public String getValueAsString() { @@ -64,8 +65,8 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA protected MultiValue() {} - protected MultiValue(String name, Map metaData) { - super(name, metaData); + protected MultiValue(String name, List reducers, Map metaData) { + super(name, reducers, metaData); } public abstract double value(String name); @@ -92,8 +93,8 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA private InternalNumericMetricsAggregation() {} // for serialization - private InternalNumericMetricsAggregation(String name, Map metaData) { - super(name, metaData); + private InternalNumericMetricsAggregation(String name, List reducers, Map metaData) { + super(name, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java index f29e063d61a..f3160cf464c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java @@ -22,14 +22,17 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorBase; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; public abstract class MetricsAggregator extends AggregatorBase { - protected MetricsAggregator(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, AggregatorFactories.EMPTY, context, parent, metaData); + protected MetricsAggregator(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + super(name, AggregatorFactories.EMPTY, context, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericMetricsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericMetricsAggregator.java index 66adf3ed74e..6342df383ed 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericMetricsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericMetricsAggregator.java @@ -19,9 +19,11 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -29,14 +31,16 @@ import java.util.Map; */ public abstract class NumericMetricsAggregator extends MetricsAggregator { - private NumericMetricsAggregator(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + private NumericMetricsAggregator(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); } public static abstract class SingleValue extends NumericMetricsAggregator { - protected SingleValue(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + protected SingleValue(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); } public abstract double metric(long owningBucketOrd); @@ -44,8 +48,9 @@ public abstract class NumericMetricsAggregator extends MetricsAggregator { public static abstract class MultiValue extends NumericMetricsAggregator { - protected MultiValue(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + protected MultiValue(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); } public abstract boolean hasMetric(String name); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java index 94a2e26c7b8..3f0035330b8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java @@ -30,6 +30,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -37,6 +38,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -51,8 +53,9 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { ValueFormatter formatter; public AvgAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name,context, parent, metaData); + AggregationContext context, + Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; if (valuesSource != null) { @@ -72,22 +75,22 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { counts = bigArrays.grow(counts, bucket + 1); sums = bigArrays.grow(sums, bucket + 1); - values.setDocument(doc); - final int valueCount = values.count(); + values.setDocument(doc); + final int valueCount = values.count(); counts.increment(bucket, valueCount); - double sum = 0; - for (int i = 0; i < valueCount; i++) { - sum += values.valueAt(i); - } + double sum = 0; + for (int i = 0; i < valueCount; i++) { + sum += values.valueAt(i); + } sums.increment(bucket, sum); } }; @@ -103,12 +106,12 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { if (valuesSource == null || bucket >= sums.size()) { return buildEmptyAggregation(); } - return new InternalAvg(name, sums.get(bucket), counts.get(bucket), formatter, metaData()); + return new InternalAvg(name, sums.get(bucket), counts.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalAvg(name, 0.0, 0l, formatter, metaData()); + return new InternalAvg(name, 0.0, 0l, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -118,13 +121,15 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new AvgAggregator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new AvgAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new AvgAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new AvgAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java index 8c795a55332..f30cee32b31 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java @@ -25,10 +25,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -56,8 +58,9 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i InternalAvg() {} // for serialization - public InternalAvg(String name, double sum, long count, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + public InternalAvg(String name, double sum, long count, @Nullable ValueFormatter formatter, List reducers, + Map metaData) { + super(name, reducers, metaData); this.sum = sum; this.count = count; this.valueFormatter = formatter; @@ -85,7 +88,7 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i count += ((InternalAvg) aggregation).count; sum += ((InternalAvg) aggregation).sum; } - return new InternalAvg(getName(), sum, count, valueFormatter, getMetaData()); + return new InternalAvg(getName(), sum, count, valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java index e4c2acce93c..98c911c2025 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java @@ -42,11 +42,13 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -66,8 +68,8 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue private ValueFormatter formatter; public CardinalityAggregator(String name, ValuesSource valuesSource, boolean rehash, int precision, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + AggregationContext context, Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.rehash = rehash; this.precision = precision; @@ -156,12 +158,12 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue // this Aggregator (and its HLL++ counters) is released. HyperLogLogPlusPlus copy = new HyperLogLogPlusPlus(precision, BigArrays.NON_RECYCLING_INSTANCE, 1); copy.merge(0, counts, owningBucketOrdinal); - return new InternalCardinality(name, copy, formatter, metaData()); + return new InternalCardinality(name, copy, formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalCardinality(name, null, formatter, metaData()); + return new InternalCardinality(name, null, formatter, reducers(), metaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java index 2d063dd5bd9..d2341bb2647 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java @@ -22,12 +22,14 @@ package org.elasticsearch.search.aggregations.metrics.cardinality; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; +import java.util.List; import java.util.Map; final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory { @@ -46,16 +48,19 @@ final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory metaData) throws IOException { - return new CardinalityAggregator(name, null, true, precision(parent), config.formatter(), context, parent, metaData); + protected Aggregator createUnmapped(AggregationContext context, Aggregator parent, List reducers, Map metaData) + throws IOException { + return new CardinalityAggregator(name, null, true, precision(parent), config.formatter(), context, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext context, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { if (!(valuesSource instanceof ValuesSource.Numeric) && !rehash) { throw new AggregationExecutionException("Turning off rehashing for cardinality aggregation [" + name + "] on non-numeric values in not allowed"); } - return new CardinalityAggregator(name, valuesSource, rehash, precision(parent), config.formatter(), context, parent, metaData); + return new CardinalityAggregator(name, valuesSource, rehash, precision(parent), config.formatter(), context, parent, reducers, + metaData); } /* diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java index c8341135fb4..434140e74f6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -53,8 +54,9 @@ public final class InternalCardinality extends InternalNumericMetricsAggregation private HyperLogLogPlusPlus counts; - InternalCardinality(String name, HyperLogLogPlusPlus counts, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + InternalCardinality(String name, HyperLogLogPlusPlus counts, @Nullable ValueFormatter formatter, List reducers, + Map metaData) { + super(name, reducers, metaData); this.counts = counts; this.valueFormatter = formatter; } @@ -107,7 +109,7 @@ public final class InternalCardinality extends InternalNumericMetricsAggregation if (cardinality.counts != null) { if (reduced == null) { reduced = new InternalCardinality(name, new HyperLogLogPlusPlus(cardinality.counts.precision(), - BigArrays.NON_RECYCLING_INSTANCE, 1), this.valueFormatter, getMetaData()); + BigArrays.NON_RECYCLING_INSTANCE, 1), this.valueFormatter, reducers(), getMetaData()); } reduced.merge(cardinality); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java index 44e7fd195c0..53e5c534094 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java @@ -30,12 +30,14 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; +import java.util.List; import java.util.Map; public final class GeoBoundsAggregator extends MetricsAggregator { @@ -50,8 +52,10 @@ public final class GeoBoundsAggregator extends MetricsAggregator { DoubleArray negRights; protected GeoBoundsAggregator(String name, AggregationContext aggregationContext, - Aggregator parent, ValuesSource.GeoPoint valuesSource, boolean wrapLongitude, Map metaData) throws IOException { - super(name, aggregationContext, parent, metaData); + Aggregator parent, + ValuesSource.GeoPoint valuesSource, boolean wrapLongitude, List reducers, Map metaData) + throws IOException { + super(name, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.wrapLongitude = wrapLongitude; if (valuesSource != null) { @@ -149,13 +153,13 @@ public final class GeoBoundsAggregator extends MetricsAggregator { double posRight = posRights.get(owningBucketOrdinal); double negLeft = negLefts.get(owningBucketOrdinal); double negRight = negRights.get(owningBucketOrdinal); - return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, metaData()); + return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { return new InternalGeoBounds(name, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, - Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, wrapLongitude, metaData()); + Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, wrapLongitude, reducers(), metaData()); } @Override @@ -173,14 +177,16 @@ public final class GeoBoundsAggregator extends MetricsAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new GeoBoundsAggregator(name, aggregationContext, parent, null, wrapLongitude, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new GeoBoundsAggregator(name, aggregationContext, parent, null, wrapLongitude, reducers, metaData); } @Override protected Aggregator doCreateInternal(ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, - Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new GeoBoundsAggregator(name, aggregationContext, parent, valuesSource, wrapLongitude, metaData); + Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new GeoBoundsAggregator(name, aggregationContext, parent, valuesSource, wrapLongitude, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java index eb6a61c960d..f67734bdd09 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.List; @@ -56,8 +57,9 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo } InternalGeoBounds(String name, double top, double bottom, double posLeft, double posRight, - double negLeft, double negRight, boolean wrapLongitude, Map metaData) { - super(name, metaData); + double negLeft, double negRight, + boolean wrapLongitude, List reducers, Map metaData) { + super(name, reducers, metaData); this.top = top; this.bottom = bottom; this.posLeft = posLeft; @@ -103,7 +105,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo negRight = bounds.negRight; } } - return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, getMetaData()); + return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java index 7cae1444c63..a181db30d98 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java @@ -25,10 +25,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -55,8 +57,8 @@ public class InternalMax extends InternalNumericMetricsAggregation.SingleValue i InternalMax() {} // for serialization - public InternalMax(String name, double max, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + public InternalMax(String name, double max, @Nullable ValueFormatter formatter, List reducers, Map metaData) { + super(name, reducers, metaData); this.valueFormatter = formatter; this.max = max; } @@ -81,7 +83,7 @@ public class InternalMax extends InternalNumericMetricsAggregation.SingleValue i for (InternalAggregation aggregation : reduceContext.aggregations()) { max = Math.max(max, ((InternalMax) aggregation).max); } - return new InternalMax(name, max, valueFormatter, getMetaData()); + return new InternalMax(name, max, valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java index 88edddc286c..0c97ba38ac3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -38,6 +39,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -51,8 +53,9 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray maxes; public MaxAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + AggregationContext context, + Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; if (valuesSource != null) { @@ -71,22 +74,22 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); final NumericDoubleValues values = MultiValueMode.MAX.select(allValues, Double.NEGATIVE_INFINITY); return new LeafBucketCollectorBase(sub, allValues) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { if (bucket >= maxes.size()) { - long from = maxes.size(); + long from = maxes.size(); maxes = bigArrays.grow(maxes, bucket + 1); - maxes.fill(from, maxes.size(), Double.NEGATIVE_INFINITY); - } - final double value = values.get(doc); + maxes.fill(from, maxes.size(), Double.NEGATIVE_INFINITY); + } + final double value = values.get(doc); double max = maxes.get(bucket); - max = Math.max(max, value); + max = Math.max(max, value); maxes.set(bucket, max); } @@ -103,12 +106,12 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { if (valuesSource == null || bucket >= maxes.size()) { return buildEmptyAggregation(); } - return new InternalMax(name, maxes.get(bucket), formatter, metaData()); + return new InternalMax(name, maxes.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalMax(name, Double.NEGATIVE_INFINITY, formatter, metaData()); + return new InternalMax(name, Double.NEGATIVE_INFINITY, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -118,13 +121,15 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new MaxAggregator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new MaxAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new MaxAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new MaxAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java index 0974314826c..9917f966403 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java @@ -25,10 +25,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -56,8 +58,8 @@ public class InternalMin extends InternalNumericMetricsAggregation.SingleValue i InternalMin() {} // for serialization - public InternalMin(String name, double min, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + public InternalMin(String name, double min, @Nullable ValueFormatter formatter, List reducers, Map metaData) { + super(name, reducers, metaData); this.min = min; this.valueFormatter = formatter; } @@ -82,7 +84,7 @@ public class InternalMin extends InternalNumericMetricsAggregation.SingleValue i for (InternalAggregation aggregation : reduceContext.aggregations()) { min = Math.min(min, ((InternalMin) aggregation).min); } - return new InternalMin(getName(), min, this.valueFormatter, getMetaData()); + return new InternalMin(getName(), min, this.valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java index 438272e2bc1..c80b7b8f064 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -38,6 +39,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -51,8 +53,9 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray mins; public MinAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + AggregationContext context, + Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; if (valuesSource != null) { mins = context.bigArrays().newDoubleArray(1, false); @@ -71,22 +74,22 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); final NumericDoubleValues values = MultiValueMode.MIN.select(allValues, Double.POSITIVE_INFINITY); return new LeafBucketCollectorBase(sub, allValues) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { if (bucket >= mins.size()) { - long from = mins.size(); + long from = mins.size(); mins = bigArrays.grow(mins, bucket + 1); - mins.fill(from, mins.size(), Double.POSITIVE_INFINITY); - } - final double value = values.get(doc); + mins.fill(from, mins.size(), Double.POSITIVE_INFINITY); + } + final double value = values.get(doc); double min = mins.get(bucket); - min = Math.min(min, value); + min = Math.min(min, value); mins.set(bucket, min); } @@ -103,12 +106,12 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { if (valuesSource == null || bucket >= mins.size()) { return buildEmptyAggregation(); } - return new InternalMin(name, mins.get(bucket), formatter, metaData()); + return new InternalMin(name, mins.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalMin(name, Double.POSITIVE_INFINITY, formatter, metaData()); + return new InternalMin(name, Double.POSITIVE_INFINITY, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -118,13 +121,15 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new MinAggregator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new MinAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new MinAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new MinAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java index 19d056e00cd..7ae2ad9ec60 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -44,8 +45,9 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega AbstractInternalPercentiles() {} // for serialization public AbstractInternalPercentiles(String name, double[] keys, TDigestState state, boolean keyed, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { - super(name, metaData); + super(name, reducers, metaData); this.keys = keys; this.state = state; this.keyed = keyed; @@ -70,10 +72,11 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega } merged.add(percentiles.state); } - return createReduced(getName(), keys, merged, keyed, getMetaData()); + return createReduced(getName(), keys, merged, keyed, reducers(), getMetaData()); } - protected abstract AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, Map metaData); + protected abstract AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, + List reducers, Map metaData); @Override protected void doReadFrom(StreamInput in) throws IOException { diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java index 31a097f0b47..8dd75b59110 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java @@ -31,11 +31,13 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; public abstract class AbstractPercentilesAggregator extends NumericMetricsAggregator.MultiValue { @@ -53,8 +55,9 @@ public abstract class AbstractPercentilesAggregator extends NumericMetricsAggreg public AbstractPercentilesAggregator(String name, ValuesSource.Numeric valuesSource, AggregationContext context, Aggregator parent, double[] keys, double compression, boolean keyed, - @Nullable ValueFormatter formatter, Map metaData) throws IOException { - super(name, context, parent, metaData); + @Nullable ValueFormatter formatter, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.keyed = keyed; this.formatter = formatter; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentileRanks.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentileRanks.java index 190ca363ed3..687e1822b64 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentileRanks.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentileRanks.java @@ -24,10 +24,12 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; import java.util.Iterator; +import java.util.List; import java.util.Map; /** @@ -53,8 +55,9 @@ public class InternalPercentileRanks extends AbstractInternalPercentiles impleme InternalPercentileRanks() {} // for serialization public InternalPercentileRanks(String name, double[] cdfValues, TDigestState state, boolean keyed, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { - super(name, cdfValues, state, keyed, formatter, metaData); + super(name, cdfValues, state, keyed, formatter, reducers, metaData); } @Override @@ -77,8 +80,9 @@ public class InternalPercentileRanks extends AbstractInternalPercentiles impleme return percent(key); } - protected AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, Map metaData) { - return new InternalPercentileRanks(name, keys, merged, keyed, valueFormatter, metaData); + protected AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, + List reducers, Map metaData) { + return new InternalPercentileRanks(name, keys, merged, keyed, valueFormatter, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentiles.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentiles.java index 5e7d47803d8..357921aeb91 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentiles.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentiles.java @@ -24,10 +24,12 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; import java.util.Iterator; +import java.util.List; import java.util.Map; /** @@ -53,8 +55,9 @@ public class InternalPercentiles extends AbstractInternalPercentiles implements InternalPercentiles() {} // for serialization public InternalPercentiles(String name, double[] percents, TDigestState state, boolean keyed, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { - super(name, percents, state, keyed, formatter, metaData); + super(name, percents, state, keyed, formatter, reducers, metaData); } @Override @@ -77,8 +80,9 @@ public class InternalPercentiles extends AbstractInternalPercentiles implements return percentile(key); } - protected AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, Map metaData) { - return new InternalPercentiles(name, keys, merged, keyed, valueFormatter, metaData); + protected AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, + List reducers, Map metaData) { + return new InternalPercentiles(name, keys, merged, keyed, valueFormatter, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java index 0383e33e8a7..9d14e3b70c3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -30,6 +31,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -37,10 +39,10 @@ import java.util.Map; */ public class PercentileRanksAggregator extends AbstractPercentilesAggregator { - public PercentileRanksAggregator(String name, Numeric valuesSource, AggregationContext context, - Aggregator parent, double[] percents, double compression, boolean keyed, @Nullable ValueFormatter formatter, - Map metaData) throws IOException { - super(name, valuesSource, context, parent, percents, compression, keyed, formatter, metaData); + public PercentileRanksAggregator(String name, Numeric valuesSource, AggregationContext context, Aggregator parent, double[] percents, + double compression, boolean keyed, @Nullable ValueFormatter formatter, List reducers, Map metaData) + throws IOException { + super(name, valuesSource, context, parent, percents, compression, keyed, formatter, reducers, metaData); } @Override @@ -49,13 +51,13 @@ public class PercentileRanksAggregator extends AbstractPercentilesAggregator { if (state == null) { return buildEmptyAggregation(); } else { - return new InternalPercentileRanks(name, keys, state, keyed, formatter, metaData()); + return new InternalPercentileRanks(name, keys, state, keyed, formatter, reducers(), metaData()); } } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalPercentileRanks(name, keys, new TDigestState(compression), keyed, formatter, metaData()); + return new InternalPercentileRanks(name, keys, new TDigestState(compression), keyed, formatter, reducers(), metaData()); } @Override @@ -83,15 +85,19 @@ public class PercentileRanksAggregator extends AbstractPercentilesAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { return new PercentileRanksAggregator(name, null, aggregationContext, parent, values, compression, keyed, config.formatter(), + reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { return new PercentileRanksAggregator(name, valuesSource, aggregationContext, parent, values, compression, - keyed, config.formatter(), metaData); + keyed, + config.formatter(), reducers, metaData); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java index 4dd99b73cd9..1a9a839bb75 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -30,6 +31,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -38,9 +40,10 @@ import java.util.Map; public class PercentilesAggregator extends AbstractPercentilesAggregator { public PercentilesAggregator(String name, Numeric valuesSource, AggregationContext context, - Aggregator parent, double[] percents, double compression, boolean keyed, @Nullable ValueFormatter formatter, + Aggregator parent, double[] percents, + double compression, boolean keyed, @Nullable ValueFormatter formatter, List reducers, Map metaData) throws IOException { - super(name, valuesSource, context, parent, percents, compression, keyed, formatter, metaData); + super(name, valuesSource, context, parent, percents, compression, keyed, formatter, reducers, metaData); } @Override @@ -49,7 +52,7 @@ public class PercentilesAggregator extends AbstractPercentilesAggregator { if (state == null) { return buildEmptyAggregation(); } else { - return new InternalPercentiles(name, keys, state, keyed, formatter, metaData()); + return new InternalPercentiles(name, keys, state, keyed, formatter, reducers(), metaData()); } } @@ -65,7 +68,7 @@ public class PercentilesAggregator extends AbstractPercentilesAggregator { @Override public InternalAggregation buildEmptyAggregation() { - return new InternalPercentiles(name, keys, new TDigestState(compression), keyed, formatter, metaData()); + return new InternalPercentiles(name, keys, new TDigestState(compression), keyed, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -83,15 +86,19 @@ public class PercentilesAggregator extends AbstractPercentilesAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { return new PercentilesAggregator(name, null, aggregationContext, parent, percents, compression, keyed, config.formatter(), + reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { return new PercentilesAggregator(name, valuesSource, aggregationContext, parent, percents, compression, - keyed, config.formatter(), metaData); + keyed, + config.formatter(), reducers, metaData); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index c7176e0e1e1..2a3900afc46 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -28,6 +28,7 @@ import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.ArrayList; @@ -61,13 +62,13 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement private InternalScriptedMetric() { } - private InternalScriptedMetric(String name, Map metaData) { - super(name, metaData); + private InternalScriptedMetric(String name, List reducers, Map metaData) { + super(name, reducers, metaData); } public InternalScriptedMetric(String name, Object aggregation, String scriptLang, ScriptType scriptType, String reduceScript, - Map reduceParams, Map metaData) { - this(name, metaData); + Map reduceParams, List reducers, Map metaData) { + this(name, reducers, metaData); this.aggregation = aggregation; this.scriptType = scriptType; this.reduceScript = reduceScript; @@ -104,7 +105,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement aggregation = aggregationObjects; } return new InternalScriptedMetric(firstAggregation.getName(), aggregation, firstAggregation.scriptLang, firstAggregation.scriptType, - firstAggregation.reduceScript, firstAggregation.reduceParams, getMetaData()); + firstAggregation.reduceScript, firstAggregation.reduceParams, reducers(), getMetaData()); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index e9260d852ad..22781a18612 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext; @@ -57,8 +58,9 @@ public class ScriptedMetricAggregator extends MetricsAggregator { protected ScriptedMetricAggregator(String name, String scriptLang, ScriptType initScriptType, String initScript, ScriptType mapScriptType, String mapScript, ScriptType combineScriptType, String combineScript, ScriptType reduceScriptType, - String reduceScript, Map params, Map reduceParams, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + String reduceScript, Map params, Map reduceParams, AggregationContext context, + Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.scriptService = context.searchContext().scriptService(); this.scriptLang = scriptLang; this.reduceScriptType = reduceScriptType; @@ -112,12 +114,13 @@ public class ScriptedMetricAggregator extends MetricsAggregator { } else { aggregation = params.get("_agg"); } - return new InternalScriptedMetric(name, aggregation, scriptLang, reduceScriptType, reduceScript, reduceParams, metaData()); + return new InternalScriptedMetric(name, aggregation, scriptLang, reduceScriptType, reduceScript, reduceParams, reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalScriptedMetric(name, null, scriptLang, reduceScriptType, reduceScript, reduceParams, metaData()); + return new InternalScriptedMetric(name, null, scriptLang, reduceScriptType, reduceScript, reduceParams, reducers(), metaData()); } public static class Factory extends AggregatorFactory { @@ -151,7 +154,8 @@ public class ScriptedMetricAggregator extends MetricsAggregator { } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } @@ -164,7 +168,7 @@ public class ScriptedMetricAggregator extends MetricsAggregator { reduceParams = deepCopyParams(this.reduceParams, context.searchContext()); } return new ScriptedMetricAggregator(name, scriptLang, initScriptType, initScript, mapScriptType, mapScript, combineScriptType, - combineScript, reduceScriptType, reduceScript, params, reduceParams, context, parent, metaData); + combineScript, reduceScriptType, reduceScript, params, reduceParams, context, parent, reducers, metaData); } @SuppressWarnings({ "unchecked" }) diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java index 7186fee979c..5133012aabd 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java @@ -26,10 +26,12 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -69,8 +71,9 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue protected InternalStats() {} // for serialization public InternalStats(String name, long count, double sum, double min, double max, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { - super(name, metaData); + super(name, reducers, metaData); this.count = count; this.sum = sum; this.min = min; @@ -160,7 +163,7 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue max = Math.max(max, stats.getMax()); sum += stats.getSum(); } - return new InternalStats(name, count, sum, min, max, valueFormatter, getMetaData()); + return new InternalStats(name, count, sum, min, max, valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java index 8f431578fef..8a454b6cb73 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -38,6 +39,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -55,8 +57,9 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue { public StatsAggegator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + AggregationContext context, + Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; if (valuesSource != null) { final BigArrays bigArrays = context.bigArrays(); @@ -80,35 +83,35 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { if (bucket >= counts.size()) { - final long from = counts.size(); + final long from = counts.size(); final long overSize = BigArrays.overSize(bucket + 1); - counts = bigArrays.resize(counts, overSize); - sums = bigArrays.resize(sums, overSize); - mins = bigArrays.resize(mins, overSize); - maxes = bigArrays.resize(maxes, overSize); - mins.fill(from, overSize, Double.POSITIVE_INFINITY); - maxes.fill(from, overSize, Double.NEGATIVE_INFINITY); - } + counts = bigArrays.resize(counts, overSize); + sums = bigArrays.resize(sums, overSize); + mins = bigArrays.resize(mins, overSize); + maxes = bigArrays.resize(maxes, overSize); + mins.fill(from, overSize, Double.POSITIVE_INFINITY); + maxes.fill(from, overSize, Double.NEGATIVE_INFINITY); + } - values.setDocument(doc); - final int valuesCount = values.count(); + values.setDocument(doc); + final int valuesCount = values.count(); counts.increment(bucket, valuesCount); - double sum = 0; + double sum = 0; double min = mins.get(bucket); double max = maxes.get(bucket); - for (int i = 0; i < valuesCount; i++) { - double value = values.valueAt(i); - sum += value; - min = Math.min(min, value); - max = Math.max(max, value); - } + for (int i = 0; i < valuesCount; i++) { + double value = values.valueAt(i); + sum += value; + min = Math.min(min, value); + max = Math.max(max, value); + } sums.increment(bucket, sum); mins.set(bucket, min); maxes.set(bucket, max); @@ -145,12 +148,12 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue { return buildEmptyAggregation(); } return new InternalStats(name, counts.get(bucket), sums.get(bucket), mins.get(bucket), - maxes.get(bucket), formatter, metaData()); + maxes.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalStats(name, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, formatter, metaData()); + return new InternalStats(name, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -160,13 +163,15 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new StatsAggegator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new StatsAggegator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new StatsAggegator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new StatsAggegator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java index 75dc354f874..ae1bc68965d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -38,6 +39,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -55,10 +57,10 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue DoubleArray maxes; DoubleArray sumOfSqrs; - public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, - @Nullable ValueFormatter formatter, AggregationContext context, - Aggregator parent, double sigma, Map metaData) throws IOException { - super(name, context, parent, metaData); + public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, + AggregationContext context, Aggregator parent, double sigma, List reducers, Map metaData) + throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; this.sigma = sigma; @@ -167,16 +169,19 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) { if (valuesSource == null) { - return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, 0d, formatter, metaData()); + return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, 0d, formatter, + reducers(), metaData()); } assert owningBucketOrdinal < counts.size(); return new InternalExtendedStats(name, counts.get(owningBucketOrdinal), sums.get(owningBucketOrdinal), - mins.get(owningBucketOrdinal), maxes.get(owningBucketOrdinal), sumOfSqrs.get(owningBucketOrdinal), sigma, formatter, metaData()); + mins.get(owningBucketOrdinal), maxes.get(owningBucketOrdinal), sumOfSqrs.get(owningBucketOrdinal), sigma, formatter, + reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, 0d, formatter, metaData()); + return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, 0d, formatter, reducers(), + metaData()); } @Override @@ -195,13 +200,16 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new ExtendedStatsAggregator(name, null, config.formatter(), aggregationContext, parent, sigma, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new ExtendedStatsAggregator(name, null, config.formatter(), aggregationContext, parent, sigma, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new ExtendedStatsAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, sigma, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new ExtendedStatsAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, sigma, reducers, + metaData); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java index 9f88bf4f429..f5d9c7d1983 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java @@ -27,9 +27,11 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -67,8 +69,9 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStat InternalExtendedStats() {} // for serialization public InternalExtendedStats(String name, long count, double sum, double min, double max, double sumOfSqrs, - double sigma, @Nullable ValueFormatter formatter, Map metaData) { - super(name, count, sum, min, max, formatter, metaData); + double sigma, + @Nullable ValueFormatter formatter, List reducers, Map metaData) { + super(name, count, sum, min, max, formatter, reducers, metaData); this.sumOfSqrs = sumOfSqrs; this.sigma = sigma; } @@ -150,7 +153,8 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStat sumOfSqrs += stats.getSumOfSquares(); } final InternalStats stats = super.doReduce(reduceContext); - return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma, valueFormatter, getMetaData()); + return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma, + valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java index f653c082c79..7f98d6cc4e8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java @@ -25,10 +25,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -55,8 +57,8 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue i InternalSum() {} // for serialization - InternalSum(String name, double sum, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + InternalSum(String name, double sum, @Nullable ValueFormatter formatter, List reducers, Map metaData) { + super(name, reducers, metaData); this.sum = sum; this.valueFormatter = formatter; } @@ -81,7 +83,7 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue i for (InternalAggregation aggregation : reduceContext.aggregations()) { sum += ((InternalSum) aggregation).sum; } - return new InternalSum(name, sum, valueFormatter, getMetaData()); + return new InternalSum(name, sum, valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java index ab6b565a62b..af834af7f7b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -36,6 +37,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,8 +51,9 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray sums; public SumAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + AggregationContext context, + Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; if (valuesSource != null) { @@ -68,19 +71,19 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { sums = bigArrays.grow(sums, bucket + 1); - values.setDocument(doc); - final int valuesCount = values.count(); - double sum = 0; - for (int i = 0; i < valuesCount; i++) { - sum += values.valueAt(i); - } + values.setDocument(doc); + final int valuesCount = values.count(); + double sum = 0; + for (int i = 0; i < valuesCount; i++) { + sum += values.valueAt(i); + } sums.increment(bucket, sum); } }; @@ -96,12 +99,12 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { if (valuesSource == null || bucket >= sums.size()) { return buildEmptyAggregation(); } - return new InternalSum(name, sums.get(bucket), formatter, metaData()); + return new InternalSum(name, sums.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalSum(name, 0.0, formatter, metaData()); + return new InternalSum(name, 0.0, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -111,13 +114,15 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new SumAggregator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new SumAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new SumAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new SumAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java index e841ded7d91..20aeaae2f5a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java @@ -36,10 +36,12 @@ import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -48,6 +50,7 @@ import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.SubSearchContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -68,8 +71,9 @@ public class TopHitsAggregator extends MetricsAggregator { final SubSearchContext subSearchContext; final LongObjectPagedHashMap topDocsCollectors; - public TopHitsAggregator(FetchPhase fetchPhase, SubSearchContext subSearchContext, String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + public TopHitsAggregator(FetchPhase fetchPhase, SubSearchContext subSearchContext, String name, AggregationContext context, + Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.fetchPhase = fetchPhase; topDocsCollectors = new LongObjectPagedHashMap<>(1, context.bigArrays()); this.subSearchContext = subSearchContext; @@ -82,8 +86,8 @@ public class TopHitsAggregator extends MetricsAggregator { return sort.needsScores() || subSearchContext.trackScores(); } else { // sort by score - return true; - } + return true; + } } @Override @@ -180,8 +184,9 @@ public class TopHitsAggregator extends MetricsAggregator { } @Override - public Aggregator createInternal(AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new TopHitsAggregator(fetchPhase, subSearchContext, name, aggregationContext, parent, metaData); + public Aggregator createInternal(AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { + return new TopHitsAggregator(fetchPhase, subSearchContext, name, aggregationContext, parent, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java index b8b675c2eee..935eb5e1933 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java @@ -25,9 +25,11 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -54,8 +56,9 @@ public class InternalValueCount extends InternalNumericMetricsAggregation.Single InternalValueCount() {} // for serialization - public InternalValueCount(String name, long value, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + public InternalValueCount(String name, long value, @Nullable ValueFormatter formatter, List reducers, + Map metaData) { + super(name, reducers, metaData); this.value = value; this.valueFormatter = formatter; } @@ -81,7 +84,7 @@ public class InternalValueCount extends InternalNumericMetricsAggregation.Single for (InternalAggregation aggregation : reduceContext.aggregations()) { valueCount += ((InternalValueCount) aggregation).value; } - return new InternalValueCount(name, valueCount, valueFormatter, getMetaData()); + return new InternalValueCount(name, valueCount, valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java index a74ec061b8e..2bd7b505135 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -36,6 +37,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -53,8 +55,9 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue { LongArray counts; public ValueCountAggregator(String name, ValuesSource valuesSource, @Nullable ValueFormatter formatter, - AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - super(name, aggregationContext, parent, metaData); + AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; if (valuesSource != null) { @@ -67,17 +70,17 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedBinaryDocValues values = valuesSource.bytesValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { counts = bigArrays.grow(counts, bucket + 1); values.setDocument(doc); counts.increment(bucket, values.count()); - } + } }; } @@ -92,12 +95,12 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue { if (valuesSource == null || bucket >= counts.size()) { return buildEmptyAggregation(); } - return new InternalValueCount(name, counts.get(bucket), formatter, metaData()); + return new InternalValueCount(name, counts.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalValueCount(name, 0l, formatter, metaData()); + return new InternalValueCount(name, 0l, formatter, reducers(), metaData()); } @Override @@ -112,13 +115,15 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new ValueCountAggregator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new ValueCountAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(VS valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new ValueCountAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, + protected Aggregator doCreateInternal(VS valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new ValueCountAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java new file mode 100644 index 00000000000..c74f6f0b0f2 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; + +public abstract class Reducer { + + /** + * Parses the reducer request and creates the appropriate reducer factory + * for it. + * + * @see {@link ReducerFactory} + */ + public static interface Parser { + + /** + * @return The reducer type this parser is associated with. + */ + String type(); + + /** + * Returns the reducer factory with which this parser is associated. + * + * @param reducerName + * The name of the reducer + * @param parser + * The xcontent parser + * @param context + * The search context + * @return The resolved reducer factory + * @throws java.io.IOException + * When parsing fails + */ + ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException; + + } + + public abstract InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext); + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java new file mode 100644 index 00000000000..64e7d1c7baf --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.support.AggregationContext; + +import java.io.IOException; +import java.util.Map; + +/** + * A factory that knows how to create an {@link Aggregator} of a specific type. + */ +public abstract class ReducerFactory implements Streamable { + + protected String name; + protected String type; + protected Map metaData; + + /** + * Constructs a new reducer factory. + * + * @param name + * The aggregation name + * @param type + * The aggregation type + */ + public ReducerFactory(String name, String type) { + this.name = name; + this.type = type; + } + + /** + * Validates the state of this factory (makes sure the factory is properly configured) + */ + public final void validate() { + doValidate(); + } + + protected abstract Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + Map metaData) throws IOException; + + /** + * Creates the reducer + * + * @param context + * The aggregation context + * @param parent + * The parent aggregator (if this is a top level factory, the + * parent will be {@code null}) + * @param collectsFromSingleBucket + * If true then the created aggregator will only be collected + * with 0 as a bucket ordinal. Some factories can take + * advantage of this in order to return more optimized + * implementations. + * + * @return The created aggregator + */ + public final Reducer create(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket) throws IOException { + Reducer aggregator = createInternal(context, parent, collectsFromSingleBucket, this.metaData); + return aggregator; + } + + public void doValidate() { + } + + public void setMetaData(Map metaData) { + this.metaData = metaData; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java index d88f95642c3..dbefc2e2612 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java @@ -18,10 +18,16 @@ */ package org.elasticsearch.search.aggregations.support; -import org.elasticsearch.search.aggregations.*; +import org.elasticsearch.search.aggregations.AggregationExecutionException; +import org.elasticsearch.search.aggregations.AggregationInitializationException; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormat; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,12 +55,13 @@ public abstract class ValuesSourceAggregatorFactory ext } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { if (config.unmapped()) { - return createUnmapped(context, parent, metaData); + return createUnmapped(context, parent, reducers, metaData); } VS vs = context.valuesSource(config); - return doCreateInternal(vs, context, parent, collectsFromSingleBucket, metaData); + return doCreateInternal(vs, context, parent, collectsFromSingleBucket, reducers, metaData); } @Override @@ -64,9 +71,11 @@ public abstract class ValuesSourceAggregatorFactory ext } } - protected abstract Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException; + protected abstract Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException; - protected abstract Aggregator doCreateInternal(VS valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException; + protected abstract Aggregator doCreateInternal(VS valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException; private void resolveValuesSourceConfigFromAncestors(String aggName, AggregatorFactory parent, Class requiredValuesSourceType) { ValuesSourceConfig config; diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index ec3d17b9294..bdfec315402 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -30,7 +30,16 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.*; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.MutualInformation; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParserMapper; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchTestCase; @@ -41,11 +50,16 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; /** * @@ -96,13 +110,15 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { if (randomBoolean()) { BytesRef term = new BytesRef("123.0"); buckets.add(new SignificantLongTerms.Bucket(1, 2, 3, 4, 123, InternalAggregations.EMPTY, null)); - sTerms[0] = new SignificantLongTerms(10, 20, "some_name", null, 1, 1, heuristic, buckets, null); + sTerms[0] = new SignificantLongTerms(10, 20, "some_name", null, 1, 1, heuristic, buckets, + (List) Collections.EMPTY_LIST, null); sTerms[1] = new SignificantLongTerms(); } else { BytesRef term = new BytesRef("someterm"); buckets.add(new SignificantStringTerms.Bucket(term, 1, 2, 3, 4, InternalAggregations.EMPTY)); - sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, null); + sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, (List) Collections.EMPTY_LIST, + null); sTerms[1] = new SignificantStringTerms(); } return sTerms; From ae76239b0aefe65991e50572c6d0b0039f2c1c0d Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 12 Feb 2015 08:41:21 +0000 Subject: [PATCH 003/236] AggregatorFactories now stores reducers as well as aggregators These reducers will be passed through from the AggregatorParser --- .../aggregations/AggregatorFactories.java | 20 ++++++++++++++++--- .../aggregations/AggregatorFactory.java | 8 +------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 10ea7f74c2c..795d9b5724c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; @@ -35,13 +36,19 @@ public class AggregatorFactories { public static final AggregatorFactories EMPTY = new Empty(); private AggregatorFactory[] factories; + private List reducers; public static Builder builder() { return new Builder(); } - private AggregatorFactories(AggregatorFactory[] factories) { + private AggregatorFactories(AggregatorFactory[] factories, List reducers) { this.factories = factories; + this.reducers = reducers; + } + + public List reducers() { + return reducers; } private static Aggregator createAndRegisterContextAware(AggregationContext context, AggregatorFactory factory, Aggregator parent, boolean collectsFromSingleBucket) throws IOException { @@ -100,9 +107,10 @@ public class AggregatorFactories { private static final AggregatorFactory[] EMPTY_FACTORIES = new AggregatorFactory[0]; private static final Aggregator[] EMPTY_AGGREGATORS = new Aggregator[0]; + private static final List EMPTY_REDUCERS = new ArrayList<>(); private Empty() { - super(EMPTY_FACTORIES); + super(EMPTY_FACTORIES, EMPTY_REDUCERS); } @Override @@ -121,6 +129,7 @@ public class AggregatorFactories { private final Set names = new HashSet<>(); private final List factories = new ArrayList<>(); + private List reducers = new ArrayList<>(); public Builder add(AggregatorFactory factory) { if (!names.add(factory.name)) { @@ -130,11 +139,16 @@ public class AggregatorFactories { return this; } + public Builder setReducers(List reducers) { + this.reducers = reducers; + return this; + } + public AggregatorFactories build() { if (factories.isEmpty()) { return EMPTY; } - return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()])); + return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]), this.reducers); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index f49a328fd16..3db9e5ddd69 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -28,7 +28,6 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -41,7 +40,6 @@ public abstract class AggregatorFactory { protected String type; protected AggregatorFactory parent; protected AggregatorFactories factories = AggregatorFactories.EMPTY; - protected List reducers = Collections.emptyList(); protected Map metaData; /** @@ -97,7 +95,7 @@ public abstract class AggregatorFactory { * @return The created aggregator */ public final Aggregator create(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket) throws IOException { - return createInternal(context, parent, collectsFromSingleBucket, this.reducers, this.metaData); + return createInternal(context, parent, collectsFromSingleBucket, this.factories.reducers(), this.metaData); } public void doValidate() { @@ -108,10 +106,6 @@ public abstract class AggregatorFactory { } - public void setReducers(List reducers) { - this.reducers = reducers; - } - /** * Utility method. Given an {@link AggregatorFactory} that creates {@link Aggregator}s that only know how From 1e947c8d1750498725d73b27aa87c65a768c83c0 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 12 Feb 2015 10:51:32 +0000 Subject: [PATCH 004/236] Reducers are now parsed in AggregatorParsers --- .../index/query/CommonTermsQueryBuilder.java | 2 +- .../aggregations/AggregationModule.java | 75 ++++++++------- .../aggregations/AggregatorFactories.java | 26 ++++-- .../aggregations/AggregatorFactory.java | 2 +- .../aggregations/AggregatorParsers.java | 91 +++++++++++++------ .../bucket/nested/NestedAggregatorTest.java | 2 +- 6 files changed, 125 insertions(+), 73 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index 9775b3f04d8..e57dd0e0b4e 100644 --- a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -27,7 +27,7 @@ import java.io.IOException; /** * CommonTermsQuery query is a query that executes high-frequency terms in a * optional sub-query to prevent slow queries due to "common" terms like - * stopwords. This query basically builds 2 queries off the {@link #add(Term) + * stopwords. This query basically builds 2 queries off the {@link #addAggregator(Term) * added} terms where low-frequency terms are added to a required boolean clause * and high-frequency terms are added to an optional boolean clause. The * optional clause is only executed if the required "low-frequency' clause diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index 2feaf112104..3910f096246 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; + import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.SpawnModules; @@ -54,6 +55,7 @@ import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStat import org.elasticsearch.search.aggregations.metrics.sum.SumParser; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsParser; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.util.List; @@ -62,39 +64,40 @@ import java.util.List; */ public class AggregationModule extends AbstractModule implements SpawnModules{ - private List> parsers = Lists.newArrayList(); + private List> aggParsers = Lists.newArrayList(); + private List> reducerParsers = Lists.newArrayList(); public AggregationModule() { - parsers.add(AvgParser.class); - parsers.add(SumParser.class); - parsers.add(MinParser.class); - parsers.add(MaxParser.class); - parsers.add(StatsParser.class); - parsers.add(ExtendedStatsParser.class); - parsers.add(ValueCountParser.class); - parsers.add(PercentilesParser.class); - parsers.add(PercentileRanksParser.class); - parsers.add(CardinalityParser.class); + aggParsers.add(AvgParser.class); + aggParsers.add(SumParser.class); + aggParsers.add(MinParser.class); + aggParsers.add(MaxParser.class); + aggParsers.add(StatsParser.class); + aggParsers.add(ExtendedStatsParser.class); + aggParsers.add(ValueCountParser.class); + aggParsers.add(PercentilesParser.class); + aggParsers.add(PercentileRanksParser.class); + aggParsers.add(CardinalityParser.class); - parsers.add(GlobalParser.class); - parsers.add(MissingParser.class); - parsers.add(FilterParser.class); - parsers.add(FiltersParser.class); - parsers.add(TermsParser.class); - parsers.add(SignificantTermsParser.class); - parsers.add(RangeParser.class); - parsers.add(DateRangeParser.class); - parsers.add(IpRangeParser.class); - parsers.add(HistogramParser.class); - parsers.add(DateHistogramParser.class); - parsers.add(GeoDistanceParser.class); - parsers.add(GeoHashGridParser.class); - parsers.add(NestedParser.class); - parsers.add(ReverseNestedParser.class); - parsers.add(TopHitsParser.class); - parsers.add(GeoBoundsParser.class); - parsers.add(ScriptedMetricParser.class); - parsers.add(ChildrenParser.class); + aggParsers.add(GlobalParser.class); + aggParsers.add(MissingParser.class); + aggParsers.add(FilterParser.class); + aggParsers.add(FiltersParser.class); + aggParsers.add(TermsParser.class); + aggParsers.add(SignificantTermsParser.class); + aggParsers.add(RangeParser.class); + aggParsers.add(DateRangeParser.class); + aggParsers.add(IpRangeParser.class); + aggParsers.add(HistogramParser.class); + aggParsers.add(DateHistogramParser.class); + aggParsers.add(GeoDistanceParser.class); + aggParsers.add(GeoHashGridParser.class); + aggParsers.add(NestedParser.class); + aggParsers.add(ReverseNestedParser.class); + aggParsers.add(TopHitsParser.class); + aggParsers.add(GeoBoundsParser.class); + aggParsers.add(ScriptedMetricParser.class); + aggParsers.add(ChildrenParser.class); } /** @@ -103,14 +106,18 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ * @param parser The parser for the custom aggregator. */ public void addAggregatorParser(Class parser) { - parsers.add(parser); + aggParsers.add(parser); } @Override protected void configure() { - Multibinder multibinder = Multibinder.newSetBinder(binder(), Aggregator.Parser.class); - for (Class parser : parsers) { - multibinder.addBinding().to(parser); + Multibinder multibinderAggParser = Multibinder.newSetBinder(binder(), Aggregator.Parser.class); + for (Class parser : aggParsers) { + multibinderAggParser.addBinding().to(parser); + } + Multibinder multibinderReducerParser = Multibinder.newSetBinder(binder(), Reducer.Parser.class); + for (Class parser : reducerParsers) { + multibinderReducerParser.addBinding().to(parser); } bind(AggregatorParsers.class).asEagerSingleton(); bind(AggregationParseElement.class).asEagerSingleton(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 795d9b5724c..5103f9c2b7a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; @@ -36,18 +37,22 @@ public class AggregatorFactories { public static final AggregatorFactories EMPTY = new Empty(); private AggregatorFactory[] factories; - private List reducers; + private List reducerFactories; public static Builder builder() { return new Builder(); } - private AggregatorFactories(AggregatorFactory[] factories, List reducers) { + private AggregatorFactories(AggregatorFactory[] factories, List reducers) { this.factories = factories; - this.reducers = reducers; + this.reducerFactories = reducers; } - public List reducers() { + public List createReducers() throws IOException { + List reducers = new ArrayList<>(); + for (ReducerFactory factory : this.reducerFactories) { + reducers.add(factory.create(null, null, false)); // NOCOMIT add context, parent etc. + } return reducers; } @@ -107,7 +112,7 @@ public class AggregatorFactories { private static final AggregatorFactory[] EMPTY_FACTORIES = new AggregatorFactory[0]; private static final Aggregator[] EMPTY_AGGREGATORS = new Aggregator[0]; - private static final List EMPTY_REDUCERS = new ArrayList<>(); + private static final List EMPTY_REDUCERS = new ArrayList<>(); private Empty() { super(EMPTY_FACTORIES, EMPTY_REDUCERS); @@ -129,9 +134,9 @@ public class AggregatorFactories { private final Set names = new HashSet<>(); private final List factories = new ArrayList<>(); - private List reducers = new ArrayList<>(); + private final List reducerFactories = new ArrayList<>(); - public Builder add(AggregatorFactory factory) { + public Builder addAggregator(AggregatorFactory factory) { if (!names.add(factory.name)) { throw new ElasticsearchIllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]"); } @@ -139,8 +144,8 @@ public class AggregatorFactories { return this; } - public Builder setReducers(List reducers) { - this.reducers = reducers; + public Builder addReducer(ReducerFactory reducerFactory) { + this.reducerFactories.add(reducerFactory); return this; } @@ -148,7 +153,8 @@ public class AggregatorFactories { if (factories.isEmpty()) { return EMPTY; } - return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]), this.reducers); + // NOCOMMIT work out dependency order of reducer factories + return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]), this.reducerFactories); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 3db9e5ddd69..d22fed75a8c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -95,7 +95,7 @@ public abstract class AggregatorFactory { * @return The created aggregator */ public final Aggregator create(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket) throws IOException { - return createInternal(context, parent, collectsFromSingleBucket, this.factories.reducers(), this.metaData); + return createInternal(context, parent, collectsFromSingleBucket, this.factories.createReducers(), this.metaData); } public void doValidate() { diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index b55f6a4f022..e23cf8ef228 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -19,10 +19,13 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.ImmutableMap; + import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -37,21 +40,30 @@ import java.util.regex.Pattern; public class AggregatorParsers { public static final Pattern VALID_AGG_NAME = Pattern.compile("[^\\[\\]>]+"); - private final ImmutableMap parsers; + private final ImmutableMap aggParsers; + private final ImmutableMap reducerParsers; /** * Constructs the AggregatorParsers out of all the given parsers - * - * @param parsers The available aggregator parsers (dynamically injected by the {@link org.elasticsearch.search.aggregations.AggregationModule}). + * + * @param aggParsers + * The available aggregator parsers (dynamically injected by the + * {@link org.elasticsearch.search.aggregations.AggregationModule} + * ). */ @Inject - public AggregatorParsers(Set parsers) { - MapBuilder builder = MapBuilder.newMapBuilder(); - for (Aggregator.Parser parser : parsers) { - builder.put(parser.type(), parser); + public AggregatorParsers(Set aggParsers, Set reducerParsers) { + MapBuilder aggParsersBuilder = MapBuilder.newMapBuilder(); + for (Aggregator.Parser parser : aggParsers) { + aggParsersBuilder.put(parser.type(), parser); } - this.parsers = builder.immutableMap(); + this.aggParsers = aggParsersBuilder.immutableMap(); + MapBuilder reducerParsersBuilder = MapBuilder.newMapBuilder(); + for (Reducer.Parser parser : reducerParsers) { + reducerParsersBuilder.put(parser.type(), parser); + } + this.reducerParsers = reducerParsersBuilder.immutableMap(); } /** @@ -61,7 +73,18 @@ public class AggregatorParsers { * @return The parser associated with the given aggregation type. */ public Aggregator.Parser parser(String type) { - return parsers.get(type); + return aggParsers.get(type); + } + + /** + * Returns the parser that is registered under the given reducer type. + * + * @param type + * The reducer type + * @return The parser associated with the given reducer type. + */ + public Reducer.Parser reducer(String type) { + return reducerParsers.get(type); } /** @@ -98,7 +121,8 @@ public class AggregatorParsers { throw new SearchParseException(context, "Aggregation definition for [" + aggregationName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); } - AggregatorFactory factory = null; + AggregatorFactory aggFactory = null; + ReducerFactory reducerFactory = null; AggregatorFactories subFactories = null; Map metaData = null; @@ -126,34 +150,49 @@ public class AggregatorParsers { subFactories = parseAggregators(parser, context, level+1); break; default: - if (factory != null) { - throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + factory.type + "] and [" + fieldName + "]"); + if (aggFactory != null) { + throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + + aggFactory.type + "] and [" + fieldName + "]"); } Aggregator.Parser aggregatorParser = parser(fieldName); if (aggregatorParser == null) { - throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + aggregationName + "]"); + Reducer.Parser reducerParser = reducer(fieldName); + if (reducerParser == null) { + throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + + aggregationName + "]"); + } else { + reducerFactory = reducerParser.parse(aggregationName, parser, context); } - factory = aggregatorParser.parse(aggregationName, parser, context); + } else { + aggFactory = aggregatorParser.parse(aggregationName, parser, context); + } } } - if (factory == null) { + if (aggFactory == null && reducerFactory == null) { throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]"); - } + } else if (aggFactory != null) { + if (metaData != null) { + aggFactory.setMetaData(metaData); + } - if (metaData != null) { - factory.setMetaData(metaData); - } + if (subFactories != null) { + aggFactory.subFactories(subFactories); + } - if (subFactories != null) { - factory.subFactories(subFactories); - } + if (level == 0) { + aggFactory.validate(); + } - if (level == 0) { - factory.validate(); + factories.addAggregator(aggFactory); + } else if (reducerFactory != null) { + if (subFactories != null) { + throw new SearchParseException(context, "Aggregation [" + aggregationName + "] cannot define sub-aggregations"); + } + factories.addReducer(reducerFactory); + } else { + throw new SearchParseException(context, "Found two sub aggregation definitions under [" + aggregationName + "]"); } - - factories.add(factory); } return factories.build(); diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java index 7cdff38d7c8..2f9ffafac53 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java @@ -120,7 +120,7 @@ public class NestedAggregatorTest extends ElasticsearchSingleNodeLuceneTestCase AggregationContext context = new AggregationContext(searchContext); AggregatorFactories.Builder builder = AggregatorFactories.builder(); - builder.add(new NestedAggregator.Factory("test", "nested_field", FilterCachingPolicy.ALWAYS_CACHE)); + builder.addAggregator(new NestedAggregator.Factory("test", "nested_field", FilterCachingPolicy.ALWAYS_CACHE)); AggregatorFactories factories = builder.build(); searchContext.aggregations(new SearchContextAggregations(factories)); Aggregator[] aggs = factories.createTopLevelAggregators(context); From 55b82db34638fa15470da2bbf71b4e98861d1203 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 12 Feb 2015 12:32:54 +0000 Subject: [PATCH 005/236] Reducers are now wired end-to-end into the agg framework --- .../aggregations/AggregationModule.java | 2 + .../aggregations/InternalAggregation.java | 20 ++++ .../aggregations/InternalAggregations.java | 3 +- .../bucket/histogram/InternalHistogram.java | 12 +- .../significant/UnmappedSignificantTerms.java | 2 +- .../bucket/terms/UnmappedTerms.java | 2 +- .../metrics/tophits/InternalTopHits.java | 20 ++-- .../metrics/tophits/TopHitsAggregator.java | 6 +- .../reducers/InternalSimpleValue.java | 103 ++++++++++++++++++ .../search/aggregations/reducers/Reducer.java | 45 +++++++- .../aggregations/reducers/ReducerFactory.java | 3 +- .../aggregations/reducers/ReducerStreams.java | 68 ++++++++++++ .../aggregations/reducers/SimpleValue.java | 26 +++++ 13 files changed, 294 insertions(+), 18 deletions(-) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerStreams.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/SimpleValue.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index 3910f096246..cb4bef6ca34 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -98,6 +98,8 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ aggParsers.add(GeoBoundsParser.class); aggParsers.add(ScriptedMetricParser.class); aggParsers.add(ChildrenParser.class); + + // NOCOMMIT reducerParsers.add(FooParser.class); } /** diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 828a1a7ee0f..fb621ea5103 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -18,6 +18,9 @@ */ package org.elasticsearch.search.aggregations; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -29,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; @@ -209,6 +213,11 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St public final void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeGenericValue(metaData); + out.writeVInt(reducers.size()); + for (Reducer reducer : reducers) { + out.writeBytesReference(reducer.type().stream()); + reducer.writeTo(out); + } doWriteTo(out); } @@ -217,6 +226,17 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St public final void readFrom(StreamInput in) throws IOException { name = in.readString(); metaData = in.readMap(); + int size = in.readVInt(); + if (size == 0) { + reducers = ImmutableList.of(); + } else { + reducers = Lists.newArrayListWithCapacity(size); + for (int i = 0; i < size; i++) { + BytesReference type = in.readBytesReference(); + Reducer reducer = ReducerStreams.stream(type).readResult(in); + reducers.add(reducer); + } + } doReadFrom(in); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index ec4625e2387..c41e8a4ff77 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -165,7 +165,8 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl for (Map.Entry> entry : aggByName.entrySet()) { List aggregations = entry.getValue(); InternalAggregation first = aggregations.get(0); // the list can't be empty as it's created on demand - reducedAggregations.add(first.doReduce(new InternalAggregation.ReduceContext(aggregations, context.bigArrays(), context.scriptService()))); + reducedAggregations.add(first.reduce(new InternalAggregation.ReduceContext(aggregations, context.bigArrays(), context + .scriptService()))); } return new InternalAggregations(reducedAggregations); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index a33cdb49b3c..5c945afddf0 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -186,6 +186,14 @@ public class InternalHistogram extends Inter out.writeVLong(docCount); aggregations.writeTo(out); } + + public ValueFormatter getFormatter() { + return formatter; + } + + public boolean getKeyed() { + return keyed; + } } static class EmptyBucketInfo { @@ -224,7 +232,7 @@ public class InternalHistogram extends Inter } - static class Factory { + public static class Factory { protected Factory() { } @@ -283,7 +291,7 @@ public class InternalHistogram extends Inter return buckets; } - protected Factory getFactory() { + public Factory getFactory() { return factory; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java index f382237dacf..04099009272 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java @@ -71,7 +71,7 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms { public InternalAggregation doReduce(ReduceContext reduceContext) { for (InternalAggregation aggregation : reduceContext.aggregations()) { if (!(aggregation instanceof UnmappedSignificantTerms)) { - return aggregation.doReduce(reduceContext); + return aggregation.reduce(reduceContext); } } return this; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index 82c850bcac7..89134a394ec 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -86,7 +86,7 @@ public class UnmappedTerms extends InternalTerms { public InternalAggregation doReduce(ReduceContext reduceContext) { for (InternalAggregation agg : reduceContext.aggregations()) { if (!(agg instanceof UnmappedTerms)) { - return agg.doReduce(reduceContext); + return agg.reduce(reduceContext); } } return this; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java index 8c5eafa2961..b3e4c5cf4c9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java @@ -18,9 +18,6 @@ */ package org.elasticsearch.search.aggregations.metrics.tophits; -import java.io.IOException; -import java.util.List; - import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopDocs; @@ -35,9 +32,14 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHits; +import java.io.IOException; +import java.util.List; +import java.util.Map; + /** */ public class InternalTopHits extends InternalMetricsAggregation implements TopHits { @@ -65,16 +67,17 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi InternalTopHits() { } - public InternalTopHits(String name, int from, int size, TopDocs topDocs, InternalSearchHits searchHits) { - this.name = name; + public InternalTopHits(String name, int from, int size, TopDocs topDocs, InternalSearchHits searchHits, List reducers, + Map metaData) { + super(name, reducers, metaData); this.from = from; this.size = size; this.topDocs = topDocs; this.searchHits = searchHits; } - public InternalTopHits(String name, InternalSearchHits searchHits) { - this.name = name; + public InternalTopHits(String name, InternalSearchHits searchHits, List reducers, Map metaData) { + super(name, reducers, metaData); this.searchHits = searchHits; this.topDocs = Lucene.EMPTY_TOP_DOCS; } @@ -123,7 +126,8 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc); hits[i] = (InternalSearchHit) shardHits[scoreDoc.shardIndex].getAt(position); } - return new InternalTopHits(name, new InternalSearchHits(hits, reducedTopDocs.totalHits, reducedTopDocs.getMaxScore())); + return new InternalTopHits(name, new InternalSearchHits(hits, reducedTopDocs.totalHits, reducedTopDocs.getMaxScore()), + reducers(), getMetaData()); } catch (IOException e) { throw ExceptionsHelper.convertToElastic(e); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java index 20aeaae2f5a..6abf1917c17 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java @@ -158,13 +158,15 @@ public class TopHitsAggregator extends MetricsAggregator { searchHitFields.sortValues(fieldDoc.fields); } } - return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, fetchResult.hits()); + return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, fetchResult.hits(), reducers(), + metaData()); } } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), Lucene.EMPTY_TOP_DOCS, InternalSearchHits.empty()); + return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), Lucene.EMPTY_TOP_DOCS, + InternalSearchHits.empty(), reducers(), metaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java new file mode 100644 index 00000000000..7d204c007c6 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.AggregationStreams; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.metrics.max.InternalMax; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class InternalSimpleValue extends InternalNumericMetricsAggregation.SingleValue implements SimpleValue { + + public final static Type TYPE = new Type("simple_value"); + + public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() { + @Override + public InternalSimpleValue readResult(StreamInput in) throws IOException { + InternalSimpleValue result = new InternalSimpleValue(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + AggregationStreams.registerStream(STREAM, TYPE.stream()); + } + + private double value; + + InternalSimpleValue() {} // for serialization + + public InternalSimpleValue(String name, double value, @Nullable ValueFormatter formatter, List reducers, Map metaData) { + super(name, reducers, metaData); + this.valueFormatter = formatter; + this.value = value; + } + + @Override + public double value() { + return value; + } + + public double getValue() { + return value; + } + + @Override + public Type type() { + return TYPE; + } + + @Override + public InternalMax doReduce(ReduceContext reduceContext) { + throw new UnsupportedOperationException("Not supported"); + } + + @Override + protected void doReadFrom(StreamInput in) throws IOException { + valueFormatter = ValueFormatterStreams.readOptional(in); + value = in.readDouble(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(valueFormatter, out); + out.writeDouble(value); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + boolean hasValue = !Double.isInfinite(value); + builder.field(CommonFields.VALUE, hasValue ? value : null); + if (hasValue && valueFormatter != null) { + builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(value)); + } + return builder; + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java index c74f6f0b0f2..d87d9fa72e1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java @@ -19,14 +19,19 @@ package org.elasticsearch.search.aggregations.reducers; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Map; -public abstract class Reducer { +public abstract class Reducer implements Streamable { /** * Parses the reducer request and creates the appropriate reducer factory @@ -58,6 +63,44 @@ public abstract class Reducer { } + protected String name; + protected Map metaData; + + protected Reducer() { // for Serialisation + } + + protected Reducer(String name, Map metaData) { + this.name = name; + this.metaData = metaData; + } + + public String name() { + return name; + } + + public Map metaData() { + return metaData; + } + + public abstract Type type(); + public abstract InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext); + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeMap(metaData); + doWriteTo(out); + } + + protected abstract void doWriteTo(StreamOutput out) throws IOException; + + @Override + public final void readFrom(StreamInput in) throws IOException { + name = in.readString(); + metaData = in.readMap(); + doReadFrom(in); + } + + protected abstract void doReadFrom(StreamInput in) throws IOException; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java index 64e7d1c7baf..4249cde2dc3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations.reducers; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -28,7 +27,7 @@ import java.util.Map; /** * A factory that knows how to create an {@link Aggregator} of a specific type. */ -public abstract class ReducerFactory implements Streamable { +public abstract class ReducerFactory { protected String name; protected String type; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerStreams.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerStreams.java new file mode 100644 index 00000000000..7a4319e0a2b --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerStreams.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.reducers; + +import com.google.common.collect.ImmutableMap; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * A registry for all the dedicated streams in the aggregation module. This is to support dynamic addAggregation that + * know how to stream themselves. + */ +public class ReducerStreams { + + private static ImmutableMap streams = ImmutableMap.of(); + + /** + * A stream that knows how to read an aggregation from the input. + */ + public static interface Stream { + Reducer readResult(StreamInput in) throws IOException; + } + + /** + * Registers the given stream and associate it with the given types. + * + * @param stream The streams to register + * @param types The types associated with the streams + */ + public static synchronized void registerStream(Stream stream, BytesReference... types) { + MapBuilder uStreams = MapBuilder.newMapBuilder(streams); + for (BytesReference type : types) { + uStreams.put(type, stream); + } + streams = uStreams.immutableMap(); + } + + /** + * Returns the stream that is registered for the given type + * + * @param type The given type + * @return The associated stream + */ + public static Stream stream(BytesReference type) { + return streams.get(type); + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/SimpleValue.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/SimpleValue.java new file mode 100644 index 00000000000..e1c510e1a29 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/SimpleValue.java @@ -0,0 +1,26 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; + +public interface SimpleValue extends NumericMetricsAggregation.SingleValue { + +} From 9cfa6c6af7141bb662de64e979aeb79d385f4a69 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 12 Feb 2015 13:22:22 +0000 Subject: [PATCH 006/236] Basic derivative reducer --- .../aggregations/AggregationModule.java | 3 +- .../TransportAggregationModule.java | 6 +- .../reducers/derivative/DerivativeParser.java | 69 +++++++++ .../derivative/DerivativeReducer.java | 138 ++++++++++++++++++ 4 files changed, 214 insertions(+), 2 deletions(-) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index cb4bef6ca34..d1cb6d96800 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -56,6 +56,7 @@ import org.elasticsearch.search.aggregations.metrics.sum.SumParser; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsParser; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser; import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; import java.util.List; @@ -99,7 +100,7 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ aggParsers.add(ScriptedMetricParser.class); aggParsers.add(ChildrenParser.class); - // NOCOMMIT reducerParsers.add(FooParser.class); + reducerParsers.add(DerivativeParser.class); } /** diff --git a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java index ce09d1e5c69..c99f885462c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java @@ -57,6 +57,7 @@ import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExte import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer; /** * A module that registers all the transport streams for the addAggregation @@ -89,7 +90,7 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM SignificantStringTerms.registerStreams(); SignificantLongTerms.registerStreams(); UnmappedSignificantTerms.registerStreams(); - InternalGeoHashGrid.registerStreams(); + InternalGeoHashGrid.registerStreams(); DoubleTerms.registerStreams(); UnmappedTerms.registerStreams(); InternalRange.registerStream(); @@ -102,6 +103,9 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM InternalTopHits.registerStreams(); InternalGeoBounds.registerStream(); InternalChildren.registerStream(); + + // Reducers + DerivativeReducer.registerStreams(); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java new file mode 100644 index 00000000000..0e9b1f7f41f --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.derivative; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; + +public class DerivativeParser implements Reducer.Parser { + + public static final ParseField BUCKETS_PATH = new ParseField("bucketsPath"); + + @Override + public String type() { + return DerivativeReducer.TYPE.name(); + } + + @Override + public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { + XContentParser.Token token; + String currentFieldName = null; + String bucketsPath = null; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if (BUCKETS_PATH.match(currentFieldName)) { + bucketsPath = parser.text(); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "]."); + } + } else { + throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "]."); + } + } + + if (bucketsPath == null) { + throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + + "] for derivative aggregation [" + reducerName + "]"); + } + + return new DerivativeReducer.Factory(reducerName, bucketsPath); + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java new file mode 100644 index 00000000000..d2cfae6784b --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -0,0 +1,138 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.derivative; + +import com.google.common.base.Function; +import com.google.common.collect.Lists; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class DerivativeReducer extends Reducer { + + public final static Type TYPE = new Type("derivative"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public DerivativeReducer readResult(StreamInput in) throws IOException { + DerivativeReducer result = new DerivativeReducer(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private String bucketsPath; + private static final Function FUNCTION = new Function() { + @Override + public InternalAggregation apply(Aggregation input) { + return (InternalAggregation) input; + } + }; + + public DerivativeReducer() { + } + + public DerivativeReducer(String name, String bucketsPath, Map metadata) { + super(name, metadata); + this.bucketsPath = bucketsPath; + } + + @Override + public Type type() { + return TYPE; + } + + @Override + public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { + InternalHistogram histo = (InternalHistogram) aggregation; + List buckets = histo.getBuckets(); + InternalHistogram.Factory factory = histo.getFactory(); + List newBuckets = new ArrayList<>(); + Double lastBucketValue = null; + for (InternalHistogram.Bucket bucket : buckets) { + double thisBucketValue = (double) bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPath) + .getPathElementsAsStringList()); + if (lastBucketValue != null) { + double diff = thisBucketValue - lastBucketValue; + + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); + aggs.add(new InternalSimpleValue(bucketsPath, diff, null, new ArrayList(), metaData())); // NOCOMMIT implement formatter for derivative reducer + InternalHistogram.Bucket newBucket = factory.createBucket(((DateTime) bucket.getKey()).getMillis(), bucket.getDocCount(), + new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter()); // NOCOMMIT fix key resolution for dates + newBuckets.add(newBucket); + } else { + newBuckets.add(bucket); + } + lastBucketValue = thisBucketValue; + } + return factory.create(histo.getName(), newBuckets, null, 1, null, null, false, new ArrayList(), histo.getMetaData()); // NOCOMMIT get order, minDocCount, emptyBucketInfo etc. from histo + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + bucketsPath = in.readString(); + + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + out.writeString(bucketsPath); + } + + public static class Factory extends ReducerFactory { + + private String bucketsPath; + + public Factory(String name, String field) { + super(name, TYPE.name()); + this.bucketsPath = field; + } + + @Override + protected Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + Map metaData) throws IOException { + return new DerivativeReducer(name, bucketsPath, metaData); + } + + } +} From d65e9a4a90deba1f749b073c2f303f0ea0f593ef Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 12 Feb 2015 13:52:56 +0000 Subject: [PATCH 007/236] Fixing compile issues after rebase with master Mostly due to @jpountz's leaf collector changes --- .../search/aggregations/AggregatorBase.java | 10 +++++++++- .../search/aggregations/AggregatorFactory.java | 14 +++++++++----- .../aggregations/bucket/BucketsAggregator.java | 3 ++- .../GlobalOrdinalsSignificantTermsAggregator.java | 10 +++++----- .../terms/GlobalOrdinalsStringTermsAggregator.java | 5 +++-- .../bucket/terms/TermsAggregatorFactory.java | 3 +-- 6 files changed, 29 insertions(+), 16 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index e23639352cf..661d975a41f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; import org.elasticsearch.search.query.QueryPhaseExecutionException; @@ -45,6 +46,7 @@ public abstract class AggregatorBase extends Aggregator { private Map subAggregatorbyName; private DeferringBucketCollector recordingWrapper; + private final List reducers; /** * Constructs a new Aggregator. @@ -55,8 +57,10 @@ public abstract class AggregatorBase extends Aggregator { * @param parent The parent aggregator (may be {@code null} for top level aggregators) * @param metaData The metaData associated with this aggregator */ - protected AggregatorBase(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, Map metaData) throws IOException { + protected AggregatorBase(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, + List reducers, Map metaData) throws IOException { this.name = name; + this.reducers = reducers; this.metaData = metaData; this.parent = parent; this.context = context; @@ -106,6 +110,10 @@ public abstract class AggregatorBase extends Aggregator { return this.metaData; } + public List reducers() { + return this.reducers; + } + /** * Get a {@link LeafBucketCollector} for the given ctx, which should * delegate to the given collector. diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index d22fed75a8c..41aee8f931f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -112,7 +112,11 @@ public abstract class AggregatorFactory { * to collect bucket 0, this returns an aggregator that can collect any bucket. */ protected static Aggregator asMultiBucketAggregator(final AggregatorFactory factory, final AggregationContext context, final Aggregator parent) throws IOException { - final Aggregator first = factory.create(context, parent, truegator> aggregators; + final Aggregator first = factory.create(context, parent, true); + final BigArrays bigArrays = context.bigArrays(); + return new Aggregator() { + + ObjectArray aggregators; ObjectArray collectors; { @@ -188,9 +192,9 @@ public abstract class AggregatorFactory { LeafBucketCollector collector = collectors.get(bucket); if (collector == null) { Aggregator aggregator = aggregators.get(bucket); - if (aggregator == null) { - aggregator = factory.create(context, parent, true); - aggregator.preCollection(); + if (aggregator == null) { + aggregator = factory.create(context, parent, true); + aggregator.preCollection(); aggregators.set(bucket, aggregator); } collector = aggregator.getLeafCollector(ctx); @@ -198,7 +202,7 @@ public abstract class AggregatorFactory { collectors.set(bucket, collector); } collector.collect(doc, 0); - } + } }; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index b7c8fe7ccfc..93fa360b113 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -21,13 +21,13 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.IntArray; -import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorBase; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; @@ -47,6 +47,7 @@ public abstract class BucketsAggregator extends AggregatorBase { AggregationContext context, Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, context, parent, reducers, metaData); + bigArrays = context.bigArrays(); docCounts = bigArrays.newIntArray(1, true); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java index c7e260faf63..0d08c6d5efe 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java @@ -25,8 +25,8 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; import org.elasticsearch.search.aggregations.reducers.Reducer; @@ -48,12 +48,12 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri protected long numCollectedDocs; protected final SignificantTermsAggregatorFactory termsAggFactory; - public GlobalOrdinalsSignificantTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, - BucketCountThresholds bucketCountThresholds, - IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, + public GlobalOrdinalsSignificantTermsAggregator(String name, AggregatorFactories factories, + ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, + IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, List reducers, Map metaData) throws IOException { - super(name, factories, valuesSource, maxOrd, null, bucketCountThresholds, includeExclude, aggregationContext, parent, + super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent, SubAggCollectionMode.DEPTH_FIRST, false, reducers, metaData); this.termsAggFactory = termsAggFactory; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index bff09e07e4a..6f538b384d3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -37,10 +37,10 @@ import org.elasticsearch.index.fielddata.AbstractRandomAccessOrds; import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalMapping; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude; @@ -74,7 +74,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, Terms.Order order, BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List reducers, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, reducers, reducers, metaData); + super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, reducers, + metaData); this.valuesSource = valuesSource; this.includeExclude = includeExclude; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index a9cb4ea19cb..59fa19366d6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -1,4 +1,4 @@ -List Date: Thu, 12 Feb 2015 14:01:34 +0000 Subject: [PATCH 008/236] fix to the name of the injected aggregation for derivatives --- .../aggregations/reducers/derivative/DerivativeReducer.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index d2cfae6784b..975ad809adf 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -96,9 +96,9 @@ public class DerivativeReducer extends Reducer { double diff = thisBucketValue - lastBucketValue; List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); - aggs.add(new InternalSimpleValue(bucketsPath, diff, null, new ArrayList(), metaData())); // NOCOMMIT implement formatter for derivative reducer + aggs.add(new InternalSimpleValue(name(), diff, null, new ArrayList(), metaData())); // NOCOMMIT implement formatter for derivative reducer InternalHistogram.Bucket newBucket = factory.createBucket(((DateTime) bucket.getKey()).getMillis(), bucket.getDocCount(), - new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter()); // NOCOMMIT fix key resolution for dates + new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter()); // NOCOMMIT fix key resolution to deal with numbers and dates newBuckets.add(newBucket); } else { newBuckets.add(bucket); From f00a9b85578cdfe5a40a2677d3af4be0960670b5 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 12 Feb 2015 15:50:11 +0100 Subject: [PATCH 009/236] Minor indentation/validation fix in AggregatorParsers. --- .../aggregations/AggregatorParsers.java | 31 ++++++++++++------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index e23cf8ef228..62caa385585 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -150,28 +150,35 @@ public class AggregatorParsers { subFactories = parseAggregators(parser, context, level+1); break; default: - if (aggFactory != null) { - throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + if (aggFactory != null) { + throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + aggFactory.type + "] and [" + fieldName + "]"); } + if (reducerFactory != null) { + // TODO we would need a .type property on reducers too for this error message? + throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + + reducerFactory + "] and [" + fieldName + "]"); + } + Aggregator.Parser aggregatorParser = parser(fieldName); if (aggregatorParser == null) { - Reducer.Parser reducerParser = reducer(fieldName); - if (reducerParser == null) { - throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + Reducer.Parser reducerParser = reducer(fieldName); + if (reducerParser == null) { + throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + aggregationName + "]"); + } else { + reducerFactory = reducerParser.parse(aggregationName, parser, context); + } } else { - reducerFactory = reducerParser.parse(aggregationName, parser, context); + aggFactory = aggregatorParser.parse(aggregationName, parser, context); } - } else { - aggFactory = aggregatorParser.parse(aggregationName, parser, context); - } } } if (aggFactory == null && reducerFactory == null) { throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]"); } else if (aggFactory != null) { + assert reducerFactory == null; if (metaData != null) { aggFactory.setMetaData(metaData); } @@ -185,13 +192,13 @@ public class AggregatorParsers { } factories.addAggregator(aggFactory); - } else if (reducerFactory != null) { + } else { + assert reducerFactory != null; if (subFactories != null) { throw new SearchParseException(context, "Aggregation [" + aggregationName + "] cannot define sub-aggregations"); } + // TODO: should we validate here like aggs? factories.addReducer(reducerFactory); - } else { - throw new SearchParseException(context, "Found two sub aggregation definitions under [" + aggregationName + "]"); } } From 3a777545de9df45d688171c942c2234660c8a9b7 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 12 Feb 2015 15:36:02 +0000 Subject: [PATCH 010/236] derivative reducer now works with both date_histogram and histogram --- .../bucket/histogram/InternalDateHistogram.java | 12 ++++++++++-- .../bucket/histogram/InternalHistogram.java | 10 ++++++++-- .../search/aggregations/reducers/ReducerFactory.java | 2 +- .../reducers/derivative/DerivativeReducer.java | 7 ++++--- 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 9f9ad81c953..0457ad9e92c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.Nullable; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo; @@ -83,8 +84,15 @@ public class InternalDateHistogram { } @Override - public InternalDateHistogram.Bucket createBucket(long key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { - return new Bucket(key, docCount, aggregations, keyed, formatter, this); + public InternalDateHistogram.Bucket createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed, + @Nullable ValueFormatter formatter) { + if (key instanceof Number) { + return new Bucket(((Number) key).longValue(), docCount, aggregations, keyed, formatter, this); + } else if (key instanceof DateTime) { + return new Bucket(((DateTime) key).getMillis(), docCount, aggregations, keyed, formatter, this); + } else { + throw new AggregationExecutionException("Expected key of type Number or DateTime but got [" + key + "]"); + } } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 5c945afddf0..d5b3a1384f1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -247,8 +248,13 @@ public class InternalHistogram extends Inter return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, reducers, metaData); } - public B createBucket(long key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { - return (B) new Bucket(key, docCount, keyed, formatter, this, aggregations); + public B createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed, + @Nullable ValueFormatter formatter) { + if (key instanceof Number) { + return (B) new Bucket(((Number) key).longValue(), docCount, keyed, formatter, this, aggregations); + } else { + throw new AggregationExecutionException("Expected key of type Number but got [" + key + "]"); + } } protected B createEmptyBucket(boolean keyed, @Nullable ValueFormatter formatter) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java index 4249cde2dc3..c4c6b304ba8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java @@ -49,7 +49,7 @@ public abstract class ReducerFactory { /** * Validates the state of this factory (makes sure the factory is properly configured) */ - public final void validate() { + public final void validate() { // NOCOMMIT hook in validation doValidate(); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 975ad809adf..76d3bd9b3ac 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -37,7 +37,6 @@ import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.AggregationPath; -import org.joda.time.DateTime; import java.io.IOException; import java.util.ArrayList; @@ -89,6 +88,7 @@ public class DerivativeReducer extends Reducer { InternalHistogram.Factory factory = histo.getFactory(); List newBuckets = new ArrayList<>(); Double lastBucketValue = null; + // NOCOMMIT this needs to be improved so that the aggs are cloned correctly to ensure aggs are fully immutable. for (InternalHistogram.Bucket bucket : buckets) { double thisBucketValue = (double) bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPath) .getPathElementsAsStringList()); @@ -97,8 +97,9 @@ public class DerivativeReducer extends Reducer { List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); aggs.add(new InternalSimpleValue(name(), diff, null, new ArrayList(), metaData())); // NOCOMMIT implement formatter for derivative reducer - InternalHistogram.Bucket newBucket = factory.createBucket(((DateTime) bucket.getKey()).getMillis(), bucket.getDocCount(), - new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter()); // NOCOMMIT fix key resolution to deal with numbers and dates + InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), + new InternalAggregations( + aggs), bucket.getKeyed(), bucket.getFormatter()); newBuckets.add(newBucket); } else { newBuckets.add(bucket); From 9805b8359b408e5bde4e0507646b09c6c17515e0 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 12 Feb 2015 15:47:06 +0000 Subject: [PATCH 011/236] can now reference single value metrics directly instead of having to add '.value' to the path --- .../reducers/derivative/DerivativeReducer.java | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 76d3bd9b3ac..9a707687cc2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -25,12 +25,14 @@ import com.google.common.collect.Lists; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; @@ -90,8 +92,7 @@ public class DerivativeReducer extends Reducer { Double lastBucketValue = null; // NOCOMMIT this needs to be improved so that the aggs are cloned correctly to ensure aggs are fully immutable. for (InternalHistogram.Bucket bucket : buckets) { - double thisBucketValue = (double) bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPath) - .getPathElementsAsStringList()); + double thisBucketValue = resolveBucketValue(histo, bucket); if (lastBucketValue != null) { double diff = thisBucketValue - lastBucketValue; @@ -109,6 +110,19 @@ public class DerivativeReducer extends Reducer { return factory.create(histo.getName(), newBuckets, null, 1, null, null, false, new ArrayList(), histo.getMetaData()); // NOCOMMIT get order, minDocCount, emptyBucketInfo etc. from histo } + private double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket) { + Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPath) + .getPathElementsAsStringList()); + if (propertyValue instanceof Number) { + return ((Number) propertyValue).doubleValue(); + } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) { + return ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value(); + } else { + throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + + "must reference either a number value or a single value numeric metric aggregation"); + } + } + @Override public void doReadFrom(StreamInput in) throws IOException { bucketsPath = in.readString(); From 0f22d7e65ea7eb37953d71bc6eee9b5ff2323db7 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 12 Feb 2015 17:13:59 +0000 Subject: [PATCH 012/236] Can now specify a format for the returned derivative values --- .../reducers/derivative/DerivativeParser.java | 13 +++++++++- .../derivative/DerivativeReducer.java | 24 ++++++++++++------- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index 0e9b1f7f41f..55259102dfd 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -31,6 +33,7 @@ import java.io.IOException; public class DerivativeParser implements Reducer.Parser { public static final ParseField BUCKETS_PATH = new ParseField("bucketsPath"); + public static final ParseField FORMAT = new ParseField("format"); @Override public String type() { @@ -42,6 +45,7 @@ public class DerivativeParser implements Reducer.Parser { XContentParser.Token token; String currentFieldName = null; String bucketsPath = null; + String format = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -49,6 +53,8 @@ public class DerivativeParser implements Reducer.Parser { } else if (token == XContentParser.Token.VALUE_STRING) { if (BUCKETS_PATH.match(currentFieldName)) { bucketsPath = parser.text(); + } else if (FORMAT.match(currentFieldName)) { + format = parser.text(); } else { throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); @@ -63,7 +69,12 @@ public class DerivativeParser implements Reducer.Parser { + "] for derivative aggregation [" + reducerName + "]"); } - return new DerivativeReducer.Factory(reducerName, bucketsPath); + ValueFormatter formatter = null; + if (format != null) { + formatter = ValueFormat.Patternable.Number.format(format).formatter(); + } + + return new DerivativeReducer.Factory(reducerName, bucketsPath, formatter); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 9a707687cc2..26f40b2824d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.reducers.derivative; import com.google.common.base.Function; import com.google.common.collect.Lists; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.Aggregation; @@ -39,6 +40,7 @@ import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; import java.util.ArrayList; @@ -62,7 +64,6 @@ public class DerivativeReducer extends Reducer { ReducerStreams.registerStream(STREAM, TYPE.stream()); } - private String bucketsPath; private static final Function FUNCTION = new Function() { @Override public InternalAggregation apply(Aggregation input) { @@ -70,12 +71,16 @@ public class DerivativeReducer extends Reducer { } }; + private ValueFormatter formatter; + private String bucketsPath; + public DerivativeReducer() { } - public DerivativeReducer(String name, String bucketsPath, Map metadata) { + public DerivativeReducer(String name, String bucketsPath, @Nullable ValueFormatter formatter, Map metadata) { super(name, metadata); this.bucketsPath = bucketsPath; + this.formatter = formatter; } @Override @@ -97,9 +102,8 @@ public class DerivativeReducer extends Reducer { double diff = thisBucketValue - lastBucketValue; List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); - aggs.add(new InternalSimpleValue(name(), diff, null, new ArrayList(), metaData())); // NOCOMMIT implement formatter for derivative reducer - InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), - new InternalAggregations( + aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList(), metaData())); + InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( aggs), bucket.getKeyed(), bucket.getFormatter()); newBuckets.add(newBucket); } else { @@ -136,17 +140,19 @@ public class DerivativeReducer extends Reducer { public static class Factory extends ReducerFactory { - private String bucketsPath; + private final String bucketsPath; + private final ValueFormatter formatter; - public Factory(String name, String field) { + public Factory(String name, String bucketsPath, @Nullable ValueFormatter formatter) { super(name, TYPE.name()); - this.bucketsPath = field; + this.bucketsPath = bucketsPath; + this.formatter = formatter; } @Override protected Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new DerivativeReducer(name, bucketsPath, metaData); + return new DerivativeReducer(name, bucketsPath, formatter, metaData); } } From 18c2cb64b78a6ba7ea6f9285b91dee675e59489a Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Fri, 13 Feb 2015 14:33:44 +0000 Subject: [PATCH 013/236] Validation of the reducer factories is now called from within the AggregatorFactories --- .../elasticsearch/search/aggregations/AggregatorFactories.java | 3 +++ .../search/aggregations/reducers/ReducerFactory.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 5103f9c2b7a..a4f68b05efb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -106,6 +106,9 @@ public class AggregatorFactories { for (AggregatorFactory factory : factories) { factory.validate(); } + for (ReducerFactory factory : reducerFactories) { + factory.validate(); + } } private final static class Empty extends AggregatorFactories { diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java index c4c6b304ba8..4249cde2dc3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java @@ -49,7 +49,7 @@ public abstract class ReducerFactory { /** * Validates the state of this factory (makes sure the factory is properly configured) */ - public final void validate() { // NOCOMMIT hook in validation + public final void validate() { doValidate(); } From 9357fc4f95f7f2a8a09a0e7fd8d6b695618e7d12 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Fri, 13 Feb 2015 15:43:39 +0000 Subject: [PATCH 014/236] bucketsPath is now in the Reducer class since every Reducer implementation will need it --- .../search/aggregations/reducers/Reducer.java | 14 +++++++--- .../aggregations/reducers/ReducerFactory.java | 4 ++- .../reducers/derivative/DerivativeParser.java | 26 ++++++++++++++----- .../derivative/DerivativeReducer.java | 22 +++++++--------- 4 files changed, 43 insertions(+), 23 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java index d87d9fa72e1..cfc0f76622b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java @@ -63,14 +63,16 @@ public abstract class Reducer implements Streamable { } - protected String name; - protected Map metaData; + private String name; + private String[] bucketsPaths; + private Map metaData; protected Reducer() { // for Serialisation } - protected Reducer(String name, Map metaData) { + protected Reducer(String name, String[] bucketsPaths, Map metaData) { this.name = name; + this.bucketsPaths = bucketsPaths; this.metaData = metaData; } @@ -78,6 +80,10 @@ public abstract class Reducer implements Streamable { return name; } + public String[] bucketsPaths() { + return bucketsPaths; + } + public Map metaData() { return metaData; } @@ -89,6 +95,7 @@ public abstract class Reducer implements Streamable { @Override public final void writeTo(StreamOutput out) throws IOException { out.writeString(name); + out.writeStringArray(bucketsPaths); out.writeMap(metaData); doWriteTo(out); } @@ -98,6 +105,7 @@ public abstract class Reducer implements Streamable { @Override public final void readFrom(StreamInput in) throws IOException { name = in.readString(); + bucketsPaths = in.readStringArray(); metaData = in.readMap(); doReadFrom(in); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java index 4249cde2dc3..f904a564dd2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java @@ -31,6 +31,7 @@ public abstract class ReducerFactory { protected String name; protected String type; + protected String[] bucketsPaths; protected Map metaData; /** @@ -41,9 +42,10 @@ public abstract class ReducerFactory { * @param type * The aggregation type */ - public ReducerFactory(String name, String type) { + public ReducerFactory(String name, String type, String[] bucketsPaths) { this.name = name; this.type = type; + this.bucketsPaths = bucketsPaths; } /** diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index 55259102dfd..edb416f875a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -29,6 +29,8 @@ import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; public class DerivativeParser implements Reducer.Parser { @@ -44,17 +46,29 @@ public class DerivativeParser implements Reducer.Parser { public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { XContentParser.Token token; String currentFieldName = null; - String bucketsPath = null; + String[] bucketsPaths = null; String format = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_STRING) { - if (BUCKETS_PATH.match(currentFieldName)) { - bucketsPath = parser.text(); - } else if (FORMAT.match(currentFieldName)) { + if (FORMAT.match(currentFieldName)) { format = parser.text(); + } else if (BUCKETS_PATH.match(currentFieldName)) { + bucketsPaths = new String[] { parser.text() }; + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "]."); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (BUCKETS_PATH.match(currentFieldName)) { + List paths = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + paths.add(path); + } + bucketsPaths = paths.toArray(new String[paths.size()]); } else { throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); @@ -64,7 +78,7 @@ public class DerivativeParser implements Reducer.Parser { } } - if (bucketsPath == null) { + if (bucketsPaths == null) { throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + "] for derivative aggregation [" + reducerName + "]"); } @@ -74,7 +88,7 @@ public class DerivativeParser implements Reducer.Parser { formatter = ValueFormat.Patternable.Number.format(format).formatter(); } - return new DerivativeReducer.Factory(reducerName, bucketsPath, formatter); + return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 26f40b2824d..2bd42164c46 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -41,6 +41,7 @@ import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; import java.io.IOException; import java.util.ArrayList; @@ -72,14 +73,12 @@ public class DerivativeReducer extends Reducer { }; private ValueFormatter formatter; - private String bucketsPath; public DerivativeReducer() { } - public DerivativeReducer(String name, String bucketsPath, @Nullable ValueFormatter formatter, Map metadata) { - super(name, metadata); - this.bucketsPath = bucketsPath; + public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, Map metadata) { + super(name, bucketsPaths, metadata); this.formatter = formatter; } @@ -115,7 +114,7 @@ public class DerivativeReducer extends Reducer { } private double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket) { - Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPath) + Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPaths()[0]) .getPathElementsAsStringList()); if (propertyValue instanceof Number) { return ((Number) propertyValue).doubleValue(); @@ -129,30 +128,27 @@ public class DerivativeReducer extends Reducer { @Override public void doReadFrom(StreamInput in) throws IOException { - bucketsPath = in.readString(); - + formatter = ValueFormatterStreams.readOptional(in); } @Override public void doWriteTo(StreamOutput out) throws IOException { - out.writeString(bucketsPath); + ValueFormatterStreams.writeOptional(formatter, out); } public static class Factory extends ReducerFactory { - private final String bucketsPath; private final ValueFormatter formatter; - public Factory(String name, String bucketsPath, @Nullable ValueFormatter formatter) { - super(name, TYPE.name()); - this.bucketsPath = bucketsPath; + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter) { + super(name, TYPE.name(), bucketsPaths); this.formatter = formatter; } @Override protected Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new DerivativeReducer(name, bucketsPath, formatter, metaData); + return new DerivativeReducer(name, bucketsPaths, formatter, metaData); } } From 3ab3ffa98928abab3d05100a55729f82e3f4a572 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 16 Feb 2015 10:42:08 +0000 Subject: [PATCH 015/236] First (rough) pass at dependancy resolution for reducers uses the depth-first algorithm from http://en.wikipedia.org/wiki/Topological_sorting#Algorithms Needs some cleaning up --- .../aggregations/AggregatorFactories.java | 71 ++++++++++++++++++- .../aggregations/reducers/ReducerFactory.java | 8 +++ 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index a4f68b05efb..ad17c533cc0 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -19,14 +19,18 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; /** @@ -156,8 +160,73 @@ public class AggregatorFactories { if (factories.isEmpty()) { return EMPTY; } + List orderedReducers = resolveReducerOrder(this.reducerFactories, this.factories); // NOCOMMIT work out dependency order of reducer factories - return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]), this.reducerFactories); + return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]), orderedReducers); + } + + /* + * L ← Empty list that will contain the sorted nodes + * while there are unmarked nodes do + * select an unmarked node n + * visit(n) + * function visit(node n) + * if n has a temporary mark then stop (not a DAG) + * if n is not marked (i.e. has not been visited yet) then + * mark n temporarily + * for each node m with an edge from n to m do + * visit(m) + * mark n permanently + * unmark n temporarily + * add n to head of L + */ + private List resolveReducerOrder(List reducerFactories, List aggFactories) { + Map reducerFactoriesMap = new HashMap<>(); + for (ReducerFactory factory : reducerFactories) { + reducerFactoriesMap.put(factory.getName(), factory); + } + Set aggFactoryNames = new HashSet<>(); + for (AggregatorFactory aggFactory : aggFactories) { + aggFactoryNames.add(aggFactory.name); + } + List orderedReducers = new LinkedList<>(); + List unmarkedFactories = new ArrayList(reducerFactories); + Set temporarilyMarked = new HashSet(); + while (!unmarkedFactories.isEmpty()) { + ReducerFactory factory = unmarkedFactories.get(0); + resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, temporarilyMarked, factory); + } + List orderedReducerNames = new ArrayList<>(); + for (ReducerFactory reducerFactory : orderedReducers) { + orderedReducerNames.add(reducerFactory.getName()); + } + System.out.println("ORDERED REDUCERS: " + orderedReducerNames); + return orderedReducers; + } + + private void resolveReducerOrder(Set aggFactoryNames, Map reducerFactoriesMap, + List orderedReducers, List unmarkedFactories, Set temporarilyMarked, + ReducerFactory factory) { + if (temporarilyMarked.contains(factory)) { + throw new ElasticsearchIllegalStateException("Cyclical dependancy found with reducer [" + factory.getName() + "]"); // NOCOMMIT is this the right Exception to throw? + } else if (unmarkedFactories.contains(factory)) { + temporarilyMarked.add(factory); + String[] bucketsPaths = factory.getBucketsPaths(); + for (String bucketsPath : bucketsPaths) { + ReducerFactory matchingFactory = reducerFactoriesMap.get(bucketsPath); + if (aggFactoryNames.contains(bucketsPath)) { + continue; + } else if (matchingFactory != null) { + resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, temporarilyMarked, + matchingFactory); + } else { + throw new ElasticsearchIllegalStateException("No reducer found for path [" + bucketsPath + "]"); // NOCOMMIT is this the right Exception to throw? + } + } + unmarkedFactories.remove(factory); + temporarilyMarked.remove(factory); + orderedReducers.add(factory); + } } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java index f904a564dd2..05cb6fbed48 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java @@ -86,4 +86,12 @@ public abstract class ReducerFactory { this.metaData = metaData; } + public String getName() { + return name; + } + + public String[] getBucketsPaths() { + return bucketsPaths; + } + } From f20dae85a9bbb0972b30ffdc94a34576ce039102 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 16 Feb 2015 10:51:31 +0000 Subject: [PATCH 016/236] getProperty method in the aggregations framework now throws a specific exception --- .../InternalMultiBucketAggregation.java | 8 ++--- .../InvalidAggregationPathException.java | 33 +++++++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/InvalidAggregationPathException.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index ebd2637ac56..5efc2180229 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.reducers.Reducer; @@ -58,18 +57,19 @@ public abstract class InternalMultiBucketAggregation extends InternalAggregation String aggName = path.get(0); if (aggName.equals("_count")) { if (path.size() > 1) { - throw new ElasticsearchIllegalArgumentException("_count must be the last element in the path"); + throw new InvalidAggregationPathException("_count must be the last element in the path"); } return getDocCount(); } else if (aggName.equals("_key")) { if (path.size() > 1) { - throw new ElasticsearchIllegalArgumentException("_key must be the last element in the path"); + throw new InvalidAggregationPathException("_key must be the last element in the path"); } return getKey(); } InternalAggregation aggregation = aggregations.get(aggName); if (aggregation == null) { - throw new ElasticsearchIllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + containingAggName + "]"); + throw new InvalidAggregationPathException("Cannot find an aggregation named [" + aggName + "] in [" + containingAggName + + "]"); } return aggregation.getProperty(path.subList(1, path.size())); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/InvalidAggregationPathException.java b/src/main/java/org/elasticsearch/search/aggregations/InvalidAggregationPathException.java new file mode 100644 index 00000000000..e2ab1f65245 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/InvalidAggregationPathException.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.ElasticsearchException; + +public class InvalidAggregationPathException extends ElasticsearchException { + + public InvalidAggregationPathException(String msg) { + super(msg); + } + + public InvalidAggregationPathException(String msg, Throwable cause) { + super(msg, cause); + } +} From 58f2ceca12e9bdc40735142ddf8fa6def6f90e4d Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 16 Feb 2015 10:52:00 +0000 Subject: [PATCH 017/236] Derivative Reducer now supported nth order derivatives --- .../derivative/DerivativeReducer.java | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 2bd42164c46..a0a3d9cb425 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.reducers.derivative; import com.google.common.base.Function; import com.google.common.collect.Lists; +import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -32,6 +33,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.InvalidAggregationPathException; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; @@ -92,12 +94,16 @@ public class DerivativeReducer extends Reducer { InternalHistogram histo = (InternalHistogram) aggregation; List buckets = histo.getBuckets(); InternalHistogram.Factory factory = histo.getFactory(); + List newBuckets = new ArrayList<>(); Double lastBucketValue = null; // NOCOMMIT this needs to be improved so that the aggs are cloned correctly to ensure aggs are fully immutable. for (InternalHistogram.Bucket bucket : buckets) { - double thisBucketValue = resolveBucketValue(histo, bucket); + Double thisBucketValue = resolveBucketValue(histo, bucket); if (lastBucketValue != null) { + if (thisBucketValue == null) { + throw new ElasticsearchIllegalStateException("FOUND GAP IN DATA"); // NOCOMMIT deal with gaps in data + } double diff = thisBucketValue - lastBucketValue; List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); @@ -113,16 +119,20 @@ public class DerivativeReducer extends Reducer { return factory.create(histo.getName(), newBuckets, null, 1, null, null, false, new ArrayList(), histo.getMetaData()); // NOCOMMIT get order, minDocCount, emptyBucketInfo etc. from histo } - private double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket) { - Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPaths()[0]) - .getPathElementsAsStringList()); - if (propertyValue instanceof Number) { - return ((Number) propertyValue).doubleValue(); - } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) { - return ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value(); - } else { - throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() - + "must reference either a number value or a single value numeric metric aggregation"); + private Double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket) { + try { + Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPaths()[0]) + .getPathElementsAsStringList()); + if (propertyValue instanceof Number) { + return ((Number) propertyValue).doubleValue(); + } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) { + return ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value(); + } else { + throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + + " must reference either a number value or a single value numeric metric aggregation"); + } + } catch (InvalidAggregationPathException e) { + return null; } } From 247b6a7e13f2d782822519a1a0511659a7f30922 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 16 Feb 2015 10:54:37 +0000 Subject: [PATCH 018/236] removed obselete NOCOMMIT and left over sysout call --- .../elasticsearch/search/aggregations/AggregatorFactories.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index ad17c533cc0..258b90c2653 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -161,7 +161,6 @@ public class AggregatorFactories { return EMPTY; } List orderedReducers = resolveReducerOrder(this.reducerFactories, this.factories); - // NOCOMMIT work out dependency order of reducer factories return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]), orderedReducers); } @@ -200,7 +199,6 @@ public class AggregatorFactories { for (ReducerFactory reducerFactory : orderedReducers) { orderedReducerNames.add(reducerFactory.getName()); } - System.out.println("ORDERED REDUCERS: " + orderedReducerNames); return orderedReducers; } From e994044d28b5bba490257a70c3357ee859d1279d Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 16 Feb 2015 11:56:41 +0000 Subject: [PATCH 019/236] Added Builder classes for Reducers --- .../search/aggregations/reducers/Reducer.java | 3 + .../aggregations/reducers/ReducerBuilder.java | 95 +++++++++++++++++++ .../reducers/ReducerBuilders.java | 32 +++++++ 3 files changed, 130 insertions(+) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java index cfc0f76622b..ed602b31751 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.reducers; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -41,6 +42,8 @@ public abstract class Reducer implements Streamable { */ public static interface Parser { + public static final ParseField BUCKETS_PATH = new ParseField("bucketsPath"); + /** * @return The reducer type this parser is associated with. */ diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java new file mode 100644 index 00000000000..49bba5a0ecb --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * A base class for all reducer builders. + */ +public abstract class ReducerBuilder> implements ToXContent { + + private final String name; + protected final String type; + private List bucketsPaths; + private Map metaData; + + /** + * Sole constructor, typically used by sub-classes. + */ + protected ReducerBuilder(String name, String type) { + this.name = name; + this.type = type; + } + + /** + * Return the name of the reducer that is being built. + */ + public String getName() { + return name; + } + + /** + * Sets the paths to the buckets to use for this reducer + */ + public B setBucketsPaths(List bucketsPaths) { + this.bucketsPaths = bucketsPaths; + return (B) this; + } + + /** + * Sets the meta data to be included in the reducer's response + */ + public B setMetaData(Map metaData) { + this.metaData = metaData; + return (B)this; + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(getName()); + + if (this.metaData != null) { + builder.field("meta", this.metaData); + } + builder.startObject(type); + + if (bucketsPaths != null) { + builder.startArray(Reducer.Parser.BUCKETS_PATH.getPreferredName()); + for (String path : bucketsPaths) { + builder.value(path); + } + builder.endArray(); + } + + internalXContent(builder, params); + + builder.endObject(); + + return builder.endObject(); + } + + protected abstract XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java new file mode 100644 index 00000000000..21c901af80d --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeBuilder; + +public final class ReducerBuilders { + + private ReducerBuilders() { + } + + public static final DerivativeBuilder derivative(String name) { + return new DerivativeBuilder(name); + } +} From c97dd84badc08df6bb57fcbd0a1af4470775e319 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 16 Feb 2015 11:57:01 +0000 Subject: [PATCH 020/236] Added Builder for Derivatives Reducer --- .../derivative/DerivativeBuilder.java | 48 +++++++++++++++++++ .../reducers/derivative/DerivativeParser.java | 2 - 2 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java new file mode 100644 index 00000000000..87165c32ac0 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.derivative; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; + +import java.io.IOException; + +public class DerivativeBuilder extends ReducerBuilder { + + private String format; + + public DerivativeBuilder(String name) { + super(name, DerivativeReducer.TYPE.name()); + } + + public DerivativeBuilder format(String format) { + this.format = format; + return this; + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(DerivativeParser.FORMAT.getPreferredName(), format); + } + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index edb416f875a..8a562050dcb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -33,8 +33,6 @@ import java.util.ArrayList; import java.util.List; public class DerivativeParser implements Reducer.Parser { - - public static final ParseField BUCKETS_PATH = new ParseField("bucketsPath"); public static final ParseField FORMAT = new ParseField("format"); @Override From 511e2758250a6f02de8eef87f3587b650179b662 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 16 Feb 2015 16:32:42 +0000 Subject: [PATCH 021/236] More update to support Reducer Builders --- .../aggregations/AggregationBuilder.java | 23 ++++++++++++++++++- .../TransportAggregationModule.java | 2 ++ .../bucket/histogram/InternalHistogram.java | 4 ++++ .../bucket/histogram/InternalOrder.java | 2 +- .../aggregations/reducers/ReducerBuilder.java | 5 ++-- 5 files changed, 31 insertions(+), 5 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index 5b9fab55aa4..cc3033e883f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -20,12 +20,14 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.Lists; + import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.client.Requests; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import java.io.IOException; import java.util.List; @@ -37,6 +39,7 @@ import java.util.Map; public abstract class AggregationBuilder> extends AbstractAggregationBuilder { private List aggregations; + private List> reducers; private BytesReference aggregationsBinary; private Map metaData; @@ -59,6 +62,18 @@ public abstract class AggregationBuilder> extend return (B) this; } + /** + * Add a sub get to this bucket get. + */ + @SuppressWarnings("unchecked") + public B subAggregation(ReducerBuilder reducer) { + if (reducers == null) { + reducers = Lists.newArrayList(); + } + reducers.add(reducer); + return (B) this; + } + /** * Sets a raw (xcontent / json) sub addAggregation. */ @@ -120,7 +135,7 @@ public abstract class AggregationBuilder> extend builder.field(type); internalXContent(builder, params); - if (aggregations != null || aggregationsBinary != null) { + if (aggregations != null || aggregationsBinary != null || reducers != null) { builder.startObject("aggregations"); if (aggregations != null) { @@ -129,6 +144,12 @@ public abstract class AggregationBuilder> extend } } + if (reducers != null) { + for (ReducerBuilder subAgg : reducers) { + subAgg.toXContent(builder, params); + } + } + if (aggregationsBinary != null) { if (XContentFactory.xContentType(aggregationsBinary) == builder.contentType()) { builder.rawField("aggregations", aggregationsBinary); diff --git a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java index c99f885462c..fe4542830cc 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java @@ -57,6 +57,7 @@ import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExte import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer; /** @@ -103,6 +104,7 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM InternalTopHits.registerStreams(); InternalGeoBounds.registerStream(); InternalChildren.registerStream(); + InternalSimpleValue.registerStreams(); // Reducers DerivativeReducer.registerStreams(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index d5b3a1384f1..4171cc3f514 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -301,6 +301,10 @@ public class InternalHistogram extends Inter return factory; } + public InternalOrder getOrder() { + return order; + } + private static class IteratorAndCurrent { private final Iterator iterator; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java index 9d503a8e90b..10902064786 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java @@ -29,7 +29,7 @@ import java.util.Comparator; /** * An internal {@link Histogram.Order} strategy which is identified by a unique id. */ -class InternalOrder extends Histogram.Order { +public class InternalOrder extends Histogram.Order { final byte id; final String key; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java index 49bba5a0ecb..0f0f9225635 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.util.List; import java.util.Map; /** @@ -33,7 +32,7 @@ public abstract class ReducerBuilder> implements ToX private final String name; protected final String type; - private List bucketsPaths; + private String[] bucketsPaths; private Map metaData; /** @@ -54,7 +53,7 @@ public abstract class ReducerBuilder> implements ToX /** * Sets the paths to the buckets to use for this reducer */ - public B setBucketsPaths(List bucketsPaths) { + public B setBucketsPaths(String... bucketsPaths) { this.bucketsPaths = bucketsPaths; return (B) this; } From f68bce51f1d7d83e05ddb3fdeb73a91bb9c5c419 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Feb 2015 09:05:24 +0000 Subject: [PATCH 022/236] Tests for derivative reducer Most tests have been marked with @AwaitsFix since they require functionality to be implemented before they will pass --- .../derivative/DerivativeReducer.java | 3 +- .../reducers/DateDerivativeTests.java | 321 ++++++++++ .../reducers/DerivativeTests.java | 568 ++++++++++++++++++ 3 files changed, 891 insertions(+), 1 deletion(-) create mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java create mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index a0a3d9cb425..730a85a2d41 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -116,7 +116,8 @@ public class DerivativeReducer extends Reducer { } lastBucketValue = thisBucketValue; } - return factory.create(histo.getName(), newBuckets, null, 1, null, null, false, new ArrayList(), histo.getMetaData()); // NOCOMMIT get order, minDocCount, emptyBucketInfo etc. from histo + return factory.create(histo.getName(), newBuckets, histo.getOrder(), 1, null, null, false, new ArrayList(), + histo.getMetaData()); // NOCOMMIT get order, minDocCount, emptyBucketInfo etc. from histo } private Double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket) { diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java new file mode 100644 index 00000000000..ec131b3a609 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java @@ -0,0 +1,321 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.index.mapper.core.DateFieldMapper; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; +import org.junit.After; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +//@AwaitsFix(bugUrl = "Fix factory selection for serialisation of Internal derivative") +public class DateDerivativeTests extends ElasticsearchIntegrationTest { + + private DateTime date(int month, int day) { + return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); + } + + private DateTime date(String date) { + return DateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date); + } + + private static String format(DateTime date, String pattern) { + return DateTimeFormat.forPattern(pattern).print(date); + } + + private IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception { + return client().prepareIndex(idx, "type").setSource( + jsonBuilder().startObject().field("date", date).field("value", value).startArray("dates").value(date) + .value(date.plusMonths(1).plusDays(1)).endArray().endObject()); + } + + private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { + return client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field("value", value).field("date", date(month, day)).startArray("dates") + .value(date(month, day)).value(date(month + 1, day + 1)).endArray().endObject()); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + // TODO: would be nice to have more random data here + prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet(); + List builders = new ArrayList<>(); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( + jsonBuilder().startObject().field("value", i * 2).endObject())); + } + builders.addAll(Arrays.asList(indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3 + indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3 + indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16 + indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3 + indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16 + indexDoc(3, 23, 6))); // date: Mar 23, dates: Mar 23, Apr 24 + indexRandom(true, builders); + ensureSearchable(); + } + + @After + public void afterEachTest() throws IOException { + internalCluster().wipeIndices("idx2"); + } + + @AwaitsFix(bugUrl = "waiting for derivative to support _count") + // NOCOMMIT + @Test + public void singleValuedField() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0l)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1d)); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1d)); + } + + @Test + public void singleValuedField_WithSubAggregation() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH) + .subAggregation(derivative("deriv").setBucketsPaths("sum")).subAggregation(sum("sum").field("value"))) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Object[] propertiesKeys = (Object[]) histo.getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) histo.getProperty("_count"); + Object[] propertiesCounts = (Object[]) histo.getProperty("sum.value"); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(1.0)); + SimpleValue deriv = bucket.getAggregations().get("deriv"); + assertThat(deriv, nullValue()); + assertThat((DateTime) propertiesKeys[0], equalTo(key)); + assertThat((long) propertiesDocCounts[0], equalTo(1l)); + assertThat((double) propertiesCounts[0], equalTo(1.0)); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(5.0)); + deriv = bucket.getAggregations().get("deriv"); + assertThat(deriv, notNullValue()); + assertThat(deriv.value(), equalTo(4.0)); + assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(4.0)); + assertThat((DateTime) propertiesKeys[1], equalTo(key)); + assertThat((long) propertiesDocCounts[1], equalTo(2l)); + assertThat((double) propertiesCounts[1], equalTo(5.0)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(15.0)); + deriv = bucket.getAggregations().get("deriv"); + assertThat(deriv, notNullValue()); + assertThat(deriv.value(), equalTo(10.0)); + assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(10.0)); + assertThat((DateTime) propertiesKeys[2], equalTo(key)); + assertThat((long) propertiesDocCounts[2], equalTo(3l)); + assertThat((double) propertiesCounts[2], equalTo(15.0)); + } + + @AwaitsFix(bugUrl = "waiting for derivative to support _count") + // NOCOMMIT + @Test + public void multiValuedField() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("dates").interval(DateHistogramInterval.MONTH) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(2.0)); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(2.0)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-2.0)); + } + + @AwaitsFix(bugUrl = "waiting for derivative to support _count") + // NOCOMMIT + @Test + public void unmapped() throws Exception { + SearchResponse response = client() + .prepareSearch("idx_unmapped") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + assertThat(deriv.getBuckets().size(), equalTo(0)); + } + + @AwaitsFix(bugUrl = "waiting for derivative to support _count") + // NOCOMMIT + @Test + public void partiallyUnmapped() throws Exception { + SearchResponse response = client() + .prepareSearch("idx", "idx_unmapped") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(2)); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1.0)); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1.0)); + } + +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java new file mode 100644 index 00000000000..3b51bbbf6b2 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -0,0 +1,568 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.hamcrest.Matchers; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class DerivativeTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + private static final String MULTI_VALUED_FIELD_NAME = "l_values"; + + static int numDocs; + static int interval; + static int numValueBuckets, numValuesBuckets; + static int numFirstDerivValueBuckets, numFirstDerivValuesBuckets; + static long[] valueCounts, valuesCounts; + static long[] firstDerivValueCounts, firstDerivValuesCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + numDocs = randomIntBetween(6, 20); + interval = randomIntBetween(2, 5); + + numValueBuckets = numDocs / interval + 1; + valueCounts = new long[numValueBuckets]; + for (int i = 0; i < numDocs; i++) { + final int bucket = (i + 1) / interval; + valueCounts[bucket]++; + } + + numValuesBuckets = (numDocs + 1) / interval + 1; + valuesCounts = new long[numValuesBuckets]; + for (int i = 0; i < numDocs; i++) { + final int bucket1 = (i + 1) / interval; + final int bucket2 = (i + 2) / interval; + valuesCounts[bucket1]++; + if (bucket1 != bucket2) { + valuesCounts[bucket2]++; + } + } + + numFirstDerivValueBuckets = numValueBuckets - 1; + firstDerivValueCounts = new long[numFirstDerivValueBuckets]; + long lastValueCount = -1; + for (int i = 0; i < numValueBuckets; i++) { + long thisValue = valueCounts[i]; + if (lastValueCount != -1) { + long diff = thisValue - lastValueCount; + firstDerivValueCounts[i - 1] = diff; + } + lastValueCount = thisValue; + } + + numFirstDerivValuesBuckets = numValuesBuckets - 1; + firstDerivValuesCounts = new long[numFirstDerivValuesBuckets]; + long lastValuesCount = -1; + for (int i = 0; i < numValuesBuckets; i++) { + long thisValue = valuesCounts[i]; + if (lastValuesCount != -1) { + long diff = thisValue - lastValuesCount; + firstDerivValuesCounts[i - 1] = diff; + } + lastValuesCount = thisValue; + } + + List builders = new ArrayList<>(); + + for (int i = 0; i < numDocs; i++) { + builders.add(client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i + 1).startArray(MULTI_VALUED_FIELD_NAME).value(i + 1) + .value(i + 2).endArray().field("tag", "tag" + i).endObject())); + } + + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 0).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 0).endObject())); + + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 1).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).endObject())); + + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 2).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).endObject())); + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 3).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).endObject())); + + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 4).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).endObject())); + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 5).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).endObject())); + + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 6).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).endObject())); + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 7).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).endObject())); + + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 8).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 9).endObject())); + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 9).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 9).endObject())); + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 10).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 9).endObject())); + + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 11).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 10).endObject())); + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 12).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 10).endObject())); + + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 13).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 11).endObject())); + + indexRandom(true, builders); + ensureSearchable(); + } + + @AwaitsFix(bugUrl="waiting for derivative to support _count") // NOCOMMIT + @Test + public void singleValuedField() { + + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numFirstDerivValueBuckets)); + + for (int i = 0; i < numFirstDerivValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i])); + } + } + + @Test + public void singleValuedField_WithSubAggregation() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); + Object[] propertiesKeys = (Object[]) deriv.getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) deriv.getProperty("_count"); + Object[] propertiesSumCounts = (Object[]) deriv.getProperty("sum.value"); + + List buckets = new ArrayList<>(deriv.getBuckets()); + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long s = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == i) { + s += j + 1; + } + } + assertThat(sum.getValue(), equalTo((double) s)); + if (i > 0) { + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + assertThat(sumDeriv, notNullValue()); + long s1 = 0; + long s2 = 0; + for (int j = 0; j < numDocs; ++j) { + if ((j + 1) / interval == i - 1) { + s1 += j + 1; + } + if ((j + 1) / interval == i) { + s2 += j + 1; + } + } + long sumDerivValue = s2 - s1; + assertThat(sumDeriv.value(), equalTo((double) sumDerivValue)); + assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), + equalTo((double) sumDerivValue)); + } + assertThat((long) propertiesKeys[i], equalTo((long) i * interval)); + assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i])); + assertThat((double) propertiesSumCounts[i], equalTo((double) s)); + } + } + + @AwaitsFix(bugUrl="waiting for derivative to support _count") // NOCOMMIT + @Test + public void multiValuedField() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(deriv.getBuckets().size(), equalTo(numFirstDerivValuesBuckets)); + + for (int i = 0; i < numFirstDerivValuesBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValuesCounts[i])); + } + } + + @AwaitsFix(bugUrl="waiting for derivative to support _count") // NOCOMMIT + @Test + public void unmapped() throws Exception { + SearchResponse response = client().prepareSearch("idx_unmapped") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + assertThat(deriv.getBuckets().size(), equalTo(0)); + } + + @AwaitsFix(bugUrl="waiting for derivative to support _count") // NOCOMMIT + @Test + public void partiallyUnmapped() throws Exception { + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(deriv.getBuckets().size(), equalTo(numFirstDerivValueBuckets)); + + for (int i = 0; i < numFirstDerivValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i])); + } + } + + @AwaitsFix(bugUrl="waiting for derivative to support _count and gaps") // NOCOMMIT + @Test + public void singleValuedFieldWithGaps() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))) + .execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = (List) deriv.getBuckets(); + assertThat(buckets.size(), equalTo(5)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(1l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1d)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(4l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(9l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-1d)); + + bucket = buckets.get(4); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-1d)); + } + + @AwaitsFix(bugUrl="waiting for derivative to support _count and insert_zeros gap policy") // NOCOMMIT + @Test + public void singleValuedFieldWithGaps_insertZeros() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))) // NOCOMMIT add insert_zeros gapPolicy + .execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = (List) deriv.getBuckets(); + assertThat(buckets.size(), equalTo(11)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(1l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1d)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(2l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-2d)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(3l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(2d)); + + bucket = buckets.get(4); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(4l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(5); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(5l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-2d)); + + bucket = buckets.get(6); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(6l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(7); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(7l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(8); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(8l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(3d)); + + bucket = buckets.get(9); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(9l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-1d)); + + bucket = buckets.get(10); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-1d)); + } + + @AwaitsFix(bugUrl="waiting for derivative to support _count and interpolate gapPolicy") // NOCOMMIT + @Test + public void singleValuedFieldWithGaps_interpolate() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); // NOCOMMIT add interpolate gapPolicy + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); + + InternalHistogram deriv = searchResponse.getAggregations().get("deriv"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = (List) deriv.getBuckets(); + assertThat(buckets.size(), equalTo(7)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(1l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1d)); + + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(2l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(4l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(4); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(5l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0.25d)); + + bucket = buckets.get(5); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(9l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-1d)); + + bucket = buckets.get(6); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-1d)); + } + +} From 269d4bc30ed78ad0a07f3ac420d629c2f0ca595c Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Feb 2015 10:07:56 +0000 Subject: [PATCH 023/236] InternalHistogram.Factory.create() can now work from prototype Another InternalHistogram instance can be passed into the method with the buckets and the name and will be used to set all the options such as minDocCount, formatter, Order etc. --- .../bucket/histogram/InternalDateHistogram.java | 13 ------------- .../bucket/histogram/InternalHistogram.java | 9 +++++---- .../bucket/histogram/InternalOrder.java | 2 +- 3 files changed, 6 insertions(+), 18 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 0457ad9e92c..503d3626b2f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -22,15 +22,10 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo; -import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import java.util.List; -import java.util.Map; - /** * */ @@ -75,14 +70,6 @@ public class InternalDateHistogram { return TYPE.name(); } - @Override - public InternalHistogram create(String name, List buckets, InternalOrder order, - long minDocCount, - EmptyBucketInfo emptyBucketInfo, @Nullable ValueFormatter formatter, boolean keyed, List reducers, - Map metaData) { - return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, reducers, metaData); - } - @Override public InternalDateHistogram.Bucket createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 4171cc3f514..ad17e3796fe 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -248,6 +248,11 @@ public class InternalHistogram extends Inter return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, reducers, metaData); } + public InternalHistogram create(String name, List buckets, InternalHistogram prototype) { + return new InternalHistogram<>(name, buckets, prototype.order, prototype.minDocCount, prototype.emptyBucketInfo, + prototype.formatter, prototype.keyed, this, prototype.reducers(), prototype.metaData); + } + public B createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { if (key instanceof Number) { @@ -301,10 +306,6 @@ public class InternalHistogram extends Inter return factory; } - public InternalOrder getOrder() { - return order; - } - private static class IteratorAndCurrent { private final Iterator iterator; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java index 10902064786..9d503a8e90b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalOrder.java @@ -29,7 +29,7 @@ import java.util.Comparator; /** * An internal {@link Histogram.Order} strategy which is identified by a unique id. */ -public class InternalOrder extends Histogram.Order { +class InternalOrder extends Histogram.Order { final byte id; final String key; From 19cdfe256ecae064bb2ed6e09dc4818f24898edd Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Feb 2015 10:08:25 +0000 Subject: [PATCH 024/236] DerivativeReducer now copies histogram options from old histogram instance --- .../aggregations/reducers/derivative/DerivativeReducer.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 730a85a2d41..40397d8f46e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -97,7 +97,6 @@ public class DerivativeReducer extends Reducer { List newBuckets = new ArrayList<>(); Double lastBucketValue = null; - // NOCOMMIT this needs to be improved so that the aggs are cloned correctly to ensure aggs are fully immutable. for (InternalHistogram.Bucket bucket : buckets) { Double thisBucketValue = resolveBucketValue(histo, bucket); if (lastBucketValue != null) { @@ -116,8 +115,7 @@ public class DerivativeReducer extends Reducer { } lastBucketValue = thisBucketValue; } - return factory.create(histo.getName(), newBuckets, histo.getOrder(), 1, null, null, false, new ArrayList(), - histo.getMetaData()); // NOCOMMIT get order, minDocCount, emptyBucketInfo etc. from histo + return factory.create(histo.getName(), newBuckets, histo); } private Double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket) { From 3375c02b42f834a7aa4656a993ca2fac5307c383 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Feb 2015 11:14:47 +0000 Subject: [PATCH 025/236] Added support for _count and _key as bucketsPaths --- .../search/aggregations/AggregatorFactories.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 258b90c2653..628fe3144a9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -157,7 +157,7 @@ public class AggregatorFactories { } public AggregatorFactories build() { - if (factories.isEmpty()) { + if (factories.isEmpty() && reducerFactories.isEmpty()) { return EMPTY; } List orderedReducers = resolveReducerOrder(this.reducerFactories, this.factories); @@ -212,7 +212,7 @@ public class AggregatorFactories { String[] bucketsPaths = factory.getBucketsPaths(); for (String bucketsPath : bucketsPaths) { ReducerFactory matchingFactory = reducerFactoriesMap.get(bucketsPath); - if (aggFactoryNames.contains(bucketsPath)) { + if (bucketsPath.equals("_count") || bucketsPath.equals("_key") || aggFactoryNames.contains(bucketsPath)) { continue; } else if (matchingFactory != null) { resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, temporarilyMarked, From 6c12cfd4657070aeac499eede1372ee9e354a849 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Feb 2015 11:15:04 +0000 Subject: [PATCH 026/236] updated derivative tests to test _count --- .../reducers/DateDerivativeTests.java | 71 ++++++++++++------- .../reducers/DerivativeTests.java | 55 ++++++++------ 2 files changed, 78 insertions(+), 48 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java index ec131b3a609..ad1c131c885 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.reducers; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.index.mapper.core.DateFieldMapper; @@ -105,8 +104,6 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { internalCluster().wipeIndices("idx2"); } - @AwaitsFix(bugUrl = "waiting for derivative to support _count") - // NOCOMMIT @Test public void singleValuedField() throws Exception { SearchResponse response = client() @@ -121,22 +118,30 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = deriv.getBuckets(); - assertThat(buckets.size(), equalTo(2)); + assertThat(buckets.size(), equalTo(3)); DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(1l)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(1d)); + assertThat(docCountDeriv, nullValue()); key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(2l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1d)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(1d)); @@ -212,8 +217,6 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { assertThat((double) propertiesCounts[2], equalTo(15.0)); } - @AwaitsFix(bugUrl = "waiting for derivative to support _count") - // NOCOMMIT @Test public void multiValuedField() throws Exception { SearchResponse response = client() @@ -228,23 +231,22 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = deriv.getBuckets(); - assertThat(buckets.size(), equalTo(3)); + assertThat(buckets.size(), equalTo(4)); DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0l)); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + assertThat(bucket.getDocCount(), equalTo(1l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(2.0)); + assertThat(docCountDeriv, nullValue()); key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(3l)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); @@ -254,15 +256,23 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(5l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(2.0)); + + key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(-2.0)); } - @AwaitsFix(bugUrl = "waiting for derivative to support _count") - // NOCOMMIT @Test public void unmapped() throws Exception { SearchResponse response = client() @@ -279,8 +289,6 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv.getBuckets().size(), equalTo(0)); } - @AwaitsFix(bugUrl = "waiting for derivative to support _count") - // NOCOMMIT @Test public void partiallyUnmapped() throws Exception { SearchResponse response = client() @@ -295,23 +303,32 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = deriv.getBuckets(); - assertThat(buckets.size(), equalTo(2)); + assertThat(buckets.size(), equalTo(3)); DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0l)); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + assertThat(bucket.getDocCount(), equalTo(1l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(1.0)); + assertThat(docCountDeriv, nullValue()); key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(2l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1.0)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 3b51bbbf6b2..aadc05cd003 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -44,6 +44,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; @ElasticsearchIntegrationTest.SuiteScopeTest public class DerivativeTests extends ElasticsearchIntegrationTest { @@ -157,7 +158,6 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { ensureSearchable(); } - @AwaitsFix(bugUrl="waiting for derivative to support _count") // NOCOMMIT @Test public void singleValuedField() { @@ -173,17 +173,21 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = deriv.getBuckets(); - assertThat(buckets.size(), equalTo(numFirstDerivValueBuckets)); + assertThat(buckets.size(), equalTo(numValueBuckets)); - for (int i = 0; i < numFirstDerivValueBuckets; ++i) { + for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i])); + if (i > 0) { + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1])); + } else { + assertThat(docCountDeriv, nullValue()); + } } } @@ -222,9 +226,9 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { s += j + 1; } } + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); assertThat(sum.getValue(), equalTo((double) s)); if (i > 0) { - SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); assertThat(sumDeriv, notNullValue()); long s1 = 0; long s2 = 0; @@ -240,6 +244,8 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(sumDeriv.value(), equalTo((double) sumDerivValue)); assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo((double) sumDerivValue)); + } else { + assertThat(sumDeriv, nullValue()); } assertThat((long) propertiesKeys[i], equalTo((long) i * interval)); assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i])); @@ -247,7 +253,6 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } - @AwaitsFix(bugUrl="waiting for derivative to support _count") // NOCOMMIT @Test public void multiValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") @@ -262,21 +267,24 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = deriv.getBuckets(); - assertThat(deriv.getBuckets().size(), equalTo(numFirstDerivValuesBuckets)); + assertThat(deriv.getBuckets().size(), equalTo(numValuesBuckets)); - for (int i = 0; i < numFirstDerivValuesBuckets; ++i) { + for (int i = 0; i < numValuesBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo((double) firstDerivValuesCounts[i])); + if (i > 0) { + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValuesCounts[i - 1])); + } else { + assertThat(docCountDeriv, nullValue()); + } } } - @AwaitsFix(bugUrl="waiting for derivative to support _count") // NOCOMMIT @Test public void unmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") @@ -293,7 +301,6 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv.getBuckets().size(), equalTo(0)); } - @AwaitsFix(bugUrl="waiting for derivative to support _count") // NOCOMMIT @Test public void partiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") @@ -308,21 +315,25 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = deriv.getBuckets(); - assertThat(deriv.getBuckets().size(), equalTo(numFirstDerivValueBuckets)); + assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); - for (int i = 0; i < numFirstDerivValueBuckets; ++i) { + for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i])); + if (i > 0) { + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1])); + } else { + assertThat(docCountDeriv, nullValue()); + } } } - @AwaitsFix(bugUrl="waiting for derivative to support _count and gaps") // NOCOMMIT + @AwaitsFix(bugUrl="waiting for derivative to gaps") // NOCOMMIT @Test public void singleValuedFieldWithGaps() throws Exception { SearchResponse searchResponse = client() @@ -382,7 +393,8 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(docCountDeriv.value(), equalTo(-1d)); } - @AwaitsFix(bugUrl="waiting for derivative to support _count and insert_zeros gap policy") // NOCOMMIT + @AwaitsFix(bugUrl = "waiting for derivative to support insert_zeros gap policy") + // NOCOMMIT @Test public void singleValuedFieldWithGaps_insertZeros() throws Exception { SearchResponse searchResponse = client() @@ -490,7 +502,8 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(docCountDeriv.value(), equalTo(-1d)); } - @AwaitsFix(bugUrl="waiting for derivative to support _count and interpolate gapPolicy") // NOCOMMIT + @AwaitsFix(bugUrl = "waiting for derivative to support interpolate gapPolicy") + // NOCOMMIT @Test public void singleValuedFieldWithGaps_interpolate() throws Exception { SearchResponse searchResponse = client() From f03fe5b8b6aa36603e0539fb668c54d9d8b0d250 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Feb 2015 11:31:02 +0000 Subject: [PATCH 027/236] Cleaning up NOCOMMITs which are resolved --- .../search/aggregations/AggregatorFactories.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 628fe3144a9..552ff49fe1d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -206,7 +206,7 @@ public class AggregatorFactories { List orderedReducers, List unmarkedFactories, Set temporarilyMarked, ReducerFactory factory) { if (temporarilyMarked.contains(factory)) { - throw new ElasticsearchIllegalStateException("Cyclical dependancy found with reducer [" + factory.getName() + "]"); // NOCOMMIT is this the right Exception to throw? + throw new ElasticsearchIllegalStateException("Cyclical dependancy found with reducer [" + factory.getName() + "]"); } else if (unmarkedFactories.contains(factory)) { temporarilyMarked.add(factory); String[] bucketsPaths = factory.getBucketsPaths(); @@ -218,7 +218,7 @@ public class AggregatorFactories { resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, temporarilyMarked, matchingFactory); } else { - throw new ElasticsearchIllegalStateException("No reducer found for path [" + bucketsPath + "]"); // NOCOMMIT is this the right Exception to throw? + throw new ElasticsearchIllegalStateException("No reducer found for path [" + bucketsPath + "]"); } } unmarkedFactories.remove(factory); From 7f844660a834ba3d78c491c9f9e9235930f12b9d Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Feb 2015 11:31:24 +0000 Subject: [PATCH 028/236] Cleaning up NOCOMMITs --- .../search/aggregations/reducers/DerivativeTests.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index aadc05cd003..5cdf2a8cee8 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -333,7 +333,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } - @AwaitsFix(bugUrl="waiting for derivative to gaps") // NOCOMMIT + @AwaitsFix(bugUrl = "waiting for derivative to gaps") @Test public void singleValuedFieldWithGaps() throws Exception { SearchResponse searchResponse = client() @@ -341,7 +341,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .setQuery(matchAllQuery()) .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))) // NOCOMMITadd ignore gapPolicy .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); @@ -394,7 +394,6 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } @AwaitsFix(bugUrl = "waiting for derivative to support insert_zeros gap policy") - // NOCOMMIT @Test public void singleValuedFieldWithGaps_insertZeros() throws Exception { SearchResponse searchResponse = client() @@ -503,7 +502,6 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } @AwaitsFix(bugUrl = "waiting for derivative to support interpolate gapPolicy") - // NOCOMMIT @Test public void singleValuedFieldWithGaps_interpolate() throws Exception { SearchResponse searchResponse = client() From 5a2c4ab5ae9ab3275867bd4bb3509100fde6a43c Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Feb 2015 11:37:28 +0000 Subject: [PATCH 029/236] Added test for second_derivative --- .../reducers/DerivativeTests.java | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 5cdf2a8cee8..11bac929081 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -56,8 +56,10 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { static int interval; static int numValueBuckets, numValuesBuckets; static int numFirstDerivValueBuckets, numFirstDerivValuesBuckets; + static int numSecondDerivValueBuckets; static long[] valueCounts, valuesCounts; static long[] firstDerivValueCounts, firstDerivValuesCounts; + static long[] secondDerivValueCounts; @Override public void setupSuiteScopeCluster() throws Exception { @@ -97,6 +99,18 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { lastValueCount = thisValue; } + numSecondDerivValueBuckets = numFirstDerivValueBuckets - 1; + secondDerivValueCounts = new long[numSecondDerivValueBuckets]; + long lastFirstDerivativeValueCount = -1; + for (int i = 0; i < numFirstDerivValueBuckets; i++) { + long thisFirstDerivativeValue = firstDerivValueCounts[i]; + if (lastFirstDerivativeValueCount != -1) { + long diff = thisFirstDerivativeValue - lastFirstDerivativeValueCount; + secondDerivValueCounts[i - 1] = diff; + } + lastFirstDerivativeValueCount = thisFirstDerivativeValue; + } + numFirstDerivValuesBuckets = numValuesBuckets - 1; firstDerivValuesCounts = new long[numFirstDerivValuesBuckets]; long lastValuesCount = -1; @@ -191,6 +205,47 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } + @Test + public void singleValuedField_secondDerivative() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count")) + .subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (i > 0) { + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1])); + } else { + assertThat(docCountDeriv, nullValue()); + } + SimpleValue docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv"); + if (i > 1) { + assertThat(docCount2ndDeriv, notNullValue()); + assertThat(docCount2ndDeriv.value(), equalTo((double) secondDerivValueCounts[i - 2])); + } else { + assertThat(docCount2ndDeriv, nullValue()); + } + } + } + @Test public void singleValuedField_WithSubAggregation() throws Exception { SearchResponse response = client() From 7c046d28bf4f077f9bbe0b5e9069c74b2319d212 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 2 Mar 2015 14:53:05 +0000 Subject: [PATCH 030/236] Implementation of GapPolicy for derivative --- .../reducers/InternalSimpleValue.java | 2 +- .../derivative/DerivativeBuilder.java | 10 + .../reducers/derivative/DerivativeParser.java | 8 +- .../derivative/DerivativeReducer.java | 99 ++++++++- .../reducers/DerivativeTests.java | 189 ++++++++---------- 5 files changed, 193 insertions(+), 115 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java index 7d204c007c6..9641f187c6c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java @@ -93,7 +93,7 @@ public class InternalSimpleValue extends InternalNumericMetricsAggregation.Singl @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - boolean hasValue = !Double.isInfinite(value); + boolean hasValue = !(Double.isInfinite(value) || Double.isNaN(value)); builder.field(CommonFields.VALUE, hasValue ? value : null); if (hasValue && valueFormatter != null) { builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(value)); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java index 87165c32ac0..f868e673b1d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java @@ -21,12 +21,14 @@ package org.elasticsearch.search.aggregations.reducers.derivative; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer.GapPolicy; import java.io.IOException; public class DerivativeBuilder extends ReducerBuilder { private String format; + private GapPolicy gapPolicy; public DerivativeBuilder(String name) { super(name, DerivativeReducer.TYPE.name()); @@ -37,11 +39,19 @@ public class DerivativeBuilder extends ReducerBuilder { return this; } + public DerivativeBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { if (format != null) { builder.field(DerivativeParser.FORMAT.getPreferredName(), format); } + if (gapPolicy != null) { + builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } return builder; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index 8a562050dcb..6b6b826ec6f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer.GapPolicy; import org.elasticsearch.search.aggregations.support.format.ValueFormat; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.internal.SearchContext; @@ -33,7 +34,9 @@ import java.util.ArrayList; import java.util.List; public class DerivativeParser implements Reducer.Parser { + public static final ParseField FORMAT = new ParseField("format"); + public static final ParseField GAP_POLICY = new ParseField("gap_policy"); @Override public String type() { @@ -46,6 +49,7 @@ public class DerivativeParser implements Reducer.Parser { String currentFieldName = null; String[] bucketsPaths = null; String format = null; + GapPolicy gapPolicy = GapPolicy.IGNORE; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -55,6 +59,8 @@ public class DerivativeParser implements Reducer.Parser { format = parser.text(); } else if (BUCKETS_PATH.match(currentFieldName)) { bucketsPaths = new String[] { parser.text() }; + } else if (GAP_POLICY.match(currentFieldName)) { + gapPolicy = GapPolicy.parse(context, parser.text()); } else { throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); @@ -86,7 +92,7 @@ public class DerivativeParser implements Reducer.Parser { formatter = ValueFormat.Patternable.Number.format(format).formatter(); } - return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter); + return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 40397d8f46e..c0d96f4056b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -24,8 +24,10 @@ import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -44,9 +46,11 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; @@ -75,13 +79,16 @@ public class DerivativeReducer extends Reducer { }; private ValueFormatter formatter; + private GapPolicy gapPolicy; public DerivativeReducer() { } - public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, Map metadata) { + public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; + this.gapPolicy = gapPolicy; } @Override @@ -100,9 +107,6 @@ public class DerivativeReducer extends Reducer { for (InternalHistogram.Bucket bucket : buckets) { Double thisBucketValue = resolveBucketValue(histo, bucket); if (lastBucketValue != null) { - if (thisBucketValue == null) { - throw new ElasticsearchIllegalStateException("FOUND GAP IN DATA"); // NOCOMMIT deal with gaps in data - } double diff = thisBucketValue - lastBucketValue; List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); @@ -122,13 +126,30 @@ public class DerivativeReducer extends Reducer { try { Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPaths()[0]) .getPathElementsAsStringList()); - if (propertyValue instanceof Number) { - return ((Number) propertyValue).doubleValue(); - } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) { - return ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value(); - } else { + if (propertyValue == null) { throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + " must reference either a number value or a single value numeric metric aggregation"); + } else { + double value; + if (propertyValue instanceof Number) { + value = ((Number) propertyValue).doubleValue(); + } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) { + value = ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value(); + } else { + throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + + " must reference either a number value or a single value numeric metric aggregation"); + } + if (Double.isInfinite(value) || Double.isNaN(value)) { + switch (gapPolicy) { + case INSERT_ZEROS: + return 0.0; + case IGNORE: + default: + return Double.NaN; + } + } else { + return value; + } } } catch (InvalidAggregationPathException e) { return null; @@ -138,27 +159,83 @@ public class DerivativeReducer extends Reducer { @Override public void doReadFrom(StreamInput in) throws IOException { formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); } @Override public void doWriteTo(StreamOutput out) throws IOException { ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); } public static class Factory extends ReducerFactory { private final ValueFormatter formatter; + private GapPolicy gapPolicy; - public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter) { + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy) { super(name, TYPE.name(), bucketsPaths); this.formatter = formatter; + this.gapPolicy = gapPolicy; } @Override protected Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new DerivativeReducer(name, bucketsPaths, formatter, metaData); + return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, metaData); } } + + public static enum GapPolicy { + INSERT_ZEROS((byte) 0, "insert_zeros"), IGNORE((byte) 1, "ignore"); + + public static GapPolicy parse(SearchContext context, String text) { + GapPolicy result = null; + for (GapPolicy policy : values()) { + if (policy.parseField.match(text)) { + if (result == null) { + result = policy; + } else { + throw new ElasticsearchIllegalStateException("Text can be parsed to 2 different gap policies: text=[" + text + + "], " + "policies=" + Arrays.asList(result, policy)); + } + } + } + if (result == null) { + final List validNames = new ArrayList<>(); + for (GapPolicy policy : values()) { + validNames.add(policy.getName()); + } + throw new SearchParseException(context, "Invalid gap policy: [" + text + "], accepted values: " + validNames); + } + return result; + } + + private final byte id; + private final ParseField parseField; + + private GapPolicy(byte id, String name) { + this.id = id; + this.parseField = new ParseField(name); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(id); + } + + public static GapPolicy readFrom(StreamInput in) throws IOException { + byte id = in.readByte(); + for (GapPolicy gapPolicy : values()) { + if (id == gapPolicy.id) { + return gapPolicy; + } + } + throw new IllegalStateException("Unknown GapPolicy with id [" + id + "]"); + } + + public String getName() { + return parseField.getPreferredName(); + } + } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 11bac929081..7d2d5500cd1 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -19,13 +19,13 @@ package org.elasticsearch.search.aggregations.reducers; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer.GapPolicy; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; @@ -388,15 +388,14 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } - @AwaitsFix(bugUrl = "waiting for derivative to gaps") @Test public void singleValuedFieldWithGaps() throws Exception { SearchResponse searchResponse = client() .prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))) // NOCOMMITadd ignore gapPolicy + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); @@ -405,91 +404,30 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(deriv, Matchers.notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = (List) deriv.getBuckets(); - assertThat(buckets.size(), equalTo(5)); + assertThat(buckets.size(), equalTo(12)); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(0l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(1l)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); + assertThat(docCountDeriv, nullValue()); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(1l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(1d)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(4l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(1l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(0d)); - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(9l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-1d)); - - bucket = buckets.get(4); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-1d)); - } - - @AwaitsFix(bugUrl = "waiting for derivative to support insert_zeros gap policy") - @Test - public void singleValuedFieldWithGaps_insertZeros() throws Exception { - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))) // NOCOMMIT add insert_zeros gapPolicy - .execute().actionGet(); - - assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); - - InternalHistogram deriv = searchResponse.getAggregations().get("histo"); - assertThat(deriv, Matchers.notNullValue()); - assertThat(deriv.getName(), equalTo("histo")); - List buckets = (List) deriv.getBuckets(); - assertThat(buckets.size(), equalTo(11)); - - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(0l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(1l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(1d)); - bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(2l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(2l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-2d)); + assertThat(docCountDeriv.value(), equalTo(1d)); bucket = buckets.get(3); assertThat(bucket, notNullValue()); @@ -497,23 +435,23 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(bucket.getDocCount(), equalTo(0l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(2d)); + assertThat(docCountDeriv.value(), equalTo(-2d)); bucket = buckets.get(4); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(4l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(2l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); + assertThat(docCountDeriv.value(), equalTo(2d)); bucket = buckets.get(5); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(5l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(2l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-2d)); + assertThat(docCountDeriv.value(), equalTo(0d)); bucket = buckets.get(6); assertThat(bucket, notNullValue()); @@ -521,7 +459,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(bucket.getDocCount(), equalTo(0l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); + assertThat(docCountDeriv.value(), equalTo(-2d)); bucket = buckets.get(7); assertThat(bucket, notNullValue()); @@ -537,95 +475,142 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertThat(bucket.getDocCount(), equalTo(0l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(3d)); + assertThat(docCountDeriv.value(), equalTo(0d)); bucket = buckets.get(9); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(9l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(3l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-1d)); + assertThat(docCountDeriv.value(), equalTo(3d)); bucket = buckets.get(10); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(2l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-1d)); + + bucket = buckets.get(11); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(11l)); + assertThat(bucket.getDocCount(), equalTo(1l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(-1d)); } - @AwaitsFix(bugUrl = "waiting for derivative to support interpolate gapPolicy") @Test - public void singleValuedFieldWithGaps_interpolate() throws Exception { + public void singleValuedFieldWithGaps_insertZeros() throws Exception { SearchResponse searchResponse = client() .prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); // NOCOMMIT add interpolate gapPolicy + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(GapPolicy.INSERT_ZEROS))) + .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); - InternalHistogram deriv = searchResponse.getAggregations().get("deriv"); + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); assertThat(deriv, Matchers.notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = (List) deriv.getBuckets(); - assertThat(buckets.size(), equalTo(7)); + assertThat(buckets.size(), equalTo(12)); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(0l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(1l)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); + assertThat(docCountDeriv, nullValue()); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(1l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(1l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(1d)); + assertThat(docCountDeriv.value(), equalTo(0d)); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(2l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(2l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); + assertThat(docCountDeriv.value(), equalTo(1d)); bucket = buckets.get(3); assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(3l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-2d)); + + bucket = buckets.get(4); + assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(4l)); + assertThat(bucket.getDocCount(), equalTo(2l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(2d)); + + bucket = buckets.get(5); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(5l)); + assertThat(bucket.getDocCount(), equalTo(2l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(0d)); + + bucket = buckets.get(6); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(6l)); + assertThat(bucket.getDocCount(), equalTo(0l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-2d)); + + bucket = buckets.get(7); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(7l)); assertThat(bucket.getDocCount(), equalTo(0l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(0d)); - bucket = buckets.get(4); + bucket = buckets.get(8); assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(5l)); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(8l)); assertThat(bucket.getDocCount(), equalTo(0l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0.25d)); + assertThat(docCountDeriv.value(), equalTo(0d)); - bucket = buckets.get(5); + bucket = buckets.get(9); assertThat(bucket, notNullValue()); assertThat(((Number) bucket.getKey()).longValue(), equalTo(9l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(bucket.getDocCount(), equalTo(3l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(3d)); + + bucket = buckets.get(10); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); + assertThat(bucket.getDocCount(), equalTo(2l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(-1d)); - bucket = buckets.get(6); + bucket = buckets.get(11); assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); - assertThat(bucket.getDocCount(), equalTo(0l)); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(11l)); + assertThat(bucket.getDocCount(), equalTo(1l)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(-1d)); From 3131e01c9d0264a5168e12395db513398e2eb7fb Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 2 Mar 2015 17:27:02 -0500 Subject: [PATCH 031/236] Move GapPolicy and resolveBucketValues() to static helper methods Will allow many reducers to share the same helper functionality without repeating code. Chose to put these in static helpers instead of adding to Reducer base class. I can imagine other reducers that aren't time-based (or don't care about contiguous buckets), which would make things like gap policy useless. Since these seemed more like helpers than inherent traits of a Reducer, they went into their own static class. Closes #9954 --- .../aggregations/reducers/BucketHelpers.java | 160 ++++++++++++++++++ .../derivative/DerivativeBuilder.java | 3 +- .../reducers/derivative/DerivativeParser.java | 3 +- .../derivative/DerivativeReducer.java | 106 +----------- .../reducers/DerivativeTests.java | 3 +- 5 files changed, 171 insertions(+), 104 deletions(-) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java new file mode 100644 index 00000000000..145ff1dea1f --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java @@ -0,0 +1,160 @@ +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.AggregationExecutionException; +import org.elasticsearch.search.aggregations.InvalidAggregationPathException; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * A set of static helpers to simplify working with aggregation buckets, in particular + * providing utilities that help reducers. + */ +public class BucketHelpers { + + /** + * A gap policy determines how "holes" in a set of buckets should be handled. For example, + * a date_histogram might have empty buckets due to no data existing for that time interval. + * This can cause problems for operations like a derivative, which relies on a continuous + * function. + * + * "insert_zeros": empty buckets will be filled with zeros for all metrics + * "ignore": empty buckets will simply be ignored + */ + public static enum GapPolicy { + INSERT_ZEROS((byte) 0, "insert_zeros"), IGNORE((byte) 1, "ignore"); + + /** + * Parse a string GapPolicy into the byte enum + * + * @param context SearchContext this is taking place in + * @param text GapPolicy in string format (e.g. "ignore") + * @return GapPolicy enum + */ + public static GapPolicy parse(SearchContext context, String text) { + GapPolicy result = null; + for (GapPolicy policy : values()) { + if (policy.parseField.match(text)) { + if (result == null) { + result = policy; + } else { + throw new ElasticsearchIllegalStateException("Text can be parsed to 2 different gap policies: text=[" + text + + "], " + "policies=" + Arrays.asList(result, policy)); + } + } + } + if (result == null) { + final List validNames = new ArrayList<>(); + for (GapPolicy policy : values()) { + validNames.add(policy.getName()); + } + throw new SearchParseException(context, "Invalid gap policy: [" + text + "], accepted values: " + validNames); + } + return result; + } + + private final byte id; + private final ParseField parseField; + + private GapPolicy(byte id, String name) { + this.id = id; + this.parseField = new ParseField(name); + } + + /** + * Serialize the GapPolicy to the output stream + * + * @param out + * @throws IOException + */ + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(id); + } + + /** + * Deserialize the GapPolicy from the input stream + * + * @param in + * @return GapPolicy Enum + * @throws IOException + */ + public static GapPolicy readFrom(StreamInput in) throws IOException { + byte id = in.readByte(); + for (GapPolicy gapPolicy : values()) { + if (id == gapPolicy.id) { + return gapPolicy; + } + } + throw new IllegalStateException("Unknown GapPolicy with id [" + id + "]"); + } + + /** + * Return the english-formatted name of the GapPolicy + * + * @return English representation of GapPolicy + */ + public String getName() { + return parseField.getPreferredName(); + } + } + + /** + * Given a path and a set of buckets, this method will return the value inside the agg at + * that path. This is used to extract values for use by reducers (e.g. a derivative might need + * the price for each bucket). If the bucket is empty, the configured GapPolicy is invoked to + * resolve the missing bucket + * + * @param histo A series of agg buckets in the form of a histogram + * @param bucket A specific bucket that a value needs to be extracted from. This bucket should be present + * in the histo parameter + * @param aggPath The path to a particular value that needs to be extracted. This path should point to a metric + * inside the bucket + * @param gapPolicy The gap policy to apply if empty buckets are found + * @return The value extracted from bucket found at aggPath + */ + public static Double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket, + String aggPath, GapPolicy gapPolicy) { + try { + Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(aggPath).getPathElementsAsStringList()); + if (propertyValue == null) { + throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + + " must reference either a number value or a single value numeric metric aggregation"); + } else { + double value; + if (propertyValue instanceof Number) { + value = ((Number) propertyValue).doubleValue(); + } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) { + value = ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value(); + } else { + throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + + " must reference either a number value or a single value numeric metric aggregation"); + } + if (Double.isInfinite(value) || Double.isNaN(value)) { + switch (gapPolicy) { + case INSERT_ZEROS: + return 0.0; + case IGNORE: + default: + return Double.NaN; + } + } else { + return value; + } + } + } catch (InvalidAggregationPathException e) { + return null; + } + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java index f868e673b1d..210d56d4a6f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java @@ -21,10 +21,11 @@ package org.elasticsearch.search.aggregations.reducers.derivative; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; -import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer.GapPolicy; import java.io.IOException; +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; + public class DerivativeBuilder extends ReducerBuilder { private String format; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index 6b6b826ec6f..c4d3aa2a229 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; -import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer.GapPolicy; import org.elasticsearch.search.aggregations.support.format.ValueFormat; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.internal.SearchContext; @@ -33,6 +32,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; + public class DerivativeParser implements Reducer.Parser { public static final ParseField FORMAT = new ParseField("format"); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index c0d96f4056b..1130639a1a2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -22,38 +22,30 @@ package org.elasticsearch.search.aggregations.reducers.derivative; import com.google.common.base.Function; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalStateException; + import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.InvalidAggregationPathException; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; -import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; -import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; -import org.elasticsearch.search.aggregations.reducers.Reducer; -import org.elasticsearch.search.aggregations.reducers.ReducerFactory; -import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.*; import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Map; +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.resolveBucketValue; + public class DerivativeReducer extends Reducer { public final static Type TYPE = new Type("derivative"); @@ -105,7 +97,7 @@ public class DerivativeReducer extends Reducer { List newBuckets = new ArrayList<>(); Double lastBucketValue = null; for (InternalHistogram.Bucket bucket : buckets) { - Double thisBucketValue = resolveBucketValue(histo, bucket); + Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); if (lastBucketValue != null) { double diff = thisBucketValue - lastBucketValue; @@ -122,40 +114,6 @@ public class DerivativeReducer extends Reducer { return factory.create(histo.getName(), newBuckets, histo); } - private Double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket) { - try { - Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(bucketsPaths()[0]) - .getPathElementsAsStringList()); - if (propertyValue == null) { - throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() - + " must reference either a number value or a single value numeric metric aggregation"); - } else { - double value; - if (propertyValue instanceof Number) { - value = ((Number) propertyValue).doubleValue(); - } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) { - value = ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value(); - } else { - throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() - + " must reference either a number value or a single value numeric metric aggregation"); - } - if (Double.isInfinite(value) || Double.isNaN(value)) { - switch (gapPolicy) { - case INSERT_ZEROS: - return 0.0; - case IGNORE: - default: - return Double.NaN; - } - } else { - return value; - } - } - } catch (InvalidAggregationPathException e) { - return null; - } - } - @Override public void doReadFrom(StreamInput in) throws IOException { formatter = ValueFormatterStreams.readOptional(in); @@ -186,56 +144,4 @@ public class DerivativeReducer extends Reducer { } } - - public static enum GapPolicy { - INSERT_ZEROS((byte) 0, "insert_zeros"), IGNORE((byte) 1, "ignore"); - - public static GapPolicy parse(SearchContext context, String text) { - GapPolicy result = null; - for (GapPolicy policy : values()) { - if (policy.parseField.match(text)) { - if (result == null) { - result = policy; - } else { - throw new ElasticsearchIllegalStateException("Text can be parsed to 2 different gap policies: text=[" + text - + "], " + "policies=" + Arrays.asList(result, policy)); - } - } - } - if (result == null) { - final List validNames = new ArrayList<>(); - for (GapPolicy policy : values()) { - validNames.add(policy.getName()); - } - throw new SearchParseException(context, "Invalid gap policy: [" + text + "], accepted values: " + validNames); - } - return result; - } - - private final byte id; - private final ParseField parseField; - - private GapPolicy(byte id, String name) { - this.id = id; - this.parseField = new ParseField(name); - } - - public void writeTo(StreamOutput out) throws IOException { - out.writeByte(id); - } - - public static GapPolicy readFrom(StreamInput in) throws IOException { - byte id = in.readByte(); - for (GapPolicy gapPolicy : values()) { - if (id == gapPolicy.id) { - return gapPolicy; - } - } - throw new IllegalStateException("Unknown GapPolicy with id [" + id + "]"); - } - - public String getName() { - return parseField.getPreferredName(); - } - } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 7d2d5500cd1..24a4c8cff5a 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.metrics.sum.Sum; -import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer.GapPolicy; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; @@ -509,7 +508,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .setQuery(matchAllQuery()) .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) - .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(GapPolicy.INSERT_ZEROS))) + .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(BucketHelpers.GapPolicy.INSERT_ZEROS))) .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); From 8e02a8565de162ee2a6df7df86e0d7efa16799a1 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 5 Mar 2015 09:58:22 -0500 Subject: [PATCH 032/236] Add header to BucketHelpers class --- .../aggregations/reducers/BucketHelpers.java | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java index 145ff1dea1f..f92a2b70d3b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java @@ -1,3 +1,22 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + package org.elasticsearch.search.aggregations.reducers; import org.elasticsearch.ElasticsearchIllegalStateException; From 3063f06fc7506a9be7331553bf77614d9ca2dd35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 3 Mar 2015 12:54:12 +0100 Subject: [PATCH 033/236] Add randomiziation to test for derivative aggregation --- .../reducers/DerivativeTests.java | 539 +++++------------- 1 file changed, 154 insertions(+), 385 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 24a4c8cff5a..a5c9506aeac 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -21,16 +21,20 @@ package org.elasticsearch.search.aggregations.reducers; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; import org.junit.Test; +import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -41,7 +45,6 @@ import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.der import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @@ -49,49 +52,44 @@ import static org.hamcrest.core.IsNull.nullValue; public class DerivativeTests extends ElasticsearchIntegrationTest { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - private static final String MULTI_VALUED_FIELD_NAME = "l_values"; - static int numDocs; - static int interval; - static int numValueBuckets, numValuesBuckets; - static int numFirstDerivValueBuckets, numFirstDerivValuesBuckets; - static int numSecondDerivValueBuckets; - static long[] valueCounts, valuesCounts; - static long[] firstDerivValueCounts, firstDerivValuesCounts; - static long[] secondDerivValueCounts; + private static int interval; + private static int numValueBuckets; + private static int numFirstDerivValueBuckets; + private static int numSecondDerivValueBuckets; + private static long[] valueCounts; + private static long[] firstDerivValueCounts; + private static long[] secondDerivValueCounts; + + private static Long[] valueCounts_empty; + private static long numDocsEmptyIdx; + private static Double[] firstDerivValueCounts_empty; + + // expected bucket values for random setup with gaps + private static int numBuckets_empty_rnd; + private static Long[] valueCounts_empty_rnd; + private static Double[] firstDerivValueCounts_empty_rnd; + private static long numDocsEmptyIdx_rnd; @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); createIndex("idx_unmapped"); - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); + interval = 5; + numValueBuckets = randomIntBetween(6, 80); - numValueBuckets = numDocs / interval + 1; valueCounts = new long[numValueBuckets]; - for (int i = 0; i < numDocs; i++) { - final int bucket = (i + 1) / interval; - valueCounts[bucket]++; - } - - numValuesBuckets = (numDocs + 1) / interval + 1; - valuesCounts = new long[numValuesBuckets]; - for (int i = 0; i < numDocs; i++) { - final int bucket1 = (i + 1) / interval; - final int bucket2 = (i + 2) / interval; - valuesCounts[bucket1]++; - if (bucket1 != bucket2) { - valuesCounts[bucket2]++; - } + for (int i = 0; i < numValueBuckets; i++) { + valueCounts[i] = randomIntBetween(1, 20); } numFirstDerivValueBuckets = numValueBuckets - 1; firstDerivValueCounts = new long[numFirstDerivValueBuckets]; - long lastValueCount = -1; + Long lastValueCount = null; for (int i = 0; i < numValueBuckets; i++) { long thisValue = valueCounts[i]; - if (lastValueCount != -1) { + if (lastValueCount != null) { long diff = thisValue - lastValueCount; firstDerivValueCounts[i - 1] = diff; } @@ -100,112 +98,69 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { numSecondDerivValueBuckets = numFirstDerivValueBuckets - 1; secondDerivValueCounts = new long[numSecondDerivValueBuckets]; - long lastFirstDerivativeValueCount = -1; + Long lastFirstDerivativeValueCount = null; for (int i = 0; i < numFirstDerivValueBuckets; i++) { long thisFirstDerivativeValue = firstDerivValueCounts[i]; - if (lastFirstDerivativeValueCount != -1) { + if (lastFirstDerivativeValueCount != null) { long diff = thisFirstDerivativeValue - lastFirstDerivativeValueCount; secondDerivValueCounts[i - 1] = diff; } lastFirstDerivativeValueCount = thisFirstDerivativeValue; } - numFirstDerivValuesBuckets = numValuesBuckets - 1; - firstDerivValuesCounts = new long[numFirstDerivValuesBuckets]; - long lastValuesCount = -1; - for (int i = 0; i < numValuesBuckets; i++) { - long thisValue = valuesCounts[i]; - if (lastValuesCount != -1) { - long diff = thisValue - lastValuesCount; - firstDerivValuesCounts[i - 1] = diff; - } - lastValuesCount = thisValue; - } - List builders = new ArrayList<>(); - - for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex("idx", "type").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i + 1).startArray(MULTI_VALUED_FIELD_NAME).value(i + 1) - .value(i + 2).endArray().field("tag", "tag" + i).endObject())); + for (int i = 0; i < numValueBuckets; i++) { + for (int docs = 0; docs < valueCounts[i]; docs++) { + builders.add(client().prepareIndex("idx", "type").setSource(newDocBuilder(i * interval))); + } } + // setup for index with empty buckets + valueCounts_empty = new Long[] { 1l, 1l, 2l, 0l, 2l, 2l, 0l, 0l, 0l, 3l, 2l, 1l }; + firstDerivValueCounts_empty = new Double[] { null, 0d, 1d, -2d, 2d, 0d, -2d, 0d, 0d, 3d, -1d, -1d }; + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 0).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 0).endObject())); + for (int i = 0; i < valueCounts_empty.length; i++) { + for (int docs = 0; docs < valueCounts_empty[i]; docs++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type").setSource(newDocBuilder(i))); + numDocsEmptyIdx++; + } + } - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 1).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).endObject())); + // randomized setup for index with empty buckets + numBuckets_empty_rnd = randomIntBetween(20, 100); + valueCounts_empty_rnd = new Long[numBuckets_empty_rnd]; + firstDerivValueCounts_empty_rnd = new Double[numBuckets_empty_rnd]; + firstDerivValueCounts_empty_rnd[0] = null; - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 2).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).endObject())); - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 3).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).endObject())); - - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 4).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).endObject())); - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 5).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).endObject())); - - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 6).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).endObject())); - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 7).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).endObject())); - - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 8).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 9).endObject())); - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 9).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 9).endObject())); - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 10).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 9).endObject())); - - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 11).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 10).endObject())); - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 12).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 10).endObject())); - - builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + 13).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 11).endObject())); + assertAcked(prepareCreate("empty_bucket_idx_rnd").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < numBuckets_empty_rnd; i++) { + valueCounts_empty_rnd[i] = (long) randomIntBetween(1, 10); + // make approximately half of the buckets empty + if (randomBoolean()) + valueCounts_empty_rnd[i] = 0l; + for (int docs = 0; docs < valueCounts_empty_rnd[i]; docs++) { + builders.add(client().prepareIndex("empty_bucket_idx_rnd", "type").setSource(newDocBuilder(i))); + numDocsEmptyIdx_rnd++; + } + if (i > 0) { + firstDerivValueCounts_empty_rnd[i] = (double) valueCounts_empty_rnd[i] - valueCounts_empty_rnd[i - 1]; + } + } indexRandom(true, builders); ensureSearchable(); } - @Test - public void singleValuedField() { - - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))) - .execute().actionGet(); - - assertSearchResponse(response); - - InternalHistogram deriv = response.getAggregations().get("histo"); - assertThat(deriv, notNullValue()); - assertThat(deriv.getName(), equalTo("histo")); - List buckets = deriv.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - if (i > 0) { - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1])); - } else { - assertThat(docCountDeriv, nullValue()); - } - } + private XContentBuilder newDocBuilder(int singleValueFieldValue) throws IOException { + return jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, singleValueFieldValue).endObject(); } + /** + * test first and second derivative on the sing + */ @Test - public void singleValuedField_secondDerivative() { + public void singleValuedField() { SearchResponse response = client() .prepareSearch("idx") @@ -216,7 +171,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertSearchResponse(response); - InternalHistogram deriv = response.getAggregations().get("histo"); + InternalHistogram deriv = response.getAggregations().get("histo"); assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = deriv.getBuckets(); @@ -224,10 +179,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); if (i > 0) { assertThat(docCountDeriv, notNullValue()); @@ -256,7 +208,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { assertSearchResponse(response); - InternalHistogram deriv = response.getAggregations().get("histo"); + InternalHistogram deriv = response.getAggregations().get("histo"); assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); @@ -264,92 +216,44 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { Object[] propertiesDocCounts = (Object[]) deriv.getProperty("_count"); Object[] propertiesSumCounts = (Object[]) deriv.getProperty("sum.value"); - List buckets = new ArrayList<>(deriv.getBuckets()); + List buckets = new ArrayList(deriv.getBuckets()); + Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets + // overwritten for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); Sum sum = bucket.getAggregations().get("sum"); assertThat(sum, notNullValue()); - long s = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == i) { - s += j + 1; - } - } + long expectedSum = valueCounts[i] * (i * interval); + assertThat(sum.getValue(), equalTo((double) expectedSum)); SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); - assertThat(sum.getValue(), equalTo((double) s)); if (i > 0) { assertThat(sumDeriv, notNullValue()); - long s1 = 0; - long s2 = 0; - for (int j = 0; j < numDocs; ++j) { - if ((j + 1) / interval == i - 1) { - s1 += j + 1; - } - if ((j + 1) / interval == i) { - s2 += j + 1; - } - } - long sumDerivValue = s2 - s1; + long sumDerivValue = expectedSum - expectedSumPreviousBucket; assertThat(sumDeriv.value(), equalTo((double) sumDerivValue)); assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo((double) sumDerivValue)); } else { assertThat(sumDeriv, nullValue()); } + expectedSumPreviousBucket = expectedSum; assertThat((long) propertiesKeys[i], equalTo((long) i * interval)); assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i])); - assertThat((double) propertiesSumCounts[i], equalTo((double) s)); - } - } - - @Test - public void multiValuedField() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))) - .execute().actionGet(); - - assertSearchResponse(response); - - InternalHistogram deriv = response.getAggregations().get("histo"); - assertThat(deriv, notNullValue()); - assertThat(deriv.getName(), equalTo("histo")); - List buckets = deriv.getBuckets(); - assertThat(deriv.getBuckets().size(), equalTo(numValuesBuckets)); - - for (int i = 0; i < numValuesBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valuesCounts[i])); - SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - if (i > 0) { - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo((double) firstDerivValuesCounts[i - 1])); - } else { - assertThat(docCountDeriv, nullValue()); - } + assertThat((double) propertiesSumCounts[i], equalTo((double) expectedSum)); } } @Test public void unmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx_unmapped") + SearchResponse response = client() + .prepareSearch("idx_unmapped") .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))) - .execute().actionGet(); + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); - InternalHistogram deriv = response.getAggregations().get("histo"); + InternalHistogram deriv = response.getAggregations().get("histo"); assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); assertThat(deriv.getBuckets().size(), equalTo(0)); @@ -357,15 +261,15 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { @Test public void partiallyUnmapped() throws Exception { - SearchResponse response = client().prepareSearch("idx", "idx_unmapped") + SearchResponse response = client() + .prepareSearch("idx", "idx_unmapped") .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))) - .execute().actionGet(); + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); - InternalHistogram deriv = response.getAggregations().get("histo"); + InternalHistogram deriv = response.getAggregations().get("histo"); assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); List buckets = deriv.getBuckets(); @@ -373,10 +277,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(String.valueOf(i * interval))); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); if (i > 0) { assertThat(docCountDeriv, notNullValue()); @@ -394,111 +295,57 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .setQuery(matchAllQuery()) .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))) - .execute().actionGet(); + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); - assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); - InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); assertThat(deriv, Matchers.notNullValue()); assertThat(deriv.getName(), equalTo("histo")); - List buckets = (List) deriv.getBuckets(); - assertThat(buckets.size(), equalTo(12)); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(valueCounts_empty.length)); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(0l)); - assertThat(bucket.getDocCount(), equalTo(1l)); - SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, nullValue()); + for (int i = 0; i < valueCounts_empty.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (firstDerivValueCounts_empty[i] == null) { + assertThat(docCountDeriv, nullValue()); + } else { + assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty[i])); + } + } + } - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(1l)); - assertThat(bucket.getDocCount(), equalTo(1l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); + @Test + public void singleValuedFieldWithGaps_random() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx_rnd") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + .extendedBounds(0l, (long) numBuckets_empty_rnd - 1) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(2l)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(1d)); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx_rnd)); - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(3l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-2d)); + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numBuckets_empty_rnd)); - bucket = buckets.get(4); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(4l)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(2d)); - - bucket = buckets.get(5); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(5l)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); - - bucket = buckets.get(6); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(6l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-2d)); - - bucket = buckets.get(7); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(7l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); - - bucket = buckets.get(8); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(8l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); - - bucket = buckets.get(9); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(9l)); - assertThat(bucket.getDocCount(), equalTo(3l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(3d)); - - bucket = buckets.get(10); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-1d)); - - bucket = buckets.get(11); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(11l)); - assertThat(bucket.getDocCount(), equalTo(1l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-1d)); + for (int i = 0; i < valueCounts_empty_rnd.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + System.out.println(bucket.getDocCount()); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty_rnd[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (firstDerivValueCounts_empty_rnd[i] == null) { + assertThat(docCountDeriv, nullValue()); + } else { + assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty_rnd[i])); + } + } } @Test @@ -508,111 +355,33 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .setQuery(matchAllQuery()) .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) - .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(BucketHelpers.GapPolicy.INSERT_ZEROS))) - .execute().actionGet(); + .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(GapPolicy.INSERT_ZEROS))).execute() + .actionGet(); - assertThat(searchResponse.getHits().getTotalHits(), equalTo(14l)); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); - InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); assertThat(deriv, Matchers.notNullValue()); assertThat(deriv.getName(), equalTo("histo")); - List buckets = (List) deriv.getBuckets(); - assertThat(buckets.size(), equalTo(12)); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(valueCounts_empty.length)); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(0l)); - assertThat(bucket.getDocCount(), equalTo(1l)); - SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, nullValue()); - - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(1l)); - assertThat(bucket.getDocCount(), equalTo(1l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); - - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(2l)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(1d)); - - bucket = buckets.get(3); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(3l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-2d)); - - bucket = buckets.get(4); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(4l)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(2d)); - - bucket = buckets.get(5); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(5l)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); - - bucket = buckets.get(6); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(6l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-2d)); - - bucket = buckets.get(7); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(7l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); - - bucket = buckets.get(8); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(8l)); - assertThat(bucket.getDocCount(), equalTo(0l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(0d)); - - bucket = buckets.get(9); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(9l)); - assertThat(bucket.getDocCount(), equalTo(3l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(3d)); - - bucket = buckets.get(10); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(10l)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-1d)); - - bucket = buckets.get(11); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(11l)); - assertThat(bucket.getDocCount(), equalTo(1l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), equalTo(-1d)); + for (int i = 0; i < valueCounts_empty.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i + ": ", bucket, i, valueCounts_empty[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (firstDerivValueCounts_empty[i] == null) { + assertThat(docCountDeriv, nullValue()); + } else { + assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty[i])); + } + } } + private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey, + final long expectedDocCount) { + assertThat(msg, bucket, notNullValue()); + assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey)); + assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount)); + } } From 02679e7c4364fe51b15823fc8017657b5117e15d Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 16 Mar 2015 22:59:26 -0700 Subject: [PATCH 034/236] [BUILD] fix snapshot URL --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 954f4d897e3..30fd9468946 100644 --- a/pom.xml +++ b/pom.xml @@ -56,7 +56,7 @@ lucene-snapshots Lucene Snapshots - https://download.elasticsearch.org/lucenesnapshots/1662607 + https://download.elastic.co/lucenesnapshots/1662607 From cb4ab060214aa8f6beff8e107e65d1a691176530 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 18 Mar 2015 14:14:00 -0700 Subject: [PATCH 035/236] missed file in merge --- .../bucket/significant/SignificanceHeuristicTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index c3fe8b94071..5b669cb9175 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.MutualInformation; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.PercentageScore; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ScriptHeuristic; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicBuilder; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser; From b751f0e11bacedd4d684c5cf826bbc64dc314722 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 23 Mar 2015 08:58:44 +0000 Subject: [PATCH 036/236] added validation of reducers --- .../aggregations/AggregatorFactories.java | 4 ++- .../aggregations/AggregatorFactory.java | 4 +++ .../bucket/histogram/HistogramAggregator.java | 4 +++ .../aggregations/reducers/ReducerFactory.java | 15 +++++++--- .../derivative/DerivativeReducer.java | 29 +++++++++++++++++-- .../reducers/DateDerivativeTests.java | 10 +++---- .../reducers/DerivativeTests.java | 9 +++--- 7 files changed, 57 insertions(+), 18 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 552ff49fe1d..1a4c157da8e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -40,6 +40,7 @@ public class AggregatorFactories { public static final AggregatorFactories EMPTY = new Empty(); + private AggregatorFactory parent; private AggregatorFactory[] factories; private List reducerFactories; @@ -101,6 +102,7 @@ public class AggregatorFactories { } void setParent(AggregatorFactory parent) { + this.parent = parent; for (AggregatorFactory factory : factories) { factory.parent = parent; } @@ -111,7 +113,7 @@ public class AggregatorFactories { factory.validate(); } for (ReducerFactory factory : reducerFactories) { - factory.validate(); + factory.validate(parent, factories, reducerFactories); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 41aee8f931f..f69e54ee710 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -66,6 +66,10 @@ public abstract class AggregatorFactory { return this; } + public String name() { + return name; + } + /** * Validates the state of this factory (makes sure the factory is properly configured) */ diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index 0a6a8bce732..63325c12aad 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -169,6 +169,10 @@ public class HistogramAggregator extends BucketsAggregator { this.histogramFactory = histogramFactory; } + public long minDocCount() { + return minDocCount; + } + @Override protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) throws IOException { diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java index 05cb6fbed48..ccdd2ac0328 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java @@ -19,9 +19,11 @@ package org.elasticsearch.search.aggregations.reducers; import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,10 +51,15 @@ public abstract class ReducerFactory { } /** - * Validates the state of this factory (makes sure the factory is properly configured) + * Validates the state of this factory (makes sure the factory is properly + * configured) + * + * @param reducerFactories + * @param factories + * @param parent */ - public final void validate() { - doValidate(); + public final void validate(AggregatorFactory parent, AggregatorFactory[] factories, List reducerFactories) { + doValidate(parent, factories, reducerFactories); } protected abstract Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, @@ -79,7 +86,7 @@ public abstract class ReducerFactory { return aggregator; } - public void doValidate() { + public void doValidate(AggregatorFactory parent, AggregatorFactory[] factories, List reducerFactories) { } public void setMetaData(Map metaData) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 1130639a1a2..5f40ab2906e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -22,18 +22,24 @@ package org.elasticsearch.search.aggregations.reducers.derivative; import com.google.common.base.Function; import com.google.common.collect.Lists; - +import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.Type; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; -import org.elasticsearch.search.aggregations.reducers.*; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -43,7 +49,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.resolveBucketValue; public class DerivativeReducer extends Reducer { @@ -143,5 +148,23 @@ public class DerivativeReducer extends Reducer { return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, metaData); } + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new ElasticsearchIllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + if (!(parent instanceof HistogramAggregator.Factory)) { + throw new ElasticsearchIllegalStateException("derivative reducer [" + name + + "] must have a histogram or date_histogram as parent"); + } else { + HistogramAggregator.Factory histoParent = (HistogramAggregator.Factory) parent; + if (histoParent.minDocCount() != 0) { + throw new ElasticsearchIllegalStateException("parent histogram of derivative reducer [" + name + + "] must have min_doc_count of 0"); + } + } + } + } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java index ad1c131c885..ede94abd973 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java @@ -109,7 +109,7 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH) + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); @@ -152,7 +152,7 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH) + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv").setBucketsPaths("sum")).subAggregation(sum("sum").field("value"))) .execute().actionGet(); @@ -222,7 +222,7 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - dateHistogram("histo").field("dates").interval(DateHistogramInterval.MONTH) + dateHistogram("histo").field("dates").interval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); @@ -278,7 +278,7 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx_unmapped") .addAggregation( - dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH) + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); @@ -294,7 +294,7 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx", "idx_unmapped") .addAggregation( - dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH) + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index a5c9506aeac..6f5641fcffa 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -34,7 +34,6 @@ import org.junit.Test; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -165,7 +164,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .subAggregation(derivative("deriv").setBucketsPaths("_count")) .subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv"))).execute().actionGet(); @@ -202,7 +201,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet(); @@ -248,7 +247,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx_unmapped") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); @@ -264,7 +263,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx", "idx_unmapped") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); From 53de93a89be2143465b2bf7e3304a8c05caf755e Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 5 Mar 2015 14:21:16 +0000 Subject: [PATCH 037/236] Aggregations: Added Factory for all MultiBucketAggregations to implement This allows things like reducers to add aggregations to buckets without needing to know how to construct the aggregation or bucket itself. --- .../InternalMultiBucketAggregation.java | 34 +++++++++++++- .../bucket/filters/InternalFilters.java | 13 +++++- .../bucket/geogrid/InternalGeoHashGrid.java | 14 +++++- .../histogram/InternalDateHistogram.java | 5 +++ .../bucket/histogram/InternalHistogram.java | 25 +++++++++-- .../bucket/range/InternalRange.java | 44 ++++++++++++++++--- .../bucket/range/date/InternalDateRange.java | 18 ++++++-- .../geodistance/InternalGeoDistance.java | 18 ++++++-- .../bucket/range/ipv4/InternalIPv4Range.java | 17 +++++-- .../significant/InternalSignificantTerms.java | 17 ++++--- .../significant/SignificantLongTerms.java | 30 +++++++++---- .../significant/SignificantStringTerms.java | 27 ++++++++---- .../significant/UnmappedSignificantTerms.java | 23 +++++++--- .../bucket/terms/DoubleTerms.java | 36 ++++++++++----- .../bucket/terms/InternalTerms.java | 18 ++++---- .../aggregations/bucket/terms/LongTerms.java | 25 ++++++++--- .../bucket/terms/StringTerms.java | 27 ++++++++---- .../bucket/terms/UnmappedTerms.java | 24 +++++++--- .../derivative/DerivativeReducer.java | 4 +- 19 files changed, 325 insertions(+), 94 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index e7377414eda..856b96979f2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -25,7 +25,8 @@ import org.elasticsearch.search.aggregations.reducers.Reducer; import java.util.List; import java.util.Map; -public abstract class InternalMultiBucketAggregation extends InternalAggregation implements MultiBucketsAggregation { +public abstract class InternalMultiBucketAggregation + extends InternalAggregation implements MultiBucketsAggregation { public InternalMultiBucketAggregation() { } @@ -34,6 +35,28 @@ public abstract class InternalMultiBucketAggregation extends InternalAggregation super(name, reducers, metaData); } + /** + * Create a new copy of this {@link Aggregation} with the same settings as + * this {@link Aggregation} and contains the provided buckets. + * + * @param buckets + * the buckets to use in the new {@link Aggregation} + * @return the new {@link Aggregation} + */ + public abstract A create(List buckets); + + /** + * Create a new {@link InternalBucket} using the provided prototype bucket + * and aggregations. + * + * @param aggregations + * the aggregations for the new bucket + * @param prototype + * the bucket to use as a prototype + * @return the new bucket + */ + public abstract B createBucket(InternalAggregations aggregations, B prototype); + @Override public Object getProperty(List path) { if (path.isEmpty()) { @@ -75,4 +98,13 @@ public abstract class InternalMultiBucketAggregation extends InternalAggregation return aggregation.getProperty(path.subList(1, path.size())); } } + + public static abstract class Factory { + + public abstract String type(); + + public abstract A create(List buckets, A prototype); + + public abstract B createBucket(InternalAggregations aggregations, B prototype); + } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java index 1e4c882ef5f..0383164ba86 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation.InternalBucket; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.reducers.Reducer; @@ -42,7 +43,7 @@ import java.util.Map; /** * */ -public class InternalFilters extends InternalMultiBucketAggregation implements Filters { +public class InternalFilters extends InternalMultiBucketAggregation implements Filters { public final static Type TYPE = new Type("filters"); @@ -175,6 +176,16 @@ public class InternalFilters extends InternalMultiBucketAggregation implements F return TYPE; } + @Override + public InternalFilters create(List buckets) { + return new InternalFilters(this.name, buckets, this.keyed, this.reducers(), this.metaData); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.key, prototype.docCount, aggregations, prototype.keyed); + } + @Override public List getBuckets() { return buckets; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index 83428f8c209..6bbf1e2dc7f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -46,7 +46,8 @@ import java.util.Map; * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. */ -public class InternalGeoHashGrid extends InternalMultiBucketAggregation implements GeoHashGrid { +public class InternalGeoHashGrid extends InternalMultiBucketAggregation implements + GeoHashGrid { public static final Type TYPE = new Type("geohash_grid", "ghcells"); @@ -163,7 +164,6 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen return builder; } } - private int requiredSize; private Collection buckets; protected Map bucketMap; @@ -183,6 +183,16 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen return TYPE; } + @Override + public InternalGeoHashGrid create(List buckets) { + return new InternalGeoHashGrid(this.name, this.requiredSize, buckets, this.reducers(), this.metaData); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.geohashAsLong, prototype.docCount, aggregations); + } + @Override public List getBuckets() { Object o = buckets; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 503d3626b2f..a82a089066b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -70,6 +70,11 @@ public class InternalDateHistogram { return TYPE.name(); } + @Override + public InternalDateHistogram.Bucket createBucket(InternalAggregations aggregations, InternalDateHistogram.Bucket prototype) { + return new Bucket(prototype.key, prototype.docCount, aggregations, prototype.getKeyed(), prototype.formatter, this); + } + @Override public InternalDateHistogram.Bucket createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index ad17e3796fe..8c5b219379c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -52,7 +52,8 @@ import java.util.Map; /** * TODO should be renamed to InternalNumericHistogram (see comment on {@link Histogram})? */ -public class InternalHistogram extends InternalMultiBucketAggregation implements Histogram { +public class InternalHistogram extends InternalMultiBucketAggregation implements + Histogram { final static Type TYPE = new Type("histogram", "histo"); @@ -233,7 +234,7 @@ public class InternalHistogram extends Inter } - public static class Factory { + public static class Factory extends InternalMultiBucketAggregation.Factory, B> { protected Factory() { } @@ -248,11 +249,17 @@ public class InternalHistogram extends Inter return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, reducers, metaData); } - public InternalHistogram create(String name, List buckets, InternalHistogram prototype) { - return new InternalHistogram<>(name, buckets, prototype.order, prototype.minDocCount, prototype.emptyBucketInfo, + @Override + public InternalHistogram create(List buckets, InternalHistogram prototype) { + return new InternalHistogram<>(prototype.name, buckets, prototype.order, prototype.minDocCount, prototype.emptyBucketInfo, prototype.formatter, prototype.keyed, this, prototype.reducers(), prototype.metaData); } + @Override + public B createBucket(InternalAggregations aggregations, B prototype) { + return (B) new Bucket(prototype.key, prototype.docCount, prototype.getKeyed(), prototype.formatter, this, aggregations); + } + public B createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { if (key instanceof Number) { @@ -306,6 +313,16 @@ public class InternalHistogram extends Inter return factory; } + @Override + public InternalHistogram create(List buckets) { + return getFactory().create(buckets, this); + } + + @Override + public B createBucket(InternalAggregations aggregations, B prototype) { + return getFactory().createBucket(aggregations, prototype); + } + private static class IteratorAndCurrent { private final Iterator iterator; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index 00ff8b08030..a3602060fd2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -43,7 +43,8 @@ import java.util.Map; /** * */ -public class InternalRange extends InternalMultiBucketAggregation implements Range { +public class InternalRange> extends InternalMultiBucketAggregation + implements Range { static final Factory FACTORY = new Factory(); @@ -124,6 +125,14 @@ public class InternalRange extends InternalMulti return to; } + public boolean getKeyed() { + return keyed; + } + + public ValueFormatter getFormatter() { + return formatter; + } + @Override public String getFromAsString() { if (Double.isInfinite(from)) { @@ -216,7 +225,7 @@ public class InternalRange extends InternalMulti } } - public static class Factory> { + public static class Factory> extends InternalMultiBucketAggregation.Factory { public String type() { return TYPE.name(); @@ -231,12 +240,25 @@ public class InternalRange extends InternalMulti public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { return (B) new Bucket(key, from, to, docCount, aggregations, keyed, formatter); } + + @Override + public R create(List ranges, R prototype) { + return (R) new InternalRange<>(prototype.name, ranges, prototype.formatter, prototype.keyed, prototype.reducers(), + prototype.metaData); + } + + @Override + public B createBucket(InternalAggregations aggregations, B prototype) { + return (B) new Bucket(prototype.getKey(), prototype.from, prototype.to, prototype.getDocCount(), aggregations, prototype.keyed, + prototype.formatter); + } } private List ranges; private Map rangeMap; - private @Nullable ValueFormatter formatter; - private boolean keyed; + @Nullable + protected ValueFormatter formatter; + protected boolean keyed; public InternalRange() {} // for serialization @@ -258,10 +280,20 @@ public class InternalRange extends InternalMulti return ranges; } - protected Factory getFactory() { + public Factory getFactory() { return FACTORY; } + @Override + public R create(List buckets) { + return getFactory().create(buckets, (R) this); + } + + @Override + public B createBucket(InternalAggregations aggregations, B prototype) { + return getFactory().createBucket(aggregations, prototype); + } + @Override public InternalAggregation doReduce(ReduceContext reduceContext) { List aggregations = reduceContext.aggregations(); @@ -271,7 +303,7 @@ public class InternalRange extends InternalMulti rangeList[i] = new ArrayList(); } for (InternalAggregation aggregation : aggregations) { - InternalRange ranges = (InternalRange) aggregation; + InternalRange ranges = (InternalRange) aggregation; int i = 0; for (Bucket range : ranges.ranges) { rangeList[i++].add(range); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java index b679a6bc3d5..6444f53e527 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java @@ -38,7 +38,7 @@ import java.util.Map; /** * */ -public class InternalDateRange extends InternalRange { +public class InternalDateRange extends InternalRange { public final static Type TYPE = new Type("date_range", "drange"); @@ -113,7 +113,7 @@ public class InternalDateRange extends InternalRange { } } - private static class Factory extends InternalRange.Factory { + public static class Factory extends InternalRange.Factory { @Override public String type() { @@ -126,10 +126,22 @@ public class InternalDateRange extends InternalRange { return new InternalDateRange(name, ranges, formatter, keyed, reducers, metaData); } + @Override + public InternalDateRange create(List ranges, InternalDateRange prototype) { + return new InternalDateRange(prototype.name, ranges, prototype.formatter, prototype.keyed, prototype.reducers(), + prototype.metaData); + } + @Override public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, ValueFormatter formatter) { return new Bucket(key, from, to, docCount, aggregations, keyed, formatter); } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.getKey(), ((Number) prototype.getFrom()).doubleValue(), ((Number) prototype.getTo()).doubleValue(), + prototype.getDocCount(), aggregations, prototype.getKeyed(), prototype.getFormatter()); + } } InternalDateRange() {} // for serialization @@ -145,7 +157,7 @@ public class InternalDateRange extends InternalRange { } @Override - protected InternalRange.Factory getFactory() { + public InternalRange.Factory getFactory() { return FACTORY; } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java index 0fef2e2ba00..b271c3336e0 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java @@ -36,7 +36,7 @@ import java.util.Map; /** * */ -public class InternalGeoDistance extends InternalRange { +public class InternalGeoDistance extends InternalRange { public static final Type TYPE = new Type("geo_distance", "gdist"); @@ -101,7 +101,7 @@ public class InternalGeoDistance extends InternalRange { + public static class Factory extends InternalRange.Factory { @Override public String type() { @@ -114,10 +114,22 @@ public class InternalGeoDistance extends InternalRange ranges, InternalGeoDistance prototype) { + return new InternalGeoDistance(prototype.name, ranges, prototype.formatter, prototype.keyed, prototype.reducers(), + prototype.metaData); + } + @Override public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { return new Bucket(key, from, to, docCount, aggregations, keyed, formatter); } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.getKey(), ((Number) prototype.getFrom()).doubleValue(), ((Number) prototype.getTo()).doubleValue(), + prototype.getDocCount(), aggregations, prototype.getKeyed(), prototype.getFormatter()); + } } InternalGeoDistance() {} // for serialization @@ -133,7 +145,7 @@ public class InternalGeoDistance extends InternalRange getFactory() { + public InternalRange.Factory getFactory() { return FACTORY; } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java index be2f8e52f8f..96668e67c69 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java @@ -36,7 +36,7 @@ import java.util.Map; /** * */ -public class InternalIPv4Range extends InternalRange { +public class InternalIPv4Range extends InternalRange { public static final long MAX_IP = 4294967296l; @@ -110,7 +110,7 @@ public class InternalIPv4Range extends InternalRange { } } - private static class Factory extends InternalRange.Factory { + public static class Factory extends InternalRange.Factory { @Override public String type() { @@ -123,10 +123,21 @@ public class InternalIPv4Range extends InternalRange { return new InternalIPv4Range(name, ranges, keyed, reducers, metaData); } + @Override + public InternalIPv4Range create(List ranges, InternalIPv4Range prototype) { + return new InternalIPv4Range(prototype.name, ranges, prototype.keyed, prototype.reducers(), prototype.metaData); + } + @Override public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { return new Bucket(key, from, to, docCount, aggregations, keyed); } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.getKey(), ((Number) prototype.getFrom()).doubleValue(), ((Number) prototype.getTo()).doubleValue(), + prototype.getDocCount(), aggregations, prototype.getKeyed()); + } } public InternalIPv4Range() {} // for serialization @@ -142,7 +153,7 @@ public class InternalIPv4Range extends InternalRange { } @Override - protected InternalRange.Factory getFactory() { + public InternalRange.Factory getFactory() { return FACTORY; } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java index a48fc850b90..a949c916c7d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java @@ -39,12 +39,13 @@ import java.util.Map; /** * */ -public abstract class InternalSignificantTerms extends InternalMultiBucketAggregation implements SignificantTerms, ToXContent, Streamable { +public abstract class InternalSignificantTerms extends + InternalMultiBucketAggregation implements SignificantTerms, ToXContent, Streamable { protected SignificanceHeuristic significanceHeuristic; protected int requiredSize; protected long minDocCount; - protected List buckets; + protected List buckets; protected Map bucketMap; protected long subsetSize; protected long supersetSize; @@ -124,7 +125,8 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggreg } protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount, - SignificanceHeuristic significanceHeuristic, List buckets, List reducers, Map metaData) { + SignificanceHeuristic significanceHeuristic, List buckets, List reducers, + Map metaData) { super(name, reducers, metaData); this.requiredSize = requiredSize; this.minDocCount = minDocCount; @@ -166,13 +168,13 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggreg // Compute the overall result set size and the corpus size using the // top-level Aggregations from each shard for (InternalAggregation aggregation : aggregations) { - InternalSignificantTerms terms = (InternalSignificantTerms) aggregation; + InternalSignificantTerms terms = (InternalSignificantTerms) aggregation; globalSubsetSize += terms.subsetSize; globalSupersetSize += terms.supersetSize; } Map> buckets = new HashMap<>(); for (InternalAggregation aggregation : aggregations) { - InternalSignificantTerms terms = (InternalSignificantTerms) aggregation; + InternalSignificantTerms terms = (InternalSignificantTerms) aggregation; for (Bucket bucket : terms.buckets) { List existingBuckets = buckets.get(bucket.getKey()); if (existingBuckets == null) { @@ -200,9 +202,10 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggreg for (int i = ordered.size() - 1; i >= 0; i--) { list[i] = (Bucket) ordered.pop(); } - return newAggregation(globalSubsetSize, globalSupersetSize, Arrays.asList(list)); + return create(globalSubsetSize, globalSupersetSize, Arrays.asList(list), this); } - abstract InternalSignificantTerms newAggregation(long subsetSize, long supersetSize, List buckets); + protected abstract A create(long subsetSize, long supersetSize, List buckets, + InternalSignificantTerms prototype); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java index 85ae983ef18..a450f9d0933 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java @@ -42,7 +42,7 @@ import java.util.Map; /** * */ -public class SignificantLongTerms extends InternalSignificantTerms { +public class SignificantLongTerms extends InternalSignificantTerms { public static final Type TYPE = new Type("significant_terms", "siglterms"); @@ -162,15 +162,13 @@ public class SignificantLongTerms extends InternalSignificantTerms { return builder; } } - private ValueFormatter formatter; SignificantLongTerms() { } // for serialization - public SignificantLongTerms(long subsetSize, long supersetSize, String name, @Nullable ValueFormatter formatter, - int requiredSize, - long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets, + public SignificantLongTerms(long subsetSize, long supersetSize, String name, @Nullable ValueFormatter formatter, int requiredSize, + long minDocCount, SignificanceHeuristic significanceHeuristic, List buckets, List reducers, Map metaData) { super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, reducers, metaData); @@ -183,10 +181,24 @@ public class SignificantLongTerms extends InternalSignificantTerms { } @Override - InternalSignificantTerms newAggregation(long subsetSize, long supersetSize, - List buckets) { - return new SignificantLongTerms(subsetSize, supersetSize, getName(), formatter, requiredSize, minDocCount, significanceHeuristic, - buckets, reducers(), getMetaData()); + public SignificantLongTerms create(List buckets) { + return new SignificantLongTerms(this.subsetSize, this.supersetSize, this.name, this.formatter, this.requiredSize, this.minDocCount, + this.significanceHeuristic, buckets, this.reducers(), this.metaData); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, SignificantLongTerms.Bucket prototype) { + return new Bucket(prototype.subsetDf, prototype.subsetSize, prototype.supersetDf, prototype.supersetSize, prototype.term, + aggregations, prototype.formatter); + } + + @Override + protected SignificantLongTerms create(long subsetSize, long supersetSize, + List buckets, + InternalSignificantTerms prototype) { + return new SignificantLongTerms(subsetSize, supersetSize, prototype.getName(), ((SignificantLongTerms) prototype).formatter, + prototype.requiredSize, prototype.minDocCount, prototype.significanceHeuristic, buckets, prototype.reducers(), + prototype.getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java index d8fc74c9bc5..9fbaa6cc375 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java @@ -41,7 +41,7 @@ import java.util.Map; /** * */ -public class SignificantStringTerms extends InternalSignificantTerms { +public class SignificantStringTerms extends InternalSignificantTerms { public static final InternalAggregation.Type TYPE = new Type("significant_terms", "sigsterms"); @@ -160,9 +160,8 @@ public class SignificantStringTerms extends InternalSignificantTerms { SignificantStringTerms() {} // for serialization - public SignificantStringTerms(long subsetSize, long supersetSize, String name, int requiredSize, - long minDocCount, - SignificanceHeuristic significanceHeuristic, List buckets, List reducers, + public SignificantStringTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount, + SignificanceHeuristic significanceHeuristic, List buckets, List reducers, Map metaData) { super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, reducers, metaData); } @@ -173,10 +172,22 @@ public class SignificantStringTerms extends InternalSignificantTerms { } @Override - InternalSignificantTerms newAggregation(long subsetSize, long supersetSize, - List buckets) { - return new SignificantStringTerms(subsetSize, supersetSize, getName(), requiredSize, minDocCount, significanceHeuristic, buckets, - reducers(), getMetaData()); + public SignificantStringTerms create(List buckets) { + return new SignificantStringTerms(this.subsetSize, this.supersetSize, this.name, this.requiredSize, this.minDocCount, + this.significanceHeuristic, buckets, this.reducers(), this.metaData); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, SignificantStringTerms.Bucket prototype) { + return new Bucket(prototype.termBytes, prototype.subsetDf, prototype.subsetSize, prototype.supersetDf, prototype.supersetSize, + aggregations); + } + + @Override + protected SignificantStringTerms create(long subsetSize, long supersetSize, List buckets, + InternalSignificantTerms prototype) { + return new SignificantStringTerms(subsetSize, supersetSize, prototype.getName(), prototype.requiredSize, prototype.minDocCount, + prototype.significanceHeuristic, buckets, prototype.reducers(), prototype.getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java index 04099009272..2d0309c9da1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore; import org.elasticsearch.search.aggregations.reducers.Reducer; @@ -34,7 +35,7 @@ import java.util.Map; /** * */ -public class UnmappedSignificantTerms extends InternalSignificantTerms { +public class UnmappedSignificantTerms extends InternalSignificantTerms { public static final Type TYPE = new Type("significant_terms", "umsigterms"); @@ -67,6 +68,21 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms { return TYPE; } + @Override + public UnmappedSignificantTerms create(List buckets) { + return new UnmappedSignificantTerms(this.name, this.requiredSize, this.minDocCount, this.reducers(), this.metaData); + } + + @Override + public InternalSignificantTerms.Bucket createBucket(InternalAggregations aggregations, InternalSignificantTerms.Bucket prototype) { + throw new UnsupportedOperationException("not supported for UnmappedSignificantTerms"); + } + + @Override + protected UnmappedSignificantTerms create(long subsetSize, long supersetSize, List buckets, InternalSignificantTerms prototype) { + throw new UnsupportedOperationException("not supported for UnmappedSignificantTerms"); + } + @Override public InternalAggregation doReduce(ReduceContext reduceContext) { for (InternalAggregation aggregation : reduceContext.aggregations()) { @@ -77,11 +93,6 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms { return this; } - @Override - InternalSignificantTerms newAggregation(long subsetSize, long supersetSize, List buckets) { - throw new UnsupportedOperationException("How did you get there?"); - } - @Override protected void doReadFrom(StreamInput in) throws IOException { this.requiredSize = readSize(in); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index 0e6ca403407..dbb8061db09 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -40,7 +40,7 @@ import java.util.Map; /** * */ -public class DoubleTerms extends InternalTerms { +public class DoubleTerms extends InternalTerms { public static final Type TYPE = new Type("terms", "dterms"); @@ -85,7 +85,8 @@ public class DoubleTerms extends InternalTerms { super(formatter, showDocCountError); } - public Bucket(double term, long docCount, InternalAggregations aggregations, boolean showDocCountError, long docCountError, @Nullable ValueFormatter formatter) { + public Bucket(double term, long docCount, InternalAggregations aggregations, boolean showDocCountError, long docCountError, + @Nullable ValueFormatter formatter) { super(docCount, aggregations, showDocCountError, docCountError, formatter); this.term = term; } @@ -153,13 +154,15 @@ public class DoubleTerms extends InternalTerms { } } - private @Nullable ValueFormatter formatter; + private @Nullable + ValueFormatter formatter; - DoubleTerms() {} // for serialization + DoubleTerms() { + } // for serialization public DoubleTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, - long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, - List reducers, Map metaData) { + long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, + long otherDocCount, List reducers, Map metaData) { super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers, metaData); this.formatter = formatter; @@ -171,10 +174,23 @@ public class DoubleTerms extends InternalTerms { } @Override - protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, - long docCountError, long otherDocCount, List reducers, Map metaData) { - return new DoubleTerms(name, order, formatter, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, - otherDocCount, reducers, metaData); + public DoubleTerms create(List buckets) { + return new DoubleTerms(this.name, this.order, this.formatter, this.requiredSize, this.shardSize, this.minDocCount, buckets, + this.showTermDocCountError, this.docCountError, this.otherDocCount, this.reducers(), this.metaData); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.term, prototype.docCount, aggregations, prototype.showDocCountError, prototype.docCountError, + prototype.formatter); + } + + @Override + protected DoubleTerms create(String name, List buckets, + long docCountError, long otherDocCount, InternalTerms prototype) { + return new DoubleTerms(name, prototype.order, ((DoubleTerms) prototype).formatter, prototype.requiredSize, prototype.shardSize, + prototype.minDocCount, buckets, prototype.showTermDocCountError, docCountError, otherDocCount, prototype.reducers(), + prototype.getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 129698daffa..b753f322796 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -43,7 +43,8 @@ import java.util.Map; /** * */ -public abstract class InternalTerms extends InternalMultiBucketAggregation implements Terms, ToXContent, Streamable { +public abstract class InternalTerms extends InternalMultiBucketAggregation + implements Terms, ToXContent, Streamable { protected static final String DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = "doc_count_error_upper_bound"; protected static final String SUM_OF_OTHER_DOC_COUNTS = "sum_other_doc_count"; @@ -115,7 +116,7 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple protected int requiredSize; protected int shardSize; protected long minDocCount; - protected List buckets; + protected List buckets; protected Map bucketMap; protected long docCountError; protected boolean showTermDocCountError; @@ -123,8 +124,9 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple protected InternalTerms() {} // for serialization - protected InternalTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, List buckets, - boolean showTermDocCountError, long docCountError, long otherDocCount, List reducers, Map metaData) { + protected InternalTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, + List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, List reducers, + Map metaData) { super(name, reducers, metaData); this.order = order; this.requiredSize = requiredSize; @@ -171,7 +173,7 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple long sumDocCountError = 0; long otherDocCount = 0; for (InternalAggregation aggregation : aggregations) { - InternalTerms terms = (InternalTerms) aggregation; + InternalTerms terms = (InternalTerms) aggregation; otherDocCount += terms.getSumOfOtherDocCounts(); final long thisAggDocCountError; if (terms.buckets.size() < this.shardSize || this.order == InternalOrder.TERM_ASC || this.order == InternalOrder.TERM_DESC) { @@ -224,10 +226,10 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple } else { docCountError = aggregations.size() == 1 ? 0 : sumDocCountError; } - return newAggregation(name, Arrays.asList(list), showTermDocCountError, docCountError, otherDocCount, reducers(), getMetaData()); + return create(name, Arrays.asList(list), docCountError, otherDocCount, this); } - protected abstract InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, - long otherDocCount, List reducers, Map metaData); + protected abstract A create(String name, List buckets, long docCountError, long otherDocCount, + InternalTerms prototype); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index b8edad21dd9..eee9e6bfc4b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -39,7 +39,7 @@ import java.util.Map; /** * */ -public class LongTerms extends InternalTerms { +public class LongTerms extends InternalTerms { public static final Type TYPE = new Type("terms", "lterms"); @@ -157,7 +157,7 @@ public class LongTerms extends InternalTerms { LongTerms() {} // for serialization public LongTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, long minDocCount, - List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, + List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, List reducers, Map metaData) { super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers, metaData); @@ -170,10 +170,23 @@ public class LongTerms extends InternalTerms { } @Override - protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, - long docCountError, long otherDocCount, List reducers, Map metaData) { - return new LongTerms(name, order, formatter, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, - otherDocCount, reducers, metaData); + public LongTerms create(List buckets) { + return new LongTerms(this.name, this.order, this.formatter, this.requiredSize, this.shardSize, this.minDocCount, buckets, + this.showTermDocCountError, this.docCountError, this.otherDocCount, this.reducers(), this.metaData); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.term, prototype.docCount, aggregations, prototype.showDocCountError, prototype.docCountError, + prototype.formatter); + } + + @Override + protected LongTerms create(String name, List buckets, + long docCountError, long otherDocCount, InternalTerms prototype) { + return new LongTerms(name, prototype.order, ((LongTerms) prototype).formatter, prototype.requiredSize, prototype.shardSize, + prototype.minDocCount, buckets, prototype.showTermDocCountError, docCountError, otherDocCount, prototype.reducers(), + prototype.getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index ef9ec91e80c..ee458acdf13 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -38,7 +38,7 @@ import java.util.Map; /** * */ -public class StringTerms extends InternalTerms { +public class StringTerms extends InternalTerms { public static final InternalAggregation.Type TYPE = new Type("terms", "sterms"); @@ -74,7 +74,6 @@ public class StringTerms extends InternalTerms { BucketStreams.registerStream(BUCKET_STREAM, TYPE.stream()); } - public static class Bucket extends InternalTerms.Bucket { BytesRef termBytes; @@ -149,10 +148,11 @@ public class StringTerms extends InternalTerms { } } - StringTerms() {} // for serialization + StringTerms() { + } // for serialization public StringTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, - List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, + List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, List reducers, Map metaData) { super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers, metaData); @@ -164,10 +164,21 @@ public class StringTerms extends InternalTerms { } @Override - protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, - long docCountError, long otherDocCount, List reducers, Map metaData) { - return new StringTerms(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, - otherDocCount, reducers, metaData); + public StringTerms create(List buckets) { + return new StringTerms(this.name, this.order, this.requiredSize, this.shardSize, this.minDocCount, buckets, + this.showTermDocCountError, this.docCountError, this.otherDocCount, this.reducers(), this.metaData); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.termBytes, prototype.docCount, aggregations, prototype.showDocCountError, prototype.docCountError); + } + + @Override + protected StringTerms create(String name, List buckets, + long docCountError, long otherDocCount, InternalTerms prototype) { + return new StringTerms(name, prototype.order, prototype.requiredSize, prototype.shardSize, prototype.minDocCount, buckets, + prototype.showTermDocCountError, docCountError, otherDocCount, prototype.reducers(), prototype.getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index 89134a394ec..14f07c57e83 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; @@ -33,7 +34,7 @@ import java.util.Map; /** * */ -public class UnmappedTerms extends InternalTerms { +public class UnmappedTerms extends InternalTerms { public static final Type TYPE = new Type("terms", "umterms"); @@ -65,6 +66,21 @@ public class UnmappedTerms extends InternalTerms { return TYPE; } + @Override + public UnmappedTerms create(List buckets) { + return new UnmappedTerms(this.name, this.order, this.requiredSize, this.shardSize, this.minDocCount, this.reducers(), this.metaData); + } + + @Override + public InternalTerms.Bucket createBucket(InternalAggregations aggregations, InternalTerms.Bucket prototype) { + throw new UnsupportedOperationException("not supported for UnmappedTerms"); + } + + @Override + protected UnmappedTerms create(String name, List buckets, long docCountError, long otherDocCount, InternalTerms prototype) { + throw new UnsupportedOperationException("not supported for UnmappedTerms"); + } + @Override protected void doReadFrom(StreamInput in) throws IOException { this.docCountError = 0; @@ -92,12 +108,6 @@ public class UnmappedTerms extends InternalTerms { return this; } - @Override - protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, - long otherDocCount, List reducers, Map metaData) { - throw new UnsupportedOperationException("How did you get there?"); - } - @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, docCountError); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 5f40ab2906e..40a5b005560 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -95,7 +95,7 @@ public class DerivativeReducer extends Reducer { @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { - InternalHistogram histo = (InternalHistogram) aggregation; + InternalHistogram histo = (InternalHistogram) aggregation; List buckets = histo.getBuckets(); InternalHistogram.Factory factory = histo.getFactory(); @@ -116,7 +116,7 @@ public class DerivativeReducer extends Reducer { } lastBucketValue = thisBucketValue; } - return factory.create(histo.getName(), newBuckets, histo); + return factory.create(newBuckets, histo); } @Override From a824184bf2fddecb0ed4daa2c2deacbb66d33c30 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 8 Apr 2015 10:15:46 -0400 Subject: [PATCH 038/236] Aggregations: Add MovAvg Reducer Allows the user to calculate a Moving Average over a histogram of buckets. Provides four different moving averages: - Simple - Linear weighted - Single Exponentially weighted (aka EWMA) - Double Exponentially weighted (aka Holt-winters) Closes #10024 --- .../aggregations/AggregationModule.java | 5 +- .../TransportAggregationModule.java | 5 +- .../reducers/ReducerBuilders.java | 5 + .../reducers/movavg/MovAvgBuilder.java | 102 ++++ .../reducers/movavg/MovAvgParser.java | 142 +++++ .../reducers/movavg/MovAvgReducer.java | 182 +++++++ .../movavg/models/DoubleExpModel.java | 194 +++++++ .../reducers/movavg/models/LinearModel.java | 93 ++++ .../reducers/movavg/models/MovAvgModel.java | 49 ++ .../movavg/models/MovAvgModelBuilder.java | 33 ++ .../movavg/models/MovAvgModelModule.java | 55 ++ .../movavg/models/MovAvgModelParser.java | 34 ++ .../models/MovAvgModelParserMapper.java | 54 ++ .../movavg/models/MovAvgModelStreams.java | 74 +++ .../reducers/movavg/models/SimpleModel.java | 86 +++ .../movavg/models/SingleExpModel.java | 133 +++++ .../models/TransportMovAvgModelModule.java | 51 ++ .../aggregations/reducers/MovAvgTests.java | 500 ++++++++++++++++++ 18 files changed, 1795 insertions(+), 2 deletions(-) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/LinearModel.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParser.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParserMapper.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelStreams.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SimpleModel.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java create mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index d1cb6d96800..a8d3895ec78 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -57,6 +57,8 @@ import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsParser; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelModule; import java.util.List; @@ -101,6 +103,7 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ aggParsers.add(ChildrenParser.class); reducerParsers.add(DerivativeParser.class); + reducerParsers.add(MovAvgParser.class); } /** @@ -129,7 +132,7 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ @Override public Iterable spawnModules() { - return ImmutableList.of(new SignificantTermsHeuristicModule()); + return ImmutableList.of(new SignificantTermsHeuristicModule(), new MovAvgModelModule()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java index fe4542830cc..c3d89cf4f8f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java @@ -59,6 +59,8 @@ import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount; import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgReducer; +import org.elasticsearch.search.aggregations.reducers.movavg.models.TransportMovAvgModelModule; /** * A module that registers all the transport streams for the addAggregation @@ -108,10 +110,11 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM // Reducers DerivativeReducer.registerStreams(); + MovAvgReducer.registerStreams(); } @Override public Iterable spawnModules() { - return ImmutableList.of(new TransportSignificantTermsHeuristicModule()); + return ImmutableList.of(new TransportSignificantTermsHeuristicModule(), new TransportMovAvgModelModule()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java index 21c901af80d..0aa8be4e992 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.reducers; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgBuilder; public final class ReducerBuilders { @@ -29,4 +30,8 @@ public final class ReducerBuilders { public static final DerivativeBuilder derivative(String name) { return new DerivativeBuilder(name); } + + public static final MovAvgBuilder smooth(String name) { + return new MovAvgBuilder(name); + } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java new file mode 100644 index 00000000000..9790604197d --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelBuilder; + +import java.io.IOException; + +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; + +/** + * A builder to create MovingAvg reducer aggregations + */ +public class MovAvgBuilder extends ReducerBuilder { + + private String format; + private GapPolicy gapPolicy; + private MovAvgModelBuilder modelBuilder; + private Integer window; + + public MovAvgBuilder(String name) { + super(name, MovAvgReducer.TYPE.name()); + } + + public MovAvgBuilder format(String format) { + this.format = format; + return this; + } + + /** + * Defines what should be done when a gap in the series is discovered + * + * @param gapPolicy A GapPolicy enum defining the selected policy + * @return Returns the builder to continue chaining + */ + public MovAvgBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + + /** + * Sets a MovAvgModelBuilder for the Moving Average. The model builder is used to + * define what type of moving average you want to use on the series + * + * @param modelBuilder A MovAvgModelBuilder which has been prepopulated with settings + * @return Returns the builder to continue chaining + */ + public MovAvgBuilder modelBuilder(MovAvgModelBuilder modelBuilder) { + this.modelBuilder = modelBuilder; + return this; + } + + /** + * Sets the window size for the moving average. This window will "slide" across the + * series, and the values inside that window will be used to calculate the moving avg value + * + * @param window Size of window + * @return Returns the builder to continue chaining + */ + public MovAvgBuilder window(int window) { + this.window = window; + return this; + } + + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(MovAvgParser.FORMAT.getPreferredName(), format); + } + if (gapPolicy != null) { + builder.field(MovAvgParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } + if (modelBuilder != null) { + modelBuilder.toXContent(builder, params); + } + if (window != null) { + builder.field(MovAvgParser.WINDOW.getPreferredName(), window); + } + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java new file mode 100644 index 00000000000..3f241a67b3a --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelParser; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelParserMapper; +import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; + +public class MovAvgParser implements Reducer.Parser { + + public static final ParseField FORMAT = new ParseField("format"); + public static final ParseField GAP_POLICY = new ParseField("gap_policy"); + public static final ParseField MODEL = new ParseField("model"); + public static final ParseField WINDOW = new ParseField("window"); + public static final ParseField SETTINGS = new ParseField("settings"); + + private final MovAvgModelParserMapper movAvgModelParserMapper; + + @Inject + public MovAvgParser(MovAvgModelParserMapper movAvgModelParserMapper) { + this.movAvgModelParserMapper = movAvgModelParserMapper; + } + + @Override + public String type() { + return MovAvgReducer.TYPE.name(); + } + + @Override + public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { + XContentParser.Token token; + String currentFieldName = null; + String[] bucketsPaths = null; + String format = null; + GapPolicy gapPolicy = GapPolicy.IGNORE; + int window = 5; + Map settings = null; + String model = "simple"; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_NUMBER) { + if (WINDOW.match(currentFieldName)) { + window = parser.intValue(); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "]."); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (FORMAT.match(currentFieldName)) { + format = parser.text(); + } else if (BUCKETS_PATH.match(currentFieldName)) { + bucketsPaths = new String[] { parser.text() }; + } else if (GAP_POLICY.match(currentFieldName)) { + gapPolicy = GapPolicy.parse(context, parser.text()); + } else if (MODEL.match(currentFieldName)) { + model = parser.text(); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "]."); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (BUCKETS_PATH.match(currentFieldName)) { + List paths = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + paths.add(path); + } + bucketsPaths = paths.toArray(new String[paths.size()]); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "]."); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (SETTINGS.match(currentFieldName)) { + settings = parser.map(); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "]."); + } + } else { + throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "]."); + } + } + + if (bucketsPaths == null) { + throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + + "] for smooth aggregation [" + reducerName + "]"); + } + + ValueFormatter formatter = null; + if (format != null) { + formatter = ValueFormat.Patternable.Number.format(format).formatter(); + } + + MovAvgModelParser modelParser = movAvgModelParserMapper.get(model); + if (modelParser == null) { + throw new SearchParseException(context, "Unknown model [" + model + + "] specified. Valid options are:" + movAvgModelParserMapper.getAllNames().toString()); + } + MovAvgModel movAvgModel = modelParser.parse(settings); + + + return new MovAvgReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy, window, movAvgModel); + } + + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java new file mode 100644 index 00000000000..b339cdf487d --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java @@ -0,0 +1,182 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg; + +import com.google.common.base.Function; +import com.google.common.collect.EvictingQueue; +import com.google.common.collect.Lists; +import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.*; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.reducers.*; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelStreams; +import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.resolveBucketValue; + +public class MovAvgReducer extends Reducer { + + public final static Type TYPE = new Type("moving_avg"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public MovAvgReducer readResult(StreamInput in) throws IOException { + MovAvgReducer result = new MovAvgReducer(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private static final Function FUNCTION = new Function() { + @Override + public InternalAggregation apply(Aggregation input) { + return (InternalAggregation) input; + } + }; + + private ValueFormatter formatter; + private GapPolicy gapPolicy; + private int window; + private MovAvgModel model; + + public MovAvgReducer() { + } + + public MovAvgReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + int window, MovAvgModel model, Map metadata) { + super(name, bucketsPaths, metadata); + this.formatter = formatter; + this.gapPolicy = gapPolicy; + this.window = window; + this.model = model; + } + + @Override + public Type type() { + return TYPE; + } + + @Override + public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { + InternalHistogram histo = (InternalHistogram) aggregation; + List buckets = histo.getBuckets(); + InternalHistogram.Factory factory = histo.getFactory(); + + List newBuckets = new ArrayList<>(); + EvictingQueue values = EvictingQueue.create(this.window); + + for (InternalHistogram.Bucket bucket : buckets) { + Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); + if (thisBucketValue != null) { + values.offer(thisBucketValue); + + // TODO handle "edge policy" + double movavg = model.next(values); + + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); + aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList(), metaData())); + InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( + aggs), bucket.getKeyed(), bucket.getFormatter()); + newBuckets.add(newBucket); + } else { + newBuckets.add(bucket); + } + } + //return factory.create(histo.getName(), newBuckets, histo); + return factory.create(newBuckets, histo); + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); + window = in.readVInt(); + model = MovAvgModelStreams.read(in); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); + out.writeVInt(window); + model.writeTo(out); + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + private GapPolicy gapPolicy; + private int window; + private MovAvgModel model; + + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + int window, MovAvgModel model) { + super(name, TYPE.name(), bucketsPaths); + this.formatter = formatter; + this.gapPolicy = gapPolicy; + this.window = window; + this.model = model; + } + + @Override + protected Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + Map metaData) throws IOException { + return new MovAvgReducer(name, bucketsPaths, formatter, gapPolicy, window, model, metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new ElasticsearchIllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + if (!(parent instanceof HistogramAggregator.Factory)) { + throw new ElasticsearchIllegalStateException("derivative reducer [" + name + + "] must have a histogram or date_histogram as parent"); + } else { + HistogramAggregator.Factory histoParent = (HistogramAggregator.Factory) parent; + if (histoParent.minDocCount() != 0) { + throw new ElasticsearchIllegalStateException("parent histogram of derivative reducer [" + name + + "] must have min_doc_count of 0"); + } + } + } + + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java new file mode 100644 index 00000000000..907c23fd213 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; + +import java.io.IOException; +import java.util.*; + +/** + * Calculate a doubly exponential weighted moving average + */ +public class DoubleExpModel extends MovAvgModel { + + protected static final ParseField NAME_FIELD = new ParseField("double_exp"); + + /** + * Controls smoothing of data. Alpha = 1 retains no memory of past values + * (e.g. random walk), while alpha = 0 retains infinite memory of past values (e.g. + * mean of the series). Useful values are somewhere in between + */ + private double alpha; + + /** + * Equivalent to alpha, but controls the smoothing of the trend instead of the data + */ + private double beta; + + public DoubleExpModel(double alpha, double beta) { + this.alpha = alpha; + this.beta = beta; + } + + + @Override + public double next(Collection values) { + return next(values, 1).get(0); + } + + /** + * Calculate a doubly exponential weighted moving average + * + * @param values Collection of values to calculate avg for + * @param numForecasts number of forecasts into the future to return + * + * @param Type T extending Number + * @return Returns a Double containing the moving avg for the window + */ + public List next(Collection values, int numForecasts) { + // Smoothed value + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + + int counter = 0; + + //TODO bail if too few values + + T last; + for (T v : values) { + last = v; + if (counter == 1) { + s = v.doubleValue(); + b = v.doubleValue() - last.doubleValue(); + } else { + s = alpha * v.doubleValue() + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + List forecastValues = new ArrayList<>(numForecasts); + for (int i = 0; i < numForecasts; i++) { + forecastValues.add(s + (i * b)); + } + + return forecastValues; + } + + public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() { + @Override + public MovAvgModel readResult(StreamInput in) throws IOException { + return new DoubleExpModel(in.readDouble(), in.readDouble()); + } + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + }; + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(STREAM.getName()); + out.writeDouble(alpha); + out.writeDouble(beta); + } + + public static class DoubleExpModelParser implements MovAvgModelParser { + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + + @Override + public MovAvgModel parse(@Nullable Map settings) { + + Double alpha; + Double beta; + + if (settings == null || (alpha = (Double)settings.get("alpha")) == null) { + alpha = 0.5; + } + + if (settings == null || (beta = (Double)settings.get("beta")) == null) { + beta = 0.5; + } + + return new DoubleExpModel(alpha, beta); + } + } + + public static class DoubleExpModelBuilder implements MovAvgModelBuilder { + + private double alpha = 0.5; + private double beta = 0.5; + + /** + * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values + * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. + * the series mean). Useful values are somewhere in between. Defaults to 0.5. + * + * @param alpha A double between 0-1 inclusive, controls data smoothing + * + * @return The builder to continue chaining + */ + public DoubleExpModelBuilder alpha(double alpha) { + this.alpha = alpha; + return this; + } + + /** + * Equivalent to alpha, but controls the smoothing of the trend instead of the data + * + * @param beta a double between 0-1 inclusive, controls trend smoothing + * + * @return The builder to continue chaining + */ + public DoubleExpModelBuilder beta(double beta) { + this.beta = beta; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(MovAvgParser.MODEL.getPreferredName(), NAME_FIELD.getPreferredName()); + builder.startObject(MovAvgParser.SETTINGS.getPreferredName()); + builder.field("alpha", alpha); + builder.field("beta", beta); + builder.endObject(); + return builder; + } + } +} + diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/LinearModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/LinearModel.java new file mode 100644 index 00000000000..6c269590d33 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/LinearModel.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; + +/** + * Calculate a linearly weighted moving average, such that older values are + * linearly less important. "Time" is determined by position in collection + */ +public class LinearModel extends MovAvgModel { + + protected static final ParseField NAME_FIELD = new ParseField("linear"); + + @Override + public double next(Collection values) { + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (T v : values) { + avg += v.doubleValue() * current; + totalWeight += current; + current += 1; + } + return avg / totalWeight; + } + + public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() { + @Override + public MovAvgModel readResult(StreamInput in) throws IOException { + return new LinearModel(); + } + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + }; + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(STREAM.getName()); + } + + public static class LinearModelParser implements MovAvgModelParser { + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + + @Override + public MovAvgModel parse(@Nullable Map settings) { + return new LinearModel(); + } + } + + public static class LinearModelBuilder implements MovAvgModelBuilder { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(MovAvgParser.MODEL.getPreferredName(), NAME_FIELD.getPreferredName()); + return builder; + } + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java new file mode 100644 index 00000000000..84f7832f893 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.*; + +public abstract class MovAvgModel { + + /** + * Returns the next value in the series, according to the underlying smoothing model + * + * @param values Collection of numerics to smooth, usually windowed + * @param Type of numeric + * @return Returns a double, since most smoothing methods operate on floating points + */ + public abstract double next(Collection values); + + /** + * Write the model to the output stream + * + * @param out Output stream + * @throws IOException + */ + public abstract void writeTo(StreamOutput out) throws IOException; +} + + + + diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java new file mode 100644 index 00000000000..96bc9427de3 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Represents the common interface that all moving average models share. Moving + * average models are used by the MovAvg reducer + */ +public interface MovAvgModelBuilder extends ToXContent { + public abstract XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java new file mode 100644 index 00000000000..71ccbcb31b0 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import com.google.common.collect.Lists; +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.inject.multibindings.Multibinder; + +import java.util.List; + +/** + * Register the various model parsers + */ +public class MovAvgModelModule extends AbstractModule { + + private List> parsers = Lists.newArrayList(); + + public MovAvgModelModule() { + registerParser(SimpleModel.SimpleModelParser.class); + registerParser(LinearModel.LinearModelParser.class); + registerParser(SingleExpModel.SingleExpModelParser.class); + registerParser(DoubleExpModel.DoubleExpModelParser.class); + } + + public void registerParser(Class parser) { + parsers.add(parser); + } + + @Override + protected void configure() { + Multibinder parserMapBinder = Multibinder.newSetBinder(binder(), MovAvgModelParser.class); + for (Class clazz : parsers) { + parserMapBinder.addBinding().to(clazz); + } + bind(MovAvgModelParserMapper.class); + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParser.java new file mode 100644 index 00000000000..d27e447baa4 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParser.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + + +import org.elasticsearch.common.Nullable; + +import java.util.Map; + +/** + * Common interface for parsers used by the various Moving Average models + */ +public interface MovAvgModelParser { + public MovAvgModel parse(@Nullable Map settings); + + public String getName(); +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParserMapper.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParserMapper.java new file mode 100644 index 00000000000..459729d8960 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParserMapper.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.inject.Inject; + +import java.util.Set; + +/** + * Contains a map of all concrete model parsers which can be used to build Models + */ +public class MovAvgModelParserMapper { + + protected ImmutableMap movAvgParsers; + + @Inject + public MovAvgModelParserMapper(Set parsers) { + MapBuilder builder = MapBuilder.newMapBuilder(); + for (MovAvgModelParser parser : parsers) { + builder.put(parser.getName(), parser); + } + movAvgParsers = builder.immutableMap(); + } + + public @Nullable + MovAvgModelParser get(String parserName) { + return movAvgParsers.get(parserName); + } + + public ImmutableSet getAllNames() { + return movAvgParsers.keySet(); + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelStreams.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelStreams.java new file mode 100644 index 00000000000..b11a3687021 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelStreams.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * A registry for all moving average models. This is needed for reading them from a stream without knowing which + * one it is. + */ +public class MovAvgModelStreams { + + private static ImmutableMap STREAMS = ImmutableMap.of(); + + public static MovAvgModel read(StreamInput in) throws IOException { + return stream(in.readString()).readResult(in); + } + + /** + * A stream that knows how to read an heuristic from the input. + */ + public static interface Stream { + + MovAvgModel readResult(StreamInput in) throws IOException; + + String getName(); + } + + /** + * Registers the given stream and associate it with the given types. + * + * @param stream The stream to register + * @param names The names associated with the streams + */ + public static synchronized void registerStream(Stream stream, String... names) { + MapBuilder uStreams = MapBuilder.newMapBuilder(STREAMS); + for (String name : names) { + uStreams.put(name, stream); + } + STREAMS = uStreams.immutableMap(); + } + + /** + * Returns the stream that is registered for the given name + * + * @param name The given name + * @return The associated stream + */ + public static Stream stream(String name) { + return STREAMS.get(name); + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SimpleModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SimpleModel.java new file mode 100644 index 00000000000..243b022af2c --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SimpleModel.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; + +/** + * Calculate a simple unweighted (arithmetic) moving average + */ +public class SimpleModel extends MovAvgModel { + + protected static final ParseField NAME_FIELD = new ParseField("simple"); + + @Override + public double next(Collection values) { + double avg = 0; + for (T v : values) { + avg += v.doubleValue(); + } + return avg / values.size(); + } + + public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() { + @Override + public MovAvgModel readResult(StreamInput in) throws IOException { + return new SimpleModel(); + } + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + }; + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(STREAM.getName()); + } + + public static class SimpleModelParser implements MovAvgModelParser { + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + + @Override + public MovAvgModel parse(@Nullable Map settings) { + return new SimpleModel(); + } + } + + public static class SimpleModelBuilder implements MovAvgModelBuilder { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(MovAvgParser.MODEL.getPreferredName(), NAME_FIELD.getPreferredName()); + return builder; + } + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java new file mode 100644 index 00000000000..f17ba68f498 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java @@ -0,0 +1,133 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; + +/** + * Calculate a exponentially weighted moving average + */ +public class SingleExpModel extends MovAvgModel { + + protected static final ParseField NAME_FIELD = new ParseField("single_exp"); + + /** + * Controls smoothing of data. Alpha = 1 retains no memory of past values + * (e.g. random walk), while alpha = 0 retains infinite memory of past values (e.g. + * mean of the series). Useful values are somewhere in between + */ + private double alpha; + + public SingleExpModel(double alpha) { + this.alpha = alpha; + } + + + @Override + public double next(Collection values) { + double avg = 0; + boolean first = true; + + for (T v : values) { + if (first) { + avg = v.doubleValue(); + first = false; + } else { + avg = (v.doubleValue() * alpha) + (avg * (1 - alpha)); + } + } + return avg; + } + + public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() { + @Override + public MovAvgModel readResult(StreamInput in) throws IOException { + return new SingleExpModel(in.readDouble()); + } + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + }; + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(STREAM.getName()); + out.writeDouble(alpha); + } + + public static class SingleExpModelParser implements MovAvgModelParser { + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + + @Override + public MovAvgModel parse(@Nullable Map settings) { + + Double alpha; + if (settings == null || (alpha = (Double)settings.get("alpha")) == null) { + alpha = 0.5; + } + + return new SingleExpModel(alpha); + } + } + + public static class SingleExpModelBuilder implements MovAvgModelBuilder { + + private double alpha = 0.5; + + /** + * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values + * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. + * the series mean). Useful values are somewhere in between. Defaults to 0.5. + * + * @param alpha A double between 0-1 inclusive, controls data smoothing + * + * @return The builder to continue chaining + */ + public SingleExpModelBuilder alpha(double alpha) { + this.alpha = alpha; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(MovAvgParser.MODEL.getPreferredName(), NAME_FIELD.getPreferredName()); + builder.startObject(MovAvgParser.SETTINGS.getPreferredName()); + builder.field("alpha", alpha); + builder.endObject(); + return builder; + } + } +} + diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java new file mode 100644 index 00000000000..bc085f6241a --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import com.google.common.collect.Lists; +import org.elasticsearch.common.inject.AbstractModule; + +import java.util.List; + +/** + * Register the transport streams so that models can be serialized/deserialized from the stream + */ +public class TransportMovAvgModelModule extends AbstractModule { + + private List streams = Lists.newArrayList(); + + public TransportMovAvgModelModule() { + registerStream(SimpleModel.STREAM); + registerStream(LinearModel.STREAM); + registerStream(SingleExpModel.STREAM); + registerStream(DoubleExpModel.STREAM); + } + + public void registerStream(MovAvgModelStreams.Stream stream) { + streams.add(stream); + } + + @Override + protected void configure() { + for (MovAvgModelStreams.Stream stream : streams) { + MovAvgModelStreams.registerStream(stream, stream.getName()); + } + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java new file mode 100644 index 00000000000..d22656b0ad5 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java @@ -0,0 +1,500 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + + +import com.google.common.collect.EvictingQueue; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; +import org.elasticsearch.search.aggregations.reducers.movavg.models.DoubleExpModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.LinearModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.SimpleModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.SingleExpModel; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.*; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.smooth; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class MovAvgTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + private static final String SINGLE_VALUED_VALUE_FIELD_NAME = "v_value"; + + static int interval; + static int numValueBuckets; + static int numFilledValueBuckets; + static int windowSize; + static BucketHelpers.GapPolicy gapPolicy; + + static long[] docCounts; + static long[] valueCounts; + static Double[] simpleMovAvgCounts; + static Double[] linearMovAvgCounts; + static Double[] singleExpMovAvgCounts; + static Double[] doubleExpMovAvgCounts; + + static Double[] simpleMovAvgValueCounts; + static Double[] linearMovAvgValueCounts; + static Double[] singleExpMovAvgValueCounts; + static Double[] doubleExpMovAvgValueCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + interval = 5; + numValueBuckets = randomIntBetween(6, 80); + numFilledValueBuckets = numValueBuckets; + windowSize = randomIntBetween(3,10); + gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; + + docCounts = new long[numValueBuckets]; + valueCounts = new long[numValueBuckets]; + for (int i = 0; i < numValueBuckets; i++) { + docCounts[i] = randomIntBetween(0, 20); + valueCounts[i] = randomIntBetween(1,20); //this will be used as a constant for all values within a bucket + } + + this.setupSimple(); + this.setupLinear(); + this.setupSingle(); + this.setupDouble(); + + + List builders = new ArrayList<>(); + for (int i = 0; i < numValueBuckets; i++) { + for (int docs = 0; docs < docCounts[i]; docs++) { + builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i * interval) + .field(SINGLE_VALUED_VALUE_FIELD_NAME, 1).endObject())); + } + } + + indexRandom(true, builders); + ensureSearchable(); + } + + private void setupSimple() { + simpleMovAvgCounts = new Double[numValueBuckets]; + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + window.offer(thisValue); + + double movAvg = 0; + for (double value : window) { + movAvg += value; + } + movAvg /= window.size(); + + simpleMovAvgCounts[i] = movAvg; + } + + window.clear(); + simpleMovAvgValueCounts = new Double[numValueBuckets]; + for (int i = 0; i < numValueBuckets; i++) { + window.offer((double)docCounts[i]); + + double movAvg = 0; + for (double value : window) { + movAvg += value; + } + movAvg /= window.size(); + + simpleMovAvgValueCounts[i] = movAvg; + + } + + } + + private void setupLinear() { + EvictingQueue window = EvictingQueue.create(windowSize); + linearMovAvgCounts = new Double[numValueBuckets]; + window.clear(); + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + if (thisValue == -1) { + thisValue = 0; + } + window.offer(thisValue); + + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; + } + linearMovAvgCounts[i] = avg / totalWeight; + } + + window.clear(); + linearMovAvgValueCounts = new Double[numValueBuckets]; + + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + window.offer(thisValue); + + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; + } + linearMovAvgValueCounts[i] = avg / totalWeight; + } + } + + private void setupSingle() { + EvictingQueue window = EvictingQueue.create(windowSize); + singleExpMovAvgCounts = new Double[numValueBuckets]; + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + if (thisValue == -1) { + thisValue = 0; + } + window.offer(thisValue); + + double avg = 0; + double alpha = 0.5; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + singleExpMovAvgCounts[i] = avg ; + } + + singleExpMovAvgValueCounts = new Double[numValueBuckets]; + window.clear(); + + for (int i = 0; i < numValueBuckets; i++) { + window.offer((double)docCounts[i]); + + double avg = 0; + double alpha = 0.5; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + singleExpMovAvgCounts[i] = avg ; + } + + } + + private void setupDouble() { + EvictingQueue window = EvictingQueue.create(windowSize); + doubleExpMovAvgCounts = new Double[numValueBuckets]; + + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + if (thisValue == -1) { + thisValue = 0; + } + window.offer(thisValue); + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + + double alpha = 0.5; + double beta = 0.5; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + doubleExpMovAvgCounts[i] = s + (0 * b) ; + } + + doubleExpMovAvgValueCounts = new Double[numValueBuckets]; + window.clear(); + + for (int i = 0; i < numValueBuckets; i++) { + window.offer((double)docCounts[i]); + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + + double alpha = 0.5; + double beta = 0.5; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + doubleExpMovAvgValueCounts[i] = s + (0 * b) ; + } + } + + /** + * test simple moving average on single value field + */ + @Test + public void simpleSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(smooth("smooth") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(smooth("movavg_values") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); + SimpleValue docCountMovAvg = bucket.getAggregations().get("smooth"); + assertThat(docCountMovAvg, notNullValue()); + assertThat(docCountMovAvg.value(), equalTo(simpleMovAvgCounts[i])); + + SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); + assertThat(valuesMovAvg, notNullValue()); + assertThat(valuesMovAvg.value(), equalTo(simpleMovAvgCounts[i])); + } + } + + /** + * test linear moving average on single value field + */ + @Test + public void linearSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(smooth("smooth") + .window(windowSize) + .modelBuilder(new LinearModel.LinearModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(smooth("movavg_values") + .window(windowSize) + .modelBuilder(new LinearModel.LinearModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); + SimpleValue docCountMovAvg = bucket.getAggregations().get("smooth"); + assertThat(docCountMovAvg, notNullValue()); + assertThat(docCountMovAvg.value(), equalTo(linearMovAvgCounts[i])); + + SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); + assertThat(valuesMovAvg, notNullValue()); + assertThat(valuesMovAvg.value(), equalTo(linearMovAvgCounts[i])); + } + } + + /** + * test single exponential moving average on single value field + */ + @Test + public void singleExpSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(smooth("smooth") + .window(windowSize) + .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(smooth("movavg_values") + .window(windowSize) + .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); + SimpleValue docCountMovAvg = bucket.getAggregations().get("smooth"); + assertThat(docCountMovAvg, notNullValue()); + assertThat(docCountMovAvg.value(), equalTo(singleExpMovAvgCounts[i])); + + SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); + assertThat(valuesMovAvg, notNullValue()); + assertThat(valuesMovAvg.value(), equalTo(singleExpMovAvgCounts[i])); + } + } + + /** + * test double exponential moving average on single value field + */ + @Test + public void doubleExpSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(smooth("smooth") + .window(windowSize) + .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(smooth("movavg_values") + .window(windowSize) + .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); + SimpleValue docCountMovAvg = bucket.getAggregations().get("smooth"); + assertThat(docCountMovAvg, notNullValue()); + assertThat(docCountMovAvg.value(), equalTo(doubleExpMovAvgCounts[i])); + + SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); + assertThat(valuesMovAvg, notNullValue()); + assertThat(valuesMovAvg.value(), equalTo(doubleExpMovAvgCounts[i])); + } + } + + + private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey, + long expectedDocCount) { + if (expectedDocCount == -1) { + expectedDocCount = 0; + } + assertThat(msg, bucket, notNullValue()); + assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey)); + assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount)); + } + +} From e19d20b407ce3a652c6a63d0bf335431b1fe0fde Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 17 Mar 2015 16:36:37 -0700 Subject: [PATCH 039/236] max bucket reducer and sibling reducer framework --- .../percolate/PercolateRequestBuilder.java | 6 +- .../percolate/PercolateShardResponse.java | 31 ++++ .../percolate/PercolateSourceBuilder.java | 5 +- .../action/search/SearchRequestBuilder.java | 1 + .../percolator/PercolatorService.java | 55 +++++-- .../aggregations/AggregationModule.java | 2 + .../search/aggregations/AggregationPhase.java | 22 ++- .../aggregations/AggregatorFactories.java | 18 +- .../TransportAggregationModule.java | 6 +- .../aggregations/reducers/BucketHelpers.java | 14 +- .../search/aggregations/reducers/Reducer.java | 10 ++ .../aggregations/reducers/ReducerBuilder.java | 15 +- .../reducers/ReducerBuilders.java | 5 + .../aggregations/reducers/ReducerFactory.java | 8 +- .../aggregations/reducers/SiblingReducer.java | 65 ++++++++ .../InternalBucketMetricValue.java | 132 +++++++++++++++ .../bucketmetrics/MaxBucketBuilder.java | 48 ++++++ .../bucketmetrics/MaxBucketParser.java | 92 +++++++++++ .../bucketmetrics/MaxBucketReducer.java | 144 ++++++++++++++++ .../derivative/DerivativeReducer.java | 16 +- .../reducers/movavg/MovAvgReducer.java | 17 +- .../search/builder/SearchSourceBuilder.java | 155 +++++++++++------- .../controller/SearchPhaseController.java | 33 +++- .../search/query/QuerySearchResult.java | 36 +++- .../PercolatorFacetsAndAggregationsTests.java | 79 +++++++++ .../aggregations/reducers/MaxBucketTests.java | 123 ++++++++++++++ 26 files changed, 1010 insertions(+), 128 deletions(-) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/SiblingReducer.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/InternalBucketMetricValue.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java create mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java b/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java index e1309a5c095..732e08ac36b 100644 --- a/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java @@ -28,7 +28,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortBuilder; @@ -162,9 +164,9 @@ public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder reducers; PercolateShardResponse() { hls = new ArrayList<>(); @@ -69,6 +75,7 @@ public class PercolateShardResponse extends BroadcastShardOperationResponse { if (result.aggregations() != null) { this.aggregations = (InternalAggregations) result.aggregations(); } + this.reducers = result.reducers(); } } @@ -112,6 +119,10 @@ public class PercolateShardResponse extends BroadcastShardOperationResponse { return aggregations; } + public List reducers() { + return reducers; + } + public byte percolatorTypeId() { return percolatorTypeId; } @@ -144,6 +155,16 @@ public class PercolateShardResponse extends BroadcastShardOperationResponse { hls.add(fields); } aggregations = InternalAggregations.readOptionalAggregations(in); + if (in.readBoolean()) { + int reducersSize = in.readVInt(); + List reducers = new ArrayList<>(reducersSize); + for (int i = 0; i < reducersSize; i++) { + BytesReference type = in.readBytesReference(); + Reducer reducer = ReducerStreams.stream(type).readResult(in); + reducers.add((SiblingReducer) reducer); + } + this.reducers = reducers; + } } @Override @@ -169,5 +190,15 @@ public class PercolateShardResponse extends BroadcastShardOperationResponse { } } out.writeOptionalStreamable(aggregations); + if (reducers == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(reducers.size()); + for (Reducer reducer : reducers) { + out.writeBytesReference(reducer.type().stream()); + reducer.writeTo(out); + } + } } } diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java b/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java index f09e630f459..68fc57b2a17 100644 --- a/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java +++ b/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortBuilder; @@ -50,7 +51,7 @@ public class PercolateSourceBuilder implements ToXContent { private List sorts; private Boolean trackScores; private HighlightBuilder highlightBuilder; - private List aggregations; + private List aggregations; /** * Sets the document to run the percolate queries against. @@ -130,7 +131,7 @@ public class PercolateSourceBuilder implements ToXContent { /** * Add an aggregation definition. */ - public PercolateSourceBuilder addAggregation(AggregationBuilder aggregationBuilder) { + public PercolateSourceBuilder addAggregation(AbstractAggregationBuilder aggregationBuilder) { if (aggregations == null) { aggregations = Lists.newArrayList(); } diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 59d6db804b0..fcead5866b7 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder; import org.elasticsearch.search.highlight.HighlightBuilder; diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index f19b3b076e7..cd5db78226d 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -19,11 +19,20 @@ package org.elasticsearch.percolator; import com.carrotsearch.hppc.ByteObjectOpenHashMap; +import com.google.common.collect.Lists; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.memory.ExtendedMemoryIndex; import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.*; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.FilteredQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; @@ -58,20 +67,30 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.mapper.*; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.percolator.stats.ShardPercolateService; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.percolator.QueryCollector.*; +import org.elasticsearch.percolator.QueryCollector.Count; +import org.elasticsearch.percolator.QueryCollector.Match; +import org.elasticsearch.percolator.QueryCollector.MatchAndScore; +import org.elasticsearch.percolator.QueryCollector.MatchAndSort; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationPhase; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; import org.elasticsearch.search.highlight.HighlightField; import org.elasticsearch.search.highlight.HighlightPhase; import org.elasticsearch.search.internal.SearchContext; @@ -83,7 +102,9 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.index.mapper.SourceToParse.source; -import static org.elasticsearch.percolator.QueryCollector.*; +import static org.elasticsearch.percolator.QueryCollector.count; +import static org.elasticsearch.percolator.QueryCollector.match; +import static org.elasticsearch.percolator.QueryCollector.matchAndScore; public class PercolatorService extends AbstractComponent { @@ -826,15 +847,29 @@ public class PercolatorService extends AbstractComponent { return null; } + InternalAggregations aggregations; if (shardResults.size() == 1) { - return shardResults.get(0).aggregations(); + aggregations = shardResults.get(0).aggregations(); + } else { + List aggregationsList = new ArrayList<>(shardResults.size()); + for (PercolateShardResponse shardResult : shardResults) { + aggregationsList.add(shardResult.aggregations()); + } + aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(null, bigArrays, scriptService)); } - - List aggregationsList = new ArrayList<>(shardResults.size()); - for (PercolateShardResponse shardResult : shardResults) { - aggregationsList.add(shardResult.aggregations()); + if (aggregations != null) { + List reducers = shardResults.get(0).reducers(); + if (reducers != null) { + List newAggs = new ArrayList<>(Lists.transform(aggregations.asList(), Reducer.AGGREGATION_TRANFORM_FUNCTION)); + for (SiblingReducer reducer : reducers) { + InternalAggregation newAgg = reducer.doReduce(new InternalAggregations(newAggs), new ReduceContext(null, bigArrays, + scriptService)); + newAggs.add(newAgg); + } + aggregations = new InternalAggregations(newAggs); + } } - return InternalAggregations.reduce(aggregationsList, new ReduceContext(null, bigArrays, scriptService)); + return aggregations; } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index a8d3895ec78..e9da6e719b9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -56,6 +56,7 @@ import org.elasticsearch.search.aggregations.metrics.sum.SumParser; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsParser; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser; import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.MaxBucketParser; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelModule; @@ -103,6 +104,7 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ aggParsers.add(ChildrenParser.class); reducerParsers.add(DerivativeParser.class); + reducerParsers.add(MaxBucketParser.class); reducerParsers.add(MovAvgParser.class); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 9d627310142..dd915b80f87 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.query.QueryPhaseExecutionException; @@ -74,8 +76,11 @@ public class AggregationPhase implements SearchPhase { List collectors = new ArrayList<>(); Aggregator[] aggregators; + List reducers; try { - aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext); + AggregatorFactories factories = context.aggregations().factories(); + aggregators = factories.createTopLevelAggregators(aggregationContext); + reducers = factories.createReducers(); } catch (IOException e) { throw new AggregationInitializationException("Could not initialize aggregators", e); } @@ -136,6 +141,21 @@ public class AggregationPhase implements SearchPhase { } } context.queryResult().aggregations(new InternalAggregations(aggregations)); + try { + List reducers = context.aggregations().factories().createReducers(); + List siblingReducers = new ArrayList<>(reducers.size()); + for (Reducer reducer : reducers) { + if (reducer instanceof SiblingReducer) { + siblingReducers.add((SiblingReducer) reducer); + } else { + throw new AggregationExecutionException("Invalid reducer named [" + reducer.name() + "] of type [" + + reducer.type().name() + "]. Only sibling reducers are allowed at the top level"); + } + } + context.queryResult().reducers(siblingReducers); + } catch (IOException e) { + throw new AggregationExecutionException("Failed to build top level reducers", e); + } // disable aggregations so that they don't run on next pages in case of scrolling context.aggregations(null); diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 1a4c157da8e..1a4dcd4f177 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -56,7 +56,7 @@ public class AggregatorFactories { public List createReducers() throws IOException { List reducers = new ArrayList<>(); for (ReducerFactory factory : this.reducerFactories) { - reducers.add(factory.create(null, null, false)); // NOCOMIT add context, parent etc. + reducers.add(factory.create()); } return reducers; } @@ -213,14 +213,18 @@ public class AggregatorFactories { temporarilyMarked.add(factory); String[] bucketsPaths = factory.getBucketsPaths(); for (String bucketsPath : bucketsPaths) { - ReducerFactory matchingFactory = reducerFactoriesMap.get(bucketsPath); - if (bucketsPath.equals("_count") || bucketsPath.equals("_key") || aggFactoryNames.contains(bucketsPath)) { + int aggSepIndex = bucketsPath.indexOf('>'); + String firstAggName = aggSepIndex == -1 ? bucketsPath : bucketsPath.substring(0, aggSepIndex); + if (bucketsPath.equals("_count") || bucketsPath.equals("_key") || aggFactoryNames.contains(firstAggName)) { continue; - } else if (matchingFactory != null) { - resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, temporarilyMarked, - matchingFactory); } else { - throw new ElasticsearchIllegalStateException("No reducer found for path [" + bucketsPath + "]"); + ReducerFactory matchingFactory = reducerFactoriesMap.get(firstAggName); + if (matchingFactory != null) { + resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, + temporarilyMarked, matchingFactory); + } else { + throw new ElasticsearchIllegalStateException("No aggregation found for path [" + bucketsPath + "]"); + } } } unmarkedFactories.remove(factory); diff --git a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java index c3d89cf4f8f..d405db6c741 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java @@ -58,6 +58,8 @@ import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount; import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.MaxBucketReducer; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgReducer; import org.elasticsearch.search.aggregations.reducers.movavg.models.TransportMovAvgModelModule; @@ -106,10 +108,12 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM InternalTopHits.registerStreams(); InternalGeoBounds.registerStream(); InternalChildren.registerStream(); - InternalSimpleValue.registerStreams(); // Reducers DerivativeReducer.registerStreams(); + InternalSimpleValue.registerStreams(); + InternalBucketMetricValue.registerStreams(); + MaxBucketReducer.registerStreams(); MovAvgReducer.registerStreams(); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java index f92a2b70d3b..30d6fc0107e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java @@ -25,8 +25,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.AggregationExecutionException; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.InvalidAggregationPathException; -import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; import org.elasticsearch.search.aggregations.support.AggregationPath; @@ -143,10 +143,16 @@ public class BucketHelpers { * @param gapPolicy The gap policy to apply if empty buckets are found * @return The value extracted from bucket found at aggPath */ - public static Double resolveBucketValue(InternalHistogram histo, InternalHistogram.Bucket bucket, - String aggPath, GapPolicy gapPolicy) { + public static Double resolveBucketValue(InternalMultiBucketAggregation agg, + InternalMultiBucketAggregation.Bucket bucket, String aggPath, GapPolicy gapPolicy) { + List aggPathsList = AggregationPath.parse(aggPath).getPathElementsAsStringList(); + return resolveBucketValue(agg, bucket, aggPathsList, gapPolicy); + } + + public static Double resolveBucketValue(InternalMultiBucketAggregation agg, + InternalMultiBucketAggregation.Bucket bucket, List aggPathsList, GapPolicy gapPolicy) { try { - Object propertyValue = bucket.getProperty(histo.getName(), AggregationPath.parse(aggPath).getPathElementsAsStringList()); + Object propertyValue = bucket.getProperty(agg.getName(), aggPathsList); if (propertyValue == null) { throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + " must reference either a number value or a single value numeric metric aggregation"); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java index ed602b31751..3c0b4fdbe22 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java @@ -19,11 +19,14 @@ package org.elasticsearch.search.aggregations.reducers; +import com.google.common.base.Function; + import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.Type; @@ -66,6 +69,13 @@ public abstract class Reducer implements Streamable { } + public static final Function AGGREGATION_TRANFORM_FUNCTION = new Function() { + @Override + public InternalAggregation apply(Aggregation input) { + return (InternalAggregation) input; + } + }; + private String name; private String[] bucketsPaths; private Map metaData; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java index 0f0f9225635..4dee8ea96a2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.reducers; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import java.io.IOException; import java.util.Map; @@ -28,10 +29,8 @@ import java.util.Map; /** * A base class for all reducer builders. */ -public abstract class ReducerBuilder> implements ToXContent { +public abstract class ReducerBuilder> extends AbstractAggregationBuilder { - private final String name; - protected final String type; private String[] bucketsPaths; private Map metaData; @@ -39,15 +38,7 @@ public abstract class ReducerBuilder> implements ToX * Sole constructor, typically used by sub-classes. */ protected ReducerBuilder(String name, String type) { - this.name = name; - this.type = type; - } - - /** - * Return the name of the reducer that is being built. - */ - public String getName() { - return name; + super(name, type); } /** diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java index 0aa8be4e992..3f45964153b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.reducers; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.MaxBucketBuilder; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeBuilder; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgBuilder; @@ -31,6 +32,10 @@ public final class ReducerBuilders { return new DerivativeBuilder(name); } + public static final MaxBucketBuilder maxBucket(String name) { + return new MaxBucketBuilder(name); + } + public static final MovAvgBuilder smooth(String name) { return new MovAvgBuilder(name); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java index ccdd2ac0328..46ac844808c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.reducers; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; import java.util.List; @@ -62,8 +61,7 @@ public abstract class ReducerFactory { doValidate(parent, factories, reducerFactories); } - protected abstract Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, - Map metaData) throws IOException; + protected abstract Reducer createInternal(Map metaData) throws IOException; /** * Creates the reducer @@ -81,8 +79,8 @@ public abstract class ReducerFactory { * * @return The created aggregator */ - public final Reducer create(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket) throws IOException { - Reducer aggregator = createInternal(context, parent, collectsFromSingleBucket, this.metaData); + public final Reducer create() throws IOException { + Reducer aggregator = createInternal(this.metaData); return aggregator; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/SiblingReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/SiblingReducer.java new file mode 100644 index 00000000000..b0be9634ddc --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/SiblingReducer.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import com.google.common.collect.Lists; + +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public abstract class SiblingReducer extends Reducer { + + protected SiblingReducer() { // for Serialisation + super(); + } + + protected SiblingReducer(String name, String[] bucketsPaths, Map metaData) { + super(name, bucketsPaths, metaData); + } + + @SuppressWarnings("unchecked") + @Override + public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { + @SuppressWarnings("rawtypes") + InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation; + List buckets = multiBucketsAgg.getBuckets(); + List newBuckets = new ArrayList<>(); + for (int i = 0; i < buckets.size(); i++) { + InternalMultiBucketAggregation.InternalBucket bucket = (InternalMultiBucketAggregation.InternalBucket) buckets.get(i); + InternalAggregation aggToAdd = doReduce(bucket.getAggregations(), reduceContext); + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION)); + aggs.add(aggToAdd); + InternalMultiBucketAggregation.InternalBucket newBucket = multiBucketsAgg.createBucket(new InternalAggregations(aggs), bucket); + newBuckets.add(newBucket); + } + + return multiBucketsAgg.create(newBuckets); + } + + public abstract InternalAggregation doReduce(Aggregations aggregations, ReduceContext context); +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/InternalBucketMetricValue.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/InternalBucketMetricValue.java new file mode 100644 index 00000000000..69b23ae91ef --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/InternalBucketMetricValue.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics; + +import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.AggregationStreams; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class InternalBucketMetricValue extends InternalNumericMetricsAggregation.SingleValue { + + public final static Type TYPE = new Type("bucket_metric_value"); + + public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() { + @Override + public InternalBucketMetricValue readResult(StreamInput in) throws IOException { + InternalBucketMetricValue result = new InternalBucketMetricValue(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + AggregationStreams.registerStream(STREAM, TYPE.stream()); + } + + private double value; + + private String[] keys; + + protected InternalBucketMetricValue() { + super(); + } + + public InternalBucketMetricValue(String name, String[] keys, double value, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { + super(name, reducers, metaData); + this.keys = keys; + this.value = value; + this.valueFormatter = formatter; + } + + @Override + public Type type() { + return TYPE; + } + + @Override + public double value() { + return value; + } + + public String[] keys() { + return keys; + } + + @Override + public InternalAggregation doReduce(ReduceContext reduceContext) { + throw new UnsupportedOperationException("Not supported"); + } + + @Override + public Object getProperty(List path) { + if (path.isEmpty()) { + return this; + } else if (path.size() == 1 && "value".equals(path.get(0))) { + return value(); + } else if (path.size() == 1 && "keys".equals(path.get(0))) { + return keys(); + } else { + throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path); + } + } + + @Override + protected void doReadFrom(StreamInput in) throws IOException { + valueFormatter = ValueFormatterStreams.readOptional(in); + value = in.readDouble(); + keys = in.readStringArray(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(valueFormatter, out); + out.writeDouble(value); + out.writeStringArray(keys); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + boolean hasValue = !Double.isInfinite(value); + builder.field(CommonFields.VALUE, hasValue ? value : null); + if (hasValue && valueFormatter != null) { + builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(value)); + } + builder.startArray("keys"); + for (String key : keys) { + builder.value(key); + } + builder.endArray(); + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java new file mode 100644 index 00000000000..eb04617e548 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; + +import java.io.IOException; + +public class MaxBucketBuilder extends ReducerBuilder { + + private String format; + + public MaxBucketBuilder(String name) { + super(name, MaxBucketReducer.TYPE.name()); + } + + public MaxBucketBuilder format(String format) { + this.format = format; + return this; + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(MaxBucketParser.FORMAT.getPreferredName(), format); + } + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java new file mode 100644 index 00000000000..2a9dab3b6bd --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class MaxBucketParser implements Reducer.Parser { + public static final ParseField FORMAT = new ParseField("format"); + + @Override + public String type() { + return MaxBucketReducer.TYPE.name(); + } + + @Override + public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { + XContentParser.Token token; + String currentFieldName = null; + String[] bucketsPaths = null; + String format = null; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if (FORMAT.match(currentFieldName)) { + format = parser.text(); + } else if (BUCKETS_PATH.match(currentFieldName)) { + bucketsPaths = new String[] { parser.text() }; + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "]."); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (BUCKETS_PATH.match(currentFieldName)) { + List paths = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + paths.add(path); + } + bucketsPaths = paths.toArray(new String[paths.size()]); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "]."); + } + } else { + throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "]."); + } + } + + if (bucketsPaths == null) { + throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + + "] for derivative aggregation [" + reducerName + "]"); + } + + ValueFormatter formatter = null; + if (format != null) { + formatter = ValueFormat.Patternable.Number.format(format).formatter(); + } + + return new MaxBucketReducer.Factory(reducerName, bucketsPaths, formatter); + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java new file mode 100644 index 00000000000..e209684797c --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics; + +import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class MaxBucketReducer extends SiblingReducer { + + public final static Type TYPE = new Type("max_bucket"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public MaxBucketReducer readResult(StreamInput in) throws IOException { + MaxBucketReducer result = new MaxBucketReducer(); + result.readFrom(in); + return result; + } + }; + + private ValueFormatter formatter; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private MaxBucketReducer() { + } + + protected MaxBucketReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, Map metaData) { + super(name, bucketsPaths, metaData); + this.formatter = formatter; + } + + @Override + public Type type() { + return TYPE; + } + + public InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) { + List maxBucketKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + List bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList(); + for (Aggregation aggregation : aggregations) { + if (aggregation.getName().equals(bucketsPath.get(0))) { + bucketsPath = bucketsPath.subList(1, bucketsPath.size()); + InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation; + List buckets = multiBucketsAgg.getBuckets(); + for (int i = 0; i < buckets.size(); i++) { + Bucket bucket = buckets.get(i); + Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, GapPolicy.IGNORE); + if (bucketValue != null) { + if (bucketValue > maxValue) { + maxBucketKeys.clear(); + maxBucketKeys.add(bucket.getKeyAsString()); + maxValue = bucketValue; + } else if (bucketValue.equals(maxValue)) { + maxBucketKeys.add(bucket.getKeyAsString()); + } + } + } + } + } + String[] keys = maxBucketKeys.toArray(new String[maxBucketKeys.size()]); + return new InternalBucketMetricValue(name(), keys, maxValue, formatter, Collections.EMPTY_LIST, metaData()); + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter) { + super(name, TYPE.name(), bucketsPaths); + this.formatter = formatter; + } + + @Override + protected Reducer createInternal(Map metaData) throws IOException { + return new MaxBucketReducer(name, bucketsPaths, formatter, metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new ElasticsearchIllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + } + + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 40a5b005560..a58d0f0e74e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -19,15 +19,12 @@ package org.elasticsearch.search.aggregations.reducers.derivative; -import com.google.common.base.Function; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; @@ -40,7 +37,6 @@ import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.ReducerStreams; -import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -68,13 +64,6 @@ public class DerivativeReducer extends Reducer { ReducerStreams.registerStream(STREAM, TYPE.stream()); } - private static final Function FUNCTION = new Function() { - @Override - public InternalAggregation apply(Aggregation input) { - return (InternalAggregation) input; - } - }; - private ValueFormatter formatter; private GapPolicy gapPolicy; @@ -106,7 +95,7 @@ public class DerivativeReducer extends Reducer { if (lastBucketValue != null) { double diff = thisBucketValue - lastBucketValue; - List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION)); aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList(), metaData())); InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( aggs), bucket.getKeyed(), bucket.getFormatter()); @@ -143,8 +132,7 @@ public class DerivativeReducer extends Reducer { } @Override - protected Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, - Map metaData) throws IOException { + protected Reducer createInternal(Map metaData) throws IOException { return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, metaData); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java index b339cdf487d..20baa1706f1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java @@ -22,19 +22,26 @@ package org.elasticsearch.search.aggregations.reducers.movavg; import com.google.common.base.Function; import com.google.common.collect.EvictingQueue; import com.google.common.collect.Lists; + import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.aggregations.*; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; -import org.elasticsearch.search.aggregations.reducers.*; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModel; import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelStreams; -import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -43,7 +50,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.resolveBucketValue; public class MovAvgReducer extends Reducer { @@ -155,8 +161,7 @@ public class MovAvgReducer extends Reducer { } @Override - protected Reducer createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, - Map metaData) throws IOException { + protected Reducer createInternal(Map metaData) throws IOException { return new MovAvgReducer(name, bucketsPaths, formatter, gapPolicy, window, model, metaData); } diff --git a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 6adfb53fd41..05ebaf44e05 100644 --- a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectFloatOpenHashMap; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; + import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.client.Requests; @@ -38,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.HighlightBuilder; @@ -55,9 +57,10 @@ import java.util.List; import java.util.Map; /** - * A search source builder allowing to easily build search source. Simple construction - * using {@link org.elasticsearch.search.builder.SearchSourceBuilder#searchSource()}. - * + * A search source builder allowing to easily build search source. Simple + * construction using + * {@link org.elasticsearch.search.builder.SearchSourceBuilder#searchSource()}. + * * @see org.elasticsearch.action.search.SearchRequest#source(SearchSourceBuilder) */ public class SearchSourceBuilder implements ToXContent { @@ -109,7 +112,6 @@ public class SearchSourceBuilder implements ToXContent { private List aggregations; private BytesReference aggregationsBinary; - private HighlightBuilder highlightBuilder; private SuggestBuilder suggestBuilder; @@ -123,7 +125,6 @@ public class SearchSourceBuilder implements ToXContent { private String[] stats; - /** * Constructs a new search source builder. */ @@ -132,7 +133,7 @@ public class SearchSourceBuilder implements ToXContent { /** * Constructs a new search source builder with a search query. - * + * * @see org.elasticsearch.index.query.QueryBuilders */ public SearchSourceBuilder query(QueryBuilder query) { @@ -190,8 +191,9 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Sets a filter that will be executed after the query has been executed and only has affect on the search hits - * (not aggregations). This filter is always executed as last filtering mechanism. + * Sets a filter that will be executed after the query has been executed and + * only has affect on the search hits (not aggregations). This filter is + * always executed as last filtering mechanism. */ public SearchSourceBuilder postFilter(FilterBuilder postFilter) { this.postFilterBuilder = postFilter; @@ -276,8 +278,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Should each {@link org.elasticsearch.search.SearchHit} be returned with an - * explanation of the hit (ranking). + * Should each {@link org.elasticsearch.search.SearchHit} be returned with + * an explanation of the hit (ranking). */ public SearchSourceBuilder explain(Boolean explain) { this.explain = explain; @@ -285,8 +287,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Should each {@link org.elasticsearch.search.SearchHit} be returned with a version - * associated with it. + * Should each {@link org.elasticsearch.search.SearchHit} be returned with a + * version associated with it. */ public SearchSourceBuilder version(Boolean version) { this.version = version; @@ -310,21 +312,24 @@ public class SearchSourceBuilder implements ToXContent { } /** - * An optional terminate_after to terminate the search after - * collecting terminateAfter documents + * An optional terminate_after to terminate the search after collecting + * terminateAfter documents */ - public SearchSourceBuilder terminateAfter(int terminateAfter) { + public SearchSourceBuilder terminateAfter(int terminateAfter) { if (terminateAfter <= 0) { throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); } this.terminateAfter = terminateAfter; return this; } + /** * Adds a sort against the given field name and the sort ordering. - * - * @param name The name of the field - * @param order The sort ordering + * + * @param name + * The name of the field + * @param order + * The sort ordering */ public SearchSourceBuilder sort(String name, SortOrder order) { return sort(SortBuilders.fieldSort(name).order(order)); @@ -332,8 +337,9 @@ public class SearchSourceBuilder implements ToXContent { /** * Add a sort against the given field name. - * - * @param name The name of the field to sort by + * + * @param name + * The name of the field to sort by */ public SearchSourceBuilder sort(String name) { return sort(SortBuilders.fieldSort(name)); @@ -351,8 +357,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Applies when sorting, and controls if scores will be tracked as well. Defaults to - * false. + * Applies when sorting, and controls if scores will be tracked as well. + * Defaults to false. */ public SearchSourceBuilder trackScores(boolean trackScores) { this.trackScores = trackScores; @@ -401,6 +407,7 @@ public class SearchSourceBuilder implements ToXContent { /** * Set the rescore window size for rescores that don't specify their window. + * * @param defaultRescoreWindowSize * @return */ @@ -465,8 +472,9 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Indicates whether the response should contain the stored _source for every hit - * + * Indicates whether the response should contain the stored _source for + * every hit + * * @param fetch * @return */ @@ -480,22 +488,33 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard + * Indicate that _source should be returned with every hit, with an + * "include" and/or "exclude" set which can include simple wildcard * elements. - * - * @param include An optional include (optionally wildcarded) pattern to filter the returned _source - * @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source + * + * @param include + * An optional include (optionally wildcarded) pattern to filter + * the returned _source + * @param exclude + * An optional exclude (optionally wildcarded) pattern to filter + * the returned _source */ public SearchSourceBuilder fetchSource(@Nullable String include, @Nullable String exclude) { - return fetchSource(include == null ? Strings.EMPTY_ARRAY : new String[]{include}, exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}); + return fetchSource(include == null ? Strings.EMPTY_ARRAY : new String[] { include }, exclude == null ? Strings.EMPTY_ARRAY + : new String[] { exclude }); } /** - * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard + * Indicate that _source should be returned with every hit, with an + * "include" and/or "exclude" set which can include simple wildcard * elements. - * - * @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source - * @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source + * + * @param includes + * An optional list of include (optionally wildcarded) pattern to + * filter the returned _source + * @param excludes + * An optional list of exclude (optionally wildcarded) pattern to + * filter the returned _source */ public SearchSourceBuilder fetchSource(@Nullable String[] includes, @Nullable String[] excludes) { fetchSourceContext = new FetchSourceContext(includes, excludes); @@ -511,7 +530,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Sets no fields to be loaded, resulting in only id and type to be returned per field. + * Sets no fields to be loaded, resulting in only id and type to be returned + * per field. */ public SearchSourceBuilder noFields() { this.fieldNames = ImmutableList.of(); @@ -519,8 +539,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Sets the fields to load and return as part of the search request. If none are specified, - * the source of the document will be returned. + * Sets the fields to load and return as part of the search request. If none + * are specified, the source of the document will be returned. */ public SearchSourceBuilder fields(List fields) { this.fieldNames = fields; @@ -528,8 +548,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Adds the fields to load and return as part of the search request. If none are specified, - * the source of the document will be returned. + * Adds the fields to load and return as part of the search request. If none + * are specified, the source of the document will be returned. */ public SearchSourceBuilder fields(String... fields) { if (fieldNames == null) { @@ -542,8 +562,9 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Adds a field to load and return (note, it must be stored) as part of the search request. - * If none are specified, the source of the document will be return. + * Adds a field to load and return (note, it must be stored) as part of the + * search request. If none are specified, the source of the document will be + * return. */ public SearchSourceBuilder field(String name) { if (fieldNames == null) { @@ -554,7 +575,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Adds a field to load from the field data cache and return as part of the search request. + * Adds a field to load from the field data cache and return as part of the + * search request. */ public SearchSourceBuilder fieldDataField(String name) { if (fieldDataFields == null) { @@ -566,9 +588,11 @@ public class SearchSourceBuilder implements ToXContent { /** * Adds a script field under the given name with the provided script. - * - * @param name The name of the field - * @param script The script + * + * @param name + * The name of the field + * @param script + * The script */ public SearchSourceBuilder scriptField(String name, String script) { return scriptField(name, null, script, null); @@ -576,10 +600,13 @@ public class SearchSourceBuilder implements ToXContent { /** * Adds a script field. - * - * @param name The name of the field - * @param script The script to execute - * @param params The script parameters + * + * @param name + * The name of the field + * @param script + * The script to execute + * @param params + * The script parameters */ public SearchSourceBuilder scriptField(String name, String script, Map params) { return scriptField(name, null, script, params); @@ -587,11 +614,15 @@ public class SearchSourceBuilder implements ToXContent { /** * Adds a script field. - * - * @param name The name of the field - * @param lang The language of the script - * @param script The script to execute - * @param params The script parameters (can be null) + * + * @param name + * The name of the field + * @param lang + * The language of the script + * @param script + * The script to execute + * @param params + * The script parameters (can be null) */ public SearchSourceBuilder scriptField(String name, String lang, String script, Map params) { if (scriptFields == null) { @@ -602,10 +633,13 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Sets the boost a specific index will receive when the query is executeed against it. - * - * @param index The index to apply the boost against - * @param indexBoost The boost to apply to the index + * Sets the boost a specific index will receive when the query is executeed + * against it. + * + * @param index + * The index to apply the boost against + * @param indexBoost + * The boost to apply to the index */ public SearchSourceBuilder indexBoost(String index, float indexBoost) { if (this.indexBoost == null) { @@ -648,7 +682,6 @@ public class SearchSourceBuilder implements ToXContent { } } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -657,7 +690,7 @@ public class SearchSourceBuilder implements ToXContent { return builder; } - public void innerToXContent(XContentBuilder builder, Params params) throws IOException{ + public void innerToXContent(XContentBuilder builder, Params params) throws IOException { if (from != -1) { builder.field("from", from); } @@ -899,8 +932,8 @@ public class SearchSourceBuilder implements ToXContent { private PartialField(String name, String include, String exclude) { this.name = name; - this.includes = include == null ? null : new String[]{include}; - this.excludes = exclude == null ? null : new String[]{exclude}; + this.includes = include == null ? null : new String[] { include }; + this.excludes = exclude == null ? null : new String[] { exclude }; } public String name() { diff --git a/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index 91d8948878b..cdfacbaa062 100644 --- a/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -21,9 +21,17 @@ package org.elasticsearch.search.controller; import com.carrotsearch.hppc.IntArrayList; import com.carrotsearch.hppc.ObjectObjectOpenHashMap; +import com.google.common.collect.Lists; import org.apache.lucene.index.Term; -import org.apache.lucene.search.*; +import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TermStatistics; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TopFieldDocs; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.component.AbstractComponent; @@ -33,8 +41,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -47,7 +58,12 @@ import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** * @@ -391,6 +407,19 @@ public class SearchPhaseController extends AbstractComponent { } } + if (aggregations != null) { + List reducers = firstResult.reducers(); + if (reducers != null) { + List newAggs = new ArrayList<>(Lists.transform(aggregations.asList(), Reducer.AGGREGATION_TRANFORM_FUNCTION)); + for (SiblingReducer reducer : reducers) { + InternalAggregation newAgg = reducer.doReduce(new InternalAggregations(newAggs), new ReduceContext(null, bigArrays, + scriptService)); + newAggs.add(newAgg); + } + aggregations = new InternalAggregations(newAggs); + } + } + InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore); return new InternalSearchResponse(searchHits, aggregations, suggest, timedOut, terminatedEarly); diff --git a/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 50167676cc7..e45006b2c32 100644 --- a/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -20,15 +20,20 @@ package org.elasticsearch.search.query; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.elasticsearch.common.lucene.Lucene.readTopDocs; import static org.elasticsearch.common.lucene.Lucene.writeTopDocs; @@ -44,6 +49,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { private int size; private TopDocs topDocs; private InternalAggregations aggregations; + private List reducers; private Suggest suggest; private boolean searchTimedOut; private Boolean terminatedEarly = null; @@ -114,6 +120,14 @@ public class QuerySearchResult extends QuerySearchResultProvider { this.aggregations = aggregations; } + public List reducers() { + return reducers; + } + + public void reducers(List reducers) { + this.reducers = reducers; + } + public Suggest suggest() { return suggest; } @@ -162,6 +176,16 @@ public class QuerySearchResult extends QuerySearchResultProvider { if (in.readBoolean()) { aggregations = InternalAggregations.readAggregations(in); } + if (in.readBoolean()) { + int size = in.readVInt(); + List reducers = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + BytesReference type = in.readBytesReference(); + Reducer reducer = ReducerStreams.stream(type).readResult(in); + reducers.add((SiblingReducer) reducer); + } + this.reducers = reducers; + } if (in.readBoolean()) { suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in); } @@ -187,6 +211,16 @@ public class QuerySearchResult extends QuerySearchResultProvider { out.writeBoolean(true); aggregations.writeTo(out); } + if (reducers == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(reducers.size()); + for (Reducer reducer : reducers) { + out.writeBytesReference(reducer.type().stream()); + reducer.writeTo(out); + } + } if (suggest == null) { out.writeBoolean(false); } else { diff --git a/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java index 263af854883..9f04e4a37b0 100644 --- a/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java +++ b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java @@ -23,8 +23,11 @@ import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilders; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -40,6 +43,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatc import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; /** * @@ -111,6 +115,81 @@ public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrati } } + @Test + // Just test the integration with facets and aggregations, not the facet and aggregation functionality! + public void testAggregationsAndReducers() throws Exception { + assertAcked(prepareCreate("test").addMapping("type", "field1", "type=string", "field2", "type=string")); + ensureGreen(); + + int numQueries = scaledRandomIntBetween(250, 500); + int numUniqueQueries = between(1, numQueries / 2); + String[] values = new String[numUniqueQueries]; + for (int i = 0; i < values.length; i++) { + values[i] = "value" + i; + } + int[] expectedCount = new int[numUniqueQueries]; + + logger.info("--> registering {} queries", numQueries); + for (int i = 0; i < numQueries; i++) { + String value = values[i % numUniqueQueries]; + expectedCount[i % numUniqueQueries]++; + QueryBuilder queryBuilder = matchQuery("field1", value); + client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) + .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()) + .execute().actionGet(); + } + client().admin().indices().prepareRefresh("test").execute().actionGet(); + + for (int i = 0; i < numQueries; i++) { + String value = values[i % numUniqueQueries]; + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate() + .setIndices("test").setDocumentType("type") + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); + + SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); + percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2") + .collectMode(aggCollectionMode )); + + if (randomBoolean()) { + percolateRequestBuilder.setPercolateQuery(matchAllQuery()); + } + if (randomBoolean()) { + percolateRequestBuilder.setScore(true); + } else { + percolateRequestBuilder.setSortByScore(true).setSize(numQueries); + } + + boolean countOnly = randomBoolean(); + if (countOnly) { + percolateRequestBuilder.setOnlyCount(countOnly); + } + + percolateRequestBuilder.addAggregation(ReducerBuilders.maxBucket("max_a").setBucketsPaths("a>_count")); + + PercolateResponse response = percolateRequestBuilder.execute().actionGet(); + assertMatchCount(response, expectedCount[i % numUniqueQueries]); + if (!countOnly) { + assertThat(response.getMatches(), arrayWithSize(expectedCount[i % numUniqueQueries])); + } + + Aggregations aggregations = response.getAggregations(); + assertThat(aggregations.asList().size(), equalTo(2)); + Terms terms = aggregations.get("a"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("a")); + List buckets = new ArrayList<>(terms.getBuckets()); + assertThat(buckets.size(), equalTo(1)); + assertThat(buckets.get(0).getKeyAsString(), equalTo("b")); + assertThat(buckets.get(0).getDocCount(), equalTo((long) expectedCount[i % values.length])); + + InternalBucketMetricValue maxA = aggregations.get("max_a"); + assertThat(maxA, notNullValue()); + assertThat(maxA.getName(), equalTo("max_a")); + assertThat(maxA.value(), equalTo((double) expectedCount[i % values.length])); + assertThat(maxA.keys(), equalTo(new String[] {"b"})); + } + } + @Test public void testSignificantAggs() throws Exception { client().admin().indices().prepareCreate("test").execute().actionGet(); diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java new file mode 100644 index 00000000000..f1932118601 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.maxBucket; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class MaxBucketTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + + static int numDocs; + static int interval; + static int minRandomValue; + static int maxRandomValue; + static int numValueBuckets; + static long[] valueCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + numDocs = randomIntBetween(6, 20); + interval = randomIntBetween(2, 5); + + minRandomValue = 0; + maxRandomValue = 20; + + numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + valueCounts = new long[numValueBuckets]; + + List builders = new ArrayList<>(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + builders.add(client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + i).endObject())); + final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); + valueCounts[bucket]++; + } + + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + } + indexRandom(true, builders); + ensureSearchable(); + } + + @Test + public void singleValuedField() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .addAggregation(maxBucket("max_bucket").setBucketsPaths("histo>_count")).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + if (bucket.getDocCount() > maxValue) { + maxValue = bucket.getDocCount(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } +} From 48a94a41df7e9da359fef38901147fc73b25ed14 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 13 Apr 2015 11:44:29 +0100 Subject: [PATCH 040/236] Added normalisation to Derivative Reducer This changes adds the ability to specify the units for the x-axis for derivative values and calculate the derivative based on those units rather than the original histograms x-axis units --- .../derivative/DerivativeBuilder.java | 18 +++++++- .../reducers/derivative/DerivativeParser.java | 39 ++++++++++++++-- .../derivative/DerivativeReducer.java | 41 ++++++++++++++--- .../reducers/DateDerivativeTests.java | 45 +++++++++++++++++++ .../reducers/DerivativeTests.java | 42 +++++++++++++++++ 5 files changed, 173 insertions(+), 12 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java index 210d56d4a6f..6504a26d72c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java @@ -20,16 +20,17 @@ package org.elasticsearch.search.aggregations.reducers.derivative; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import java.io.IOException; -import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; - public class DerivativeBuilder extends ReducerBuilder { private String format; private GapPolicy gapPolicy; + private String units; public DerivativeBuilder(String name) { super(name, DerivativeReducer.TYPE.name()); @@ -45,6 +46,16 @@ public class DerivativeBuilder extends ReducerBuilder { return this; } + public DerivativeBuilder units(String units) { + this.units = units; + return this; + } + + public DerivativeBuilder units(DateHistogramInterval units) { + this.units = units.toString(); + return this; + } + @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { if (format != null) { @@ -53,6 +64,9 @@ public class DerivativeBuilder extends ReducerBuilder { if (gapPolicy != null) { builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); } + if (units != null) { + builder.field(DerivativeParser.UNITS.getPreferredName(), units); + } return builder; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index c4d3aa2a229..fab2bd3c0b6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -19,9 +19,15 @@ package org.elasticsearch.search.aggregations.reducers.derivative; +import com.google.common.collect.ImmutableMap; + import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.support.format.ValueFormat; @@ -32,12 +38,23 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; - public class DerivativeParser implements Reducer.Parser { public static final ParseField FORMAT = new ParseField("format"); public static final ParseField GAP_POLICY = new ParseField("gap_policy"); + public static final ParseField UNITS = new ParseField("units"); + + private final ImmutableMap dateFieldUnits; + + public DerivativeParser() { + dateFieldUnits = MapBuilder. newMapBuilder().put("year", DateTimeUnit.YEAR_OF_CENTURY) + .put("1y", DateTimeUnit.YEAR_OF_CENTURY).put("quarter", DateTimeUnit.QUARTER).put("1q", DateTimeUnit.QUARTER) + .put("month", DateTimeUnit.MONTH_OF_YEAR).put("1M", DateTimeUnit.MONTH_OF_YEAR).put("week", DateTimeUnit.WEEK_OF_WEEKYEAR) + .put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR).put("day", DateTimeUnit.DAY_OF_MONTH).put("1d", DateTimeUnit.DAY_OF_MONTH) + .put("hour", DateTimeUnit.HOUR_OF_DAY).put("1h", DateTimeUnit.HOUR_OF_DAY).put("minute", DateTimeUnit.MINUTES_OF_HOUR) + .put("1m", DateTimeUnit.MINUTES_OF_HOUR).put("second", DateTimeUnit.SECOND_OF_MINUTE) + .put("1s", DateTimeUnit.SECOND_OF_MINUTE).immutableMap(); + } @Override public String type() { @@ -50,6 +67,7 @@ public class DerivativeParser implements Reducer.Parser { String currentFieldName = null; String[] bucketsPaths = null; String format = null; + String units = null; GapPolicy gapPolicy = GapPolicy.IGNORE; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -62,6 +80,8 @@ public class DerivativeParser implements Reducer.Parser { bucketsPaths = new String[] { parser.text() }; } else if (GAP_POLICY.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, parser.text()); + } else if (UNITS.match(currentFieldName)) { + units = parser.text(); } else { throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); @@ -93,7 +113,20 @@ public class DerivativeParser implements Reducer.Parser { formatter = ValueFormat.Patternable.Number.format(format).formatter(); } - return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy); + long xAxisUnits = -1; + if (units != null) { + DateTimeUnit dateTimeUnit = dateFieldUnits.get(units); + if (dateTimeUnit != null) { + xAxisUnits = dateTimeUnit.field().getDurationField().getUnitMillis(); + } else { + TimeValue timeValue = TimeValue.parseTimeValue(units, null); + if (timeValue != null) { + xAxisUnits = timeValue.getMillis(); + } + } + } + + return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy, xAxisUnits); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index a58d0f0e74e..7f02e66b73e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -25,6 +25,7 @@ import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; @@ -39,6 +40,7 @@ import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; +import org.joda.time.DateTime; import java.io.IOException; import java.util.ArrayList; @@ -66,15 +68,17 @@ public class DerivativeReducer extends Reducer { private ValueFormatter formatter; private GapPolicy gapPolicy; + private long xAxisUnits; public DerivativeReducer() { } - public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, long xAxisUnits, Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; this.gapPolicy = gapPolicy; + this.xAxisUnits = xAxisUnits; } @Override @@ -89,51 +93,74 @@ public class DerivativeReducer extends Reducer { InternalHistogram.Factory factory = histo.getFactory(); List newBuckets = new ArrayList<>(); + Long lastBucketKey = null; Double lastBucketValue = null; for (InternalHistogram.Bucket bucket : buckets) { + Long thisBucketKey = resolveBucketKeyAsLong(bucket); Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); if (lastBucketValue != null) { - double diff = thisBucketValue - lastBucketValue; - - List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION)); - aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList(), metaData())); + double gradient = thisBucketValue - lastBucketValue; + if (xAxisUnits != -1) { + double xDiff = (thisBucketKey - lastBucketKey) / xAxisUnits; + gradient = gradient / xDiff; + } + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), + AGGREGATION_TRANFORM_FUNCTION)); + aggs.add(new InternalSimpleValue(name(), gradient, formatter, new ArrayList(), metaData())); InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( aggs), bucket.getKeyed(), bucket.getFormatter()); newBuckets.add(newBucket); } else { newBuckets.add(bucket); } + lastBucketKey = thisBucketKey; lastBucketValue = thisBucketValue; } return factory.create(newBuckets, histo); } + private Long resolveBucketKeyAsLong(InternalHistogram.Bucket bucket) { + Object key = bucket.getKey(); + if (key instanceof DateTime) { + return ((DateTime) key).getMillis(); + } else if (key instanceof Number) { + return ((Number) key).longValue(); + } else { + throw new AggregationExecutionException("Bucket keys must be either a Number or a DateTime for aggregation " + name() + + ". Found bucket with key " + key); + } + } + @Override public void doReadFrom(StreamInput in) throws IOException { formatter = ValueFormatterStreams.readOptional(in); gapPolicy = GapPolicy.readFrom(in); + xAxisUnits = in.readLong(); } @Override public void doWriteTo(StreamOutput out) throws IOException { ValueFormatterStreams.writeOptional(formatter, out); gapPolicy.writeTo(out); + out.writeLong(xAxisUnits); } public static class Factory extends ReducerFactory { private final ValueFormatter formatter; private GapPolicy gapPolicy; + private long xAxisUnits; - public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy) { + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, long xAxisUnits) { super(name, TYPE.name(), bucketsPaths); this.formatter = formatter; this.gapPolicy = gapPolicy; + this.xAxisUnits = xAxisUnits; } @Override protected Reducer createInternal(Map metaData) throws IOException { - return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, metaData); + return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, xAxisUnits, metaData); } @Override diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java index ede94abd973..eefbe411940 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java @@ -45,6 +45,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHist import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; @@ -147,6 +148,50 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { assertThat(docCountDeriv.value(), equalTo(1d)); } + @Test + public void singleValuedField_normalised() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count").units(DateHistogramInterval.DAY))).execute() + .actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1l)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, nullValue()); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), closeTo(1d / 31d, 0.00001)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), closeTo(1d / 29d, 0.00001)); + } + @Test public void singleValuedField_WithSubAggregation() throws Exception { SearchResponse response = client() diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 6f5641fcffa..2e4c50fb8aa 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -43,6 +43,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @@ -196,6 +197,47 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } + /** + * test first and second derivative on the sing + */ + @Test + public void singleValuedField_normalised() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count").units("1")) + .subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (i > 0) { + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), closeTo((double) (firstDerivValueCounts[i - 1]) / 5, 0.00001)); + } else { + assertThat(docCountDeriv, nullValue()); + } + SimpleValue docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv"); + if (i > 1) { + assertThat(docCount2ndDeriv, notNullValue()); + assertThat(docCount2ndDeriv.value(), closeTo((double) (secondDerivValueCounts[i - 2]) / 5, 0.00001)); + } else { + assertThat(docCount2ndDeriv, nullValue()); + } + } + } + @Test public void singleValuedField_WithSubAggregation() throws Exception { SearchResponse response = client() From 306d94adb97c4be2e18d0cd4266d97cd9dba1a55 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 13 Apr 2015 14:24:23 +0100 Subject: [PATCH 041/236] Revert "Added normalisation to Derivative Reducer" This reverts commit 48a94a41df7e9da359fef38901147fc73b25ed14. --- .../derivative/DerivativeBuilder.java | 18 +------- .../reducers/derivative/DerivativeParser.java | 39 ++-------------- .../derivative/DerivativeReducer.java | 41 +++-------------- .../reducers/DateDerivativeTests.java | 45 ------------------- .../reducers/DerivativeTests.java | 42 ----------------- 5 files changed, 12 insertions(+), 173 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java index 6504a26d72c..210d56d4a6f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java @@ -20,17 +20,16 @@ package org.elasticsearch.search.aggregations.reducers.derivative; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import java.io.IOException; +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; + public class DerivativeBuilder extends ReducerBuilder { private String format; private GapPolicy gapPolicy; - private String units; public DerivativeBuilder(String name) { super(name, DerivativeReducer.TYPE.name()); @@ -46,16 +45,6 @@ public class DerivativeBuilder extends ReducerBuilder { return this; } - public DerivativeBuilder units(String units) { - this.units = units; - return this; - } - - public DerivativeBuilder units(DateHistogramInterval units) { - this.units = units.toString(); - return this; - } - @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { if (format != null) { @@ -64,9 +53,6 @@ public class DerivativeBuilder extends ReducerBuilder { if (gapPolicy != null) { builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); } - if (units != null) { - builder.field(DerivativeParser.UNITS.getPreferredName(), units); - } return builder; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index fab2bd3c0b6..c4d3aa2a229 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -19,15 +19,9 @@ package org.elasticsearch.search.aggregations.reducers.derivative; -import com.google.common.collect.ImmutableMap; - import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; -import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.support.format.ValueFormat; @@ -38,23 +32,12 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; + public class DerivativeParser implements Reducer.Parser { public static final ParseField FORMAT = new ParseField("format"); public static final ParseField GAP_POLICY = new ParseField("gap_policy"); - public static final ParseField UNITS = new ParseField("units"); - - private final ImmutableMap dateFieldUnits; - - public DerivativeParser() { - dateFieldUnits = MapBuilder. newMapBuilder().put("year", DateTimeUnit.YEAR_OF_CENTURY) - .put("1y", DateTimeUnit.YEAR_OF_CENTURY).put("quarter", DateTimeUnit.QUARTER).put("1q", DateTimeUnit.QUARTER) - .put("month", DateTimeUnit.MONTH_OF_YEAR).put("1M", DateTimeUnit.MONTH_OF_YEAR).put("week", DateTimeUnit.WEEK_OF_WEEKYEAR) - .put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR).put("day", DateTimeUnit.DAY_OF_MONTH).put("1d", DateTimeUnit.DAY_OF_MONTH) - .put("hour", DateTimeUnit.HOUR_OF_DAY).put("1h", DateTimeUnit.HOUR_OF_DAY).put("minute", DateTimeUnit.MINUTES_OF_HOUR) - .put("1m", DateTimeUnit.MINUTES_OF_HOUR).put("second", DateTimeUnit.SECOND_OF_MINUTE) - .put("1s", DateTimeUnit.SECOND_OF_MINUTE).immutableMap(); - } @Override public String type() { @@ -67,7 +50,6 @@ public class DerivativeParser implements Reducer.Parser { String currentFieldName = null; String[] bucketsPaths = null; String format = null; - String units = null; GapPolicy gapPolicy = GapPolicy.IGNORE; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -80,8 +62,6 @@ public class DerivativeParser implements Reducer.Parser { bucketsPaths = new String[] { parser.text() }; } else if (GAP_POLICY.match(currentFieldName)) { gapPolicy = GapPolicy.parse(context, parser.text()); - } else if (UNITS.match(currentFieldName)) { - units = parser.text(); } else { throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); @@ -113,20 +93,7 @@ public class DerivativeParser implements Reducer.Parser { formatter = ValueFormat.Patternable.Number.format(format).formatter(); } - long xAxisUnits = -1; - if (units != null) { - DateTimeUnit dateTimeUnit = dateFieldUnits.get(units); - if (dateTimeUnit != null) { - xAxisUnits = dateTimeUnit.field().getDurationField().getUnitMillis(); - } else { - TimeValue timeValue = TimeValue.parseTimeValue(units, null); - if (timeValue != null) { - xAxisUnits = timeValue.getMillis(); - } - } - } - - return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy, xAxisUnits); + return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 7f02e66b73e..a58d0f0e74e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -25,7 +25,6 @@ import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; @@ -40,7 +39,6 @@ import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; -import org.joda.time.DateTime; import java.io.IOException; import java.util.ArrayList; @@ -68,17 +66,15 @@ public class DerivativeReducer extends Reducer { private ValueFormatter formatter; private GapPolicy gapPolicy; - private long xAxisUnits; public DerivativeReducer() { } - public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, long xAxisUnits, + public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; this.gapPolicy = gapPolicy; - this.xAxisUnits = xAxisUnits; } @Override @@ -93,74 +89,51 @@ public class DerivativeReducer extends Reducer { InternalHistogram.Factory factory = histo.getFactory(); List newBuckets = new ArrayList<>(); - Long lastBucketKey = null; Double lastBucketValue = null; for (InternalHistogram.Bucket bucket : buckets) { - Long thisBucketKey = resolveBucketKeyAsLong(bucket); Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); if (lastBucketValue != null) { - double gradient = thisBucketValue - lastBucketValue; - if (xAxisUnits != -1) { - double xDiff = (thisBucketKey - lastBucketKey) / xAxisUnits; - gradient = gradient / xDiff; - } - List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), - AGGREGATION_TRANFORM_FUNCTION)); - aggs.add(new InternalSimpleValue(name(), gradient, formatter, new ArrayList(), metaData())); + double diff = thisBucketValue - lastBucketValue; + + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION)); + aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList(), metaData())); InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( aggs), bucket.getKeyed(), bucket.getFormatter()); newBuckets.add(newBucket); } else { newBuckets.add(bucket); } - lastBucketKey = thisBucketKey; lastBucketValue = thisBucketValue; } return factory.create(newBuckets, histo); } - private Long resolveBucketKeyAsLong(InternalHistogram.Bucket bucket) { - Object key = bucket.getKey(); - if (key instanceof DateTime) { - return ((DateTime) key).getMillis(); - } else if (key instanceof Number) { - return ((Number) key).longValue(); - } else { - throw new AggregationExecutionException("Bucket keys must be either a Number or a DateTime for aggregation " + name() - + ". Found bucket with key " + key); - } - } - @Override public void doReadFrom(StreamInput in) throws IOException { formatter = ValueFormatterStreams.readOptional(in); gapPolicy = GapPolicy.readFrom(in); - xAxisUnits = in.readLong(); } @Override public void doWriteTo(StreamOutput out) throws IOException { ValueFormatterStreams.writeOptional(formatter, out); gapPolicy.writeTo(out); - out.writeLong(xAxisUnits); } public static class Factory extends ReducerFactory { private final ValueFormatter formatter; private GapPolicy gapPolicy; - private long xAxisUnits; - public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, long xAxisUnits) { + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy) { super(name, TYPE.name(), bucketsPaths); this.formatter = formatter; this.gapPolicy = gapPolicy; - this.xAxisUnits = xAxisUnits; } @Override protected Reducer createInternal(Map metaData) throws IOException { - return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, xAxisUnits, metaData); + return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, metaData); } @Override diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java index eefbe411940..ede94abd973 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java @@ -45,7 +45,6 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHist import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; @@ -148,50 +147,6 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { assertThat(docCountDeriv.value(), equalTo(1d)); } - @Test - public void singleValuedField_normalised() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) - .subAggregation(derivative("deriv").setBucketsPaths("_count").units(DateHistogramInterval.DAY))).execute() - .actionGet(); - - assertSearchResponse(response); - - InternalHistogram deriv = response.getAggregations().get("histo"); - assertThat(deriv, notNullValue()); - assertThat(deriv.getName(), equalTo("histo")); - List buckets = deriv.getBuckets(); - assertThat(buckets.size(), equalTo(3)); - - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); - Histogram.Bucket bucket = buckets.get(0); - assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(1l)); - SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, nullValue()); - - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); - bucket = buckets.get(1); - assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(2l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), closeTo(1d / 31d, 0.00001)); - - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); - bucket = buckets.get(2); - assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); - assertThat(bucket.getDocCount(), equalTo(3l)); - docCountDeriv = bucket.getAggregations().get("deriv"); - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), closeTo(1d / 29d, 0.00001)); - } - @Test public void singleValuedField_WithSubAggregation() throws Exception { SearchResponse response = client() diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 2e4c50fb8aa..6f5641fcffa 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -43,7 +43,6 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @@ -197,47 +196,6 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } - /** - * test first and second derivative on the sing - */ - @Test - public void singleValuedField_normalised() { - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .subAggregation(derivative("deriv").setBucketsPaths("_count").units("1")) - .subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv"))).execute().actionGet(); - - assertSearchResponse(response); - - InternalHistogram deriv = response.getAggregations().get("histo"); - assertThat(deriv, notNullValue()); - assertThat(deriv.getName(), equalTo("histo")); - List buckets = deriv.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); - SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); - if (i > 0) { - assertThat(docCountDeriv, notNullValue()); - assertThat(docCountDeriv.value(), closeTo((double) (firstDerivValueCounts[i - 1]) / 5, 0.00001)); - } else { - assertThat(docCountDeriv, nullValue()); - } - SimpleValue docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv"); - if (i > 1) { - assertThat(docCount2ndDeriv, notNullValue()); - assertThat(docCount2ndDeriv.value(), closeTo((double) (secondDerivValueCounts[i - 2]) / 5, 0.00001)); - } else { - assertThat(docCount2ndDeriv, nullValue()); - } - } - } - @Test public void singleValuedField_WithSubAggregation() throws Exception { SearchResponse response = client() From 392f9ce1f88ea0a609c05ea9d1bbd3738a25cd23 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 13 Apr 2015 14:34:53 +0100 Subject: [PATCH 042/236] clean up --- .../index/query/CommonTermsQueryBuilder.java | 5 ++++- .../aggregations/AggregationBuilder.java | 22 +------------------ .../search/aggregations/Aggregator.java | 2 +- .../aggregations/AggregatorParsers.java | 1 - .../aggregations/reducers/BucketHelpers.java | 4 ++-- 5 files changed, 8 insertions(+), 26 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index 2dedbb44f8a..fef75c4e7fb 100644 --- a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -19,6 +19,9 @@ package org.elasticsearch.index.query; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -27,7 +30,7 @@ import java.io.IOException; /** * CommonTermsQuery query is a query that executes high-frequency terms in a * optional sub-query to prevent slow queries due to "common" terms like - * stopwords. This query basically builds 2 queries off the {@link #addAggregator(Term) + * stopwords. This query basically builds 2 queries off the {@link #add(Term) * added} terms where low-frequency terms are added to a required boolean clause * and high-frequency terms are added to an optional boolean clause. The * optional clause is only executed if the required "low-frequency' clause diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index cc3033e883f..d41daa7363f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import java.io.IOException; import java.util.List; @@ -39,7 +38,6 @@ import java.util.Map; public abstract class AggregationBuilder> extends AbstractAggregationBuilder { private List aggregations; - private List> reducers; private BytesReference aggregationsBinary; private Map metaData; @@ -62,18 +60,6 @@ public abstract class AggregationBuilder> extend return (B) this; } - /** - * Add a sub get to this bucket get. - */ - @SuppressWarnings("unchecked") - public B subAggregation(ReducerBuilder reducer) { - if (reducers == null) { - reducers = Lists.newArrayList(); - } - reducers.add(reducer); - return (B) this; - } - /** * Sets a raw (xcontent / json) sub addAggregation. */ @@ -135,7 +121,7 @@ public abstract class AggregationBuilder> extend builder.field(type); internalXContent(builder, params); - if (aggregations != null || aggregationsBinary != null || reducers != null) { + if (aggregations != null || aggregationsBinary != null) { builder.startObject("aggregations"); if (aggregations != null) { @@ -144,12 +130,6 @@ public abstract class AggregationBuilder> extend } } - if (reducers != null) { - for (ReducerBuilder subAgg : reducers) { - subAgg.toXContent(builder, params); - } - } - if (aggregationsBinary != null) { if (XContentFactory.xContentType(aggregationsBinary) == builder.contentType()) { builder.rawField("aggregations", aggregationsBinary); diff --git a/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java index bce1f9bc196..fd9519499a8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java @@ -105,7 +105,7 @@ public abstract class Aggregator extends BucketCollector implements Releasable { * Build an empty aggregation. */ public abstract InternalAggregation buildEmptyAggregation(); - + /** Aggregation mode for sub aggregations. */ public enum SubAggCollectionMode { diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index 62caa385585..1e1950a15c7 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -197,7 +197,6 @@ public class AggregatorParsers { if (subFactories != null) { throw new SearchParseException(context, "Aggregation [" + aggregationName + "] cannot define sub-aggregations"); } - // TODO: should we validate here like aggs? factories.addReducer(reducerFactory); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java index 30d6fc0107e..b6955a086ab 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java @@ -150,9 +150,9 @@ public class BucketHelpers { } public static Double resolveBucketValue(InternalMultiBucketAggregation agg, - InternalMultiBucketAggregation.Bucket bucket, List aggPathsList, GapPolicy gapPolicy) { + InternalMultiBucketAggregation.Bucket bucket, List aggPathAsList, GapPolicy gapPolicy) { try { - Object propertyValue = bucket.getProperty(agg.getName(), aggPathsList); + Object propertyValue = bucket.getProperty(agg.getName(), aggPathAsList); if (propertyValue == null) { throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + " must reference either a number value or a single value numeric metric aggregation"); From 7fdf32fb0dcb8a0b19a75f82ec3db2edd80fb2ff Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 13 Apr 2015 15:13:02 +0100 Subject: [PATCH 043/236] changed `bucketsPaths` to `buckets_paths` --- .../org/elasticsearch/search/aggregations/reducers/Reducer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java index 3c0b4fdbe22..5ec45064c7f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java @@ -45,7 +45,7 @@ public abstract class Reducer implements Streamable { */ public static interface Parser { - public static final ParseField BUCKETS_PATH = new ParseField("bucketsPath"); + public static final ParseField BUCKETS_PATH = new ParseField("buckets_path"); /** * @return The reducer type this parser is associated with. From ea1470a0807d47f49577403fc5cbf3370f7c067b Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 20 Apr 2015 13:58:08 +0100 Subject: [PATCH 044/236] More tests for max bucket reducer --- .../aggregations/reducers/MaxBucketTests.java | 253 +++++++++++++++++- 1 file changed, 251 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java index f1932118601..48d93766bfc 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java @@ -23,6 +23,9 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -32,10 +35,13 @@ import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.maxBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.core.IsNull.notNullValue; @ElasticsearchIntegrationTest.SuiteScopeTest @@ -69,7 +75,8 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); builders.add(client().prepareIndex("idx", "type").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + i).endObject())); + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) + .endObject())); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; } @@ -84,7 +91,7 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { } @Test - public void singleValuedField() throws Exception { + public void testDocCount_topLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds((long) minRandomValue, (long) maxRandomValue)) @@ -120,4 +127,246 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { assertThat(maxBucketValue.value(), equalTo(maxValue)); assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); } + + @Test + public void testDocCount_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>_count"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() > maxValue) { + maxValue = bucket.getDocCount(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + } + + @Test + public void testMetric_topLevel() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(maxBucket("max_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(interval)); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int i = 0; i < interval; ++i) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); + assertThat(bucket.getDocCount(), greaterThan(0l)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() > maxValue) { + maxValue = sum.value(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + + @Test + public void testMetric_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() > maxValue) { + maxValue = sum.value(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + } + + @Test + public void testNoBuckets() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(maxBucket("max_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + + InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(maxBucketValue.keys(), equalTo(new String[0])); + } + + @Test + public void testNested() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(maxBucket("max_histo_bucket").setBucketsPaths("histo>_count"))) + .addAggregation(maxBucket("max_terms_bucket").setBucketsPaths("terms>max_histo_bucket")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + List maxTermsKeys = new ArrayList<>(); + double maxTermsValue = Double.NEGATIVE_INFINITY; + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List maxHistoKeys = new ArrayList<>(); + double maxHistoValue = Double.NEGATIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() > maxHistoValue) { + maxHistoValue = bucket.getDocCount(); + maxHistoKeys = new ArrayList<>(); + maxHistoKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == maxHistoValue) { + maxHistoKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_histo_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_histo_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxHistoValue)); + assertThat(maxBucketValue.keys(), equalTo(maxHistoKeys.toArray(new String[maxHistoKeys.size()]))); + if (maxHistoValue > maxTermsValue) { + maxTermsValue = maxHistoValue; + maxTermsKeys = new ArrayList<>(); + maxTermsKeys.add(termsBucket.getKeyAsString()); + } else if (maxHistoValue == maxTermsValue) { + maxTermsKeys.add(termsBucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_terms_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_terms_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxTermsValue)); + assertThat(maxBucketValue.keys(), equalTo(maxTermsKeys.toArray(new String[maxTermsKeys.size()]))); + } } From 0f4b7f3b5c1611e74e3de00411ec957004d4c5db Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 15 Apr 2015 14:23:29 +0100 Subject: [PATCH 045/236] Added section for reducer aggregations in the main aggregation docs page --- docs/reference/search/aggregations.asciidoc | 32 +++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/reference/search/aggregations.asciidoc b/docs/reference/search/aggregations.asciidoc index e7803a27e9c..98e3ba4ccea 100644 --- a/docs/reference/search/aggregations.asciidoc +++ b/docs/reference/search/aggregations.asciidoc @@ -116,6 +116,38 @@ aggregated for the buckets created by their "parent" bucket aggregation. There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket, some define fixed number of multiple buckets, and others dynamically create the buckets during the aggregation process. +[float] +=== Reducer Aggregations + +coming[2.0.0] + +experimental[] + +Reducer aggregations work on the outputs produced from other aggregations rather than from document sets, adding +information to the output tree. There are many different types of reducer, each computing different information from +other aggregations, but these type can broken down into two families: + +_Parent_:: + A family of reducer aggregations that is provided with the output of its parent aggregation and is able + to compute new buckets or new aggregations to add to existing buckets. + +_Sibling_:: + Reducer aggregations that are provided with the output of a sibling aggregation and are able to compute a + new aggregation which will be at the same level as the sibling aggregation. + +Reducer aggregations can reference the aggregations they need to perform their computation by using the `buckets_paths` +parameter to indicate the paths to the required metrics. The syntax for defining these paths can be found in the +<> section. + +?????? SHOULD THE SECTION ABOUT DEFINING AGGREGATION PATHS +BE IN THIS PAGE AND REFERENCED FROM THE TERMS AGGREGATION DOCUMENTATION ??????? + +Reducer aggregations cannot have sub-aggregations but depending on the type it can reference another reducer in the `buckets_path` +allowing reducers to be chained. + +NOTE: Because reducer aggregations only add to the output, when chaining reducer aggregations the output of each reducer will be +included in the final output. + [float] === Caching heavy aggregations From be647a89d3a9edb58f4e84f7256f8201d33efd8a Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 16 Apr 2015 14:07:40 +0100 Subject: [PATCH 046/236] Documentation for the derivative reducer --- docs/reference/search/aggregations.asciidoc | 3 + .../search/aggregations/reducer.asciidoc | 3 + .../reducer/derivative-aggregation.asciidoc | 192 ++++++++++++++++++ .../reducer/max-bucket-aggregation.asciidoc | 192 ++++++++++++++++++ 4 files changed, 390 insertions(+) create mode 100644 docs/reference/search/aggregations/reducer.asciidoc create mode 100644 docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc create mode 100644 docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc diff --git a/docs/reference/search/aggregations.asciidoc b/docs/reference/search/aggregations.asciidoc index 98e3ba4ccea..74784c110a9 100644 --- a/docs/reference/search/aggregations.asciidoc +++ b/docs/reference/search/aggregations.asciidoc @@ -227,3 +227,6 @@ Then that piece of metadata will be returned in place for our `titles` terms agg include::aggregations/metrics.asciidoc[] include::aggregations/bucket.asciidoc[] + +include::aggregations/reducer.asciidoc[] + diff --git a/docs/reference/search/aggregations/reducer.asciidoc b/docs/reference/search/aggregations/reducer.asciidoc new file mode 100644 index 00000000000..5b3bff11c18 --- /dev/null +++ b/docs/reference/search/aggregations/reducer.asciidoc @@ -0,0 +1,3 @@ +[[search-aggregations-reducer]] + +include::reducer/derivative.asciidoc[] diff --git a/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc new file mode 100644 index 00000000000..f1fa8b44043 --- /dev/null +++ b/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc @@ -0,0 +1,192 @@ +[[search-aggregations-reducer-derivative-aggregation]] +=== Derivative Aggregation + +A parent reducer aggregation which calculates the derivative of a specified metric in a parent histogram (or date_histogram) +aggregation. The specified metric must be numeric and the enclosing histogram must have `min_doc_count` set to `0`. + +The following snippet calculates the derivative of the total monthly `sales`: + +[source,js] +-------------------------------------------------- +{ + "aggs" : { + "sales_per_month" : { + "date_histogram" : { + "field" : "date", + "interval" : "month" + }, + "aggs": { + "sales": { + "sum": { + "field": "price" + } + }, + "sales_deriv": { + "derivative": { + "buckets_paths": "sales" <1> + } + } + } + } + } +} +-------------------------------------------------- + +<1> `bucket_paths` instructs this derivative aggregation to use the output of the `sales` aggregation for the derivative + +And the following may be the response: + +[source,js] +-------------------------------------------------- +{ + "aggregations": { + "sales_per_month": { + "buckets": [ + { + "key_as_string": "2015/01/01 00:00:00", + "key": 1420070400000, + "doc_count": 3, + "sales": { + "value": 550 + } <1> + }, + { + "key_as_string": "2015/02/01 00:00:00", + "key": 1422748800000, + "doc_count": 2, + "sales": { + "value": 60 + }, + "sales_deriv": { + "value": -490 <2> + } + }, + { + "key_as_string": "2015/03/01 00:00:00", + "key": 1425168000000, + "doc_count": 2, + "sales": { + "value": 375 + }, + "sales_deriv": { + "value": 315 + } + } + ] + } + } +} +-------------------------------------------------- + +<1> No derivative for the first bucket since we need at least 2 data points to calculate the derivative +<2> Derivative value units are implicitly defined by the `sales` aggregation and the parent histogram so in this case the units +would be $/month assuming the `price` field has units of $. + +==== Second Order Derivative + +A second order derivative can be calculated by chaining the derivative reducer aggregation onto the result of another derivative +reducer aggregation as in the following example which will calculate both the first and the second order derivative of the total +monthly sales: + +[source,js] +-------------------------------------------------- +{ + "aggs" : { + "sales_per_month" : { + "date_histogram" : { + "field" : "date", + "interval" : "month" + }, + "aggs": { + "sales": { + "sum": { + "field": "price" + } + }, + "sales_deriv": { + "derivative": { + "buckets_paths": "sales" + } + }, + "sales_2nd_deriv": { + "derivative": { + "buckets_paths": "sales_deriv" <1> + } + } + } + } + } +} +-------------------------------------------------- + +<1> `bucket_paths` for the second derivative points to the name of the first derivative + +And the following may be the response: + +[source,js] +-------------------------------------------------- +{ + "aggregations": { + "sales_per_month": { + "buckets": [ + { + "key_as_string": "2015/01/01 00:00:00", + "key": 1420070400000, + "doc_count": 3, + "sales": { + "value": 550 + } <1> + }, + { + "key_as_string": "2015/02/01 00:00:00", + "key": 1422748800000, + "doc_count": 2, + "sales": { + "value": 60 + }, + "sales_deriv": { + "value": -490 + } <1> + }, + { + "key_as_string": "2015/03/01 00:00:00", + "key": 1425168000000, + "doc_count": 2, + "sales": { + "value": 375 + }, + "sales_deriv": { + "value": 315 + }, + "sales_2nd_deriv": { + "value": 805 + } + } + ] + } + } +} +-------------------------------------------------- +<1> No second derivative for the first two buckets since we need at least 2 data points from the first derivative to calculate the +second derivative + +==== Dealing with gaps in the data + +There are a couple of reasons why the data output by the enclosing histogram may have gaps: + +* There are no documents matching the query for some buckets +* The data for a metric is missing in all of the documents falling into a bucket (this is most likely with either a small interval +on the enclosing histogram or with a query matching only a small number of documents) + +Where there is no data available in a bucket for a given metric it presents a problem for calculating the derivative value for both +the current bucket and the next bucket. In the derivative reducer aggregation has a `gap policy` parameter to define what the behavior +should be when a gap in the data is found. There are currently two options for controlling the gap policy: + +_ignore_:: + This option will not produce a derivative value for any buckets where the value in the current or previous bucket is + missing + +_insert_zeros_:: + This option will assume the missing value is `0` and calculate the derivative with the value `0`. + + diff --git a/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc new file mode 100644 index 00000000000..659f3ff1930 --- /dev/null +++ b/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc @@ -0,0 +1,192 @@ +[[search-aggregations-reducer-max-bucket-aggregation]] +=== Max Bucket Aggregation + +A parent reducer aggregation which calculates the derivative of a specified metric in a parent histogram (or date_histogram) +aggregation. The specified metric must be numeric and the enclosing histogram must have `min_doc_count` set to `0`. + +The following snippet calculates the derivative of the total monthly `sales`: + +[source,js] +-------------------------------------------------- +{ + "aggs" : { + "sales" : { + "date_histogram" : { + "field" : "date", + "interval" : "month" + }, + "aggs": { + "sales": { + "sum": { + "field": "price" + } + }, + "sales_deriv": { + "derivative": { + "buckets_paths": "sales" <1> + } + } + } + } + } +} +-------------------------------------------------- + +<1> `bucket_paths` instructs this derivative aggregation to use the output of the `sales` aggregation for the derivative + +And the following may be the response: + +[source,js] +-------------------------------------------------- +{ + "aggregations": { + "sales": { + "buckets": [ + { + "key_as_string": "2015/01/01 00:00:00", + "key": 1420070400000, + "doc_count": 3, + "sales": { + "value": 550 + } <1> + }, + { + "key_as_string": "2015/02/01 00:00:00", + "key": 1422748800000, + "doc_count": 2, + "sales": { + "value": 60 + }, + "sales_deriv": { + "value": -490 <2> + } + }, + { + "key_as_string": "2015/03/01 00:00:00", + "key": 1425168000000, + "doc_count": 2, + "sales": { + "value": 375 + }, + "sales_deriv": { + "value": 315 + } + } + ] + } + } +} +-------------------------------------------------- + +<1> No derivative for the first bucket since we need at least 2 data points to calculate the derivative +<2> Derivative value units are implicitly defined by the `sales` aggregation and the parent histogram so in this case the units +would be $/month assuming the `price` field has units of $. + +==== Second Order Derivative + +A second order derivative can be calculated by chaining the derivative reducer aggregation onto the result of another derivative +reducer aggregation as in the following example which will calculate both the first and the second order derivative of the total +monthly sales: + +[source,js] +-------------------------------------------------- +{ + "aggs" : { + "sales" : { + "date_histogram" : { + "field" : "date", + "interval" : "month" + }, + "aggs": { + "sales": { + "sum": { + "field": "price" + } + }, + "sales_deriv": { + "derivative": { + "buckets_paths": "sales" + } + }, + "sales_2nd_deriv": { + "derivative": { + "buckets_paths": "sales_deriv" <1> + } + } + } + } + } +} +-------------------------------------------------- + +<1> `bucket_paths` for the second derivative points to the name of the first derivative + +And the following may be the response: + +[source,js] +-------------------------------------------------- +{ + "aggregations": { + "sales": { + "buckets": [ + { + "key_as_string": "2015/01/01 00:00:00", + "key": 1420070400000, + "doc_count": 3, + "sales": { + "value": 550 + } <1> + }, + { + "key_as_string": "2015/02/01 00:00:00", + "key": 1422748800000, + "doc_count": 2, + "sales": { + "value": 60 + }, + "sales_deriv": { + "value": -490 + } <1> + }, + { + "key_as_string": "2015/03/01 00:00:00", + "key": 1425168000000, + "doc_count": 2, + "sales": { + "value": 375 + }, + "sales_deriv": { + "value": 315 + }, + "sales_2nd_deriv": { + "value": 805 + } + } + ] + } + } +} +-------------------------------------------------- +<1> No second derivative for the first two buckets since we need at least 2 data points from the first derivative to calculate the +second derivative + +==== Dealing with gaps in the data + +There are a couple of reasons why the data output by the enclosing histogram may have gaps: + +* There are no documents matching the query for some buckets +* The data for a metric is missing in all of the documents falling into a bucket (this is most likely with either a small interval +on the enclosing histogram or with a query matching only a small number of documents) + +Where there is no data available in a bucket for a given metric it presents a problem for calculating the derivative value for both +the current bucket and the next bucket. In the derivative reducer aggregation has a `gap policy` parameter to define what the behavior +should be when a gap in the data is found. There are currently two options for controlling the gap policy: + +_ignore_:: + This option will not produce a derivative value for any buckets where the value in the current or previous bucket is + missing + +_insert_zeros_:: + This option will assume the missing value is `0` and calculate the derivative with the value `0`. + + From bd28c9c44e779dc784bd59387682b35d441bd627 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Fri, 17 Apr 2015 11:34:01 +0100 Subject: [PATCH 047/236] Documentation for the max_bucket reducer --- .../reducer/max-bucket-aggregation.asciidoc | 148 +++--------------- 1 file changed, 19 insertions(+), 129 deletions(-) diff --git a/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc index 659f3ff1930..ca6f274d189 100644 --- a/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc +++ b/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc @@ -1,16 +1,17 @@ [[search-aggregations-reducer-max-bucket-aggregation]] === Max Bucket Aggregation -A parent reducer aggregation which calculates the derivative of a specified metric in a parent histogram (or date_histogram) -aggregation. The specified metric must be numeric and the enclosing histogram must have `min_doc_count` set to `0`. +A sibling reducer aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibing aggregation +and outputs both the value and the key(s) of the bucket(s). The specified metric must be numeric and the sibling aggregation must +be a multi-bucket aggregation. -The following snippet calculates the derivative of the total monthly `sales`: +The following snippet calculates the maximum of the total monthly `sales`: [source,js] -------------------------------------------------- { "aggs" : { - "sales" : { + "sales_per_month" : { "date_histogram" : { "field" : "date", "interval" : "month" @@ -20,19 +21,20 @@ The following snippet calculates the derivative of the total monthly `sales`: "sum": { "field": "price" } - }, - "sales_deriv": { - "derivative": { - "buckets_paths": "sales" <1> - } } } + }, + "max_monthly_sales": { + "max_bucket": { + "buckets_paths": "sales_per_month>sales" <1> + } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this derivative aggregation to use the output of the `sales` aggregation for the derivative +<1> `bucket_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the +"sales_per_month` date histogram. And the following may be the response: @@ -40,7 +42,7 @@ And the following may be the response: -------------------------------------------------- { "aggregations": { - "sales": { + "sales_per_month": { "buckets": [ { "key_as_string": "2015/01/01 00:00:00", @@ -48,7 +50,7 @@ And the following may be the response: "doc_count": 3, "sales": { "value": 550 - } <1> + } }, { "key_as_string": "2015/02/01 00:00:00", @@ -56,9 +58,6 @@ And the following may be the response: "doc_count": 2, "sales": { "value": 60 - }, - "sales_deriv": { - "value": -490 <2> } }, { @@ -67,126 +66,17 @@ And the following may be the response: "doc_count": 2, "sales": { "value": 375 - }, - "sales_deriv": { - "value": 315 } } ] + }, + "max_monthly_sales": { + "keys": ["2015/01/01 00:00:00"], <1> + "value": 550 } } } -------------------------------------------------- -<1> No derivative for the first bucket since we need at least 2 data points to calculate the derivative -<2> Derivative value units are implicitly defined by the `sales` aggregation and the parent histogram so in this case the units -would be $/month assuming the `price` field has units of $. - -==== Second Order Derivative - -A second order derivative can be calculated by chaining the derivative reducer aggregation onto the result of another derivative -reducer aggregation as in the following example which will calculate both the first and the second order derivative of the total -monthly sales: - -[source,js] --------------------------------------------------- -{ - "aggs" : { - "sales" : { - "date_histogram" : { - "field" : "date", - "interval" : "month" - }, - "aggs": { - "sales": { - "sum": { - "field": "price" - } - }, - "sales_deriv": { - "derivative": { - "buckets_paths": "sales" - } - }, - "sales_2nd_deriv": { - "derivative": { - "buckets_paths": "sales_deriv" <1> - } - } - } - } - } -} --------------------------------------------------- - -<1> `bucket_paths` for the second derivative points to the name of the first derivative - -And the following may be the response: - -[source,js] --------------------------------------------------- -{ - "aggregations": { - "sales": { - "buckets": [ - { - "key_as_string": "2015/01/01 00:00:00", - "key": 1420070400000, - "doc_count": 3, - "sales": { - "value": 550 - } <1> - }, - { - "key_as_string": "2015/02/01 00:00:00", - "key": 1422748800000, - "doc_count": 2, - "sales": { - "value": 60 - }, - "sales_deriv": { - "value": -490 - } <1> - }, - { - "key_as_string": "2015/03/01 00:00:00", - "key": 1425168000000, - "doc_count": 2, - "sales": { - "value": 375 - }, - "sales_deriv": { - "value": 315 - }, - "sales_2nd_deriv": { - "value": 805 - } - } - ] - } - } -} --------------------------------------------------- -<1> No second derivative for the first two buckets since we need at least 2 data points from the first derivative to calculate the -second derivative - -==== Dealing with gaps in the data - -There are a couple of reasons why the data output by the enclosing histogram may have gaps: - -* There are no documents matching the query for some buckets -* The data for a metric is missing in all of the documents falling into a bucket (this is most likely with either a small interval -on the enclosing histogram or with a query matching only a small number of documents) - -Where there is no data available in a bucket for a given metric it presents a problem for calculating the derivative value for both -the current bucket and the next bucket. In the derivative reducer aggregation has a `gap policy` parameter to define what the behavior -should be when a gap in the data is found. There are currently two options for controlling the gap policy: - -_ignore_:: - This option will not produce a derivative value for any buckets where the value in the current or previous bucket is - missing - -_insert_zeros_:: - This option will assume the missing value is `0` and calculate the derivative with the value `0`. - +<1> `keys` is an array of strings since the maximum value may be present in multiple buckets From 89d424e074c0b42584ce7e2a594767613b95a7a6 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 21 Apr 2015 16:00:02 +0100 Subject: [PATCH 048/236] Derivative can now access multi-value metric aggregations --- .../aggregations/AggregatorFactories.java | 30 ++++------ .../reducers/DerivativeTests.java | 59 +++++++++++++++++-- 2 files changed, 66 insertions(+), 23 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 286f0c55b92..84318096080 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; import java.util.ArrayList; @@ -62,7 +63,8 @@ public class AggregatorFactories { } /** - * Create all aggregators so that they can be consumed with multiple buckets. + * Create all aggregators so that they can be consumed with multiple + * buckets. */ public Aggregator[] createSubAggregators(Aggregator parent) throws IOException { Aggregator[] aggregators = new Aggregator[count()]; @@ -138,7 +140,8 @@ public class AggregatorFactories { public Builder addAggregator(AggregatorFactory factory) { if (!names.add(factory.name)) { - throw new ElasticsearchIllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]"); + throw new ElasticsearchIllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + + "]"); } factories.add(factory); return this; @@ -158,19 +161,12 @@ public class AggregatorFactories { } /* - * L ← Empty list that will contain the sorted nodes - * while there are unmarked nodes do - * select an unmarked node n - * visit(n) - * function visit(node n) - * if n has a temporary mark then stop (not a DAG) - * if n is not marked (i.e. has not been visited yet) then - * mark n temporarily - * for each node m with an edge from n to m do - * visit(m) - * mark n permanently - * unmark n temporarily - * add n to head of L + * L ← Empty list that will contain the sorted nodes while there are + * unmarked nodes do select an unmarked node n visit(n) function + * visit(node n) if n has a temporary mark then stop (not a DAG) if n is + * not marked (i.e. has not been visited yet) then mark n temporarily + * for each node m with an edge from n to m do visit(m) mark n + * permanently unmark n temporarily add n to head of L */ private List resolveReducerOrder(List reducerFactories, List aggFactories) { Map reducerFactoriesMap = new HashMap<>(); @@ -204,8 +200,8 @@ public class AggregatorFactories { temporarilyMarked.add(factory); String[] bucketsPaths = factory.getBucketsPaths(); for (String bucketsPath : bucketsPaths) { - int aggSepIndex = bucketsPath.indexOf('>'); - String firstAggName = aggSepIndex == -1 ? bucketsPath : bucketsPath.substring(0, aggSepIndex); + List bucketsPathElements = AggregationPath.parse(bucketsPath).getPathElementsAsStringList(); + String firstAggName = bucketsPathElements.get(0); if (bucketsPath.equals("_count") || bucketsPath.equals("_key") || aggFactoryNames.contains(firstAggName)) { continue; } else { diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 6f5641fcffa..95c13b6fd2f 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; +import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.support.AggregationPath; @@ -39,6 +40,7 @@ import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -159,7 +161,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { * test first and second derivative on the sing */ @Test - public void singleValuedField() { + public void docCountDerivative() { SearchResponse response = client() .prepareSearch("idx") @@ -197,7 +199,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } @Test - public void singleValuedField_WithSubAggregation() throws Exception { + public void singleValueAggDerivative() throws Exception { SearchResponse response = client() .prepareSearch("idx") .addAggregation( @@ -242,6 +244,52 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } + @Test + public void multiValueAggDerivative() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv").setBucketsPaths("stats.sum"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); + Object[] propertiesKeys = (Object[]) deriv.getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) deriv.getProperty("_count"); + Object[] propertiesSumCounts = (Object[]) deriv.getProperty("stats.sum"); + + List buckets = new ArrayList(deriv.getBuckets()); + Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets + // overwritten + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + long expectedSum = valueCounts[i] * (i * interval); + assertThat(stats.getSum(), equalTo((double) expectedSum)); + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + if (i > 0) { + assertThat(sumDeriv, notNullValue()); + long sumDerivValue = expectedSum - expectedSumPreviousBucket; + assertThat(sumDeriv.value(), equalTo((double) sumDerivValue)); + assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), + equalTo((double) sumDerivValue)); + } else { + assertThat(sumDeriv, nullValue()); + } + expectedSumPreviousBucket = expectedSum; + assertThat((long) propertiesKeys[i], equalTo((long) i * interval)); + assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i])); + assertThat((double) propertiesSumCounts[i], equalTo((double) expectedSum)); + } + } + @Test public void unmapped() throws Exception { SearchResponse response = client() @@ -288,7 +336,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } @Test - public void singleValuedFieldWithGaps() throws Exception { + public void docCountDerivativeWithGaps() throws Exception { SearchResponse searchResponse = client() .prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) @@ -317,7 +365,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } @Test - public void singleValuedFieldWithGaps_random() throws Exception { + public void docCountDerivativeWithGaps_random() throws Exception { SearchResponse searchResponse = client() .prepareSearch("empty_bucket_idx_rnd") .setQuery(matchAllQuery()) @@ -336,7 +384,6 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { for (int i = 0; i < valueCounts_empty_rnd.length; i++) { Histogram.Bucket bucket = buckets.get(i); - System.out.println(bucket.getDocCount()); checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty_rnd[i]); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); if (firstDerivValueCounts_empty_rnd[i] == null) { @@ -348,7 +395,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } @Test - public void singleValuedFieldWithGaps_insertZeros() throws Exception { + public void docCountDerivativeWithGaps_insertZeros() throws Exception { SearchResponse searchResponse = client() .prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) From 730314fec1e60da161ee962c521767fca968aeac Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 21 Apr 2015 20:27:27 -0400 Subject: [PATCH 049/236] Execute tests with $JAVA_HOME. --- pom.xml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 772d7ef6578..d28060f8cd7 100644 --- a/pom.xml +++ b/pom.xml @@ -47,6 +47,7 @@ 512m ${basedir}/logs/ 5 + ${java.home}${file.separator}bin${file.separator}java .local-${project.version}-execution-hints.log @@ -444,7 +445,7 @@ - java + ${jvm.executable> -classpath @@ -516,6 +517,7 @@ junit4 + ${jvm.executable} 10 pipe,warn true From 270cb9f349aad7a3b51edb26cf43984b9bb18372 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 22 Apr 2015 03:04:50 -0400 Subject: [PATCH 050/236] enable securitymanager --- config/elasticsearch.yml | 9 + .../tests.policy => config/security.policy | 17 +- pom.xml | 2 +- .../elasticsearch/bootstrap/Bootstrap.java | 13 +- .../org/elasticsearch/bootstrap/Security.java | 160 ++++++++++++++++++ .../elasticsearch/env/NodeEnvironment.java | 4 +- 6 files changed, 194 insertions(+), 11 deletions(-) rename dev-tools/tests.policy => config/security.policy (93%) create mode 100644 src/main/java/org/elasticsearch/bootstrap/Security.java diff --git a/config/elasticsearch.yml b/config/elasticsearch.yml index f1359cd58d0..8842b93e35c 100644 --- a/config/elasticsearch.yml +++ b/config/elasticsearch.yml @@ -231,6 +231,15 @@ # #http.enabled: false +################################### Security ################################## + +# SecurityManager runs elasticsearch with a lower set of priviledges. +# For more information, see +# . + +# Disable security completely: +# +# security.enabled: false ################################### Gateway ################################### diff --git a/dev-tools/tests.policy b/config/security.policy similarity index 93% rename from dev-tools/tests.policy rename to config/security.policy index 724f001e422..44b89c47c58 100644 --- a/dev-tools/tests.policy +++ b/config/security.policy @@ -17,21 +17,26 @@ * under the License. */ -// Policy file to prevent tests from writing outside the test sandbox directory -// PLEASE NOTE: You may need to enable other permissions when new tests are added, -// everything not allowed here is forbidden! +// Default security policy file. +// On startup, BootStrap reads environment and adds additional permissions +// for configured paths to these. grant { - // contain read access to only what we need: + // system jar resources + permission java.io.FilePermission "${java.home}${/}-", "read"; + + // temporary files + permission java.io.FilePermission "${java.io.tmpdir}", "read,write"; + permission java.io.FilePermission "${java.io.tmpdir}${/}-", "read,write,delete"; + + // paths used for running tests // project base directory permission java.io.FilePermission "${project.basedir}${/}target${/}-", "read"; // read permission for lib sigar permission java.io.FilePermission "${project.basedir}${/}lib/sigar{/}-", "read"; // mvn custom ./m2/repository for dependency jars permission java.io.FilePermission "${m2.repository}${/}-", "read"; - // system jar resources - permission java.io.FilePermission "${java.home}${/}-", "read"; // per-jvm directory permission java.io.FilePermission "${junit4.childvm.cwd}${/}temp", "read,write"; permission java.io.FilePermission "${junit4.childvm.cwd}${/}temp${/}-", "read,write,delete"; diff --git a/pom.xml b/pom.xml index 772d7ef6578..66df6cbd5d6 100644 --- a/pom.xml +++ b/pom.xml @@ -628,7 +628,7 @@ ${tests.compatibility} true - ${basedir}/dev-tools/tests.policy + ${basedir}/config/security.policy diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 9020d115a8b..72c13efa0f4 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.inject.spi.Message; -import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.jna.Natives; import org.elasticsearch.common.logging.ESLogger; @@ -40,7 +39,6 @@ import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeBuilder; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import java.nio.file.Paths; import java.util.Locale; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -61,6 +59,7 @@ public class Bootstrap { private static Bootstrap bootstrap; private void setup(boolean addShutdownHook, Tuple tuple) throws Exception { + setupSecurity(tuple.v1(), tuple.v2()); if (tuple.v1().getAsBoolean("bootstrap.mlockall", false)) { Natives.tryMlockall(); } @@ -92,6 +91,16 @@ public class Bootstrap { }); } } + + private void setupSecurity(Settings settings, Environment environment) throws Exception { + ESLogger logger = Loggers.getLogger(Bootstrap.class); + if (settings.getAsBoolean("security.enabled", true)) { + Security.configure(environment); + logger.info("security enabled"); + } else { + logger.info("security disabled"); + } + } @SuppressForbidden(reason = "Exception#printStackTrace()") private static void setupLogging(Tuple tuple) { diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java new file mode 100644 index 00000000000..35de7edbe9f --- /dev/null +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -0,0 +1,160 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.bootstrap; + +import org.apache.lucene.util.Constants; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; + +import java.io.BufferedOutputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashSet; +import java.util.Set; + +/** + * Initializes securitymanager with necessary permissions. + *

+ * We use a template file (the one we test with), and add additional + * permissions based on the environment (data paths, etc) + */ +class Security { + + /** + * Initializes securitymanager for the environment + * Can only happen once! + */ + static void configure(Environment environment) throws IOException { + Path newConfig = processTemplate(environment.configFile().resolve("security.policy"), environment); + System.setProperty("java.security.policy", newConfig.toString()); + System.setSecurityManager(new SecurityManager()); + try { + Files.delete(newConfig); + } catch (Exception e) { + Loggers.getLogger(Security.class).warn("unable to remove temporary file: " + newConfig, e); + } + } + + // package-private for testing + static Path processTemplate(Path template, Environment environment) throws IOException { + Path processed = Files.createTempFile(null, null); + try (OutputStream output = new BufferedOutputStream(Files.newOutputStream(processed))) { + // copy the template as-is. + Files.copy(template, output); + + // add permissions for all configured paths. + Set paths = new HashSet<>(); + paths.add(environment.homeFile()); + paths.add(environment.configFile()); + paths.add(environment.logsFile()); + paths.add(environment.pluginsFile()); + paths.add(environment.workFile()); + paths.add(environment.workWithClusterFile()); + for (Path path : environment.dataFiles()) { + paths.add(path); + } + for (Path path : environment.dataWithClusterFiles()) { + paths.add(path); + } + output.write(createPermissions(paths)); + } + return processed; + } + + // package private for testing + static byte[] createPermissions(Set paths) throws IOException { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + + // all policy files are UTF-8: + // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html + try (Writer writer = new OutputStreamWriter(stream, StandardCharsets.UTF_8)) { + writer.write(System.lineSeparator()); + writer.write("grant {"); + writer.write(System.lineSeparator()); + for (Path path : paths) { + // add each path twice: once for itself, again for files underneath it + addPath(writer, encode(path), "read,readlink,write,delete"); + addRecursivePath(writer, encode(path), "read,readlink,write,delete"); + } + + // on *nix, try to grant read perms to file stores / SSD detection + if (!Constants.WINDOWS) { + Set stores = new HashSet<>(); + for (FileStore store : PathUtils.getDefaultFileSystem().getFileStores()) { + try { + String mount = NodeEnvironment.getMountPoint(store); + // mount point for fstat() calls against it + if (mount.startsWith("/")) { + stores.add(mount); + } + // block device: add it for SSD detection + if (store.name().startsWith("/")) { + stores.add(store.name()); + } + } catch (Throwable t) { + // these are hacks that are not guaranteed + } + } + for (String store : stores) { + addPath(writer, encode(store), "read,readlink"); + } + addRecursivePath(writer, "/sys/block", "read,readlink"); + addRecursivePath(writer, "/sys/devices", "read,readlink"); + addRecursivePath(writer, "/dev", "read,readlink"); + addRecursivePath(writer, "/devices", "read,readlink"); + } + + writer.write("};"); + writer.write(System.lineSeparator()); + } + + return stream.toByteArray(); + } + + static void addPath(Writer writer, String path, String permissions) throws IOException { + writer.write("permission java.io.FilePermission \"" + path + "\", \"" + permissions + "\";"); + writer.write(System.lineSeparator()); + } + + static void addRecursivePath(Writer writer, String path, String permissions) throws IOException { + writer.write("permission java.io.FilePermission \"" + path + "${/}-\", \"" + permissions + "\";"); + writer.write(System.lineSeparator()); + } + + // Any backslashes in paths must be escaped, because it is the escape character when parsing. + // See "Note Regarding File Path Specifications on Windows Systems". + // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html + static String encode(Path path) { + return encode(path.toString()); + } + + static String encode(String path) { + return path.replace("\\", "\\\\"); + } +} diff --git a/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 9436888e070..aef08e60b38 100644 --- a/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -295,7 +295,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { // NOTE: poached from Lucene's IOUtils: /** Files.getFileStore(Path) useless here! Don't complain, just try it yourself. */ - private static FileStore getFileStore(Path path) throws IOException { + public static FileStore getFileStore(Path path) throws IOException { FileStore store = Files.getFileStore(path); try { @@ -317,7 +317,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { // NOTE: poached from Lucene's IOUtils: // these are hacks that are not guaranteed - private static String getMountPoint(FileStore store) { + public static String getMountPoint(FileStore store) { String desc = store.toString(); return desc.substring(0, desc.lastIndexOf('(') - 1); } From f6934e0410130a08a5bf53caab82e53832ed1d63 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 22 Apr 2015 10:06:22 +0100 Subject: [PATCH 051/236] unit test for derivative of metric agg with gaps --- .../reducers/DerivativeTests.java | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 95c13b6fd2f..1c579c6cd5f 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -45,6 +45,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @@ -424,6 +425,40 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } + @Test + public void singleValueAggDerivativeWithGaps() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(valueCounts_empty.length)); + + double lastSumValue = Double.NaN; + for (int i = 0; i < valueCounts_empty.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]); + Sum sum = bucket.getAggregations().get("sum"); + double thisSumValue = sum.value(); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (i == 0) { + assertThat(docCountDeriv, nullValue()); + } else { + assertThat(docCountDeriv.value(), closeTo(thisSumValue - lastSumValue, 0.00001)); + } + lastSumValue = thisSumValue; + } + } + private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey, final long expectedDocCount) { assertThat(msg, bucket, notNullValue()); From 9406d834417a7a3a8b11fc3d7e1166eefd0dd4ac Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 22 Apr 2015 08:46:14 -0400 Subject: [PATCH 052/236] fix typo --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index d28060f8cd7..2a5773b4594 100644 --- a/pom.xml +++ b/pom.xml @@ -445,7 +445,7 @@ - ${jvm.executable> + ${jvm.executable} -classpath From 6f1b398b33856aff0ba396c5276e4b3dd22235f8 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 22 Apr 2015 15:30:04 +0200 Subject: [PATCH 053/236] Fix RestAnalyzeAction to also accept source parameter instead of the request body --- .../indices/analyze/RestAnalyzeAction.java | 29 ++++++++++++++++--- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index 0495d3420cb..0d4dea78218 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -55,7 +56,9 @@ public class RestAnalyzeAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + String text = request.param("text"); + AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index")); analyzeRequest.text(text); analyzeRequest.listenerThreaded(false); @@ -66,16 +69,16 @@ public class RestAnalyzeAction extends BaseRestHandler { analyzeRequest.tokenFilters(request.paramAsStringArray("token_filters", request.paramAsStringArray("filters", analyzeRequest.tokenFilters()))); analyzeRequest.charFilters(request.paramAsStringArray("char_filters", analyzeRequest.charFilters())); - if (request.hasContent()) { - XContentType type = XContentFactory.xContentType(request.content()); + if (request.hasContent() || request.hasParam("source")) { + XContentType type = contentType(request); if (type == null) { if (text == null) { - text = request.content().toUtf8(); + text = bodyContent(request).toUtf8(); analyzeRequest.text(text); } } else { // NOTE: if rest request with xcontent body has request parameters, the parameters does not override xcontent values - buildFromContent(request.content(), analyzeRequest); + buildFromContent(bodyContent(request), analyzeRequest); } } @@ -130,6 +133,24 @@ public class RestAnalyzeAction extends BaseRestHandler { } } + private XContentType contentType(final RestRequest request) { + if (request.hasContent()) { + return XContentFactory.xContentType(request.content()); + } else if (request.hasParam("source")) { + return XContentFactory.xContentType(request.param("source")); + } + throw new ElasticsearchIllegalArgumentException("Can't guess contentType neither source nor content available"); + } + + private BytesReference bodyContent(final RestRequest request) { + if (request.hasContent()) { + return request.content(); + } else if (request.hasParam("source")) { + return new BytesArray(request.param("source")); + } + throw new ElasticsearchIllegalArgumentException("Can't guess contentType neither source nor content available"); + } + From 77e2f644e32a78b4350fa162c4d7c8ba4b0becf4 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 22 Apr 2015 14:50:49 +0100 Subject: [PATCH 054/236] Derivative tests for gaps in metrics --- .../aggregations/reducers/BucketHelpers.java | 14 +-- .../reducers/DerivativeTests.java | 100 +++++++++++++++++- 2 files changed, 104 insertions(+), 10 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java index b6955a086ab..f6cdd8ca1f9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java @@ -166,13 +166,15 @@ public class BucketHelpers { throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName() + " must reference either a number value or a single value numeric metric aggregation"); } - if (Double.isInfinite(value) || Double.isNaN(value)) { + // doc count never has missing values so gap policy doesn't apply here + boolean isDocCountProperty = aggPathAsList.size() == 1 && "_count".equals(aggPathAsList.get(0)); + if (Double.isInfinite(value) || Double.isNaN(value) || (bucket.getDocCount() == 0 && !isDocCountProperty)) { switch (gapPolicy) { - case INSERT_ZEROS: - return 0.0; - case IGNORE: - default: - return Double.NaN; + case INSERT_ZEROS: + return 0.0; + case IGNORE: + default: + return Double.NaN; } } else { return value; diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 1c579c6cd5f..0974d297d46 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -373,7 +373,8 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) .extendedBounds(0l, (long) numBuckets_empty_rnd - 1) - .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(randomFrom(GapPolicy.values())))) + .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx_rnd)); @@ -449,11 +450,102 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]); Sum sum = bucket.getAggregations().get("sum"); double thisSumValue = sum.value(); - SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (bucket.getDocCount() == 0) { + thisSumValue = Double.NaN; + } + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); if (i == 0) { - assertThat(docCountDeriv, nullValue()); + assertThat(sumDeriv, nullValue()); } else { - assertThat(docCountDeriv.value(), closeTo(thisSumValue - lastSumValue, 0.00001)); + double expectedDerivative = thisSumValue - lastSumValue; + if (Double.isNaN(expectedDerivative)) { + assertThat(sumDeriv.value(), equalTo(expectedDerivative)); + } else { + assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001)); + } + } + lastSumValue = thisSumValue; + } + } + + @Test + public void singleValueAggDerivativeWithGaps_insertZeros() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv").setBucketsPaths("sum").gapPolicy(GapPolicy.INSERT_ZEROS))).execute() + .actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(valueCounts_empty.length)); + + double lastSumValue = Double.NaN; + for (int i = 0; i < valueCounts_empty.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]); + Sum sum = bucket.getAggregations().get("sum"); + double thisSumValue = sum.value(); + if (bucket.getDocCount() == 0) { + thisSumValue = 0; + } + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + if (i == 0) { + assertThat(sumDeriv, nullValue()); + } else { + double expectedDerivative = thisSumValue - lastSumValue; + assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001)); + } + lastSumValue = thisSumValue; + } + } + + @Test + public void singleValueAggDerivativeWithGaps_random() throws Exception { + GapPolicy gapPolicy = randomFrom(GapPolicy.values()); + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx_rnd") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + .extendedBounds(0l, (long) numBuckets_empty_rnd - 1) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv").setBucketsPaths("sum").gapPolicy(gapPolicy))).execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx_rnd)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numBuckets_empty_rnd)); + + double lastSumValue = Double.NaN; + for (int i = 0; i < valueCounts_empty_rnd.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty_rnd[i]); + Sum sum = bucket.getAggregations().get("sum"); + double thisSumValue = sum.value(); + if (bucket.getDocCount() == 0) { + thisSumValue = gapPolicy == GapPolicy.INSERT_ZEROS ? 0 : Double.NaN; + } + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + if (i == 0) { + assertThat(sumDeriv, nullValue()); + } else { + double expectedDerivative = thisSumValue - lastSumValue; + if (Double.isNaN(expectedDerivative)) { + assertThat(sumDeriv.value(), equalTo(expectedDerivative)); + } else { + assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001)); + } } lastSumValue = thisSumValue; } From 91ff3f6963bacbdfc8f637e14661d654434b4ccf Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 22 Apr 2015 15:40:16 +0200 Subject: [PATCH 055/236] Test: add ensure green to indices.stats/12_level.yaml Also changed the stash logger to not log all stashed values under debug (it does trace now) but do dump the stash content upon failure (under info as a XContent) --- .../test/indices.stats/12_level.yaml | 4 ++++ .../test/rest/ElasticsearchRestTestCase.java | 21 ++++--------------- .../org/elasticsearch/test/rest/Stash.java | 13 ++++++++++-- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/rest-api-spec/test/indices.stats/12_level.yaml b/rest-api-spec/test/indices.stats/12_level.yaml index c766f5eb625..6156437162c 100644 --- a/rest-api-spec/test/indices.stats/12_level.yaml +++ b/rest-api-spec/test/indices.stats/12_level.yaml @@ -15,6 +15,10 @@ setup: id: 1 body: { "foo": "baz" } + - do: + cluster.health: + wait_for_status: green + --- "Level - blank": - do: diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java index 3788003eff3..1780b695061 100644 --- a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java +++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java @@ -25,7 +25,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import com.google.common.collect.Lists; - import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.LuceneTestCase.SuppressFsync; @@ -40,11 +39,7 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.rest.client.RestException; import org.elasticsearch.test.rest.parser.RestTestParseException; import org.elasticsearch.test.rest.parser.RestTestSuiteParser; -import org.elasticsearch.test.rest.section.DoSection; -import org.elasticsearch.test.rest.section.ExecutableSection; -import org.elasticsearch.test.rest.section.RestTestSuite; -import org.elasticsearch.test.rest.section.SkipSection; -import org.elasticsearch.test.rest.section.TestSection; +import org.elasticsearch.test.rest.section.*; import org.elasticsearch.test.rest.spec.RestApi; import org.elasticsearch.test.rest.spec.RestSpec; import org.elasticsearch.test.rest.support.FileUtils; @@ -54,18 +49,10 @@ import org.junit.BeforeClass; import org.junit.Test; import java.io.IOException; -import java.lang.annotation.ElementType; -import java.lang.annotation.Inherited; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; +import java.lang.annotation.*; import java.nio.file.Path; import java.nio.file.PathMatcher; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; /** * Runs the clients test suite against an elasticsearch cluster. @@ -135,7 +122,7 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration blacklistPathMatchers = new PathMatcher[0]; } } - + @Override protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.builder() diff --git a/src/test/java/org/elasticsearch/test/rest/Stash.java b/src/test/java/org/elasticsearch/test/rest/Stash.java index 398b663cddb..4d0a1fb7fea 100644 --- a/src/test/java/org/elasticsearch/test/rest/Stash.java +++ b/src/test/java/org/elasticsearch/test/rest/Stash.java @@ -23,7 +23,10 @@ import com.google.common.collect.Maps; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import java.io.IOException; import java.util.List; import java.util.Map; @@ -31,7 +34,7 @@ import java.util.Map; * Allows to cache the last obtained test response and or part of it within variables * that can be used as input values in following requests and assertions. */ -public class Stash { +public class Stash implements ToXContent { private static final ESLogger logger = Loggers.getLogger(Stash.class); @@ -43,7 +46,7 @@ public class Stash { * Allows to saved a specific field in the stash as key-value pair */ public void stashValue(String key, Object value) { - logger.debug("stashing [{}]=[{}]", key, value); + logger.trace("stashing [{}]=[{}]", key, value); Object old = stash.put(key, value); if (old != null && old != value) { logger.trace("replaced stashed value [{}] with same key [{}]", old, key); @@ -116,4 +119,10 @@ public class Stash { } } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("stash", stash); + return builder; + } } From 1118965aa57b4ad96a69eeeec3b8289b692b0a05 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 22 Apr 2015 16:10:28 +0200 Subject: [PATCH 056/236] Test: move to ensure yellow in indices.stats/12_level.yaml --- rest-api-spec/test/indices.stats/12_level.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rest-api-spec/test/indices.stats/12_level.yaml b/rest-api-spec/test/indices.stats/12_level.yaml index 6156437162c..fb71e8d2032 100644 --- a/rest-api-spec/test/indices.stats/12_level.yaml +++ b/rest-api-spec/test/indices.stats/12_level.yaml @@ -17,7 +17,7 @@ setup: - do: cluster.health: - wait_for_status: green + wait_for_status: yellow --- "Level - blank": From ec07a41514749c70c481b204bca6ca1af3c1f1fe Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Wed, 22 Apr 2015 10:40:30 -0400 Subject: [PATCH 057/236] Don't use nested classes for Rest tests. Nested classes have the advantage of organizing the hack in a way where its easy to see what is happening overall, but they have the downside of class names with $ in them. These names work just fine, but can require shell escaping or other annoyances, which is the last thing you want if you are trying to just reproduce. --- .../test/rest/ElasticsearchRestTestCase.java | 82 ------------------- .../elasticsearch/test/rest/Rest0Tests.java | 38 +++++++++ .../elasticsearch/test/rest/Rest1Tests.java | 38 +++++++++ .../elasticsearch/test/rest/Rest2Tests.java | 38 +++++++++ .../elasticsearch/test/rest/Rest3Tests.java | 38 +++++++++ .../elasticsearch/test/rest/Rest4Tests.java | 38 +++++++++ .../elasticsearch/test/rest/Rest5Tests.java | 38 +++++++++ .../elasticsearch/test/rest/Rest6Tests.java | 38 +++++++++ .../elasticsearch/test/rest/Rest7Tests.java | 38 +++++++++ 9 files changed, 304 insertions(+), 82 deletions(-) create mode 100644 src/test/java/org/elasticsearch/test/rest/Rest0Tests.java create mode 100644 src/test/java/org/elasticsearch/test/rest/Rest1Tests.java create mode 100644 src/test/java/org/elasticsearch/test/rest/Rest2Tests.java create mode 100644 src/test/java/org/elasticsearch/test/rest/Rest3Tests.java create mode 100644 src/test/java/org/elasticsearch/test/rest/Rest4Tests.java create mode 100644 src/test/java/org/elasticsearch/test/rest/Rest5Tests.java create mode 100644 src/test/java/org/elasticsearch/test/rest/Rest6Tests.java create mode 100644 src/test/java/org/elasticsearch/test/rest/Rest7Tests.java diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java index 1780b695061..5e1d5084e41 100644 --- a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java +++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java @@ -302,86 +302,4 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration executableSection.execute(restTestExecutionContext); } } - - // don't look any further: NO TOUCHY! - - public static class Rest0Tests extends ElasticsearchRestTestCase { - public Rest0Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(0, 8); - } - } - - public static class Rest1Tests extends ElasticsearchRestTestCase { - public Rest1Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(1, 8); - } - } - - public static class Rest2Tests extends ElasticsearchRestTestCase { - public Rest2Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(2, 8); - } - } - - public static class Rest3Tests extends ElasticsearchRestTestCase { - public Rest3Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(3, 8); - } - } - - public static class Rest4Tests extends ElasticsearchRestTestCase { - public Rest4Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(4, 8); - } - } - - public static class Rest5Tests extends ElasticsearchRestTestCase { - public Rest5Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(5, 8); - } - } - - public static class Rest6Tests extends ElasticsearchRestTestCase { - public Rest6Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(6, 8); - } - } - - public static class Rest7Tests extends ElasticsearchRestTestCase { - public Rest7Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(7, 8); - } - } } diff --git a/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java new file mode 100644 index 00000000000..f86836876c5 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 0 */ +public class Rest0Tests extends ElasticsearchRestTestCase { + public Rest0Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(0, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java new file mode 100644 index 00000000000..d75444fe005 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 1 */ +public class Rest1Tests extends ElasticsearchRestTestCase { + public Rest1Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(1, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java new file mode 100644 index 00000000000..1d01ecc58ec --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 2 */ +public class Rest2Tests extends ElasticsearchRestTestCase { + public Rest2Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(2, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java new file mode 100644 index 00000000000..044e182e7a6 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 3 */ +public class Rest3Tests extends ElasticsearchRestTestCase { + public Rest3Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(3, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java new file mode 100644 index 00000000000..75213143b9c --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 4 */ +public class Rest4Tests extends ElasticsearchRestTestCase { + public Rest4Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(4, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java new file mode 100644 index 00000000000..a2c1af46dd0 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 5 */ +public class Rest5Tests extends ElasticsearchRestTestCase { + public Rest5Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(5, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java new file mode 100644 index 00000000000..bb7ccd10035 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 6 */ +public class Rest6Tests extends ElasticsearchRestTestCase { + public Rest6Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(6, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java new file mode 100644 index 00000000000..aba7c03136b --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 7 */ +public class Rest7Tests extends ElasticsearchRestTestCase { + public Rest7Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(7, 8); + } +} From 7f08ab301d050c91796c7be41d39cad8712c4d71 Mon Sep 17 00:00:00 2001 From: Jun Ohtani Date: Wed, 22 Apr 2015 23:55:15 +0900 Subject: [PATCH 058/236] Fix RestSearchScrollAction to also accept source parameter instead of the request body --- .../action/search/RestSearchScrollAction.java | 28 ++++++++++++++++--- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index cd2dbf856a3..df363aadd74 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -68,16 +69,16 @@ public class RestSearchScrollAction extends BaseRestHandler { searchScrollRequest.scroll(new Scroll(parseTimeValue(scroll, null))); } - if (request.hasContent()) { - XContentType type = XContentFactory.xContentType(request.content()); + if (request.hasContent() || request.hasParam("source")) { + XContentType type = contentType(request); if (type == null) { if (scrollId == null) { - scrollId = RestActions.getRestContent(request).toUtf8(); + scrollId = bodyContent(request).toUtf8(); searchScrollRequest.scrollId(scrollId); } } else { // NOTE: if rest request with xcontent body has request parameters, these parameters override xcontent values - buildFromContent(request.content(), searchScrollRequest); + buildFromContent(bodyContent(request), searchScrollRequest); } } client.searchScroll(searchScrollRequest, new RestStatusToXContentListener(channel)); @@ -107,4 +108,23 @@ public class RestSearchScrollAction extends BaseRestHandler { } } + private XContentType contentType(final RestRequest request) { + if (request.hasContent()) { + return XContentFactory.xContentType(request.content()); + } else if (request.hasParam("source")) { + return XContentFactory.xContentType(request.param("source")); + } + throw new ElasticsearchIllegalArgumentException("Can't guess contentType neither source nor content available"); + } + + private BytesReference bodyContent(final RestRequest request) { + if (request.hasContent()) { + return request.content(); + } else if (request.hasParam("source")) { + return new BytesArray(request.param("source")); + } + throw new ElasticsearchIllegalArgumentException("Can't guess contentType neither source nor content available"); + } + + } From dcf91ff02f721beb49a9952f7166d6731e72f36d Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 22 Apr 2015 16:01:23 +0100 Subject: [PATCH 059/236] Temporarily disabled gap policy randomisation in MovAvgTests --- .../search/aggregations/reducers/MovAvgTests.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java index d22656b0ad5..4f0e3c0d1cf 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.reducers; import com.google.common.collect.EvictingQueue; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -37,7 +38,8 @@ import java.util.ArrayList; import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.*; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.smooth; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; @@ -76,7 +78,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { numValueBuckets = randomIntBetween(6, 80); numFilledValueBuckets = numValueBuckets; windowSize = randomIntBetween(3,10); - gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; + gapPolicy = BucketHelpers.GapPolicy.INSERT_ZEROS; // TODO randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; docCounts = new long[numValueBuckets]; valueCounts = new long[numValueBuckets]; From dd679a3a3842aeb34326cce7bdb2bcdaa70cfafc Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 22 Apr 2015 16:10:27 +0200 Subject: [PATCH 060/236] Upgrade to lucene-5.2-snapshot-1675363. This snapshot contains in particular LUCENE-6446 (refactored explanation API) and LUCENE-6448 (better equals/hashcode for filters). --- pom.xml | 2 +- .../elasticsearch/common/lucene/Lucene.java | 52 +++----- .../common/lucene/search/Queries.java | 13 +- .../lucene/search/ResolvableFilter.java | 2 - .../search/function/BoostScoreFunction.java | 4 +- .../search/function/CombineFunction.java | 117 ++++++++---------- .../function/FieldValueFactorFunction.java | 8 +- .../function/FiltersFunctionScoreQuery.java | 25 ++-- .../search/function/RandomScoreFunction.java | 6 +- .../search/function/ScriptScoreFunction.java | 10 +- .../search/function/WeightFactorFunction.java | 13 +- .../index/cache/bitset/BitsetFilterCache.java | 9 +- .../filter/weighted/WeightedFilterCache.java | 4 +- .../index/mapper/MapperService.java | 9 +- .../index/query/HasChildFilterParser.java | 4 +- .../index/query/HasChildQueryParser.java | 4 +- .../index/query/TopChildrenQueryParser.java | 4 +- .../functionscore/DecayFunctionParser.java | 11 +- .../exp/ExponentialDecayFunctionParser.java | 8 +- .../gauss/GaussDecayFunctionParser.java | 8 +- .../lin/LinearDecayFunctionParser.java | 8 +- .../support/NestedInnerQueryParseSupport.java | 4 +- .../index/search/FieldDataTermsFilter.java | 46 +++++-- .../search/NumericRangeFieldDataFilter.java | 5 +- .../child/ChildrenConstantScoreQuery.java | 3 +- .../index/search/child/ChildrenQuery.java | 4 +- .../child/ParentConstantScoreQuery.java | 14 ++- .../index/search/child/ParentIdsFilter.java | 20 +++ .../index/search/child/ParentQuery.java | 20 ++- .../index/search/child/TopChildrenQuery.java | 2 +- .../index/search/geo/GeoDistanceFilter.java | 6 +- .../search/geo/GeoDistanceRangeFilter.java | 6 +- .../geo/InMemoryGeoBoundingBoxFilter.java | 20 +++ .../search/nested/NonNestedDocsFilter.java | 84 ------------- .../elasticsearch/index/shard/IndexShard.java | 22 +++- .../percolator/PercolatorService.java | 6 +- .../percolator/QueryCollector.java | 11 +- .../bucket/nested/NestedAggregator.java | 5 +- .../nested/ReverseNestedAggregator.java | 4 +- .../search/fetch/FetchPhase.java | 13 +- .../fetch/innerhits/InnerHitsContext.java | 4 +- .../search/rescore/QueryRescorer.java | 35 +++--- .../search/sort/GeoDistanceSortParser.java | 15 ++- .../search/sort/ScriptSortParser.java | 12 +- .../search/sort/SortParseElement.java | 5 +- .../explain/ExplainActionTests.java | 6 +- .../ChildrenConstantScoreQueryTests.java | 4 +- .../search/child/ChildrenQueryTests.java | 3 +- .../search/child/TopChildrenQueryTests.java | 4 +- .../nested/SimpleNestedTests.java | 2 +- .../aggregations/bucket/TopHitsTests.java | 2 +- .../bucket/nested/NestedAggregatorTest.java | 4 +- .../child/SimpleChildQuerySearchTests.java | 2 +- .../functionscore/ExplainableScriptTests.java | 7 +- .../FunctionScorePluginTests.java | 9 +- .../functionscore/FunctionScoreTests.java | 4 +- .../search/innerhits/InnerHitsTests.java | 4 +- 57 files changed, 348 insertions(+), 390 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java diff --git a/pom.xml b/pom.xml index 772d7ef6578..c2841471585 100644 --- a/pom.xml +++ b/pom.xml @@ -32,7 +32,7 @@ 5.2.0 - 1675100 + 1675363 5.2.0-snapshot-${lucene.snapshot.revision} 2.1.14 auto diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 229e94a95d2..78503ebb2b6 100644 --- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -39,7 +39,6 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Collector; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Filter; @@ -530,48 +529,29 @@ public class Lucene { } public static Explanation readExplanation(StreamInput in) throws IOException { - Explanation explanation; - if (in.readBoolean()) { - Boolean match = in.readOptionalBoolean(); - explanation = new ComplexExplanation(); - ((ComplexExplanation) explanation).setMatch(match); - + boolean match = in.readBoolean(); + String description = in.readString(); + final Explanation[] subExplanations = new Explanation[in.readVInt()]; + for (int i = 0; i < subExplanations.length; ++i) { + subExplanations[i] = readExplanation(in); + } + if (match) { + return Explanation.match(in.readFloat(), description, subExplanations); } else { - explanation = new Explanation(); + return Explanation.noMatch(description, subExplanations); } - explanation.setValue(in.readFloat()); - explanation.setDescription(in.readString()); - if (in.readBoolean()) { - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - explanation.addDetail(readExplanation(in)); - } - } - return explanation; } public static void writeExplanation(StreamOutput out, Explanation explanation) throws IOException { - - if (explanation instanceof ComplexExplanation) { - out.writeBoolean(true); - out.writeOptionalBoolean(((ComplexExplanation) explanation).getMatch()); - } else { - out.writeBoolean(false); - } - out.writeFloat(explanation.getValue()); - if (explanation.getDescription() == null) { - throw new ElasticsearchIllegalArgumentException("Explanation descriptions should NOT be null\n[" + explanation.toString() + "]"); - } + out.writeBoolean(explanation.isMatch()); out.writeString(explanation.getDescription()); Explanation[] subExplanations = explanation.getDetails(); - if (subExplanations == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeVInt(subExplanations.length); - for (Explanation subExp : subExplanations) { - writeExplanation(out, subExp); - } + out.writeVInt(subExplanations.length); + for (Explanation subExp : subExplanations) { + writeExplanation(out, subExp); + } + if (explanation.isMatch()) { + out.writeFloat(explanation.getValue()); } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index b64758ee592..69ed1f68d64 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -19,17 +19,20 @@ package org.elasticsearch.common.lucene.search; +import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryWrapperFilter; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; @@ -58,6 +61,14 @@ public class Queries { return wrap(newMatchNoDocsQuery()); } + public static Filter newNestedFilter() { + return wrap(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")))); + } + + public static Filter newNonNestedFilter() { + return wrap(not(newNestedFilter())); + } + /** Return a query that matches all documents but those that match the given query. */ public static Query not(Query q) { BooleanQuery bq = new BooleanQuery(); diff --git a/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java index 4578e6711be..8ecb6228705 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java @@ -37,8 +37,6 @@ public abstract class ResolvableFilter extends Filter { */ public abstract Filter resolve(); - - @Override public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { Filter resolvedFilter = resolve(); diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java index 01610a791b2..58d438adb3a 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java @@ -52,9 +52,7 @@ public class BoostScoreFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) { - Explanation exp = new Explanation(boost, "static boost factor"); - exp.addDetail(new Explanation(boost, "boostFactor")); - return exp; + return Explanation.match(boost, "static boost factor", Explanation.match(boost, "boostFactor")); } }; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java index d5455fa1f38..30c8f01b709 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.lucene.search.function; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; public enum CombineFunction { @@ -35,16 +34,15 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = queryBoost * Math.min(funcExpl.getValue(), maxBoost) * queryExpl.getValue(); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - res.addDetail(queryExpl); - ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - minExpl.addDetail(funcExpl); - minExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - res.addDetail(minExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation boostExpl = Explanation.match(maxBoost, "maxBoost"); + Explanation minExpl = Explanation.match( + Math.min(funcExpl.getValue(), maxBoost), + "min of:", + funcExpl, boostExpl); + return Explanation.match(score, "function score, product of:", + queryExpl, minExpl, Explanation.match(queryBoost, "queryBoost")); } }, REPLACE { @@ -59,15 +57,15 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = queryBoost * Math.min(funcExpl.getValue(), maxBoost); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - minExpl.addDetail(funcExpl); - minExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - res.addDetail(minExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation boostExpl = Explanation.match(maxBoost, "maxBoost"); + Explanation minExpl = Explanation.match( + Math.min(funcExpl.getValue(), maxBoost), + "min of:", + funcExpl, boostExpl); + return Explanation.match(score, "function score, product of:", + minExpl, Explanation.match(queryBoost, "queryBoost")); } }, @@ -83,19 +81,14 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = queryBoost * (Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - minExpl.addDetail(funcExpl); - minExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - ComplexExplanation sumExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue(), - "sum of"); - sumExpl.addDetail(queryExpl); - sumExpl.addDetail(minExpl); - res.addDetail(sumExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost), "min of:", + funcExpl, Explanation.match(maxBoost, "maxBoost")); + Explanation sumExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue(), "sum of", + queryExpl, minExpl); + return Explanation.match(score, "function score, product of:", + sumExpl, Explanation.match(queryBoost, "queryBoost")); } }, @@ -111,19 +104,15 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = toFloat(queryBoost * (queryExpl.getValue() + Math.min(funcExpl.getValue(), maxBoost)) / 2.0); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - minExpl.addDetail(funcExpl); - minExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - ComplexExplanation avgExpl = new ComplexExplanation(true, - toFloat((Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()) / 2.0), "avg of"); - avgExpl.addDetail(queryExpl); - avgExpl.addDetail(minExpl); - res.addDetail(avgExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost), "min of:", + funcExpl, Explanation.match(maxBoost, "maxBoost")); + Explanation avgExpl = Explanation.match( + toFloat((Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()) / 2.0), "avg of", + queryExpl, minExpl); + return Explanation.match(score, "function score, product of:", + avgExpl, Explanation.match(queryBoost, "queryBoost")); } }, @@ -139,19 +128,16 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = toFloat(queryBoost * Math.min(queryExpl.getValue(), Math.min(funcExpl.getValue(), maxBoost))); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation innerMinExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - innerMinExpl.addDetail(funcExpl); - innerMinExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - ComplexExplanation outerMinExpl = new ComplexExplanation(true, Math.min(Math.min(funcExpl.getValue(), maxBoost), - queryExpl.getValue()), "min of"); - outerMinExpl.addDetail(queryExpl); - outerMinExpl.addDetail(innerMinExpl); - res.addDetail(outerMinExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation innerMinExpl = Explanation.match( + Math.min(funcExpl.getValue(), maxBoost), "min of:", + funcExpl, Explanation.match(maxBoost, "maxBoost")); + Explanation outerMinExpl = Explanation.match( + Math.min(Math.min(funcExpl.getValue(), maxBoost), queryExpl.getValue()), "min of", + queryExpl, innerMinExpl); + return Explanation.match(score, "function score, product of:", + outerMinExpl, Explanation.match(queryBoost, "queryBoost")); } }, @@ -167,19 +153,16 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = toFloat(queryBoost * Math.max(queryExpl.getValue(), Math.min(funcExpl.getValue(), maxBoost))); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation innerMinExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - innerMinExpl.addDetail(funcExpl); - innerMinExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - ComplexExplanation outerMaxExpl = new ComplexExplanation(true, Math.max(Math.min(funcExpl.getValue(), maxBoost), - queryExpl.getValue()), "max of"); - outerMaxExpl.addDetail(queryExpl); - outerMaxExpl.addDetail(innerMinExpl); - res.addDetail(outerMaxExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation innerMinExpl = Explanation.match( + Math.min(funcExpl.getValue(), maxBoost), "min of:", + funcExpl, Explanation.match(maxBoost, "maxBoost")); + Explanation outerMaxExpl = Explanation.match( + Math.max(Math.min(funcExpl.getValue(), maxBoost), queryExpl.getValue()), "max of:", + queryExpl, innerMinExpl); + return Explanation.match(score, "function score, product of:", + outerMaxExpl, Explanation.match(queryBoost, "queryBoost")); } }; @@ -198,5 +181,5 @@ public enum CombineFunction { return Double.compare(floatVersion, input) == 0 || input == 0.0d ? 0 : 1.d - (floatVersion) / input; } - public abstract ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost); + public abstract Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost); } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index 5729d7c9b31..437e5a63b28 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -70,13 +70,11 @@ public class FieldValueFactorFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) { - Explanation exp = new Explanation(); String modifierStr = modifier != null ? modifier.toString() : ""; double score = score(docId, subQueryScore.getValue()); - exp.setValue(CombineFunction.toFloat(score)); - exp.setDescription("field value function: " + - modifierStr + "(" + "doc['" + field + "'].value * factor=" + boostFactor + ")"); - return exp; + return Explanation.match( + CombineFunction.toFloat(score), + "field value function: " + modifierStr + "(" + "doc['" + field + "'].value * factor=" + boostFactor + ")"); } }; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index b4ff4d7868c..03dc0fcfb9e 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -175,7 +175,7 @@ public class FiltersFunctionScoreQuery extends Query { return subQueryExpl; } // First: Gather explanations for all filters - List filterExplanations = new ArrayList<>(); + List filterExplanations = new ArrayList<>(); float weightSum = 0; for (FilterFunction filterFunction : filterFunctions) { @@ -191,18 +191,16 @@ public class FiltersFunctionScoreQuery extends Query { Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl); double factor = functionExplanation.getValue(); float sc = CombineFunction.toFloat(factor); - ComplexExplanation filterExplanation = new ComplexExplanation(true, sc, "function score, product of:"); - filterExplanation.addDetail(new Explanation(1.0f, "match filter: " + filterFunction.filter.toString())); - filterExplanation.addDetail(functionExplanation); + Explanation filterExplanation = Explanation.match(sc, "function score, product of:", + Explanation.match(1.0f, "match filter: " + filterFunction.filter.toString()), functionExplanation); filterExplanations.add(filterExplanation); } } if (filterExplanations.size() == 0) { float sc = getBoost() * subQueryExpl.getValue(); - Explanation res = new ComplexExplanation(true, sc, "function score, no filter match, product of:"); - res.addDetail(subQueryExpl); - res.addDetail(new Explanation(getBoost(), "queryBoost")); - return res; + return Explanation.match(sc, "function score, no filter match, product of:", + subQueryExpl, + Explanation.match(getBoost(), "queryBoost")); } // Second: Compute the factor that would have been computed by the @@ -242,12 +240,11 @@ public class FiltersFunctionScoreQuery extends Query { } } } - ComplexExplanation factorExplanaition = new ComplexExplanation(true, CombineFunction.toFloat(factor), - "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]"); - for (int i = 0; i < filterExplanations.size(); i++) { - factorExplanaition.addDetail(filterExplanations.get(i)); - } - return combineFunction.explain(getBoost(), subQueryExpl, factorExplanaition, maxBoost); + Explanation factorExplanation = Explanation.match( + CombineFunction.toFloat(factor), + "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]", + filterExplanations); + return combineFunction.explain(getBoost(), subQueryExpl, factorExplanation, maxBoost); } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java index 2e42cb92af0..934640e4ae0 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java @@ -74,9 +74,9 @@ public class RandomScoreFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) { - Explanation exp = new Explanation(); - exp.setDescription("random score function (seed: " + originalSeed + ")"); - return exp; + return Explanation.match( + CombineFunction.toFloat(score(docId, subQueryScore.getValue())), + "random score function (seed: " + originalSeed + ")"); } }; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 787d8c1a955..860588207f0 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -117,10 +117,12 @@ public class ScriptScoreFunction extends ScoreFunction { if (params != null) { explanation += "\" and parameters: \n" + params.toString(); } - exp = new Explanation(CombineFunction.toFloat(score), explanation); - Explanation scoreExp = new Explanation(subQueryScore.getValue(), "_score: "); - scoreExp.addDetail(subQueryScore); - exp.addDetail(scoreExp); + Explanation scoreExp = Explanation.match( + subQueryScore.getValue(), "_score: ", + subQueryScore); + return Explanation.match( + CombineFunction.toFloat(score), explanation, + scoreExp); } return exp; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java index cda0fa0477e..6a36198e386 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchIllegalArgumentException; @@ -65,18 +64,16 @@ public class WeightFactorFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { - Explanation functionScoreExplanation; Explanation functionExplanation = leafFunction.explainScore(docId, subQueryScore); - functionScoreExplanation = new ComplexExplanation(true, functionExplanation.getValue() * (float) getWeight(), "product of:"); - functionScoreExplanation.addDetail(functionExplanation); - functionScoreExplanation.addDetail(explainWeight()); - return functionScoreExplanation; + return Explanation.match( + functionExplanation.getValue() * (float) getWeight(), "product of:", + functionExplanation, explainWeight()); } }; } public Explanation explainWeight() { - return new Explanation(getWeight(), "weight"); + return Explanation.match(getWeight(), "weight"); } public float getWeight() { @@ -99,7 +96,7 @@ public class WeightFactorFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) { - return new Explanation(1.0f, "constant score 1.0 - no function provided"); + return Explanation.match(1.0f, "constant score 1.0 - no function provided"); } }; } diff --git a/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index de622e4d689..1a3af2b44c0 100644 --- a/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -23,6 +23,7 @@ import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; + import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; @@ -36,19 +37,19 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.NoCacheFilter; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; -import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.IndicesWarmer.TerminationHandle; import org.elasticsearch.threadpool.ThreadPool; @@ -266,7 +267,7 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea } if (hasNested) { - warmUp.add(NonNestedDocsFilter.INSTANCE); + warmUp.add(Queries.newNonNestedFilter()); } final Executor executor = threadPool.executor(executor()); diff --git a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java index 2720d32d9d1..8ed25ec91a6 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java @@ -212,13 +212,13 @@ public class WeightedFilterCache extends AbstractIndexComponent implements Filte @Override public boolean equals(Object o) { - if (!(o instanceof FilterCacheFilterWrapper)) return false; + if (super.equals(o) == false) return false; return this.filter.equals(((FilterCacheFilterWrapper) o).filter); } @Override public int hashCode() { - return filter.hashCode() ^ 0x1117BF25; + return 31 * super.hashCode() + filter.hashCode(); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 4bad191f88e..5e11580cea8 100755 --- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -36,7 +36,6 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; @@ -61,7 +60,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.similarity.SimilarityLookupService; import org.elasticsearch.indices.InvalidTypeNameException; @@ -72,7 +70,6 @@ import org.elasticsearch.script.ScriptService; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; -import java.nio.file.Paths; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -455,10 +452,10 @@ public class MapperService extends AbstractIndexComponent { if (hasNested && filterPercolateType) { BooleanQuery bq = new BooleanQuery(); bq.add(percolatorType, Occur.MUST_NOT); - bq.add(NonNestedDocsFilter.INSTANCE, Occur.MUST); + bq.add(Queries.newNonNestedFilter(), Occur.MUST); return Queries.wrap(bq); } else if (hasNested) { - return NonNestedDocsFilter.INSTANCE; + return Queries.newNonNestedFilter(); } else if (filterPercolateType) { return Queries.wrap(Queries.not(percolatorType)); } else { @@ -523,7 +520,7 @@ public class MapperService extends AbstractIndexComponent { bool.add(percolatorType, BooleanClause.Occur.MUST_NOT); } if (hasNested) { - bool.add(NonNestedDocsFilter.INSTANCE, BooleanClause.Occur.MUST); + bool.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST); } return Queries.wrap(bool); diff --git a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java index 0f83e3408bb..dc8869921eb 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.DocumentMapper; @@ -35,7 +36,6 @@ import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery; import org.elasticsearch.index.search.child.ChildrenQuery; import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -166,7 +166,7 @@ public class HasChildFilterParser implements FilterParser { BitDocIdSetFilter nonNestedDocsFilter = null; if (parentDocMapper.hasNestedObjects()) { - nonNestedDocsFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE); + nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); } Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy()); diff --git a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java index c7e8f2567d4..48601f84416 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.DocumentMapper; @@ -36,7 +37,6 @@ import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery; import org.elasticsearch.index.search.child.ChildrenQuery; import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -165,7 +165,7 @@ public class HasChildQueryParser implements QueryParser { BitDocIdSetFilter nonNestedDocsFilter = null; if (parentDocMapper.hasNestedObjects()) { - nonNestedDocsFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE); + nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); } // wrap the query with type query diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java index a0f5834b44e..7c45bb7fc14 100644 --- a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.DocumentMapper; @@ -31,7 +32,6 @@ import org.elasticsearch.index.query.support.XContentStructure; import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; import org.elasticsearch.index.search.child.TopChildrenQuery; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import java.io.IOException; @@ -128,7 +128,7 @@ public class TopChildrenQueryParser implements QueryParser { BitDocIdSetFilter nonNestedDocsFilter = null; if (childDocMapper.hasNestedObjects()) { - nonNestedDocsFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE); + nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); } innerQuery.setBoost(boost); diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java index daf9aca1b4e..9129a33d064 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query.functionscore; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; @@ -463,12 +462,10 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser { @Override public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { - ComplexExplanation ce = new ComplexExplanation(); - ce.setValue(CombineFunction.toFloat(score(docId, subQueryScore.getValue()))); - ce.setMatch(true); - ce.setDescription("Function for field " + getFieldName() + ":"); - ce.addDetail(func.explainFunction(getDistanceString(ctx, docId), distance.get(docId), scale)); - return ce; + return Explanation.match( + CombineFunction.toFloat(score(docId, subQueryScore.getValue())), + "Function for field " + getFieldName() + ":", + func.explainFunction(getDistanceString(ctx, docId), distance.get(docId), scale)); } }; } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java index 55b32c43a4d..bab04d4a1dc 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.functionscore.exp; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.index.query.functionscore.DecayFunction; import org.elasticsearch.index.query.functionscore.DecayFunctionParser; @@ -49,10 +48,9 @@ public class ExponentialDecayFunctionParser extends DecayFunctionParser { @Override public Explanation explainFunction(String valueExpl, double value, double scale) { - ComplexExplanation ce = new ComplexExplanation(); - ce.setValue((float) evaluate(value, scale)); - ce.setDescription("exp(- " + valueExpl + " * " + -1 * scale + ")"); - return ce; + return Explanation.match( + (float) evaluate(value, scale), + "exp(- " + valueExpl + " * " + -1 * scale + ")"); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java index 26f77d37086..614050a8fbe 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.functionscore.gauss; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.index.query.functionscore.DecayFunction; import org.elasticsearch.index.query.functionscore.DecayFunctionParser; @@ -45,10 +44,9 @@ public class GaussDecayFunctionParser extends DecayFunctionParser { @Override public Explanation explainFunction(String valueExpl, double value, double scale) { - ComplexExplanation ce = new ComplexExplanation(); - ce.setValue((float) evaluate(value, scale)); - ce.setDescription("exp(-0.5*pow(" + valueExpl + ",2.0)/" + -1 * scale + ")"); - return ce; + return Explanation.match( + (float) evaluate(value, scale), + "exp(-0.5*pow(" + valueExpl + ",2.0)/" + -1 * scale + ")"); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java index 05ecbbbfc04..215a7873ae3 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.functionscore.lin; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.index.query.functionscore.DecayFunction; import org.elasticsearch.index.query.functionscore.DecayFunctionParser; @@ -49,10 +48,9 @@ public class LinearDecayFunctionParser extends DecayFunctionParser { @Override public Explanation explainFunction(String valueExpl, double value, double scale) { - ComplexExplanation ce = new ComplexExplanation(); - ce.setValue((float) evaluate(value, scale)); - ce.setDescription("max(0.0, ((" + scale + " - " + valueExpl + ")/" + scale + ")"); - return ce; + return Explanation.match( + (float) evaluate(value, scale), + "max(0.0, ((" + scale + " - " + valueExpl + ")/" + scale + ")"); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java b/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java index 07142e5e1e6..17eb059e1d0 100644 --- a/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java +++ b/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -31,7 +32,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParsingException; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -195,7 +195,7 @@ public class NestedInnerQueryParseSupport { private void setPathLevel() { ObjectMapper objectMapper = parseContext.nestedScope().getObjectMapper(); if (objectMapper == null) { - parentFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE); + parentFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); } else { parentFilter = parseContext.bitsetFilter(objectMapper.nestedTypeFilter()); } diff --git a/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java b/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java index b1bc01d599a..b44bde505c4 100644 --- a/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java +++ b/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.search; import java.io.IOException; +import java.util.Objects; import com.carrotsearch.hppc.DoubleOpenHashSet; import com.carrotsearch.hppc.LongOpenHashSet; @@ -86,16 +87,19 @@ public abstract class FieldDataTermsFilter extends Filter { @Override public boolean equals(Object obj) { if (this == obj) return true; - if (obj == null || !(obj instanceof FieldDataTermsFilter)) return false; + if (super.equals(obj) == false) return false; FieldDataTermsFilter that = (FieldDataTermsFilter) obj; if (!fieldData.getFieldNames().indexName().equals(that.fieldData.getFieldNames().indexName())) return false; - if (this.hashCode() != obj.hashCode()) return false; return true; } @Override - public abstract int hashCode(); + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + fieldData.getFieldNames().indexName().hashCode(); + return h; + } /** * Filters on non-numeric fields. @@ -109,11 +113,17 @@ public abstract class FieldDataTermsFilter extends Filter { this.terms = terms; } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + return Objects.equals(terms, ((BytesFieldDataFilter) obj).terms); + } + @Override public int hashCode() { - int hashcode = fieldData.getFieldNames().indexName().hashCode(); - hashcode += terms != null ? terms.hashCode() : 0; - return hashcode; + return 31 * super.hashCode() + Objects.hashCode(terms); } @Override @@ -166,11 +176,17 @@ public abstract class FieldDataTermsFilter extends Filter { this.terms = terms; } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + return Objects.equals(terms, ((BytesFieldDataFilter) obj).terms); + } + @Override public int hashCode() { - int hashcode = fieldData.getFieldNames().indexName().hashCode(); - hashcode += terms != null ? terms.hashCode() : 0; - return hashcode; + return 31 * super.hashCode() + Objects.hashCode(terms); } @Override @@ -225,11 +241,17 @@ public abstract class FieldDataTermsFilter extends Filter { this.terms = terms; } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + return Objects.equals(terms, ((BytesFieldDataFilter) obj).terms); + } + @Override public int hashCode() { - int hashcode = fieldData.getFieldNames().indexName().hashCode(); - hashcode += terms != null ? terms.hashCode() : 0; - return hashcode; + return 31 * super.hashCode() + Objects.hashCode(terms); } @Override diff --git a/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java b/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java index 9c032c225fb..43b279c073b 100644 --- a/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java +++ b/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java @@ -85,7 +85,7 @@ public abstract class NumericRangeFieldDataFilter extends Filter { @Override public final boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof NumericRangeFieldDataFilter)) return false; + if (super.equals(o) == false) return false; NumericRangeFieldDataFilter other = (NumericRangeFieldDataFilter) o; if (!this.indexFieldData.getFieldNames().indexName().equals(other.indexFieldData.getFieldNames().indexName()) @@ -101,7 +101,8 @@ public abstract class NumericRangeFieldDataFilter extends Filter { @Override public final int hashCode() { - int h = indexFieldData.getFieldNames().indexName().hashCode(); + int h = super.hashCode(); + h = 31 * h + indexFieldData.getFieldNames().indexName().hashCode(); h ^= (lowerVal != null) ? lowerVal.hashCode() : 550356204; h = (h << 1) | (h >>> 31); // rotate to distinguish lower from upper h ^= (upperVal != null) ? upperVal.hashCode() : -1674416163; diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java index db1c15ddbb6..2b9363da61f 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java @@ -40,7 +40,6 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.fielddata.AtomicParentChildFieldData; import org.elasticsearch.index.fielddata.IndexParentChildFieldData; import org.elasticsearch.search.internal.SearchContext; @@ -202,7 +201,7 @@ public class ChildrenConstantScoreQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } @Override diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java index e6f3069818d..960739cb3c3 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java @@ -36,14 +36,12 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.search.XFilteredDocIdSetIterator; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ToStringUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.FloatArray; import org.elasticsearch.common.util.IntArray; @@ -264,7 +262,7 @@ public class ChildrenQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } @Override diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java index 3617ab29a89..ce1be7e25d3 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java @@ -22,12 +22,20 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.Term; -import org.apache.lucene.search.*; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.FilteredDocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.fielddata.AtomicParentChildFieldData; import org.elasticsearch.index.fielddata.IndexParentChildFieldData; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; @@ -166,7 +174,7 @@ public class ParentConstantScoreQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } @Override diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java index 0b437a83b9e..cf16d78af77 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java @@ -195,4 +195,24 @@ final class ParentIdsFilter extends Filter { public String toString(String field) { return "parentsFilter(type=" + parentTypeBr.utf8ToString() + ")"; } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + ParentIdsFilter other = (ParentIdsFilter) obj; + return parentTypeBr.equals(other.parentTypeBr) + && parentIds.equals(other.parentIds) + && nonNestedDocsFilter.equals(nonNestedDocsFilter); + } + + @Override + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + parentTypeBr.hashCode(); + h = 31 * h + parentIds.hashCode(); + h = 31 * h + nonNestedDocsFilter.hashCode(); + return h; + } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java index 5b9f22ace70..a45d1e2a912 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java @@ -18,17 +18,27 @@ */ package org.elasticsearch.index.search.child; -import org.apache.lucene.index.*; -import org.apache.lucene.search.*; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ToStringUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.FloatArray; import org.elasticsearch.common.util.LongHash; @@ -232,7 +242,7 @@ public class ParentQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } @Override diff --git a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java index 7ca36745761..330ebe33f09 100644 --- a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java @@ -368,7 +368,7 @@ public class TopChildrenQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } } diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java index 014a69fed12..d3b9a4c0757 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java @@ -123,7 +123,7 @@ public class GeoDistanceFilter extends Filter { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; GeoDistanceFilter filter = (GeoDistanceFilter) o; @@ -144,10 +144,10 @@ public class GeoDistanceFilter extends Filter { @Override public int hashCode() { - int result; + int result = super.hashCode(); long temp; temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L; - result = (int) (temp ^ (temp >>> 32)); + result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = lon != +0.0d ? Double.doubleToLongBits(lon) : 0L; result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = distance != +0.0d ? Double.doubleToLongBits(distance) : 0L; diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java index a48760657d3..dcc08a01b63 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java @@ -133,7 +133,7 @@ public class GeoDistanceRangeFilter extends Filter { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; GeoDistanceRangeFilter filter = (GeoDistanceRangeFilter) o; @@ -155,10 +155,10 @@ public class GeoDistanceRangeFilter extends Filter { @Override public int hashCode() { - int result; + int result = super.hashCode(); long temp; temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L; - result = (int) (temp ^ (temp >>> 32)); + result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = lon != +0.0d ? Double.doubleToLongBits(lon) : 0L; result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = inclusiveLowerPoint != +0.0d ? Double.doubleToLongBits(inclusiveLowerPoint) : 0L; diff --git a/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java b/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java index 2a2b99a5b0c..8b769e42849 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java @@ -76,6 +76,26 @@ public class InMemoryGeoBoundingBoxFilter extends Filter { return "GeoBoundingBoxFilter(" + indexFieldData.getFieldNames().indexName() + ", " + topLeft + ", " + bottomRight + ")"; } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + InMemoryGeoBoundingBoxFilter other = (InMemoryGeoBoundingBoxFilter) obj; + return fieldName().equalsIgnoreCase(other.fieldName()) + && topLeft.equals(other.topLeft) + && bottomRight.equals(other.bottomRight); + } + + @Override + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + fieldName().hashCode(); + h = 31 * h + topLeft.hashCode(); + h = 31 * h + bottomRight.hashCode(); + return h; + } + public static class Meridian180GeoBoundingBoxDocSet extends DocValuesDocIdSet { private final MultiGeoPointValues values; private final GeoPoint topLeft; diff --git a/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java b/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java deleted file mode 100644 index 12f35f26b25..00000000000 --- a/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.search.nested; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.DocIdSet; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.PrefixQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryWrapperFilter; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.index.mapper.internal.TypeFieldMapper; - -import java.io.IOException; - -/** - * A filter that returns all root (non nested) documents. - * - * Root documents have an unique id, a type and optionally have a _source and other indexed and stored fields. - * A nested document is a sub documents that belong to a root document. - * Nested documents share the unique id and type and optionally the _source with root documents. - */ -public final class NonNestedDocsFilter extends Filter { - - public static final NonNestedDocsFilter INSTANCE = new NonNestedDocsFilter(); - - private final Filter filter = Queries.wrap(Queries.not(nestedFilter())); - private final int hashCode = filter.hashCode(); - - private NonNestedDocsFilter() { - } - - @Override - public Query clone() { - return INSTANCE; - } - - @Override - public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { - return filter.getDocIdSet(context, acceptDocs); - } - - @Override - public int hashCode() { - return hashCode; - } - - @Override - public boolean equals(Object obj) { - return obj == INSTANCE; - } - - @Override - public String toString(String field) { - return "NonNestedDocsFilter"; - } - - /** - * @return a filter that returns all nested documents. - */ - private static Filter nestedFilter() { - return Queries.wrap(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")))); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 4aa7bb9bb23..0ff0c420f02 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.shard; import com.google.common.base.Charsets; + import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.search.Filter; @@ -50,6 +51,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -69,7 +71,14 @@ import org.elasticsearch.index.cache.query.ShardQueryCache; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; -import org.elasticsearch.index.engine.*; +import org.elasticsearch.index.engine.CommitStats; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineClosedException; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.RefreshFailedEngineException; +import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ShardFieldData; @@ -78,7 +87,12 @@ import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.ShardGetService; import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.indexing.ShardIndexingService; -import org.elasticsearch.index.mapper.*; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperAnalyzer; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.policy.MergePolicyProvider; @@ -88,7 +102,6 @@ import org.elasticsearch.index.percolator.stats.ShardPercolateService; import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.search.stats.ShardSearchService; import org.elasticsearch.index.settings.IndexSettingsService; @@ -116,7 +129,6 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; import java.util.Map; -import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; @@ -545,7 +557,7 @@ public class IndexShard extends AbstractIndexShardComponent { } Filter aliasFilter = indexAliasesService.aliasFilter(filteringAliases); - BitDocIdSetFilter parentFilter = mapperService.hasNested() ? indexCache.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE) : null; + BitDocIdSetFilter parentFilter = mapperService.hasNested() ? indexCache.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()) : null; return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, origin, startTime, types); } diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 1fa8aa85693..da26759d7c1 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -70,14 +70,12 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MapperUtils; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.percolator.stats.ShardPercolateService; import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.percolator.QueryCollector.Count; @@ -455,7 +453,7 @@ public class PercolatorService extends AbstractComponent { for (Map.Entry entry : context.percolateQueries().entrySet()) { try { if (isNested) { - Lucene.exists(context.docSearcher(), entry.getValue(), NonNestedDocsFilter.INSTANCE, collector); + Lucene.exists(context.docSearcher(), entry.getValue(), Queries.newNonNestedFilter(), collector); } else { Lucene.exists(context.docSearcher(), entry.getValue(), collector); } @@ -555,7 +553,7 @@ public class PercolatorService extends AbstractComponent { } try { if (isNested) { - Lucene.exists(context.docSearcher(), entry.getValue(), NonNestedDocsFilter.INSTANCE, collector); + Lucene.exists(context.docSearcher(), entry.getValue(), Queries.newNonNestedFilter(), collector); } else { Lucene.exists(context.docSearcher(), entry.getValue(), collector); } diff --git a/src/main/java/org/elasticsearch/percolator/QueryCollector.java b/src/main/java/org/elasticsearch/percolator/QueryCollector.java index f289e188167..e21e77612d6 100644 --- a/src/main/java/org/elasticsearch/percolator/QueryCollector.java +++ b/src/main/java/org/elasticsearch/percolator/QueryCollector.java @@ -19,6 +19,7 @@ package org.elasticsearch.percolator; import com.carrotsearch.hppc.FloatArrayList; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; @@ -30,13 +31,13 @@ import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; @@ -180,7 +181,7 @@ abstract class QueryCollector extends SimpleCollector { } if (isNestedDoc) { - Lucene.exists(searcher, query, NonNestedDocsFilter.INSTANCE, collector); + Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector); } else { Lucene.exists(searcher, query, collector); } @@ -239,7 +240,7 @@ abstract class QueryCollector extends SimpleCollector { // run the query try { if (isNestedDoc) { - Lucene.exists(searcher, query, NonNestedDocsFilter.INSTANCE, collector); + Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector); } else { Lucene.exists(searcher, query, collector); } @@ -311,7 +312,7 @@ abstract class QueryCollector extends SimpleCollector { context.hitContext().cache().clear(); } if (isNestedDoc) { - Lucene.exists(searcher, query, NonNestedDocsFilter.INSTANCE, collector); + Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector); } else { Lucene.exists(searcher, query, collector); } @@ -372,7 +373,7 @@ abstract class QueryCollector extends SimpleCollector { // run the query try { if (isNestedDoc) { - Lucene.exists(searcher, query, NonNestedDocsFilter.INSTANCE, collector); + Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector); } else { Lucene.exists(searcher, query, collector); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 36ec1438caa..7795cfe197d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -22,15 +22,14 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilterCachingPolicy; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.lucene.docset.DocIdSets; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -93,7 +92,7 @@ public class NestedAggregator extends SingleBucketAggregator { // aggs execution Filter parentFilterNotCached = findClosestNestedPath(parent()); if (parentFilterNotCached == null) { - parentFilterNotCached = NonNestedDocsFilter.INSTANCE; + parentFilterNotCached = Queries.newNonNestedFilter(); } parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(parentFilterNotCached); BitDocIdSet parentSet = parentFilter.getDocIdSet(ctx); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 4dbeec5898f..7466bec3b5b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -27,9 +27,9 @@ import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.lucene.docset.DocIdSets; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -55,7 +55,7 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { public ReverseNestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, metaData); if (objectMapper == null) { - parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE); + parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); } else { parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(objectMapper.nestedTypeFilter()); } diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index bf896023803..5e44f4c4914 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -34,17 +34,21 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.text.StringAndBytesText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.fieldvisitor.*; +import org.elasticsearch.index.fieldvisitor.AllFieldsVisitor; +import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; +import org.elasticsearch.index.fieldvisitor.FieldsVisitor; +import org.elasticsearch.index.fieldvisitor.JustUidFieldsVisitor; +import org.elasticsearch.index.fieldvisitor.UidAndSourceFieldsVisitor; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMappers; import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.SearchParseElement; @@ -62,7 +66,6 @@ import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHitField; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; @@ -210,7 +213,7 @@ public class FetchPhase implements SearchPhase { private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException { if (context.mapperService().hasNested()) { - BitDocIdSet nonNested = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE).getDocIdSet(subReaderContext); + BitDocIdSet nonNested = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()).getDocIdSet(subReaderContext); BitSet bits = nonNested.bits(); if (!bits.get(subDocId)) { return bits.nextSetBit(subDocId); @@ -390,7 +393,7 @@ public class FetchPhase implements SearchPhase { parentFilter = nestedParentObjectMapper.nestedTypeFilter(); } else { field = nestedObjectMapper.fullPath(); - parentFilter = NonNestedDocsFilter.INSTANCE; + parentFilter = Queries.newNonNestedFilter(); } BitDocIdSet parentBitSet = context.bitsetFilterCache().getBitDocIdSetFilter(parentFilter).getDocIdSet(subReaderContext); diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java index 557e4d5164a..6f36da8ee83 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java @@ -36,7 +36,6 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; @@ -50,7 +49,6 @@ import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.FilteredSearchContext; @@ -126,7 +124,7 @@ public final class InnerHitsContext { public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException { Filter rawParentFilter; if (parentObjectMapper == null) { - rawParentFilter = NonNestedDocsFilter.INSTANCE; + rawParentFilter = Queries.newNonNestedFilter(); } else { rawParentFilter = parentObjectMapper.nestedTypeFilter(); } diff --git a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index 047ba3fb0f1..e26a6ebc58b 100644 --- a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.rescore; import org.apache.lucene.index.Term; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; @@ -148,35 +147,35 @@ public final class QueryRescorer implements Rescorer { ContextIndexSearcher searcher = context.searcher(); if (sourceExplanation == null) { // this should not happen but just in case - return new ComplexExplanation(false, 0.0f, "nothing matched"); + return Explanation.noMatch("nothing matched"); } // TODO: this isn't right? I.e., we are incorrectly pretending all first pass hits were rescored? If the requested docID was // beyond the top rescoreContext.window() in the first pass hits, we don't rescore it now? Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId); float primaryWeight = rescore.queryWeight(); - ComplexExplanation prim = new ComplexExplanation(sourceExplanation.isMatch(), - sourceExplanation.getValue() * primaryWeight, - "product of:"); - prim.addDetail(sourceExplanation); - prim.addDetail(new Explanation(primaryWeight, "primaryWeight")); + + Explanation prim; + if (sourceExplanation.isMatch()) { + prim = Explanation.match( + sourceExplanation.getValue() * primaryWeight, + "product of:", sourceExplanation, Explanation.match(primaryWeight, "primaryWeight")); + } else { + prim = Explanation.noMatch("First pass did not match", sourceExplanation); + } // NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used. Maybe // we should add QueryRescorer.explainCombine to Lucene? if (rescoreExplain != null && rescoreExplain.isMatch()) { float secondaryWeight = rescore.rescoreQueryWeight(); - ComplexExplanation sec = new ComplexExplanation(rescoreExplain.isMatch(), + Explanation sec = Explanation.match( rescoreExplain.getValue() * secondaryWeight, - "product of:"); - sec.addDetail(rescoreExplain); - sec.addDetail(new Explanation(secondaryWeight, "secondaryWeight")); + "product of:", + rescoreExplain, Explanation.match(secondaryWeight, "secondaryWeight")); ScoreMode scoreMode = rescore.scoreMode(); - ComplexExplanation calcExpl = new ComplexExplanation(); - calcExpl.setDescription(scoreMode + " of:"); - calcExpl.addDetail(prim); - calcExpl.setMatch(prim.isMatch()); - calcExpl.addDetail(sec); - calcExpl.setValue(scoreMode.combine(prim.getValue(), sec.getValue())); - return calcExpl; + return Explanation.match( + scoreMode.combine(prim.getValue(), sec.getValue()), + scoreMode + " of:", + prim, sec); } else { return prim; } diff --git a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java index 4d090182813..7f3aaf63f49 100644 --- a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java @@ -21,7 +21,10 @@ package org.elasticsearch.search.sort; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.search.*; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitSet; import org.elasticsearch.ElasticsearchIllegalArgumentException; @@ -30,14 +33,18 @@ import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoDistance.FixedSourceDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.fielddata.*; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; +import org.elasticsearch.index.fielddata.MultiGeoPointValues; +import org.elasticsearch.index.fielddata.NumericDoubleValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.internal.SearchContext; @@ -157,7 +164,7 @@ public class GeoDistanceSortParser implements SortParser { final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE); + BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index 7e3ab76baa7..be2b0d0dbf6 100644 --- a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -27,16 +27,20 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.fielddata.*; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.fielddata.NumericDoubleValues; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.script.LeafSearchScript; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.SearchParseException; @@ -131,7 +135,7 @@ public class ScriptSortParser implements SortParser { // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource` final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE); + BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); diff --git a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index a003976342e..17eebafae09 100644 --- a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.sort; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; + import org.apache.lucene.search.Filter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -28,6 +29,7 @@ import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -36,7 +38,6 @@ import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchParseException; @@ -252,7 +253,7 @@ public class SortParseElement implements SearchParseElement { } final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { - BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE); + BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); diff --git a/src/test/java/org/elasticsearch/explain/ExplainActionTests.java b/src/test/java/org/elasticsearch/explain/ExplainActionTests.java index 17a91370195..c22d4470875 100644 --- a/src/test/java/org/elasticsearch/explain/ExplainActionTests.java +++ b/src/test/java/org/elasticsearch/explain/ExplainActionTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.explain; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.explain.ExplainResponse; @@ -275,7 +274,7 @@ public class ExplainActionTests extends ElasticsearchIntegrationTest { @Test public void streamExplainTest() throws Exception { - Explanation exp = new Explanation((float) 2.0, "some explanation"); + Explanation exp = Explanation.match(2f, "some explanation"); // write ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); @@ -289,8 +288,7 @@ public class ExplainActionTests extends ElasticsearchIntegrationTest { Explanation result = Lucene.readExplanation(esBuffer); assertThat(exp.toString(),equalTo(result.toString())); - exp = new ComplexExplanation(true, 2.0f, "some explanation"); - exp.addDetail(new Explanation(2.0f,"another explanation")); + exp = Explanation.match(2.0f, "some explanation", Explanation.match(2.0f,"another explanation")); // write complex outBuffer = new ByteArrayOutputStream(); diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java index 0fce3aa691a..4f7d62e1283 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java @@ -43,7 +43,6 @@ import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.engine.Engine; @@ -53,7 +52,6 @@ import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.TestSearchContext; @@ -95,7 +93,7 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTests { ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); - Query query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, 12, wrapWithBitSetFilter(NonNestedDocsFilter.INSTANCE)); + Query query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, 12, wrapWithBitSetFilter(Queries.newNonNestedFilter())); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java index 07938b53e3f..3f2d4413984 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java @@ -64,7 +64,6 @@ import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionBuilder; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.TestSearchContext; @@ -114,7 +113,7 @@ public class ChildrenQueryTests extends AbstractChildTests { int minChildren = random().nextInt(10); int maxChildren = scaledRandomIntBetween(minChildren, 10); Query query = new ChildrenQuery(parentChildIndexFieldData, "parent", "child", parentFilter, childQuery, scoreType, minChildren, - maxChildren, 12, wrapWithBitSetFilter(NonNestedDocsFilter.INSTANCE)); + maxChildren, 12, wrapWithBitSetFilter(Queries.newNonNestedFilter())); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/child/TopChildrenQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/TopChildrenQueryTests.java index 6def1d5a752..41750cff10b 100644 --- a/src/test/java/org/elasticsearch/index/search/child/TopChildrenQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/TopChildrenQueryTests.java @@ -24,9 +24,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.internal.SearchContext; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -56,7 +56,7 @@ public class TopChildrenQueryTests extends AbstractChildTests { ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)]; ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - Query query = new TopChildrenQuery(parentChildIndexFieldData, childQuery, "child", "parent", scoreType, 1, 1, wrapWithBitSetFilter(NonNestedDocsFilter.INSTANCE)); + Query query = new TopChildrenQuery(parentChildIndexFieldData, childQuery, "child", "parent", scoreType, 1, 1, wrapWithBitSetFilter(Queries.newNonNestedFilter())); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java index c7456fa3a3a..9ee0ecba47f 100644 --- a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java +++ b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java @@ -532,7 +532,7 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); Explanation explanation = searchResponse.getHits().hits()[0].explanation(); assertThat(explanation.getValue(), equalTo(2f)); - assertThat(explanation.toString(), equalTo("2.0 = (MATCH) sum of:\n 2.0 = (MATCH) Score based on child doc range from 0 to 1\n 0.0 = match on required clause, product of:\n 0.0 = # clause\n 0.0 = (MATCH) Match on id 2\n")); + assertThat(explanation.toString(), startsWith("2.0 = sum of:\n 2.0 = Score based on child doc range from 0 to 1\n")); // TODO: Enable when changes from BlockJoinQuery#explain are added to Lucene (Most likely version 4.2) // assertThat(explanation.getDetails().length, equalTo(2)); // assertThat(explanation.getDetails()[0].getValue(), equalTo(1f)); diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java index 98df4c4369e..0322b6cc8a7 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java @@ -813,7 +813,7 @@ public class TopHitsTests extends ElasticsearchIntegrationTest { // Can't explain nested hit with the main query, since both are in a different scopes, also the nested doc may not even have matched with the main query // If top_hits would have a query option then we can explain that query Explanation explanation = searchHit.explanation(); - assertThat(explanation.toString(), containsString("Not a match")); + assertFalse(explanation.isMatch()); // Returns the version of the root document. Nested docs don't have a separate version long version = searchHit.version(); diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java index b84716bf557..ebf55ac66c4 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java @@ -35,10 +35,10 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.compress.CompressedString; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BucketCollector; @@ -133,7 +133,7 @@ public class NestedAggregatorTest extends ElasticsearchSingleNodeTest { // We exclude root doc with uid type#2, this will trigger the bug if we don't reset the root doc when we process a new segment, because // root doc type#3 and root doc type#1 have the same segment docid BooleanQuery bq = new BooleanQuery(); - bq.add(NonNestedDocsFilter.INSTANCE, Occur.MUST); + bq.add(Queries.newNonNestedFilter(), Occur.MUST); bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), Occur.MUST_NOT); searcher.search(new ConstantScoreQuery(bq), collector); collector.postCollection(); diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index bbb4d01f96d..00aad548d64 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -852,7 +852,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max")) .get(); assertThat(explainResponse.isExists(), equalTo(true)); - assertThat(explainResponse.getExplanation().toString(), equalTo("1.0 = (MATCH) sum of:\n 1.0 = not implemented yet...\n 0.0 = match on required clause, product of:\n 0.0 = # clause\n 0.0 = (MATCH) Match on id 0\n")); + assertThat(explainResponse.getExplanation().toString(), equalTo("1.0 = sum of:\n 1.0 = not implemented yet...\n 0.0 = match on required clause, product of:\n 0.0 = # clause\n 0.0 = Match on id 0\n")); } List createDocBuilders() { diff --git a/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java b/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java index 60f80c62717..16e4c09d8be 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java @@ -108,11 +108,8 @@ public class ExplainableScriptTests extends ElasticsearchIntegrationTest { @Override public Explanation explain(Explanation subQueryScore) throws IOException { - Explanation exp = new Explanation((float) (runAsDouble()), "This script returned " + runAsDouble()); - Explanation scoreExp = new Explanation(subQueryScore.getValue(), "_score: "); - scoreExp.addDetail(subQueryScore); - exp.addDetail(scoreExp); - return exp; + Explanation scoreExp = Explanation.match(subQueryScore.getValue(), "_score: ", subQueryScore); + return Explanation.match((float) (runAsDouble()), "This script returned " + runAsDouble(), scoreExp); } @Override diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java index 15b82c20dbb..3224a5b3630 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.functionscore; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.search.SearchResponse; @@ -33,6 +32,8 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreModule; import org.elasticsearch.plugins.AbstractPlugin; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.junit.Test; @@ -43,8 +44,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; -import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import static org.hamcrest.Matchers.equalTo; /** @@ -142,9 +141,7 @@ public class FunctionScorePluginTests extends ElasticsearchIntegrationTest { @Override public Explanation explainFunction(String distanceString, double distanceVal, double scale) { - ComplexExplanation ce = new ComplexExplanation(); - ce.setDescription("" + distanceVal); - return ce; + return Explanation.match((float) distanceVal, "" + distanceVal); } @Override diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java index 5c7859a57d8..d65424db919 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java @@ -179,7 +179,7 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest { ).explain(true))).actionGet(); assertThat(responseWithWeights.getHits().getAt(0).getExplanation().toString(), - equalTo("6.0 = (MATCH) function score, product of:\n 1.0 = (MATCH) ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = (MATCH) Math.min of\n 6.0 = (MATCH) function score, score mode [multiply]\n 1.0 = (MATCH) function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 1.0 = (MATCH) Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = (MATCH) function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 2.0 = (MATCH) product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = (MATCH) function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 3.0 = (MATCH) product of:\n 1.0 = script score function, computed with script:\"_index['text_field']['value'].tf()\n 1.0 = _score: \n 1.0 = (MATCH) ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n") + equalTo("6.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = Math.min of\n 6.0 = function score, score mode [multiply]\n 1.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 1.0 = Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 2.0 = product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 3.0 = product of:\n 1.0 = script score function, computed with script:\"_index['text_field']['value'].tf()\n 1.0 = _score: \n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n") ); responseWithWeights = client().search( searchRequest().source( @@ -188,7 +188,7 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest { .add(weightFactorFunction(4.0f)) ).explain(true))).actionGet(); assertThat(responseWithWeights.getHits().getAt(0).getExplanation().toString(), - equalTo("4.0 = (MATCH) function score, product of:\n 1.0 = (MATCH) ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = (MATCH) Math.min of\n 4.0 = (MATCH) product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n") + equalTo("4.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = Math.min of\n 4.0 = product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n") ); } diff --git a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java index 7300331cab2..8a15549f0af 100644 --- a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java +++ b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java @@ -161,7 +161,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { assertThat(innerHits.getTotalHits(), equalTo(2l)); assertThat(innerHits.getHits().length, equalTo(1)); assertThat(innerHits.getAt(0).getHighlightFields().get("comments.message").getFragments()[0].string(), equalTo("fox eat quick")); - assertThat(innerHits.getAt(0).explanation().toString(), containsString("(MATCH) weight(comments.message:fox in")); + assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(comments.message:fox in")); assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat")); assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("eat")); } @@ -338,7 +338,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); assertThat(innerHits.getHits().length, equalTo(1)); assertThat(innerHits.getAt(0).getHighlightFields().get("message").getFragments()[0].string(), equalTo("fox eat quick")); - assertThat(innerHits.getAt(0).explanation().toString(), containsString("(MATCH) weight(message:fox")); + assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(message:fox")); assertThat(innerHits.getAt(0).getFields().get("message").getValue().toString(), equalTo("eat")); assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("eat")); } From b3ca94460c92183fac1583c04759a21efcd708f0 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 22 Apr 2015 17:41:46 +0200 Subject: [PATCH 061/236] [REST] Add more utilitis for source/body handling in RestAction --- .../elasticsearch/rest/BaseRestHandler.java | 6 ++++ .../indices/analyze/RestAnalyzeAction.java | 32 +++---------------- .../query/RestValidateQueryAction.java | 15 +++------ .../rest/action/count/RestCountAction.java | 15 +++------ .../RestDeleteByQueryAction.java | 15 +++------ .../rest/action/exists/RestExistsAction.java | 15 +++------ .../action/explain/RestExplainAction.java | 8 ++--- .../action/search/RestClearScrollAction.java | 6 ++-- .../rest/action/search/RestSearchAction.java | 16 +++------- .../action/search/RestSearchScrollAction.java | 28 +++------------- .../action/suggest/RestSuggestAction.java | 12 +++---- .../rest/action/support/RestActions.java | 22 +++++++++++++ .../termvectors/RestTermVectorsAction.java | 10 ++---- 13 files changed, 73 insertions(+), 127 deletions(-) diff --git a/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 9b83c2648b3..d3939bb8c60 100644 --- a/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -19,13 +19,19 @@ package org.elasticsearch.rest; +import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.*; import org.elasticsearch.client.Client; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.FilterClient; import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.action.support.RestActions; import java.util.Set; diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index 0d4dea78218..f5967596809 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.*; +import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestToXContentListener; import java.io.IOException; @@ -69,16 +70,16 @@ public class RestAnalyzeAction extends BaseRestHandler { analyzeRequest.tokenFilters(request.paramAsStringArray("token_filters", request.paramAsStringArray("filters", analyzeRequest.tokenFilters()))); analyzeRequest.charFilters(request.paramAsStringArray("char_filters", analyzeRequest.charFilters())); - if (request.hasContent() || request.hasParam("source")) { - XContentType type = contentType(request); + if (RestActions.hasBodyContent(request)) { + XContentType type = RestActions.guessBodyContentType(request); if (type == null) { if (text == null) { - text = bodyContent(request).toUtf8(); + text = RestActions.getRestContent(request).toUtf8(); analyzeRequest.text(text); } } else { // NOTE: if rest request with xcontent body has request parameters, the parameters does not override xcontent values - buildFromContent(bodyContent(request), analyzeRequest); + buildFromContent(RestActions.getRestContent(request), analyzeRequest); } } @@ -132,27 +133,4 @@ public class RestAnalyzeAction extends BaseRestHandler { throw new ElasticsearchIllegalArgumentException("Failed to parse request body", e); } } - - private XContentType contentType(final RestRequest request) { - if (request.hasContent()) { - return XContentFactory.xContentType(request.content()); - } else if (request.hasParam("source")) { - return XContentFactory.xContentType(request.param("source")); - } - throw new ElasticsearchIllegalArgumentException("Can't guess contentType neither source nor content available"); - } - - private BytesReference bodyContent(final RestRequest request) { - if (request.hasContent()) { - return request.content(); - } else if (request.hasParam("source")) { - return new BytesArray(request.param("source")); - } - throw new ElasticsearchIllegalArgumentException("Can't guess contentType neither source nor content available"); - } - - - - - } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java index 6d960dc3fe9..32a2d24e888 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java @@ -59,17 +59,12 @@ public class RestValidateQueryAction extends BaseRestHandler { ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); validateQueryRequest.listenerThreaded(false); validateQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, validateQueryRequest.indicesOptions())); - if (request.hasContent()) { - validateQueryRequest.source(request.content()); + if (RestActions.hasBodyContent(request)) { + validateQueryRequest.source(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - validateQueryRequest.source(source); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - validateQueryRequest.source(querySourceBuilder); - } + QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); + if (querySourceBuilder != null) { + validateQueryRequest.source(querySourceBuilder); } } validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); diff --git a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java index 82e11f75bea..091ab27e387 100644 --- a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java +++ b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java @@ -60,17 +60,12 @@ public class RestCountAction extends BaseRestHandler { CountRequest countRequest = new CountRequest(Strings.splitStringByCommaToArray(request.param("index"))); countRequest.indicesOptions(IndicesOptions.fromRequest(request, countRequest.indicesOptions())); countRequest.listenerThreaded(false); - if (request.hasContent()) { - countRequest.source(request.content()); + if (RestActions.hasBodyContent(request)) { + countRequest.source(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - countRequest.source(source); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - countRequest.source(querySourceBuilder); - } + QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); + if (querySourceBuilder != null) { + countRequest.source(querySourceBuilder); } } countRequest.routing(request.param("routing")); diff --git a/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java b/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java index ee9935d103a..da00073c733 100644 --- a/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java @@ -54,17 +54,12 @@ public class RestDeleteByQueryAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); deleteByQueryRequest.listenerThreaded(false); - if (request.hasContent()) { - deleteByQueryRequest.source(request.content()); + if (RestActions.hasBodyContent(request)) { + deleteByQueryRequest.source(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - deleteByQueryRequest.source(source); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - deleteByQueryRequest.source(querySourceBuilder); - } + QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); + if (querySourceBuilder != null) { + deleteByQueryRequest.source(querySourceBuilder); } } deleteByQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); diff --git a/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java b/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java index a10143c87b1..85f73f61ec4 100644 --- a/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java @@ -49,17 +49,12 @@ public class RestExistsAction extends BaseRestHandler { final ExistsRequest existsRequest = new ExistsRequest(Strings.splitStringByCommaToArray(request.param("index"))); existsRequest.indicesOptions(IndicesOptions.fromRequest(request, existsRequest.indicesOptions())); existsRequest.listenerThreaded(false); - if (request.hasContent()) { - existsRequest.source(request.content()); + if (RestActions.hasBodyContent(request)) { + existsRequest.source(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - existsRequest.source(source); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - existsRequest.source(querySourceBuilder); - } + QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); + if (querySourceBuilder != null) { + existsRequest.source(querySourceBuilder); } } existsRequest.routing(request.param("routing")); diff --git a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java index 708d1a4ad44..39fe00c663a 100644 --- a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java +++ b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java @@ -35,6 +35,7 @@ import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.rest.*; +import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestBuilderListener; import org.elasticsearch.search.fetch.source.FetchSourceContext; @@ -63,12 +64,9 @@ public class RestExplainAction extends BaseRestHandler { explainRequest.parent(request.param("parent")); explainRequest.routing(request.param("routing")); explainRequest.preference(request.param("preference")); - String sourceString = request.param("source"); String queryString = request.param("q"); - if (request.hasContent()) { - explainRequest.source(request.content()); - } else if (sourceString != null) { - explainRequest.source(new BytesArray(request.param("source"))); + if (RestActions.hasBodyContent(request)) { + explainRequest.source(RestActions.getRestContent(request)); } else if (queryString != null) { QueryStringQueryBuilder queryStringBuilder = QueryBuilders.queryStringQuery(queryString); queryStringBuilder.defaultField(request.param("df")); diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java index e7b41316db9..1dfff7cf4fe 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java @@ -57,15 +57,15 @@ public class RestClearScrollAction extends BaseRestHandler { String scrollIds = request.param("scroll_id"); ClearScrollRequest clearRequest = new ClearScrollRequest(); clearRequest.setScrollIds(Arrays.asList(splitScrollIds(scrollIds))); - if (request.hasContent()) { - XContentType type = XContentFactory.xContentType(request.content()); + if (RestActions.hasBodyContent(request)) { + XContentType type = RestActions.guessBodyContentType(request); if (type == null) { scrollIds = RestActions.getRestContent(request).toUtf8(); clearRequest.setScrollIds(Arrays.asList(splitScrollIds(scrollIds))); } else { // NOTE: if rest request with xcontent body has request parameters, these parameters does not override xcontent value clearRequest.setScrollIds(null); - buildFromContent(request.content(), clearRequest); + buildFromContent(RestActions.getRestContent(request), clearRequest); } } diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index a0e04a1af06..f578d3baa6a 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.exists.RestExistsAction; +import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestStatusToXContentListener; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -87,20 +88,11 @@ public class RestSearchAction extends BaseRestHandler { // get the content, and put it in the body // add content/source as template if template flag is set boolean isTemplateRequest = request.path().endsWith("/template"); - if (request.hasContent()) { + if (RestActions.hasBodyContent(request)) { if (isTemplateRequest) { - searchRequest.templateSource(request.content()); + searchRequest.templateSource(RestActions.getRestContent(request)); } else { - searchRequest.source(request.content()); - } - } else { - String source = request.param("source"); - if (source != null) { - if (isTemplateRequest) { - searchRequest.templateSource(source); - } else { - searchRequest.source(source); - } + searchRequest.source(RestActions.getRestContent(request)); } } diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index df363aadd74..98edaa2ccea 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -69,16 +69,16 @@ public class RestSearchScrollAction extends BaseRestHandler { searchScrollRequest.scroll(new Scroll(parseTimeValue(scroll, null))); } - if (request.hasContent() || request.hasParam("source")) { - XContentType type = contentType(request); + if (RestActions.hasBodyContent(request)) { + XContentType type = XContentFactory.xContentType(RestActions.getRestContent(request)); if (type == null) { if (scrollId == null) { - scrollId = bodyContent(request).toUtf8(); + scrollId = RestActions.getRestContent(request).toUtf8(); searchScrollRequest.scrollId(scrollId); } } else { // NOTE: if rest request with xcontent body has request parameters, these parameters override xcontent values - buildFromContent(bodyContent(request), searchScrollRequest); + buildFromContent(RestActions.getRestContent(request), searchScrollRequest); } } client.searchScroll(searchScrollRequest, new RestStatusToXContentListener(channel)); @@ -107,24 +107,4 @@ public class RestSearchScrollAction extends BaseRestHandler { throw new ElasticsearchIllegalArgumentException("Failed to parse request body", e); } } - - private XContentType contentType(final RestRequest request) { - if (request.hasContent()) { - return XContentFactory.xContentType(request.content()); - } else if (request.hasParam("source")) { - return XContentFactory.xContentType(request.param("source")); - } - throw new ElasticsearchIllegalArgumentException("Can't guess contentType neither source nor content available"); - } - - private BytesReference bodyContent(final RestRequest request) { - if (request.hasContent()) { - return request.content(); - } else if (request.hasParam("source")) { - return new BytesArray(request.param("source")); - } - throw new ElasticsearchIllegalArgumentException("Can't guess contentType neither source nor content available"); - } - - } diff --git a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java index 28b30efd893..eebea7973cd 100644 --- a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java +++ b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java @@ -38,6 +38,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestBuilderListener; import org.elasticsearch.search.suggest.Suggest; @@ -60,15 +61,10 @@ public class RestSuggestAction extends BaseRestHandler { SuggestRequest suggestRequest = new SuggestRequest(Strings.splitStringByCommaToArray(request.param("index"))); suggestRequest.indicesOptions(IndicesOptions.fromRequest(request, suggestRequest.indicesOptions())); suggestRequest.listenerThreaded(false); - if (request.hasContent()) { - suggestRequest.suggest(request.content()); + if (RestActions.hasBodyContent(request)) { + suggestRequest.suggest(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - suggestRequest.suggest(source); - } else { - throw new ElasticsearchIllegalArgumentException("no content or source provided to execute suggestion"); - } + throw new ElasticsearchIllegalArgumentException("no content or source provided to execute suggestion"); } suggestRequest.routing(request.param("routing")); suggestRequest.preference(request.param("preference")); diff --git a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java index 5de5396eb9f..38dd78bf403 100644 --- a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java +++ b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.rest.RestRequest; @@ -128,4 +130,24 @@ public class RestActions { return content; } + + /** + * guesses the content type from either payload or source parameter + * @param request Rest request + * @return rest content type or null if not applicable. + */ + public static XContentType guessBodyContentType(final RestRequest request) { + final BytesReference restContent = RestActions.getRestContent(request); + if (restContent == null) { + return null; + } + return XContentFactory.xContentType(restContent); + } + + /** + * Returns true if either payload or source parameter is present. Otherwise false + */ + public static boolean hasBodyContent(final RestRequest request) { + return request.hasContent() || request.hasParam("source"); + } } diff --git a/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java b/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java index f7e19304724..af81dfcd0a9 100644 --- a/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java @@ -65,15 +65,9 @@ public class RestTermVectorsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { TermVectorsRequest termVectorsRequest = new TermVectorsRequest(request.param("index"), request.param("type"), request.param("id")); - XContentParser parser = null; - if (request.hasContent()) { - try { - parser = XContentFactory.xContent(request.content()).createParser(request.content()); + if (RestActions.hasBodyContent(request)) { + try (XContentParser parser = XContentFactory.xContent(RestActions.guessBodyContentType(request)).createParser(RestActions.getRestContent(request))){ TermVectorsRequest.parseRequest(termVectorsRequest, parser); - } finally { - if (parser != null) { - parser.close(); - } } } readURIParameters(termVectorsRequest, request); From f6f649ce40cde7cb916aa690acecd12cc2108a01 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 22 Apr 2015 18:18:42 +0200 Subject: [PATCH 062/236] Tests: Fix function_score explanation expectations. --- .../search/functionscore/FunctionScoreTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java index d65424db919..5157c235349 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java @@ -179,7 +179,7 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest { ).explain(true))).actionGet(); assertThat(responseWithWeights.getHits().getAt(0).getExplanation().toString(), - equalTo("6.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = Math.min of\n 6.0 = function score, score mode [multiply]\n 1.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 1.0 = Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 2.0 = product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 3.0 = product of:\n 1.0 = script score function, computed with script:\"_index['text_field']['value'].tf()\n 1.0 = _score: \n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n") + equalTo("6.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = min of:\n 6.0 = function score, score mode [multiply]\n 1.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 1.0 = Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 2.0 = product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 3.0 = product of:\n 1.0 = script score function, computed with script:\"_index['text_field']['value'].tf()\n 1.0 = _score: \n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n") ); responseWithWeights = client().search( searchRequest().source( @@ -188,7 +188,7 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest { .add(weightFactorFunction(4.0f)) ).explain(true))).actionGet(); assertThat(responseWithWeights.getHits().getAt(0).getExplanation().toString(), - equalTo("4.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = Math.min of\n 4.0 = product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n") + equalTo("4.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = min of:\n 4.0 = product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n") ); } From a4f98e7400391be453a403170a3096c8850868a6 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Wed, 22 Apr 2015 11:53:19 -0600 Subject: [PATCH 063/236] [DOCS] Add example of setting disk threshold decider settings Fixes #10686 --- docs/reference/index-modules/allocation.asciidoc | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/reference/index-modules/allocation.asciidoc b/docs/reference/index-modules/allocation.asciidoc index 9ba0821b3e4..910858f7fcd 100644 --- a/docs/reference/index-modules/allocation.asciidoc +++ b/docs/reference/index-modules/allocation.asciidoc @@ -143,6 +143,21 @@ settings API. By default, Elasticsearch will retrieve information about the disk usage of the nodes every 30 seconds. This can also be changed by setting the `cluster.info.update.interval` setting. +An example of updating the low watermark to no more than 80% of the disk size, a +high watermark of at least 50 gigabytes free, and updating the information about +the cluster every minute: + +[source,js] +-------------------------------------------------- +curl -XPUT localhost:9200/_cluster/settings -d '{ + "transient" : { + "cluster.routing.allocation.disk.watermark.low" : "80%", + "cluster.routing.allocation.disk.watermark.high" : "50gb", + "cluster.info.update.interval" : "1m" + } +}' +-------------------------------------------------- + By default, Elasticsearch will take into account shards that are currently being relocated to the target node when computing a node's disk usage. This can be changed by setting the `cluster.routing.allocation.disk.include_relocations` From 2d547383397c8639a02b8dc0cd80f522c69ee620 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 21 Apr 2015 10:03:36 -0700 Subject: [PATCH 064/236] Tests: Add shortcut "all" to skip version ranges in rest tests This was suggested on #10656 as cleaner than " - " to indicate all versions should be skipped. closes #10702 --- .../test/cluster.put_settings/10_basic.yaml | 2 +- .../indices.put_settings/all_path_options.yaml | 2 +- rest-api-spec/test/update/85_fields_meta.yaml | 2 +- .../test/rest/section/SkipSection.java | 3 +++ .../test/rest/test/SkipSectionParserTests.java | 17 +++++++++++++++++ 5 files changed, 23 insertions(+), 3 deletions(-) diff --git a/rest-api-spec/test/cluster.put_settings/10_basic.yaml b/rest-api-spec/test/cluster.put_settings/10_basic.yaml index bb1256efecd..9955f4519d6 100644 --- a/rest-api-spec/test/cluster.put_settings/10_basic.yaml +++ b/rest-api-spec/test/cluster.put_settings/10_basic.yaml @@ -1,7 +1,7 @@ --- setup: - skip: - version: " - " + version: "all" reason: leaves transient metadata behind, need to fix it --- "Test put settings": diff --git a/rest-api-spec/test/indices.put_settings/all_path_options.yaml b/rest-api-spec/test/indices.put_settings/all_path_options.yaml index bd64d57ff17..07f1956f0fc 100644 --- a/rest-api-spec/test/indices.put_settings/all_path_options.yaml +++ b/rest-api-spec/test/indices.put_settings/all_path_options.yaml @@ -81,7 +81,7 @@ setup: --- "put settings in list of indices": - skip: - version: " - " + version: "all" reason: list of indices not implemented yet - do: indices.put_settings: diff --git a/rest-api-spec/test/update/85_fields_meta.yaml b/rest-api-spec/test/update/85_fields_meta.yaml index ab38d5c1315..d10ac83f46b 100644 --- a/rest-api-spec/test/update/85_fields_meta.yaml +++ b/rest-api-spec/test/update/85_fields_meta.yaml @@ -2,7 +2,7 @@ "Metadata Fields": - skip: - version: " - " + version: "all" reason: "Update doesn't return metadata fields, waiting for #3259" - do: diff --git a/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java b/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java index bf8fe82a2c1..e7ab4555776 100644 --- a/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java +++ b/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java @@ -97,6 +97,9 @@ public class SkipSection { if (versionRange == null) { return new Version[] { null, null }; } + if (versionRange.trim().equals("all")) { + return new Version[]{VersionUtils.getFirstVersion(), Version.CURRENT}; + } String[] skipVersions = versionRange.split("-"); if (skipVersions.length > 2) { throw new IllegalArgumentException("version range malformed: " + versionRange); diff --git a/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java index 5d940a10b56..1e71ae9181c 100644 --- a/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java +++ b/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java @@ -49,6 +49,23 @@ public class SkipSectionParserTests extends AbstractParserTests { assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); } + public void testParseSkipSectionAllVersions() throws Exception { + parser = YamlXContent.yamlXContent.createParser( + "version: \" all \"\n" + + "reason: Delete ignores the parent param" + ); + + SkipSectionParser skipSectionParser = new SkipSectionParser(); + + SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser)); + + assertThat(skipSection, notNullValue()); + assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); + assertThat(skipSection.getUpperVersion(), equalTo(Version.CURRENT)); + assertThat(skipSection.getFeatures().size(), equalTo(0)); + assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); + } + @Test public void testParseSkipSectionFeatureNoVersion() throws Exception { parser = YamlXContent.yamlXContent.createParser( From 8cd59151af9405bd2959712b40f459908465151f Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 22 Apr 2015 21:12:51 +0200 Subject: [PATCH 065/236] Use debug logging if no translog file is found --- .../java/org/elasticsearch/index/engine/InternalEngine.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index ae85e1b4203..7086ee6986b 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1214,7 +1214,7 @@ public class InternalEngine extends Engine { } } } catch (FileNotFoundException ex) { - logger.info("no translog file found for ID: " + translogId); + logger.debug("no translog file found for ID: " + translogId); } catch (TruncatedTranslogException e) { // file is empty or header has been half-written and should be ignored logger.trace("ignoring truncation exception, the translog is either empty or half-written", e); From 2627324ac2399c43a59956d67856e02abec6f5d2 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 20 Apr 2015 14:22:44 -0600 Subject: [PATCH 066/236] [ENGINE] Implement retries for ShadowEngine creation When using a filesystem that may have lag between an index being created on the primary and a on the replica, creation of the ShadowEngine can fail because there are no segments in the directory. In these situations, we retry during engine creation to wait until an index is present in the directory. The number wait delay is configurable, defaulting to waiting for 5 seconds from an index to become available. Resolves #10637 --- .../elasticsearch/common/lucene/Lucene.java | 29 ++++++++++ .../index/engine/ShadowEngine.java | 23 ++++++-- .../common/lucene/LuceneTest.java | 51 +++++++++++++++++ .../index/engine/ShadowEngineTests.java | 55 +++++++++++++++++++ 4 files changed, 154 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 78503ebb2b6..af21524aa50 100644 --- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -632,6 +632,35 @@ public class Lucene { return DirectoryReader.indexExists(directory); } + /** + * Wait for an index to exist for up to {@code timeLimitMillis}. Returns + * true if the index eventually exists, false if not. + * + * Will retry the directory every second for at least {@code timeLimitMillis} + */ + public static final boolean waitForIndex(final Directory directory, final long timeLimitMillis) + throws IOException { + final long DELAY = 1000; + long waited = 0; + try { + while (true) { + if (waited >= timeLimitMillis) { + break; + } + if (indexExists(directory)) { + return true; + } + Thread.sleep(DELAY); + waited += DELAY; + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return false; + } + // one more try after all retries + return indexExists(directory); + } + /** * Returns true iff the given exception or * one of it's causes is an instance of {@link CorruptIndexException}, diff --git a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 3d825fb77f3..6771b432176 100644 --- a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -28,8 +28,10 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.shard.IndexShardException; import java.io.IOException; import java.util.Arrays; @@ -55,6 +57,10 @@ import java.util.List; */ public class ShadowEngine extends Engine { + /** how long to wait for an index to exist */ + public final static String NONEXISTENT_INDEX_RETRY_WAIT = "index.shadow.wait_for_initial_commit"; + public final static TimeValue DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT = TimeValue.timeValueSeconds(5); + private volatile SearcherManager searcherManager; private volatile SegmentInfos lastCommittedSegmentInfos; @@ -62,15 +68,24 @@ public class ShadowEngine extends Engine { public ShadowEngine(EngineConfig engineConfig) { super(engineConfig); SearcherFactory searcherFactory = new EngineSearcherFactory(engineConfig); + final long nonexistentRetryTime = engineConfig.getIndexSettings() + .getAsTime(NONEXISTENT_INDEX_RETRY_WAIT, DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT) + .getMillis(); try { DirectoryReader reader = null; store.incRef(); boolean success = false; try { - reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId); - this.searcherManager = new SearcherManager(reader, searcherFactory); - this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - success = true; + if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) { + reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId); + this.searcherManager = new SearcherManager(reader, searcherFactory); + this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + success = true; + } else { + throw new IndexShardException(shardId, "failed to open a shadow engine after" + + nonexistentRetryTime + "ms, " + + "directory is not an index"); + } } catch (Throwable e) { logger.warn("failed to create new reader", e); throw e; diff --git a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java index e74f22cb6d8..0420f4b2966 100644 --- a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java +++ b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java @@ -32,6 +32,8 @@ import org.junit.Test; import java.io.IOException; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; /** * @@ -48,6 +50,55 @@ public class LuceneTest extends ElasticsearchTestCase { assertEquals(Lucene.VERSION, Version.LATEST); } + public void testWaitForIndex() throws Exception { + final MockDirectoryWrapper dir = newMockDirectory(); + + final AtomicBoolean succeeded = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + + // Create a shadow Engine, which will freak out because there is no + // index yet + Thread t = new Thread(new Runnable() { + @Override + public void run() { + try { + latch.await(); + if (Lucene.waitForIndex(dir, 5000)) { + succeeded.set(true); + } else { + fail("index should have eventually existed!"); + } + } catch (InterruptedException e) { + // ignore interruptions + } catch (Exception e) { + fail("should have been able to create the engine! " + e.getMessage()); + } + } + }); + t.start(); + + // count down latch + // now shadow engine should try to be created + latch.countDown(); + + dir.setEnableVirusScanner(false); + IndexWriterConfig iwc = newIndexWriterConfig(); + iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + iwc.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, iwc); + Document doc = new Document(); + doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + writer.addDocument(doc); + writer.commit(); + + t.join(); + + writer.close(); + dir.close(); + assertTrue("index should have eventually existed", succeeded.get()); + } + public void testCleanIndex() throws IOException { MockDirectoryWrapper dir = newMockDirectory(); dir.setEnableVirusScanner(false); diff --git a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index e2acb3e635d..87ea42c9dcf 100644 --- a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; @@ -73,6 +74,8 @@ import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS; import static org.hamcrest.Matchers.*; @@ -939,4 +942,56 @@ public class ShadowEngineTests extends ElasticsearchTestCase { assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); assertEquals(replicaEngine.config().getIndexConcurrency(), indexConcurrency); } + + @Test + public void testShadowEngineCreationRetry() throws Exception { + final Path srDir = createTempDir(); + final Store srStore = createStore(srDir); + Lucene.cleanLuceneIndex(srStore.directory()); + final Translog srTranslog = createTranslogReplica(); + + final AtomicBoolean succeeded = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + + // Create a shadow Engine, which will freak out because there is no + // index yet + Thread t = new Thread(new Runnable() { + @Override + public void run() { + try { + latch.await(); + } catch (InterruptedException e) { + // ignore interruptions + } + try (ShadowEngine srEngine = createShadowEngine(srStore, srTranslog)) { + succeeded.set(true); + } catch (Exception e) { + fail("should have been able to create the engine!"); + } + } + }); + t.start(); + + // count down latch + // now shadow engine should try to be created + latch.countDown(); + + // Create an InternalEngine, which creates the index so the shadow + // replica will handle it correctly + Store pStore = createStore(srDir); + Translog pTranslog = createTranslog(); + InternalEngine pEngine = createInternalEngine(pStore, pTranslog); + + // create a document + ParseContext.Document document = testDocumentWithTextField(); + document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE)); + ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null); + pEngine.create(new Engine.Create(null, newUid("1"), doc)); + pEngine.flush(true, true); + + t.join(); + assertTrue("ShadowEngine should have been able to be created", succeeded.get()); + // (shadow engine is already shut down in the try-with-resources) + IOUtils.close(srTranslog, srStore, pTranslog, pEngine, pStore); + } } From a37d3c02ec716dc306f452dfacaad99b1a9d9d72 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 20 Apr 2015 15:18:06 -0700 Subject: [PATCH 067/236] Scripting: Added a new script construct Added an initial script construct to unify the parameters typically passed to methods in the ScriptService. This changes the way several public methods are called in the ScriptService along with all the callers since they must wrap the parameters passed in into a script object. In the future, parsing parameters can also be moved into this construct along with ToXContent. closes #10649 --- .../action/update/UpdateHelper.java | 5 +- .../index/mapper/DocumentMapper.java | 3 +- .../index/query/ScriptFilterParser.java | 3 +- .../index/query/TemplateQueryParser.java | 3 +- .../script/ScriptScoreFunctionParser.java | 2 +- .../elasticsearch/script/CompiledScript.java | 24 +++-- .../java/org/elasticsearch/script/Script.java | 94 +++++++++++++++++++ .../elasticsearch/script/ScriptService.java | 65 +++++++------ .../elasticsearch/search/SearchService.java | 6 +- .../heuristics/ScriptHeuristic.java | 4 +- .../scripted/InternalScriptedMetric.java | 5 +- .../scripted/ScriptedMetricAggregator.java | 6 +- .../support/ValuesSourceParser.java | 2 +- .../script/ScriptFieldsParseElement.java | 2 +- .../search/sort/ScriptSortParser.java | 3 +- .../suggest/phrase/PhraseSuggestParser.java | 3 +- .../script/CustomScriptContextTests.java | 14 +-- .../script/NativeScriptTests.java | 4 +- .../script/ScriptServiceTests.java | 22 ++--- 19 files changed, 195 insertions(+), 75 deletions(-) create mode 100644 src/main/java/org/elasticsearch/script/Script.java diff --git a/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 4e4a2864f90..d00ee181b9e 100644 --- a/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.lookup.SourceLookup; @@ -94,7 +95,7 @@ public class UpdateHelper extends AbstractComponent { ctx.put("op", "create"); ctx.put("_source", upsertDoc); try { - ExecutableScript script = scriptService.executable(request.scriptLang, request.script, request.scriptType, ScriptContext.Standard.UPDATE, request.scriptParams); + ExecutableScript script = scriptService.executable(new Script(request.scriptLang, request.script, request.scriptType, request.scriptParams), ScriptContext.Standard.UPDATE); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... @@ -193,7 +194,7 @@ public class UpdateHelper extends AbstractComponent { ctx.put("_source", sourceAndContent.v2()); try { - ExecutableScript script = scriptService.executable(request.scriptLang, request.script, request.scriptType, ScriptContext.Standard.UPDATE, request.scriptParams); + ExecutableScript script = scriptService.executable(new Script(request.scriptLang, request.script, request.scriptType, request.scriptParams), ScriptContext.Standard.UPDATE); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 11d91411e73..380ce645a28 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -67,6 +67,7 @@ import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService.ScriptType; @@ -734,7 +735,7 @@ public class DocumentMapper implements ToXContent { public Map transformSourceAsMap(Map sourceAsMap) { try { // We use the ctx variable and the _source name to be consistent with the update api. - ExecutableScript executable = scriptService.executable(language, script, scriptType, ScriptContext.Standard.MAPPING, parameters); + ExecutableScript executable = scriptService.executable(new Script(language, script, scriptType, parameters), ScriptContext.Standard.MAPPING); Map ctx = new HashMap<>(1); ctx.put("_source", sourceAsMap); executable.setNextVar("ctx", ctx); diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java index 8ada496be08..b52b6c7544e 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.LeafSearchScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; @@ -135,7 +136,7 @@ public class ScriptFilterParser implements FilterParser { public ScriptFilter(String scriptLang, String script, ScriptService.ScriptType scriptType, Map params, ScriptService scriptService, SearchLookup searchLookup) { this.script = script; this.params = params; - this.searchScript = scriptService.search(searchLookup, scriptLang, script, scriptType, ScriptContext.Standard.SEARCH, newHashMap(params)); + this.searchScript = scriptService.search(searchLookup, new Script(scriptLang, script, scriptType, newHashMap(params)), ScriptContext.Standard.SEARCH); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java b/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java index 41bf930e4a8..32872f8f7a0 100644 --- a/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.mustache.MustacheScriptEngineService; @@ -77,7 +78,7 @@ public class TemplateQueryParser implements QueryParser { public Query parse(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); TemplateContext templateContext = parse(parser, PARAMS, parametersToTypes); - ExecutableScript executable = this.scriptService.executable(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), ScriptContext.Standard.SEARCH, templateContext.params()); + ExecutableScript executable = this.scriptService.executable(new Script(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), templateContext.params()), ScriptContext.Standard.SEARCH); BytesReference querySource = (BytesReference) executable.run(); diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java index f87962702b5..aaa9bec3fac 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java @@ -87,7 +87,7 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser { SearchScript searchScript; try { - searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptParameterParser.lang(), script, scriptType, ScriptContext.Standard.SEARCH, vars); + searchScript = parseContext.scriptService().search(parseContext.lookup(), new Script(scriptParameterParser.lang(), script, scriptType, vars), ScriptContext.Standard.SEARCH); return new ScriptScoreFunction(script, vars, searchScript); } catch (Exception e) { throw new QueryParsingException(parseContext.index(), NAMES[0] + " the script could not be loaded", e); diff --git a/src/main/java/org/elasticsearch/script/CompiledScript.java b/src/main/java/org/elasticsearch/script/CompiledScript.java index 54cca316871..9e3bfaf3f4c 100644 --- a/src/main/java/org/elasticsearch/script/CompiledScript.java +++ b/src/main/java/org/elasticsearch/script/CompiledScript.java @@ -20,23 +20,35 @@ package org.elasticsearch.script; /** - * + * CompiledScript holds all the parameters necessary to execute a previously compiled script. */ public class CompiledScript { - private final String type; - + private final String lang; private final Object compiled; - public CompiledScript(String type, Object compiled) { - this.type = type; + /** + * Constructor for CompiledScript. + * @param lang The language of the script to be executed. + * @param compiled The compiled script Object that is executable. + */ + public CompiledScript(String lang, Object compiled) { + this.lang = lang; this.compiled = compiled; } + /** + * Method to get the language. + * @return The language of the script to be executed. + */ public String lang() { - return type; + return lang; } + /** + * Method to get the compiled script object. + * @return The compiled script Object that is executable. + */ public Object compiled() { return compiled; } diff --git a/src/main/java/org/elasticsearch/script/Script.java b/src/main/java/org/elasticsearch/script/Script.java new file mode 100644 index 00000000000..4e5e2027f9b --- /dev/null +++ b/src/main/java/org/elasticsearch/script/Script.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script; + +import java.util.Map; + +import org.elasticsearch.ElasticsearchIllegalArgumentException; + +import static org.elasticsearch.script.ScriptService.ScriptType; + +/** + * Script holds all the parameters necessary to compile or find in cache and then execute a script. + */ +public class Script { + + private final String lang; + private final String script; + private final ScriptType type; + private final Map params; + + /** + * Constructor for Script. + * @param lang The language of the script to be compiled/executed. + * @param script The cache key of the script to be compiled/executed. For dynamic scripts this is the actual + * script source code. For indexed scripts this is the id used in the request. For on disk scripts + * this is the file name. + * @param type The type of script -- dynamic, indexed, or file. + * @param params The map of parameters the script will be executed with. + */ + public Script(String lang, String script, ScriptType type, Map params) { + if (script == null) { + throw new ElasticsearchIllegalArgumentException("The parameter script (String) must not be null in Script."); + } + if (type == null) { + throw new ElasticsearchIllegalArgumentException("The parameter type (ScriptType) must not be null in Script."); + } + + this.lang = lang; + this.script = script; + this.type = type; + this.params = params; + } + + /** + * Method for getting language. + * @return The language of the script to be compiled/executed. + */ + public String getLang() { + return lang; + } + + /** + * Method for getting the script. + * @return The cache key of the script to be compiled/executed. For dynamic scripts this is the actual + * script source code. For indexed scripts this is the id used in the request. For on disk scripts + * this is the file name. + */ + public String getScript() { + return script; + } + + /** + * Method for getting the type. + * @return The type of script -- dynamic, indexed, or file. + */ + public ScriptType getType() { + return type; + } + + /** + * Method for getting the parameters. + * @return The map of parameters the script will be executed with. + */ + public Map getParams() { + return params; + } +} diff --git a/src/main/java/org/elasticsearch/script/ScriptService.java b/src/main/java/org/elasticsearch/script/ScriptService.java index 3320dea795d..1eb6ff166bb 100644 --- a/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/src/main/java/org/elasticsearch/script/ScriptService.java @@ -35,13 +35,11 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -74,7 +72,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Locale; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; @@ -220,57 +217,67 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. */ - public CompiledScript compile(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { - assert script != null; - assert scriptType != null; - assert scriptContext != null; + public CompiledScript compile(Script script, ScriptContext scriptContext) { + if (script == null) { + throw new ElasticsearchIllegalArgumentException("The parameter script (Script) must not be null."); + } + if (scriptContext == null) { + throw new ElasticsearchIllegalArgumentException("The parameter scriptContext (ScriptContext) must not be null."); + } + + String lang = script.getLang(); if (lang == null) { lang = defaultLang; } ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); - if (canExecuteScript(lang, scriptEngineService, scriptType, scriptContext) == false) { - throw new ScriptException("scripts of type [" + scriptType + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled"); + if (canExecuteScript(lang, scriptEngineService, script.getType(), scriptContext) == false) { + throw new ScriptException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled"); } - return compileInternal(lang, script, scriptType); + return compileInternal(script); } /** * Compiles a script straight-away, or returns the previously compiled and cached script, without checking if it can be executed based on settings. */ - public CompiledScript compileInternal(String lang, final String scriptOrId, final ScriptType scriptType) { - assert scriptOrId != null; - assert scriptType != null; + public CompiledScript compileInternal(Script script) { + if (script == null) { + throw new ElasticsearchIllegalArgumentException("The parameter script (Script) must not be null."); + } + + String lang = script.getLang(); + if (lang == null) { lang = defaultLang; } if (logger.isTraceEnabled()) { - logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, scriptType, scriptOrId); + logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, script.getType(), script.getScript()); } ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); - CacheKey cacheKey = newCacheKey(scriptEngineService, scriptOrId); + CacheKey cacheKey = newCacheKey(scriptEngineService, script.getScript()); - if (scriptType == ScriptType.FILE) { + if (script.getType() == ScriptType.FILE) { CompiledScript compiled = staticCache.get(cacheKey); //On disk scripts will be loaded into the staticCache by the listener if (compiled == null) { - throw new ElasticsearchIllegalArgumentException("Unable to find on disk script " + scriptOrId); + throw new ElasticsearchIllegalArgumentException("Unable to find on disk script " + script.getScript()); } return compiled; } - String script = scriptOrId; - if (scriptType == ScriptType.INDEXED) { - final IndexedScript indexedScript = new IndexedScript(lang, scriptOrId); - script = getScriptFromIndex(indexedScript.lang, indexedScript.id); - cacheKey = newCacheKey(scriptEngineService, script); + String code = script.getScript(); + + if (script.getType() == ScriptType.INDEXED) { + final IndexedScript indexedScript = new IndexedScript(lang, script.getScript()); + code = getScriptFromIndex(indexedScript.lang, indexedScript.id); + cacheKey = newCacheKey(scriptEngineService, code); } CompiledScript compiled = cache.getIfPresent(cacheKey); if (compiled == null) { //Either an un-cached inline script or an indexed script - compiled = new CompiledScript(lang, scriptEngineService.compile(script)); + compiled = new CompiledScript(lang, scriptEngineService.compile(code)); //Since the cache key is the script content itself we don't need to //invalidate/check the cache if an indexed script changes. cache.put(cacheKey, compiled); @@ -320,7 +327,7 @@ public class ScriptService extends AbstractComponent implements Closeable { //we don't know yet what the script will be used for, but if all of the operations for this lang with //indexed scripts are disabled, it makes no sense to even compile it and cache it. if (isAnyScriptContextEnabled(scriptLang, getScriptEngineServiceForLang(scriptLang), ScriptType.INDEXED)) { - CompiledScript compiledScript = compileInternal(scriptLang, context.template(), ScriptType.INLINE); + CompiledScript compiledScript = compileInternal(new Script(scriptLang, context.template(), ScriptType.INLINE, null)); if (compiledScript == null) { throw new ElasticsearchIllegalArgumentException("Unable to parse [" + context.template() + "] lang [" + scriptLang + "] (ScriptService.compile returned null)"); @@ -390,8 +397,8 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided script */ - public ExecutableScript executable(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, Map vars) { - return executable(compile(lang, script, scriptType, scriptContext), vars); + public ExecutableScript executable(Script script, ScriptContext scriptContext) { + return executable(compile(script, scriptContext), script.getParams()); } /** @@ -404,9 +411,9 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided search script */ - public SearchScript search(SearchLookup lookup, String lang, String script, ScriptType scriptType, ScriptContext scriptContext, @Nullable Map vars) { - CompiledScript compiledScript = compile(lang, script, scriptType, scriptContext); - return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript.compiled(), lookup, vars); + public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { + CompiledScript compiledScript = compile(script, scriptContext); + return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript.compiled(), lookup, script.getParams()); } private boolean isAnyScriptContextEnabled(String lang, ScriptEngineService scriptEngineService, ScriptType scriptType) { diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java index 64762395811..d8d85deb0a3 100644 --- a/src/main/java/org/elasticsearch/search/SearchService.java +++ b/src/main/java/org/elasticsearch/search/SearchService.java @@ -71,9 +71,9 @@ import org.elasticsearch.indices.IndicesWarmer.TerminationHandle; import org.elasticsearch.indices.IndicesWarmer.WarmerContext; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptContextRegistry; import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.search.dfs.CachedDfSource; import org.elasticsearch.search.dfs.DfsPhase; @@ -653,7 +653,7 @@ public class SearchService extends AbstractLifecycleComponent { final ExecutableScript executable; if (hasLength(request.templateName())) { - executable = this.scriptService.executable(MustacheScriptEngineService.NAME, request.templateName(), request.templateType(), ScriptContext.Standard.SEARCH, request.templateParams()); + executable = this.scriptService.executable(new Script(MustacheScriptEngineService.NAME, request.templateName(), request.templateType(), request.templateParams()), ScriptContext.Standard.SEARCH); } else { if (!hasLength(request.templateSource())) { return; @@ -693,7 +693,7 @@ public class SearchService extends AbstractLifecycleComponent { if (!hasLength(templateContext.template())) { throw new ElasticsearchParseException("Template must have [template] field configured"); } - executable = this.scriptService.executable(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), ScriptContext.Standard.SEARCH, templateContext.params()); + executable = this.scriptService.executable(new Script(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), templateContext.params()), ScriptContext.Standard.SEARCH); } BytesReference processedQuery = (BytesReference) executable.run(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index c273a3f3cc1..559d02fc4de 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -83,7 +83,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { } public void initialize(InternalAggregation.ReduceContext context) { - script = context.scriptService().executable(scriptLang, scriptString, scriptType, ScriptContext.Standard.AGGS, params); + script = context.scriptService().executable(new Script(scriptLang, scriptString, scriptType, params), ScriptContext.Standard.AGGS); script.setNextVar("_subset_freq", subsetDfHolder); script.setNextVar("_subset_size", subsetSizeHolder); script.setNextVar("_superset_freq", supersetDfHolder); @@ -168,7 +168,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { } ExecutableScript searchScript; try { - searchScript = scriptService.executable(scriptLang, script, scriptType, ScriptContext.Standard.AGGS, params); + searchScript = scriptService.executable(new Script(scriptLang, script, scriptType, params), ScriptContext.Standard.AGGS); } catch (Exception e) { throw new ElasticsearchParseException("The script [" + script + "] could not be loaded", e); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index a2e03a3b460..877b632dbdb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.aggregations.AggregationStreams; @@ -98,8 +99,8 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement params = new HashMap<>(); } params.put("_aggs", aggregationObjects); - ExecutableScript script = reduceContext.scriptService().executable(firstAggregation.scriptLang, firstAggregation.reduceScript, - firstAggregation.scriptType, ScriptContext.Standard.AGGS, params); + ExecutableScript script = reduceContext.scriptService().executable(new Script(firstAggregation.scriptLang, firstAggregation.reduceScript, + firstAggregation.scriptType, params), ScriptContext.Standard.AGGS); aggregation = script.run(); } else { aggregation = aggregationObjects; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index 7d89b5994af..9d52242b7bf 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -74,11 +74,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator { } ScriptService scriptService = context.searchContext().scriptService(); if (initScript != null) { - scriptService.executable(scriptLang, initScript, initScriptType, ScriptContext.Standard.AGGS, this.params).run(); + scriptService.executable(new Script(scriptLang, initScript, initScriptType, this.params), ScriptContext.Standard.AGGS).run(); } - this.mapScript = scriptService.search(context.searchContext().lookup(), scriptLang, mapScript, mapScriptType, ScriptContext.Standard.AGGS, this.params); + this.mapScript = scriptService.search(context.searchContext().lookup(), new Script(scriptLang, mapScript, mapScriptType, this.params), ScriptContext.Standard.AGGS); if (combineScript != null) { - this.combineScript = scriptService.executable(scriptLang, combineScript, combineScriptType, ScriptContext.Standard.AGGS, this.params); + this.combineScript = scriptService.executable(new Script(scriptLang, combineScript, combineScriptType, this.params), ScriptContext.Standard.AGGS); } else { this.combineScript = null; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java index aaffbbbfa4b..37182685761 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java @@ -186,7 +186,7 @@ public class ValuesSourceParser { } private SearchScript createScript() { - return input.script == null ? null : context.scriptService().search(context.lookup(), input.lang, input.script, input.scriptType, ScriptContext.Standard.AGGS, input.params); + return input.script == null ? null : context.scriptService().search(context.lookup(), new Script(input.lang, input.script, input.scriptType, input.params), ScriptContext.Standard.AGGS); } private static ValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) { diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java index 9a38b4b6358..10614656f09 100644 --- a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java @@ -76,7 +76,7 @@ public class ScriptFieldsParseElement implements SearchParseElement { script = scriptValue.script(); scriptType = scriptValue.scriptType(); } - SearchScript searchScript = context.scriptService().search(context.lookup(), scriptParameterParser.lang(), script, scriptType, ScriptContext.Standard.SEARCH, params); + SearchScript searchScript = context.scriptService().search(context.lookup(), new Script(scriptParameterParser.lang(), script, scriptType, params), ScriptContext.Standard.SEARCH); context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, searchScript, ignoreException)); } } diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index be2b0d0dbf6..d0bfebe81f4 100644 --- a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparator import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.script.LeafSearchScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.SearchScript; @@ -122,7 +123,7 @@ public class ScriptSortParser implements SortParser { if (type == null) { throw new SearchParseException(context, "_script sorting requires setting the type of the script"); } - final SearchScript searchScript = context.scriptService().search(context.lookup(), scriptLang, script, scriptType, ScriptContext.Standard.SEARCH, params); + final SearchScript searchScript = context.scriptService().search(context.lookup(), new Script(scriptLang, script, scriptType, params), ScriptContext.Standard.SEARCH); if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]"); diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 22a797db93c..5fe964017d9 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.mustache.MustacheScriptEngineService; @@ -153,7 +154,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { if (suggestion.getCollateQueryScript() != null) { throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); } - CompiledScript compiledScript = suggester.scriptService().compile(MustacheScriptEngineService.NAME, templateNameOrTemplateContent, ScriptType.INLINE, ScriptContext.Standard.SEARCH); + CompiledScript compiledScript = suggester.scriptService().compile(new Script(MustacheScriptEngineService.NAME, templateNameOrTemplateContent, ScriptType.INLINE, null), ScriptContext.Standard.SEARCH); if ("query".equals(fieldName)) { suggestion.setCollateQueryScript(compiledScript); } else { diff --git a/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java b/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java index 76a9ff03e15..bbbf8b875dc 100644 --- a/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java +++ b/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java @@ -56,7 +56,7 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest { for (String lang : LANG_SET) { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { - scriptService.compile(lang, "test", scriptType, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op")); + scriptService.compile(new Script(lang, "test", scriptType, null), new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op")); fail("script compilation should have been rejected"); } catch(ScriptException e) { assertThat(e.getMessage(), containsString("scripts of type [" + scriptType + "], operation [" + PLUGIN_NAME + "_custom_globally_disabled_op] and lang [" + lang + "] are disabled")); @@ -65,20 +65,20 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest { } try { - scriptService.compile("expression", "1", ScriptService.ScriptType.INLINE, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op")); + scriptService.compile(new Script("expression", "1", ScriptService.ScriptType.INLINE, null), new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op")); fail("script compilation should have been rejected"); } catch(ScriptException e) { assertThat(e.getMessage(), containsString("scripts of type [inline], operation [" + PLUGIN_NAME + "_custom_exp_disabled_op] and lang [expression] are disabled")); } - CompiledScript compiledScript = scriptService.compile("expression", "1", ScriptService.ScriptType.INLINE, randomFrom(ScriptContext.Standard.values())); + CompiledScript compiledScript = scriptService.compile(new Script("expression", "1", ScriptService.ScriptType.INLINE, null), randomFrom(ScriptContext.Standard.values())); assertThat(compiledScript, notNullValue()); - compiledScript = scriptService.compile("mustache", "1", ScriptService.ScriptType.INLINE, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op")); + compiledScript = scriptService.compile(new Script("mustache", "1", ScriptService.ScriptType.INLINE, null), new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op")); assertThat(compiledScript, notNullValue()); for (String lang : LANG_SET) { - compiledScript = scriptService.compile(lang, "1", ScriptService.ScriptType.INLINE, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op")); + compiledScript = scriptService.compile(new Script(lang, "1", ScriptService.ScriptType.INLINE, null), new ScriptContext.Plugin(PLUGIN_NAME, "custom_op")); assertThat(compiledScript, notNullValue()); } } @@ -87,7 +87,7 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest { public void testCompileNonRegisteredPluginContext() { ScriptService scriptService = internalCluster().getInstance(ScriptService.class); try { - scriptService.compile(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), new ScriptContext.Plugin("test", "unknown")); + scriptService.compile(new Script(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), null), new ScriptContext.Plugin("test", "unknown")); fail("script compilation should have been rejected"); } catch(ElasticsearchIllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [test_unknown] not supported")); @@ -98,7 +98,7 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest { public void testCompileNonRegisteredScriptContext() { ScriptService scriptService = internalCluster().getInstance(ScriptService.class); try { - scriptService.compile(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), new ScriptContext() { + scriptService.compile(new Script(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), null), new ScriptContext() { @Override public String getKey() { return "test"; diff --git a/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/src/test/java/org/elasticsearch/script/NativeScriptTests.java index 1385fa7b54d..f08eb02e822 100644 --- a/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -59,7 +59,7 @@ public class NativeScriptTests extends ElasticsearchTestCase { ScriptService scriptService = injector.getInstance(ScriptService.class); - ExecutableScript executable = scriptService.executable(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, ScriptContext.Standard.SEARCH, null); + ExecutableScript executable = scriptService.executable(new Script(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, null), ScriptContext.Standard.SEARCH); assertThat(executable.run().toString(), equalTo("test")); terminate(injector.getInstance(ThreadPool.class)); } @@ -84,7 +84,7 @@ public class NativeScriptTests extends ElasticsearchTestCase { ScriptService scriptService = new ScriptService(settings, environment, scriptEngineServices, resourceWatcherService, new NodeSettingsService(settings), scriptContextRegistry); for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) { - assertThat(scriptService.compile(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, scriptContext), notNullValue()); + assertThat(scriptService.compile(new Script(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, null), scriptContext), notNullValue()); } } diff --git a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 0187d22c5d4..deff1b891c4 100644 --- a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -133,7 +133,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { resourceWatcherService.notifyNow(); logger.info("--> verify that file with extension was correctly processed"); - CompiledScript compiledScript = scriptService.compile("test", "test_script", ScriptType.FILE, ScriptContext.Standard.SEARCH); + CompiledScript compiledScript = scriptService.compile(new Script("test", "test_script", ScriptType.FILE, null), ScriptContext.Standard.SEARCH); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); logger.info("--> delete both files"); @@ -143,7 +143,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { logger.info("--> verify that file with extension was correctly removed"); try { - scriptService.compile("test", "test_script", ScriptType.FILE, ScriptContext.Standard.SEARCH); + scriptService.compile(new Script("test", "test_script", ScriptType.FILE, null), ScriptContext.Standard.SEARCH); fail("the script test_script should no longer exist"); } catch (ElasticsearchIllegalArgumentException ex) { assertThat(ex.getMessage(), containsString("Unable to find on disk script test_script")); @@ -154,17 +154,17 @@ public class ScriptServiceTests extends ElasticsearchTestCase { public void testScriptsSameNameDifferentLanguage() throws IOException { buildScriptService(ImmutableSettings.EMPTY); createFileScripts("groovy", "expression"); - CompiledScript groovyScript = scriptService.compile(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, randomFrom(scriptContexts)); + CompiledScript groovyScript = scriptService.compile(new Script(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, null), randomFrom(scriptContexts)); assertThat(groovyScript.lang(), equalTo(GroovyScriptEngineService.NAME)); - CompiledScript expressionScript = scriptService.compile(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, randomFrom(scriptContexts)); + CompiledScript expressionScript = scriptService.compile(new Script(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, null), randomFrom(scriptContexts)); assertThat(expressionScript.lang(), equalTo(ExpressionScriptEngineService.NAME)); } @Test public void testInlineScriptCompiledOnceMultipleLangAcronyms() throws IOException { buildScriptService(ImmutableSettings.EMPTY); - CompiledScript compiledScript1 = scriptService.compile("test", "script", ScriptType.INLINE, randomFrom(scriptContexts)); - CompiledScript compiledScript2 = scriptService.compile("test2", "script", ScriptType.INLINE, randomFrom(scriptContexts)); + CompiledScript compiledScript1 = scriptService.compile(new Script("test", "script", ScriptType.INLINE, null), randomFrom(scriptContexts)); + CompiledScript compiledScript2 = scriptService.compile(new Script("test2", "script", ScriptType.INLINE, null), randomFrom(scriptContexts)); assertThat(compiledScript1, sameInstance(compiledScript2)); } @@ -172,8 +172,8 @@ public class ScriptServiceTests extends ElasticsearchTestCase { public void testFileScriptCompiledOnceMultipleLangAcronyms() throws IOException { buildScriptService(ImmutableSettings.EMPTY); createFileScripts("test"); - CompiledScript compiledScript1 = scriptService.compile("test", "file_script", ScriptType.FILE, randomFrom(scriptContexts)); - CompiledScript compiledScript2 = scriptService.compile("test2", "file_script", ScriptType.FILE, randomFrom(scriptContexts)); + CompiledScript compiledScript1 = scriptService.compile(new Script("test", "file_script", ScriptType.FILE, null), randomFrom(scriptContexts)); + CompiledScript compiledScript2 = scriptService.compile(new Script("test2", "file_script", ScriptType.FILE, null), randomFrom(scriptContexts)); assertThat(compiledScript1, sameInstance(compiledScript2)); } @@ -350,7 +350,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { for (ScriptEngineService scriptEngineService : scriptEngineServices) { for (String type : scriptEngineService.types()) { try { - scriptService.compile(type, "test", randomFrom(ScriptType.values()), new ScriptContext.Plugin(pluginName, unknownContext)); + scriptService.compile(new Script(type, "test", randomFrom(ScriptType.values()), null), new ScriptContext.Plugin(pluginName, unknownContext)); fail("script compilation should have been rejected"); } catch(ElasticsearchIllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); @@ -369,7 +369,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { try { - scriptService.compile(lang, script, scriptType, scriptContext); + scriptService.compile(new Script(lang, script, scriptType, null), scriptContext); fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]"); } catch(ScriptException e) { //all good @@ -377,7 +377,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { } private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { - assertThat(scriptService.compile(lang, script, scriptType, scriptContext), notNullValue()); + assertThat(scriptService.compile(new Script(lang, script, scriptType, null), scriptContext), notNullValue()); } public static class TestEngineService implements ScriptEngineService { From 30177887b155a4bcb44cf1ec257fd1ae81028661 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 9 Apr 2015 15:02:01 -0400 Subject: [PATCH 068/236] Add prediction capability to MovAvgReducer This commit adds the ability for moving average models to output a "prediction" based on the current moving average model. For simple, linear and single, this prediction is simply converges on the moving average's mean at the last point, leading to a straight line. For double, this will predict in the direction of the linear trend (either globally or locally, depending on beta). Also adds some more tests. Closes #10545 --- .../reducers/ReducerBuilders.java | 2 +- .../reducers/movavg/MovAvgBuilder.java | 17 + .../reducers/movavg/MovAvgParser.java | 17 +- .../reducers/movavg/MovAvgReducer.java | 62 +- .../movavg/models/DoubleExpModel.java | 28 +- .../reducers/movavg/models/MovAvgModel.java | 53 +- .../aggregations/reducers/MovAvgTests.java | 502 -------- .../reducers/moving/avg/MovAvgTests.java | 1018 +++++++++++++++++ .../reducers/moving/avg/MovAvgUnitTests.java | 297 +++++ 9 files changed, 1477 insertions(+), 519 deletions(-) delete mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java create mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java create mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java index 3f45964153b..ba6d3ebe7c2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java @@ -36,7 +36,7 @@ public final class ReducerBuilders { return new MaxBucketBuilder(name); } - public static final MovAvgBuilder smooth(String name) { + public static final MovAvgBuilder movingAvg(String name) { return new MovAvgBuilder(name); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java index 9790604197d..5fba23957e9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java @@ -36,6 +36,7 @@ public class MovAvgBuilder extends ReducerBuilder { private GapPolicy gapPolicy; private MovAvgModelBuilder modelBuilder; private Integer window; + private Integer predict; public MovAvgBuilder(String name) { super(name, MovAvgReducer.TYPE.name()); @@ -81,6 +82,19 @@ public class MovAvgBuilder extends ReducerBuilder { return this; } + /** + * Sets the number of predictions that should be returned. Each prediction will be spaced at + * the intervals specified in the histogram. E.g "predict: 2" will return two new buckets at the + * end of the histogram with the predicted values. + * + * @param numPredictions Number of predictions to make + * @return Returns the builder to continue chaining + */ + public MovAvgBuilder predict(int numPredictions) { + this.predict = numPredictions; + return this; + } + @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { @@ -96,6 +110,9 @@ public class MovAvgBuilder extends ReducerBuilder { if (window != null) { builder.field(MovAvgParser.WINDOW.getPreferredName(), window); } + if (predict != null) { + builder.field(MovAvgParser.PREDICT.getPreferredName(), predict); + } return builder; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java index 3f241a67b3a..c1cdadf91ea 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java @@ -46,6 +46,7 @@ public class MovAvgParser implements Reducer.Parser { public static final ParseField MODEL = new ParseField("model"); public static final ParseField WINDOW = new ParseField("window"); public static final ParseField SETTINGS = new ParseField("settings"); + public static final ParseField PREDICT = new ParseField("predict"); private final MovAvgModelParserMapper movAvgModelParserMapper; @@ -65,10 +66,12 @@ public class MovAvgParser implements Reducer.Parser { String currentFieldName = null; String[] bucketsPaths = null; String format = null; + GapPolicy gapPolicy = GapPolicy.IGNORE; int window = 5; Map settings = null; String model = "simple"; + int predict = 0; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -76,6 +79,16 @@ public class MovAvgParser implements Reducer.Parser { } else if (token == XContentParser.Token.VALUE_NUMBER) { if (WINDOW.match(currentFieldName)) { window = parser.intValue(); + if (window <= 0) { + throw new SearchParseException(context, "[" + currentFieldName + "] value must be a positive, " + + "non-zero integer. Value supplied was [" + predict + "] in [" + reducerName + "]."); + } + } else if (PREDICT.match(currentFieldName)) { + predict = parser.intValue(); + if (predict <= 0) { + throw new SearchParseException(context, "[" + currentFieldName + "] value must be a positive, " + + "non-zero integer. Value supplied was [" + predict + "] in [" + reducerName + "]."); + } } else { throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); @@ -119,7 +132,7 @@ public class MovAvgParser implements Reducer.Parser { if (bucketsPaths == null) { throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() - + "] for smooth aggregation [" + reducerName + "]"); + + "] for movingAvg aggregation [" + reducerName + "]"); } ValueFormatter formatter = null; @@ -135,7 +148,7 @@ public class MovAvgParser implements Reducer.Parser { MovAvgModel movAvgModel = modelParser.parse(settings); - return new MovAvgReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy, window, movAvgModel); + return new MovAvgReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy, window, predict, movAvgModel); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java index 20baa1706f1..4bd2ff4c50a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java @@ -27,12 +27,9 @@ import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.*; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.Type; -import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; @@ -44,6 +41,7 @@ import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModel; import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelStreams; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; +import org.joda.time.DateTime; import java.io.IOException; import java.util.ArrayList; @@ -80,17 +78,19 @@ public class MovAvgReducer extends Reducer { private GapPolicy gapPolicy; private int window; private MovAvgModel model; + private int predict; public MovAvgReducer() { } public MovAvgReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, - int window, MovAvgModel model, Map metadata) { + int window, int predict, MovAvgModel model, Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; this.gapPolicy = gapPolicy; this.window = window; this.model = model; + this.predict = predict; } @Override @@ -107,8 +107,14 @@ public class MovAvgReducer extends Reducer { List newBuckets = new ArrayList<>(); EvictingQueue values = EvictingQueue.create(this.window); + long lastKey = 0; + long interval = Long.MAX_VALUE; + Object currentKey; + for (InternalHistogram.Bucket bucket : buckets) { Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); + currentKey = bucket.getKey(); + if (thisBucketValue != null) { values.offer(thisBucketValue); @@ -117,14 +123,46 @@ public class MovAvgReducer extends Reducer { List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList(), metaData())); - InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( + InternalHistogram.Bucket newBucket = factory.createBucket(currentKey, bucket.getDocCount(), new InternalAggregations( aggs), bucket.getKeyed(), bucket.getFormatter()); newBuckets.add(newBucket); + } else { newBuckets.add(bucket); } + + if (predict > 0) { + if (currentKey instanceof Number) { + interval = Math.min(interval, ((Number) bucket.getKey()).longValue() - lastKey); + lastKey = ((Number) bucket.getKey()).longValue(); + } else if (currentKey instanceof DateTime) { + interval = Math.min(interval, ((DateTime) bucket.getKey()).getMillis() - lastKey); + lastKey = ((DateTime) bucket.getKey()).getMillis(); + } else { + throw new AggregationExecutionException("Expected key of type Number or DateTime but got [" + currentKey + "]"); + } + } + } - //return factory.create(histo.getName(), newBuckets, histo); + + + if (buckets.size() > 0 && predict > 0) { + + boolean keyed; + ValueFormatter formatter; + keyed = buckets.get(0).getKeyed(); + formatter = buckets.get(0).getFormatter(); + + double[] predictions = model.predict(values, predict); + for (int i = 0; i < predictions.length; i++) { + List aggs = new ArrayList<>(); + aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList(), metaData())); + InternalHistogram.Bucket newBucket = factory.createBucket(lastKey + (interval * (i + 1)), 0, new InternalAggregations( + aggs), keyed, formatter); + newBuckets.add(newBucket); + } + } + return factory.create(newBuckets, histo); } @@ -133,7 +171,9 @@ public class MovAvgReducer extends Reducer { formatter = ValueFormatterStreams.readOptional(in); gapPolicy = GapPolicy.readFrom(in); window = in.readVInt(); + predict = in.readVInt(); model = MovAvgModelStreams.read(in); + } @Override @@ -141,7 +181,9 @@ public class MovAvgReducer extends Reducer { ValueFormatterStreams.writeOptional(formatter, out); gapPolicy.writeTo(out); out.writeVInt(window); + out.writeVInt(predict); model.writeTo(out); + } public static class Factory extends ReducerFactory { @@ -150,19 +192,21 @@ public class MovAvgReducer extends Reducer { private GapPolicy gapPolicy; private int window; private MovAvgModel model; + private int predict; public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, - int window, MovAvgModel model) { + int window, int predict, MovAvgModel model) { super(name, TYPE.name(), bucketsPaths); this.formatter = formatter; this.gapPolicy = gapPolicy; this.window = window; this.model = model; + this.predict = predict; } @Override protected Reducer createInternal(Map metaData) throws IOException { - return new MovAvgReducer(name, bucketsPaths, formatter, gapPolicy, window, model, metaData); + return new MovAvgReducer(name, bucketsPaths, formatter, gapPolicy, window, predict, model, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java index 907c23fd213..7d32989cda1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java @@ -53,10 +53,25 @@ public class DoubleExpModel extends MovAvgModel { this.beta = beta; } + /** + * Predicts the next `n` values in the series, using the smoothing model to generate new values. + * Unlike the other moving averages, double-exp has forecasting/prediction built into the algorithm. + * Prediction is more than simply adding the next prediction to the window and repeating. Double-exp + * will extrapolate into the future by applying the trend information to the smoothed data. + * + * @param values Collection of numerics to movingAvg, usually windowed + * @param numPredictions Number of newly generated predictions to return + * @param Type of numeric + * @return Returns an array of doubles, since most smoothing methods operate on floating points + */ + @Override + public double[] predict(Collection values, int numPredictions) { + return next(values, numPredictions); + } @Override public double next(Collection values) { - return next(values, 1).get(0); + return next(values, 1)[0]; } /** @@ -68,7 +83,12 @@ public class DoubleExpModel extends MovAvgModel { * @param Type T extending Number * @return Returns a Double containing the moving avg for the window */ - public List next(Collection values, int numForecasts) { + public double[] next(Collection values, int numForecasts) { + + if (values.size() == 0) { + return emptyPredictions(numForecasts); + } + // Smoothed value double s = 0; double last_s = 0; @@ -97,9 +117,9 @@ public class DoubleExpModel extends MovAvgModel { last_b = b; } - List forecastValues = new ArrayList<>(numForecasts); + double[] forecastValues = new double[numForecasts]; for (int i = 0; i < numForecasts; i++) { - forecastValues.add(s + (i * b)); + forecastValues[i] = s + (i * b); } return forecastValues; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java index 84f7832f893..d798887c836 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java @@ -19,6 +19,8 @@ package org.elasticsearch.search.aggregations.reducers.movavg.models; +import com.google.common.collect.EvictingQueue; +import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -29,12 +31,61 @@ public abstract class MovAvgModel { /** * Returns the next value in the series, according to the underlying smoothing model * - * @param values Collection of numerics to smooth, usually windowed + * @param values Collection of numerics to movingAvg, usually windowed * @param Type of numeric * @return Returns a double, since most smoothing methods operate on floating points */ public abstract double next(Collection values); + /** + * Predicts the next `n` values in the series, using the smoothing model to generate new values. + * Default prediction mode is to simply continuing calling next() and adding the + * predicted value back into the windowed buffer. + * + * @param values Collection of numerics to movingAvg, usually windowed + * @param numPredictions Number of newly generated predictions to return + * @param Type of numeric + * @return Returns an array of doubles, since most smoothing methods operate on floating points + */ + public double[] predict(Collection values, int numPredictions) { + double[] predictions = new double[numPredictions]; + + // If there are no values, we can't do anything. Return an array of NaNs. + if (values.size() == 0) { + return emptyPredictions(numPredictions); + } + + // special case for one prediction, avoids allocation + if (numPredictions < 1) { + throw new ElasticsearchIllegalArgumentException("numPredictions may not be less than 1."); + } else if (numPredictions == 1){ + predictions[0] = next(values); + return predictions; + } + + // nocommit + // I don't like that it creates a new queue here + // The alternative to this is to just use `values` directly, but that would "consume" values + // and potentially change state elsewhere. Maybe ok? + Collection predictionBuffer = EvictingQueue.create(values.size()); + predictionBuffer.addAll(values); + + for (int i = 0; i < numPredictions; i++) { + predictions[i] = next(predictionBuffer); + + // Add the last value to the buffer, so we can keep predicting + predictionBuffer.add(predictions[i]); + } + + return predictions; + } + + protected double[] emptyPredictions(int numPredictions) { + double[] predictions = new double[numPredictions]; + Arrays.fill(predictions, Double.NaN); + return predictions; + } + /** * Write the model to the output stream * diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java deleted file mode 100644 index 4f0e3c0d1cf..00000000000 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/MovAvgTests.java +++ /dev/null @@ -1,502 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.reducers; - - -import com.google.common.collect.EvictingQueue; - -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; -import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; -import org.elasticsearch.search.aggregations.reducers.movavg.models.DoubleExpModel; -import org.elasticsearch.search.aggregations.reducers.movavg.models.LinearModel; -import org.elasticsearch.search.aggregations.reducers.movavg.models.SimpleModel; -import org.elasticsearch.search.aggregations.reducers.movavg.models.SingleExpModel; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.smooth; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.core.IsNull.notNullValue; - -@ElasticsearchIntegrationTest.SuiteScopeTest -public class MovAvgTests extends ElasticsearchIntegrationTest { - - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - private static final String SINGLE_VALUED_VALUE_FIELD_NAME = "v_value"; - - static int interval; - static int numValueBuckets; - static int numFilledValueBuckets; - static int windowSize; - static BucketHelpers.GapPolicy gapPolicy; - - static long[] docCounts; - static long[] valueCounts; - static Double[] simpleMovAvgCounts; - static Double[] linearMovAvgCounts; - static Double[] singleExpMovAvgCounts; - static Double[] doubleExpMovAvgCounts; - - static Double[] simpleMovAvgValueCounts; - static Double[] linearMovAvgValueCounts; - static Double[] singleExpMovAvgValueCounts; - static Double[] doubleExpMovAvgValueCounts; - - @Override - public void setupSuiteScopeCluster() throws Exception { - createIndex("idx"); - createIndex("idx_unmapped"); - - interval = 5; - numValueBuckets = randomIntBetween(6, 80); - numFilledValueBuckets = numValueBuckets; - windowSize = randomIntBetween(3,10); - gapPolicy = BucketHelpers.GapPolicy.INSERT_ZEROS; // TODO randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; - - docCounts = new long[numValueBuckets]; - valueCounts = new long[numValueBuckets]; - for (int i = 0; i < numValueBuckets; i++) { - docCounts[i] = randomIntBetween(0, 20); - valueCounts[i] = randomIntBetween(1,20); //this will be used as a constant for all values within a bucket - } - - this.setupSimple(); - this.setupLinear(); - this.setupSingle(); - this.setupDouble(); - - - List builders = new ArrayList<>(); - for (int i = 0; i < numValueBuckets; i++) { - for (int docs = 0; docs < docCounts[i]; docs++) { - builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, i * interval) - .field(SINGLE_VALUED_VALUE_FIELD_NAME, 1).endObject())); - } - } - - indexRandom(true, builders); - ensureSearchable(); - } - - private void setupSimple() { - simpleMovAvgCounts = new Double[numValueBuckets]; - EvictingQueue window = EvictingQueue.create(windowSize); - for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - window.offer(thisValue); - - double movAvg = 0; - for (double value : window) { - movAvg += value; - } - movAvg /= window.size(); - - simpleMovAvgCounts[i] = movAvg; - } - - window.clear(); - simpleMovAvgValueCounts = new Double[numValueBuckets]; - for (int i = 0; i < numValueBuckets; i++) { - window.offer((double)docCounts[i]); - - double movAvg = 0; - for (double value : window) { - movAvg += value; - } - movAvg /= window.size(); - - simpleMovAvgValueCounts[i] = movAvg; - - } - - } - - private void setupLinear() { - EvictingQueue window = EvictingQueue.create(windowSize); - linearMovAvgCounts = new Double[numValueBuckets]; - window.clear(); - for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - if (thisValue == -1) { - thisValue = 0; - } - window.offer(thisValue); - - double avg = 0; - long totalWeight = 1; - long current = 1; - - for (double value : window) { - avg += value * current; - totalWeight += current; - current += 1; - } - linearMovAvgCounts[i] = avg / totalWeight; - } - - window.clear(); - linearMovAvgValueCounts = new Double[numValueBuckets]; - - for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - window.offer(thisValue); - - double avg = 0; - long totalWeight = 1; - long current = 1; - - for (double value : window) { - avg += value * current; - totalWeight += current; - current += 1; - } - linearMovAvgValueCounts[i] = avg / totalWeight; - } - } - - private void setupSingle() { - EvictingQueue window = EvictingQueue.create(windowSize); - singleExpMovAvgCounts = new Double[numValueBuckets]; - for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - if (thisValue == -1) { - thisValue = 0; - } - window.offer(thisValue); - - double avg = 0; - double alpha = 0.5; - boolean first = true; - - for (double value : window) { - if (first) { - avg = value; - first = false; - } else { - avg = (value * alpha) + (avg * (1 - alpha)); - } - } - singleExpMovAvgCounts[i] = avg ; - } - - singleExpMovAvgValueCounts = new Double[numValueBuckets]; - window.clear(); - - for (int i = 0; i < numValueBuckets; i++) { - window.offer((double)docCounts[i]); - - double avg = 0; - double alpha = 0.5; - boolean first = true; - - for (double value : window) { - if (first) { - avg = value; - first = false; - } else { - avg = (value * alpha) + (avg * (1 - alpha)); - } - } - singleExpMovAvgCounts[i] = avg ; - } - - } - - private void setupDouble() { - EvictingQueue window = EvictingQueue.create(windowSize); - doubleExpMovAvgCounts = new Double[numValueBuckets]; - - for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - if (thisValue == -1) { - thisValue = 0; - } - window.offer(thisValue); - - double s = 0; - double last_s = 0; - - // Trend value - double b = 0; - double last_b = 0; - - double alpha = 0.5; - double beta = 0.5; - int counter = 0; - - double last; - for (double value : window) { - last = value; - if (counter == 1) { - s = value; - b = value - last; - } else { - s = alpha * value + (1.0d - alpha) * (last_s + last_b); - b = beta * (s - last_s) + (1 - beta) * last_b; - } - - counter += 1; - last_s = s; - last_b = b; - } - - doubleExpMovAvgCounts[i] = s + (0 * b) ; - } - - doubleExpMovAvgValueCounts = new Double[numValueBuckets]; - window.clear(); - - for (int i = 0; i < numValueBuckets; i++) { - window.offer((double)docCounts[i]); - - double s = 0; - double last_s = 0; - - // Trend value - double b = 0; - double last_b = 0; - - double alpha = 0.5; - double beta = 0.5; - int counter = 0; - - double last; - for (double value : window) { - last = value; - if (counter == 1) { - s = value; - b = value - last; - } else { - s = alpha * value + (1.0d - alpha) * (last_s + last_b); - b = beta * (s - last_s) + (1 - beta) * last_b; - } - - counter += 1; - last_s = s; - last_b = b; - } - - doubleExpMovAvgValueCounts[i] = s + (0 * b) ; - } - } - - /** - * test simple moving average on single value field - */ - @Test - public void simpleSingleValuedField() { - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(smooth("smooth") - .window(windowSize) - .modelBuilder(new SimpleModel.SimpleModelBuilder()) - .gapPolicy(gapPolicy) - .setBucketsPaths("_count")) - .subAggregation(smooth("movavg_values") - .window(windowSize) - .modelBuilder(new SimpleModel.SimpleModelBuilder()) - .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) - ).execute().actionGet(); - - assertSearchResponse(response); - - InternalHistogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("smooth"); - assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(simpleMovAvgCounts[i])); - - SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); - assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(simpleMovAvgCounts[i])); - } - } - - /** - * test linear moving average on single value field - */ - @Test - public void linearSingleValuedField() { - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(smooth("smooth") - .window(windowSize) - .modelBuilder(new LinearModel.LinearModelBuilder()) - .gapPolicy(gapPolicy) - .setBucketsPaths("_count")) - .subAggregation(smooth("movavg_values") - .window(windowSize) - .modelBuilder(new LinearModel.LinearModelBuilder()) - .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) - ).execute().actionGet(); - - assertSearchResponse(response); - - InternalHistogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("smooth"); - assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(linearMovAvgCounts[i])); - - SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); - assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(linearMovAvgCounts[i])); - } - } - - /** - * test single exponential moving average on single value field - */ - @Test - public void singleExpSingleValuedField() { - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(smooth("smooth") - .window(windowSize) - .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) - .gapPolicy(gapPolicy) - .setBucketsPaths("_count")) - .subAggregation(smooth("movavg_values") - .window(windowSize) - .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) - .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) - ).execute().actionGet(); - - assertSearchResponse(response); - - InternalHistogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("smooth"); - assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(singleExpMovAvgCounts[i])); - - SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); - assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(singleExpMovAvgCounts[i])); - } - } - - /** - * test double exponential moving average on single value field - */ - @Test - public void doubleExpSingleValuedField() { - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(smooth("smooth") - .window(windowSize) - .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) - .gapPolicy(gapPolicy) - .setBucketsPaths("_count")) - .subAggregation(smooth("movavg_values") - .window(windowSize) - .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) - .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) - ).execute().actionGet(); - - assertSearchResponse(response); - - InternalHistogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("smooth"); - assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(doubleExpMovAvgCounts[i])); - - SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); - assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(doubleExpMovAvgCounts[i])); - } - } - - - private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey, - long expectedDocCount) { - if (expectedDocCount == -1) { - expectedDocCount = 0; - } - assertThat(msg, bucket, notNullValue()); - assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey)); - assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount)); - } - -} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java new file mode 100644 index 00000000000..9c3a6f23419 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -0,0 +1,1018 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.moving.avg; + + +import com.google.common.collect.EvictingQueue; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.index.query.RangeFilterBuilder; +import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers; +import org.elasticsearch.search.aggregations.reducers.SimpleValue; +import org.elasticsearch.search.aggregations.reducers.movavg.models.*; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.hamcrest.Matchers; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.smooth; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class MovAvgTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + private static final String SINGLE_VALUED_VALUE_FIELD_NAME = "v_value"; + private static final String GAP_FIELD = "g_value"; + + static int interval; + static int numValueBuckets; + static int numFilledValueBuckets; + static int windowSize; + static BucketHelpers.GapPolicy gapPolicy; + + static long[] docCounts; + static long[] valueCounts; + static Double[] simpleMovAvgCounts; + static Double[] linearMovAvgCounts; + static Double[] singleExpMovAvgCounts; + static Double[] doubleExpMovAvgCounts; + + static Double[] simpleMovAvgValueCounts; + static Double[] linearMovAvgValueCounts; + static Double[] singleExpMovAvgValueCounts; + static Double[] doubleExpMovAvgValueCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + List builders = new ArrayList<>(); + + interval = 5; + numValueBuckets = randomIntBetween(6, 80); + numFilledValueBuckets = numValueBuckets; + windowSize = randomIntBetween(3,10); + gapPolicy = BucketHelpers.GapPolicy.INSERT_ZEROS; // TODO randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; + + docCounts = new long[numValueBuckets]; + valueCounts = new long[numValueBuckets]; + for (int i = 0; i < numValueBuckets; i++) { + docCounts[i] = randomIntBetween(0, 20); + valueCounts[i] = randomIntBetween(1,20); //this will be used as a constant for all values within a bucket + } + + // Used for the gap tests + builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() + .field("gap_test", 0) + .field(GAP_FIELD, 1).endObject())); + builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() + .field("gap_test", (numValueBuckets - 1) * interval) + .field(GAP_FIELD, 1).endObject())); + + this.setupSimple(); + this.setupLinear(); + this.setupSingle(); + this.setupDouble(); + + + + for (int i = 0; i < numValueBuckets; i++) { + for (int docs = 0; docs < docCounts[i]; docs++) { + builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i * interval) + .field(SINGLE_VALUED_VALUE_FIELD_NAME, 1).endObject())); + } + } + + indexRandom(true, builders); + ensureSearchable(); + } + + private void setupSimple() { + simpleMovAvgCounts = new Double[numValueBuckets]; + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + window.offer(thisValue); + + double movAvg = 0; + for (double value : window) { + movAvg += value; + } + movAvg /= window.size(); + + simpleMovAvgCounts[i] = movAvg; + } + + window.clear(); + simpleMovAvgValueCounts = new Double[numValueBuckets]; + for (int i = 0; i < numValueBuckets; i++) { + window.offer((double)docCounts[i]); + + double movAvg = 0; + for (double value : window) { + movAvg += value; + } + movAvg /= window.size(); + + simpleMovAvgValueCounts[i] = movAvg; + + } + + } + + private void setupLinear() { + EvictingQueue window = EvictingQueue.create(windowSize); + linearMovAvgCounts = new Double[numValueBuckets]; + window.clear(); + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + if (thisValue == -1) { + thisValue = 0; + } + window.offer(thisValue); + + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; + } + linearMovAvgCounts[i] = avg / totalWeight; + } + + window.clear(); + linearMovAvgValueCounts = new Double[numValueBuckets]; + + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + window.offer(thisValue); + + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; + } + linearMovAvgValueCounts[i] = avg / totalWeight; + } + } + + private void setupSingle() { + EvictingQueue window = EvictingQueue.create(windowSize); + singleExpMovAvgCounts = new Double[numValueBuckets]; + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + if (thisValue == -1) { + thisValue = 0; + } + window.offer(thisValue); + + double avg = 0; + double alpha = 0.5; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + singleExpMovAvgCounts[i] = avg ; + } + + singleExpMovAvgValueCounts = new Double[numValueBuckets]; + window.clear(); + + for (int i = 0; i < numValueBuckets; i++) { + window.offer((double)docCounts[i]); + + double avg = 0; + double alpha = 0.5; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + singleExpMovAvgCounts[i] = avg ; + } + + } + + private void setupDouble() { + EvictingQueue window = EvictingQueue.create(windowSize); + doubleExpMovAvgCounts = new Double[numValueBuckets]; + + for (int i = 0; i < numValueBuckets; i++) { + double thisValue = docCounts[i]; + if (thisValue == -1) { + thisValue = 0; + } + window.offer(thisValue); + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + + double alpha = 0.5; + double beta = 0.5; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + doubleExpMovAvgCounts[i] = s + (0 * b) ; + } + + doubleExpMovAvgValueCounts = new Double[numValueBuckets]; + window.clear(); + + for (int i = 0; i < numValueBuckets; i++) { + window.offer((double)docCounts[i]); + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + + double alpha = 0.5; + double beta = 0.5; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + doubleExpMovAvgValueCounts[i] = s + (0 * b) ; + } + } + + /** + * test simple moving average on single value field + */ + @Test + public void simpleSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); + SimpleValue docCountMovAvg = bucket.getAggregations().get("movingAvg"); + assertThat(docCountMovAvg, notNullValue()); + assertThat(docCountMovAvg.value(), equalTo(simpleMovAvgCounts[i])); + + SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); + assertThat(valuesMovAvg, notNullValue()); + assertThat(valuesMovAvg.value(), equalTo(simpleMovAvgCounts[i])); + } + } + + /** + * test linear moving average on single value field + */ + @Test + public void linearSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(new LinearModel.LinearModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(new LinearModel.LinearModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); + SimpleValue docCountMovAvg = bucket.getAggregations().get("movingAvg"); + assertThat(docCountMovAvg, notNullValue()); + assertThat(docCountMovAvg.value(), equalTo(linearMovAvgCounts[i])); + + SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); + assertThat(valuesMovAvg, notNullValue()); + assertThat(valuesMovAvg.value(), equalTo(linearMovAvgCounts[i])); + } + } + + /** + * test single exponential moving average on single value field + */ + @Test + public void singleExpSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); + SimpleValue docCountMovAvg = bucket.getAggregations().get("movingAvg"); + assertThat(docCountMovAvg, notNullValue()); + assertThat(docCountMovAvg.value(), equalTo(singleExpMovAvgCounts[i])); + + SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); + assertThat(valuesMovAvg, notNullValue()); + assertThat(valuesMovAvg.value(), equalTo(singleExpMovAvgCounts[i])); + } + } + + /** + * test double exponential moving average on single value field + */ + @Test + public void doubleExpSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); + SimpleValue docCountMovAvg = bucket.getAggregations().get("movingAvg"); + assertThat(docCountMovAvg, notNullValue()); + assertThat(docCountMovAvg.value(), equalTo(doubleExpMovAvgCounts[i])); + + SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); + assertThat(valuesMovAvg, notNullValue()); + assertThat(valuesMovAvg.value(), equalTo(doubleExpMovAvgCounts[i])); + } + } + + @Test + public void testSizeZeroWindow() { + try { + client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(0) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + fail("MovingAvg should not accept a window that is zero"); + + } catch (SearchPhaseExecutionException exception) { + //Throwable rootCause = exception.unwrapCause(); + //assertThat(rootCause, instanceOf(SearchParseException.class)); + //assertThat("[window] value must be a positive, non-zero integer. Value supplied was [0] in [movingAvg].", equalTo(exception.getMessage())); + } + } + + @Test + public void testBadParent() { + try { + client() + .prepareSearch("idx") + .addAggregation( + range("histo").field(SINGLE_VALUED_FIELD_NAME).addRange(0,10) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(0) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + fail("MovingAvg should not accept non-histogram as parent"); + + } catch (SearchPhaseExecutionException exception) { + // All good + } + } + + @Test + public void testNegativeWindow() { + try { + client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(-10) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + ).execute().actionGet(); + fail("MovingAvg should not accept a window that is negative"); + + } catch (SearchPhaseExecutionException exception) { + //Throwable rootCause = exception.unwrapCause(); + //assertThat(rootCause, instanceOf(SearchParseException.class)); + //assertThat("[window] value must be a positive, non-zero integer. Value supplied was [0] in [movingAvg].", equalTo(exception.getMessage())); + } + } + + @Test + public void testNoBucketsInHistogram() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field("test").interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + } + + @Test + public void testZeroPrediction() { + try { + client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .predict(0) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + fail("MovingAvg should not accept a prediction size that is zero"); + + } catch (SearchPhaseExecutionException exception) { + // All Good + } + } + + @Test + public void testNegativePrediction() { + try { + client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .predict(-10) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + fail("MovingAvg should not accept a prediction size that is negative"); + + } catch (SearchPhaseExecutionException exception) { + // All Good + } + } + + /** + * This test uses the "gap" dataset, which is simply a doc at the beginning and end of + * the SINGLE_VALUED_FIELD_NAME range. These docs have a value of 1 in the `g_field`. + * This test verifies that large gaps don't break things, and that the mov avg roughly works + * in the correct manner (checks direction of change, but not actual values) + */ + @Test + public void testGiantGap() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field("gap_test").interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(GAP_FIELD)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movingAvg"))).value(); + assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0)); + + double currentValue; + for (int i = 1; i < numValueBuckets - 2; i++) { + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + + // Since there are only two values in this test, at the beginning and end, the moving average should + // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing + // without actually verifying the computed values. Should work for all types of moving avgs and + // gap policies + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } + + // The last bucket has a real value, so this should always increase the moving avg + currentValue = ((SimpleValue)(buckets.get(numValueBuckets - 1).getAggregations().get("movingAvg"))).value(); + assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); + } + + /** + * Big gap, but with prediction at the end. + */ + @Test + public void testGiantGapWithPredict() { + + MovAvgModelBuilder model = randomModelBuilder(); + int numPredictions = randomIntBetween(0, 10); + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field("gap_test").interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(GAP_FIELD)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(model) + .gapPolicy(gapPolicy) + .predict(numPredictions) + .setBucketsPaths("the_sum")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets + numPredictions)); + + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movingAvg"))).value(); + assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0)); + + double currentValue; + for (int i = 1; i < numValueBuckets - 2; i++) { + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + + // Since there are only two values in this test, at the beginning and end, the moving average should + // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing + // without actually verifying the computed values. Should work for all types of moving avgs and + // gap policies + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } + + // The last bucket has a real value, so this should always increase the moving avg + currentValue = ((SimpleValue)(buckets.get(numValueBuckets - 1).getAggregations().get("movingAvg"))).value(); + assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); + + // Now check predictions + for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { + // Unclear at this point which direction the predictions will go, just verify they are + // not null, and that we don't have the_sum anymore + assertThat((buckets.get(i).getAggregations().get("movingAvg")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_sum")), nullValue()); + } + } + + /** + * This test filters the "gap" data so that the first doc is excluded. This leaves a long stretch of empty + * buckets until the final bucket. The moving avg should be zero up until the last bucket, and should work + * regardless of mov avg type or gap policy. + */ + @Test + public void testLeftGap() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + filter("filtered").filter(new RangeFilterBuilder("gap_test").from(1)).subAggregation( + histogram("histo").field("gap_test").interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(GAP_FIELD)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ) + + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + double currentValue; + double lastValue = 0.0; + for (int i = 0; i < numValueBuckets - 1; i++) { + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + lastValue = currentValue; + } + + } + + @Test + public void testLeftGapWithPrediction() { + + int numPredictions = randomIntBetween(0, 10); + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + filter("filtered").filter(new RangeFilterBuilder("gap_test").from(1)).subAggregation( + histogram("histo").field("gap_test").interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(GAP_FIELD)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .predict(numPredictions) + .setBucketsPaths("the_sum")) + ) + + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets + numPredictions)); + + double currentValue; + double lastValue = 0.0; + for (int i = 0; i < numValueBuckets - 1; i++) { + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + lastValue = currentValue; + } + + // Now check predictions + for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { + // Unclear at this point which direction the predictions will go, just verify they are + // not null, and that we don't have the_sum anymore + assertThat((buckets.get(i).getAggregations().get("movingAvg")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_sum")), nullValue()); + } + } + + /** + * This test filters the "gap" data so that the last doc is excluded. This leaves a long stretch of empty + * buckets after the first bucket. The moving avg should be one at the beginning, then zero for the rest + * regardless of mov avg type or gap policy. + */ + @Test + public void testRightGap() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + filter("filtered").filter(new RangeFilterBuilder("gap_test").to((interval * (numValueBuckets - 1) - interval))).subAggregation( + histogram("histo").field("gap_test").interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(GAP_FIELD)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_sum")) + ) + + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + double currentValue; + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movingAvg"))).value(); + for (int i = 1; i < numValueBuckets - 1; i++) { + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } + + } + + @Test + public void testRightGapWithPredictions() { + + int numPredictions = randomIntBetween(0, 10); + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + filter("filtered").filter(new RangeFilterBuilder("gap_test").to((interval * (numValueBuckets - 1) - interval))).subAggregation( + histogram("histo").field("gap_test").interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + .subAggregation(sum("the_sum").field(GAP_FIELD)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .predict(numPredictions) + .setBucketsPaths("the_sum")) + ) + + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets + numPredictions)); + + double currentValue; + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movingAvg"))).value(); + for (int i = 1; i < numValueBuckets - 1; i++) { + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } + + // Now check predictions + for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { + // Unclear at this point which direction the predictions will go, just verify they are + // not null, and that we don't have the_sum anymore + assertThat((buckets.get(i).getAggregations().get("movingAvg")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_sum")), nullValue()); + } + } + + @Test + public void testPredictWithNoBuckets() { + + int numPredictions = randomIntBetween(0, 10); + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + // Filter so we are above all values + filter("filtered").filter(new RangeFilterBuilder("gap_test").from((interval * (numValueBuckets - 1) + interval))).subAggregation( + histogram("histo").field("gap_test").interval(interval).minDocCount(0) + .subAggregation(sum("the_sum").field(GAP_FIELD)) + .subAggregation(movingAvg("movingAvg") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .predict(numPredictions) + .setBucketsPaths("the_sum")) + ) + + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + } + + + private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey, + long expectedDocCount) { + if (expectedDocCount == -1) { + expectedDocCount = 0; + } + assertThat(msg, bucket, notNullValue()); + assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey)); + assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount)); + } + + private MovAvgModelBuilder randomModelBuilder() { + int rand = randomIntBetween(0,3); + + switch (rand) { + case 0: + return new SimpleModel.SimpleModelBuilder(); + case 1: + return new LinearModel.LinearModelBuilder(); + case 2: + return new SingleExpModel.SingleExpModelBuilder().alpha(randomDouble()); + case 3: + return new DoubleExpModel.DoubleExpModelBuilder().alpha(randomDouble()).beta(randomDouble()); + default: + return new SimpleModel.SimpleModelBuilder(); + } + } + +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java new file mode 100644 index 00000000000..156f4f873a7 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java @@ -0,0 +1,297 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.moving.avg; + +import com.google.common.collect.EvictingQueue; +import org.elasticsearch.search.aggregations.reducers.movavg.models.*; +import org.elasticsearch.test.ElasticsearchTestCase; +import static org.hamcrest.Matchers.equalTo; +import org.junit.Test; + +public class MovAvgUnitTests extends ElasticsearchTestCase { + + @Test + public void testSimpleMovAvgModel() { + MovAvgModel model = new SimpleModel(); + + int numValues = randomIntBetween(1, 100); + int windowSize = randomIntBetween(1, 50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValues; i++) { + + double randValue = randomDouble(); + double expected = 0; + + window.offer(randValue); + + for (double value : window) { + expected += value; + } + expected /= window.size(); + + double actual = model.next(window); + assertThat(Double.compare(expected, actual), equalTo(0)); + } + } + + @Test + public void testSimplePredictionModel() { + MovAvgModel model = new SimpleModel(); + + int windowSize = randomIntBetween(1, 50); + int numPredictions = randomIntBetween(1,50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < windowSize; i++) { + window.offer(randomDouble()); + } + double actual[] = model.predict(window, numPredictions); + + double expected[] = new double[numPredictions]; + for (int i = 0; i < numPredictions; i++) { + for (double value : window) { + expected[i] += value; + } + expected[i] /= window.size(); + window.offer(expected[i]); + } + + for (int i = 0; i < numPredictions; i++) { + assertThat(Double.compare(expected[i], actual[i]), equalTo(0)); + } + } + + @Test + public void testLinearMovAvgModel() { + MovAvgModel model = new LinearModel(); + + int numValues = randomIntBetween(1, 100); + int windowSize = randomIntBetween(1, 50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValues; i++) { + double randValue = randomDouble(); + window.offer(randValue); + + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; + } + double expected = avg / totalWeight; + double actual = model.next(window); + assertThat(Double.compare(expected, actual), equalTo(0)); + } + } + + @Test + public void testLinearPredictionModel() { + MovAvgModel model = new LinearModel(); + + int windowSize = randomIntBetween(1, 50); + int numPredictions = randomIntBetween(1,50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < windowSize; i++) { + window.offer(randomDouble()); + } + double actual[] = model.predict(window, numPredictions); + double expected[] = new double[numPredictions]; + + for (int i = 0; i < numPredictions; i++) { + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; + } + expected[i] = avg / totalWeight; + window.offer(expected[i]); + } + + for (int i = 0; i < numPredictions; i++) { + assertThat(Double.compare(expected[i], actual[i]), equalTo(0)); + } + } + + @Test + public void testSingleExpMovAvgModel() { + double alpha = randomDouble(); + MovAvgModel model = new SingleExpModel(alpha); + + int numValues = randomIntBetween(1, 100); + int windowSize = randomIntBetween(1, 50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValues; i++) { + double randValue = randomDouble(); + window.offer(randValue); + + double avg = 0; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + double expected = avg; + double actual = model.next(window); + assertThat(Double.compare(expected, actual), equalTo(0)); + } + } + + @Test + public void testSinglePredictionModel() { + double alpha = randomDouble(); + MovAvgModel model = new SingleExpModel(alpha); + + int windowSize = randomIntBetween(1, 50); + int numPredictions = randomIntBetween(1,50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < windowSize; i++) { + window.offer(randomDouble()); + } + double actual[] = model.predict(window, numPredictions); + double expected[] = new double[numPredictions]; + + for (int i = 0; i < numPredictions; i++) { + double avg = 0; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + expected[i] = avg; + window.offer(expected[i]); + } + + for (int i = 0; i < numPredictions; i++) { + assertThat(Double.compare(expected[i], actual[i]), equalTo(0)); + } + } + + @Test + public void testDoubleExpMovAvgModel() { + double alpha = randomDouble(); + double beta = randomDouble(); + MovAvgModel model = new DoubleExpModel(alpha, beta); + + int numValues = randomIntBetween(1, 100); + int windowSize = randomIntBetween(1, 50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValues; i++) { + double randValue = randomDouble(); + window.offer(randValue); + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + double expected = s + (0 * b) ; + double actual = model.next(window); + assertThat(Double.compare(expected, actual), equalTo(0)); + } + } + + @Test + public void testDoublePredictionModel() { + double alpha = randomDouble(); + double beta = randomDouble(); + MovAvgModel model = new DoubleExpModel(alpha, beta); + + int windowSize = randomIntBetween(1, 50); + int numPredictions = randomIntBetween(1,50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < windowSize; i++) { + window.offer(randomDouble()); + } + double actual[] = model.predict(window, numPredictions); + double expected[] = new double[numPredictions]; + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + for (int i = 0; i < numPredictions; i++) { + expected[i] = s + (i * b); + assertThat(Double.compare(expected[i], actual[i]), equalTo(0)); + } + } +} From a03cefcece609ebf5ef5507e0cb6f481c12e1485 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 15 Apr 2015 16:33:28 -0400 Subject: [PATCH 069/236] [DOCS] Add documentation for moving average --- .../reducers/images/double_0.2beta.png | Bin 0 -> 70338 bytes .../reducers/images/double_0.7beta.png | Bin 0 -> 73869 bytes .../images/double_prediction_global.png | Bin 0 -> 71898 bytes .../images/double_prediction_local.png | Bin 0 -> 67158 bytes .../reducers/images/linear_100window.png | Bin 0 -> 66459 bytes .../reducers/images/linear_10window.png | Bin 0 -> 71996 bytes .../reducers/images/movavg_100window.png | Bin 0 -> 65152 bytes .../reducers/images/movavg_10window.png | Bin 0 -> 67883 bytes .../reducers/images/simple_prediction.png | Bin 0 -> 68361 bytes .../reducers/images/single_0.2alpha.png | Bin 0 -> 64198 bytes .../reducers/images/single_0.7alpha.png | Bin 0 -> 68747 bytes .../reducers/movavg-reducer.asciidoc | 296 ++++++++++++++++++ 12 files changed, 296 insertions(+) create mode 100644 docs/reference/search/aggregations/reducers/images/double_0.2beta.png create mode 100644 docs/reference/search/aggregations/reducers/images/double_0.7beta.png create mode 100644 docs/reference/search/aggregations/reducers/images/double_prediction_global.png create mode 100644 docs/reference/search/aggregations/reducers/images/double_prediction_local.png create mode 100644 docs/reference/search/aggregations/reducers/images/linear_100window.png create mode 100644 docs/reference/search/aggregations/reducers/images/linear_10window.png create mode 100644 docs/reference/search/aggregations/reducers/images/movavg_100window.png create mode 100644 docs/reference/search/aggregations/reducers/images/movavg_10window.png create mode 100644 docs/reference/search/aggregations/reducers/images/simple_prediction.png create mode 100644 docs/reference/search/aggregations/reducers/images/single_0.2alpha.png create mode 100644 docs/reference/search/aggregations/reducers/images/single_0.7alpha.png create mode 100644 docs/reference/search/aggregations/reducers/movavg-reducer.asciidoc diff --git a/docs/reference/search/aggregations/reducers/images/double_0.2beta.png b/docs/reference/search/aggregations/reducers/images/double_0.2beta.png new file mode 100644 index 0000000000000000000000000000000000000000..64499b9834281744a3566ef1d3a5d597745e82b6 GIT binary patch literal 70338 zcmaHRV|ZlSwszQ2$41BL*tV1I*fuM+?T*oLI<{?gY};1FcD|f@&)sM5KF{;jvwl>q zHRl{ybB=esV^pY|j5s_j4lD==2>kbNA_^cN;GG~Kppnp@KJF}y^`nD;V3C^%3(I{M z7ABOlw=prZGzI}t3rcW<;ZWXKc;~xc9okDhP&Z+kAid_95T;t-nPC-;jrp8l^o=M; zBuq?EC^+6|SS}1rs8}gj$_=sP{QTYP{rsBm+&jbT?4Y83emS%Ke%T|_i5>*6EH5So z1dSHNPf*{;0^CYRoZM7rRTBgh#~0E-Aa)lYC&$>#4EAlHh9wX{vKQ2wSoyAg{;qk* z!y~)~>Pv%svX6X>|6|hwBx`9+cu*fi&mEc5$4G4uLq~vt1e*vN1fw&k-G$)*!+GWR z^a@wQJQwslB#5jiacnzi*80^1?$%l4LpFlo@?ijvX8_Wi2*kP5J&_wo*Lv{b3Bu>k z>)qGT7b=M-S0QmaK%b|M#8{y zMv`lleFDpxpkky|q+JthKRtH{{>$6N`HgN8zO7*A&f9}kVW${Q@KKN~U#Ra35Wfet za*}&qPwTu5)luA5Fs8&)o`bQM-ukU~^?)01^2iz4?rc6)(S@-`Hkm_$p!+@-Wc0dT zBoHLA!7Z4lhQO*G)H#eT3!5lCbBO!krBL$Q~-fv1S@Y0q+QPYpx)==Obx4Bi1a~$>dR-g!nuBwlb|iWn1Lj7 z@DSkqn=8Dk%psmDW>Zk40-x5qP`|w-zEs;TTEhrsuo2>Tip7vazgiTwuN}h>BR%%( z9*rx4xmq=fS!zB6AVMRE)aKsj%uN;yDxwzG0F3K~3AlAq`P&U{t6J2A%lK4o=n-84 zoj??Zy6!ti-l}^&yKeN?4L>pNB&&8onB9u>xAid`o)4b<-fc+Q*g$;h|M}kc3p3fz zu2<&${%{ouxWvx|APTGp)4(l{x)5*5O_@3o0HckL5P>WAi;xD5eIxI==%jf|8S%Mq z>^tWlc+ixg6)V3;3tG{A`TQGxvJ>NXD7;x7+xQ?V5>83{V?Htj#XnF(zw*`cp_@EYog9Q}oJ20PaI z^xHHBLr;b*(01vX(^OS7#6W$^ue}Z)9`xJ2tc6E;-v_@9K9DfCeFzz_$%=xjxQUt!C8-DR zPlzWHh(*Zu0}3Uumq`5^_A=a&U_lJ)E=04?x*QT3!j~YFES5t&N8}c;1mVVPu0!M- zwBrCB!SgKjL+-jTe|aSeF*HmGXtE#$F|%wbxrU-w3Y%FXB^D}Vr%wstR6=V4%DMR2 z8QGuYcZy_-M6tT!8JI_*B)f*I5G+BtdvqAEqkYLx17_?B% zz|KBZz$`?kb-(s1Zo6I7xO%lQ^Fb3tPyE&$VBdbeoOxh{)qoX*RfdIwMGT_|a|}ZX za}UE0qYPsO`y!Z}B~wNAfEP6fC?PSd9Zwan65kPT7vCJ;NRcVmk*k%R zQ;3o~oo$~DTgWBs6j_pTj6R1*97`cGOUi}X^TYJVUa!hF)+ISBcs*%7ay`^7<1x>% z`LXvgJqsF(KZ^zn6d=dU9&k2&GdVh?FbO+#JQ)WtG`j`>0U>6XM_7kOhct&^M>12| zg&q>@!qK^yISaY>S+aTb1vdGadcN2X*p67M*eF;MSj*T2Sk)2T5n>VE5oQtW*sa(H zDTgV$DJLli%zKQyMmVOyDM87R$>7N|Dd!A`jFmOE)qrZ(n!GBdnwwghTKg*V8uyxl z8kK5qt$rM|`TMUo}@>c-j$I|lnAyC#P``_(P1EfKpgI}Hc? zEt>t4ZLQtyeWp#=t(LC2Vb+1M(ek0wf%Lw$LEe#`uG*x5B=YdW&;t^B)JhB+5F0uh z+;biR@=PLKjt%q<>yCQu?$#6MzVni0)?wLA)&2YJiEW#$Ow3JW22>6zIU)w)Z$w2j zha|2#Rl>IF-_!;L5A)kipDCj8Xq$y<5WIJRQq@CYjzHv$# ze{=fw8mAJ25JMO1O)E@qNP|kRNPn%ZA~mngChIH^MNvraB2}Ssme$tPR`^2pm<0P7 z)&!OuT?35`EdgyPEFr8`0!*Sp;!&b!IAS<)xG+AmNKdIwIjWeYsH}*pB)E88{S<3cs}?zvDkhOX>V!7F@-TRT6tRj za$F7Fy8QZ~<@@?1CnYBX$5bbMZSXavHEUjXUhejAcf+UFoAz7kn*(^X&rw(`xCWo4 zKQn(0iEN5kjU0~Lk4%U#j98H>lCqSdOKoFEXC_OVPs<*S7-Je+Plii{N?A|?fg=f* z3YVAE9}!MOEUQvURB=-wC^IgL)^J&5Sj4MqwyrfQwMw&oUY6;lTdnBJ=wI*aSoK~K zT-f^6)07);7_q}l?o6&*sA2l6w%wuldYiA^Yw5W_3r#CdEBFrXoWJe;2=opd6bc_d zkMkRs>jL`D@uvHlQ@W8L6)|lKcJp5ASSzq8)8&0P`zUsg5mtOd*lt{ZQ+F-91pAn?9#8o&q?q`@J-O__v-JEfM=mz z;W(jw#A5tmJ{DOvqNHG<>}O$%9;{f^^a|V93)HQ1;ybZj(U*w2=r)h!Guz3n3X1BY z_@X1g>tU(*r1)xd9o`Fl-xe}{&V9+TQnUwp6s88mcgw6S}J)*KH<*{Q&*z&(>UV?RAF+njmK zL+ikGf3uHW$-H(Hc9b-3F!3q<%kbnN1-G~h-P5W_fx3wkhK-8(ywKh6lS0lb&Y%OB zgPw`#gVgM6tBy!6NzO>HS3qjx($~^iP^(hWcaFR9wu31n>o9us?1`OA zucg%|pd_f4B$rNG`)O`9@~u0!^EwXf%8Y35wIwl0(K=}mPY+n(4^7;Emh&fxc@k10+8<5&l3 zQ<_ta$~$2@h#N;8KzEz#x7)zy-ol~Dq1)nK#9lj%#JXo*b1%!R=N$aEkuO%)&&33m{1vo=^mQ~EbWyaBbmg>#Z${7R4LNPo z)yGv_?HJ8bniFeCYenzISHLlo&3zN-P^o2abl=#pt1Gb0XI79OG9V0uZg_s~M7ds4_Jh@LTM=fp9 zj02BDvdR16WJ`M2V_cS8kJeS=Fa;VZ0reV%zq3`*KwUy)S>mfqgOIfxXg)=5oi3+x zzQ*kueoo7kj%p;@*ouaY<3&rD``MEzygB?p1Q9BzA~-YuwPUzPB7M2{GQ-O7lKLVP z*DvQXNB8{~>y5UNtGD`qt^@%Cho80!S5A%0bkHN3%W%n*|fBV+9_Yz_w-dXGE^Frc91cfFAsi@gR_tL7Bd&V*x<3aW9m-00};#1GtoZ~AN;c@uFewJIGjijX#-L8_Kf zB~c^O@V%bbILSQo5NYpl#|T)~D%&IK(|PST7_d~)_>I;&{p=L~p7**v%9Z=!+q1&s z_~Y%<0#rQIK4d=BKu8Q?a}2Ano6th{ryd(IQqh(#^Wkw(7#!VB@wCB|iFa6ciwdjH zJL#8=s5Pi0vL~`ke8p*&sX`LG8P2z}J2Qr+9eMN_+T&_G_aqK{K!-c6@uX{h`biz7 zA-G|B+QSYOc@7V^@08(&v2J{=m4my+JMB=& zUOrK|aU8VnyPh={vts)&(yK}?ZlL4QK80;ekd9mG;bFgencnO1@{EXD{u5F$gbyI2 zD2+IZHi7?q_&j7de&Dc~6jMobO|&t%LEv+-Jdf4vlTJ$x{??o<*P zdwPorWfcQ+gR%!>xaC;K(E^1Unb1AHU9s&&kw>8dogN>g6f0t3u8w1#O17-(ICiu5 zIVFNoYJGH`=C~}!Qpyr|!i@L&D^e?AwU5WGjnh-Tb^7rq1Tn;SA@}aKZhp}g5x72~ zK6sbw?^&cKWFh$(-xfx+lExCkf4V5k$@fy-Q5aKmmsAzEDbUJYFOrl-DHE({md92c z<>r^t<_VX=Ir&)xM>n^L6Ye|+P zmo^+ZjsYjVyEpUAmbwh~@0BM$u7qb*Wa&1=7};(o0D{yD(m z<^bRHY~NF&Q(Ou?V)7Z29KJLkU5)Bxjra78 ztp->XWM3)Li8}tsS#=O5^BA6MP!Jjj5Smbs#S)O&AqiimB8Zsx(9V0PK++6X(gUMU zJOXHgBq;@ywqi^NJ%o@$6gIwMlZbr2PCxKbf>>f-7|^&PcLPo*Az+0rv+LyXi++Af zT!_{Pdemp;z@ZBWnR2b8;zH7n@D9ozNG8Nqr^e_uH`uA7<@h{~<$`gKG7*BJ|fO19;4u;zt*~I(F$ve_OH&#L-{#faAlgnIOknI z&U<=n<-yI;!YW>8QkUfDhmXN|a1rcE0{ejzVv2msZVnFNgd;SipMaE_Iq7ZfKZPT9 zj0@Qg%rJ*vr=d;|@ttUioLXORkS^*jxwh6eYmWy_WB8hhr1;vX6t*u; z4|&Di4gI6-`r-Rb$x<`axKa{oxa(iz53mbv8nGvs#cSCe+~`Yo({#98IZF=hhXz?g zuSVGOs0VCxn_aSkKbV%j&rer4+19t8IdmoXu^st-5&E&JUN&^?yzd?F0l<@C)2B-I zjsVCbvV^^{gxR^}dcRvJ+`djBwuFWWM8GpUlzSIwpfAlNh)EYDguhx0sD!|mLr_IM zaD(7HLc>8g3E!|R@ktLf9I^-|(O7v*`SPMl269U4V;=TMIv=Ie^|k_4@sssO3>t^^2nB@0hE0XLsNOR(MmV4|QVGD3`6HXi5&uG?@ia9mz%lr!D8&g&l zmf2RaUvFfRq+Gnu0q@;ijoC*@s3gob80$jk(5MlLU^BtBoDLGYiazmlP-TvlWEG9p zG%p%UIzDJM&qpBaF(4j(=oes&gixzs+M6UIprOJhNZ7+csoBvKCh>GspOwRK{EqZU zIFPM_*(c2p0gk-xUvh)K_kA`Ltsbb(ZE9~K=D{b3Ajp!-rXb9M_6`)3&>n_TqEd?f zk=Tnkd&%O-GFC@dC+TQRP~`S_IY( z_eSn&^uFMN^Gy7L9)Sh!6A|m~Cd1jq{NM#eG>NW(-X~wH+ibn2JTGtKCZ?zDd#_Gp zlQMa!xv0@gd*@>2-dKa`SL{J2#x;Xn6=@{t$ zTKi)v&%f?+$(gwtTdIkeSs7b9e8}MAWaZ@fpYi|oXl6U*vl!3vYVnD!zK)Neqx-k9}BhY05Bk*v$EFK=}_lFn| zXlPLAP9ZSOf133#sg-1#JN3*xy?^K|&?O0@v}aX#pGc1Ss3BEl0U|mOnO$vYK-snJ z2t3D_Vn6SY9T+A#x`QfIFj|;DpGv_eeBly?zyH%j0dAC{aoK#EpB>g5{&UnvRvWJF z`ct(mb>*a+SC&t%PACa-(9+3A9o_V!8MtrFo1g#bA`s{%c2HgU%BSf-Zj)(xQ>kqE zSM#~9xuKh+#s+pgALSlJI?mh{_$l~ion_~}nCwTVp&2ofKixKo4?%p??_rahT>>0x zT$9vXZ_3>NimrIOzvp`+NnSN2qXO+lP4OB0j33wFCn${#{Y zxgdOf%q}gadZu>2Yu;1(;y~SgF{%VhA)CAKelI$F!WEqH;Dg9I@~fHZ9HjmJT=i&W z)&M?VceR`a=+c?9l`Vp;JCWN-KaurdXBYo|YtE%s9*oWu^>&}X0R)Qd#kbjmwzoMl z7fT*%@Qiss-Lp6o!Y%C8?X5x98QtzLKUOt7+NGB78&(QjTSOyO1pM)meOFM53XZQ@ zen+N1LPG+3%&Ao4c7D5LDrx9pczT(|qp*3E)*h`iX5{i$NaYSTAuB0pNkMh0OD`Q~ zynNABEZ`ddt|J4boTiM83$TddqC?0+l4n_PntRZB1a&{&I;h(O;-XfvPIDHQ9?>K6 z2#P0AHFBL79kU0pRj9m*^&efTVA>RPzMN7UNT17)b-f2bZp2`zRkIZ`}cqfUBBnlhKC_gJNi0QbFHZI(F*vH&;Q98b?IGX! ztvM^i@)|r<758yFrA>W5evY@91ze$a+x&F3MgCLs)f53v>-U=yqKl_~ zd44!!Kq!;pJQ;VpO4ALQu5M2Dj`eh*0~@(~!E&qJzElgfj9JvrpAEBs0^ppP?MO+d5G#(X-1&#<1SC{lw?|B?5uV&!0Jo+D}N772o-fS|nBFyaKYaHt_ zq5^yzD`A84t%0n;);C_^9doBaa@~`DXt7y&8%jO9ZVy&$QW@Z2@j5BerOb!){GFQE zjMSZmy#7d6s$edo(ldiD2`Js{of1Ab(z#nyPdCd(705#Ot7wvS3aSmS$P6)Gdt;{W z3Z^&S?>?=u(gm`WFdh^wvyWqoBYSLQHE$F(JdsTsG7rerPccFch=fsCwQ4(Iq;7~! zZG!!2SQELQZ_E#k3H6c7M}^L0yPLqP^VjdcvhSo_J^5bkwesepD$3D`b5RNMRO2hl z<>Qa8>#z(}MYW<-!0dVt?dlhV01DtgO*>fRZZE%;2RF!Eyzc$tSNN=Z=JaJOjagB5 zLP|s0V##xvJ^1Y1$ANMk3uB>%(VGuffdA2_awZRpOu^K$z_{M9!}$p>6&+Ke$8=nI zS%)?A7isEqqtCPkx-^%XXyMz2*9@i6dv9B6v z*`<>_B|W`@U`b1y55QM?%%YhobpK8?=A;Bl8%7*y^5N8kj3<`DgWRK-PG)IWXe>r0O8djb3|3upM> z7;PK&f$N1;TVMY~RRAQ|5Aju4xyY|XFAX>vET8|hc8p{bxHHp+E4QNm0rOwPg2Mhw z?G^~I78i2=yGCU~k~#TOJ<;4hhiu~c!C}%VJU(74X)6lP7XNgJ+^qQYk%X`b_@n+g z7zAux48&w&{Z53D;7@fy{QkeQx+@!bNRnvL#Ep_9n=ca#k&oR%R+#o0V?Ha?&yR62zwtMYQ8z=pst1eo7 z-n}IxByv7n(=<2QXuT#eG&H27t-YqFWwB5(C-hvbSP1Cy1!b|_6rhFhd3!v#Su)LJ zE!v1fJuQ1w?mk<=E!q{rEGhEhV@<#;TI}tv)Ro*1^7u2Y%j!WooX!u=_*hh1**P!x zC{~S-&-H>Zo!29=yQjzO;bLpw9TF1qNkc#Y9j;ZDOKQ_+2XS*aqI8m-__3MFr2l>>)3B4V+wD+bltJ4JG@ zCA_A=B)JOr=QjKjFQBZ9-hMMJsidT2^c4vgSF+Y*EN699%l&aLY3#uGpyT;0SHCBi z)qMksz*CP+G0~&r?a9LZZpqYar$4gJWDJM1ys-h|N;m{zuC>AONWuLuI|Q&lnqmUV z!OA*!9kh|Eh*^;W#QJamAML?BRiCJ0Dp8smr--F}{{QVpB&yB+p(A8kI zQn$%%Hci@c)&e}-iRS-0-%{h8IF{G^@^Zjm*wn<~4lHk^U}pa5>FHTi{QUCrS#oXY zC;3d1-Oi!z`x9pM5i8tc*_g5ZJ)+M;fOSZa1x8mnoP~>em-nV0R2jV)=tCs?de+WB zG<(zjNvQMm%l`DcmuHND&kG1Vf=nR}>yrj8?;}O9nQmnP>TH5_nisBU!whsfWZ3hc z$#NDij8=c|gd5=Q}QFTKmDTyaENhFU_pRDJ7^0E23ER=8;cF)O>Y znbuQcjl_KGTs-`%1Gpobdf|72H-j9gLS)HqZ+XU9ZvKD{wdN49wd;Uooc{gZP}JKf zTEtKAI7V(iy#74&hqe$RG4Y|b1hh$%iUmj`QU~Mdxi2rCVPRo8Ue9+&7RJWL%LDA( z+?oUqxYBDbn_ZyVvV052IW*&f^eqt{kaT~-nlY#R|@{?z-z01R*PxYFmfl#RW^^)g4s>n4EU#n<_K zUHZfj7Xw2Km(6l=zgL#;kyP8R4}JONF+o))+j_0VOuA}QpSG&}Im2}urr2OzBG7Ce z-+>#iJw=henqjnHWFCxjgTrJLYq{Dmq1(U%4LOFb4-&_XC zqt)ixyMx!MXm61X&+AA|rt8zj4H>Veo2hrTZzj@o;(jKz#u#zx$;J-Gn)h}PCUmO} zXvo%X&p_SxR2`2~)~C1U^ZH3#9GnsoVM*;J`}dRcR@*HhS`d1*DuIQA_-`~1d=Hxe zvz9_6FUv0AlEcbs*m#yC{})2D1PFXUN1FT!bLqSrg~$E_qxqjpeX#i|$q#_n7Ag8~ z*1mc4ftc=3sJ#z=MtBg26BOSI3ru9HKPfE;v?~`VMBCL3`H##0;wzAT$C9k^D)w>1 z=HoLLF@Cc>0~_1s18YGSD|VT@CJ6rw!keNHbXqOnYzSWCJ4Ph4kHcBrP11`{;o*;E zJ8`|)Z%=S`S1n5sCo}QYg%owXLQzsV!-#E&WUAyhXDM-|K!5?qR;Ek61B5hgA4NVKLhe&lbcyGg%%%)F<5X!?cwL@NOGi zaB&yHO>?lL1N+0h+5DCz_KHX0_WcDs9*IUeHRW?Wgum~*wrBx~DaO{H6104RWkhGw z#b)4}sMIG@9m{f9>4}zlchT05 zvqeWzReM2*u9R@-Lj$dum@+bd*i$7y(BjprHzK+C_m_1+3>FeLb2`5N5+pH9!);ZV z5z0n6W1J~}KV_(h3Q&6yaX-3eFY+j<-*U*bD4eca^2rN@=_z{eQai4!_{AyzZL6n$ zx_9wxm)W+hV}Ip9qs47G_vWG(0*8enG3SZ89axO2WKeiN-}ZUT#$sA@g>do{#1(be zSi9xunlQM>&CI~L^@^-pzVhzIE7+Q=1I(+}%|y{dy2P?0aX4FEdLGaaD*3zz;csu= z*A(@~A-=@Hc*=-HP8?<-R*+_pko@s{D|izP<-2gOwCah~BEdiLT$6E!d7< zk&{%9Pdx_UX{(ySod5A=gS!SNiJT8mP#0BUNJz~SE65@M z3dC}pfXNc}nzq`4UhUDfgGEQ-uefV8JSE}U0qyUer*dY&uq<-fK>JDRe^d(3i5Rl) z0kAjnaR5|f``xCm6~kmcVY1LU*O~`2Vc?Jwdc30Hy84;A^UF8|6N37$l%pFW;`6ix z(lqFr-#S_I$q$wMLd_qv?LXF zPOXW}ot=#-Ag&AWb&7-Vi7f9a^l z%2dnqIr14(o+@dqG3>2K9_dc$zPJ2(+F(j(NwUsAz;_GG@HU2o>rNu>HV0ho2^Xv0XFP2f3r;z1 zqF98k%*+TOViNovp2`IXxv~)3UH3@pS}zz*6{j*sK4L36QGl7_gc{B)NewpdUG|Y| zyVy*rv{A_&nU7ESMl>oxE+lHgCsmkM`*oudmFJSjP{dLTr?s{M1gVmo%9cS|rG>MF zIlu}pbf?t_Zo8kaBusoMJqsQ3nvhdgX}GaqynWG6K6b7*9_d47jq~e)o}RP3NO9eT zf$!q7DHDc&XGC2d(1DMTGt#dW^Y0ow)Ve-3@gBfsL%*i8Z-JXF%uXI}80hNI))Qd0 z2X}eV9fDaw5KQL6bK+2U*~E&FR;$N_ns{7Ialtpof`;8Yqr6q0ups|IxVWf33b(iXfNf7LB16=ojxU3Do@xa08j~!A(PY*1n^yD}|{{b{s0w5o; zQ$P%pa8d)J_rz@j26b$$PTb}CCjrhr=?S={c8;*}4>x6|@NsqBZpOcz^bfFO1$}sN zzuoEEZAVHtVd9aTImzzKif$9s?a%;pxYIJjl>aQEcL8Ap zu7S7FSg1EDU@3cIz$HV#-kdA~1EV{1$2LX2879&zg7}-4Hqrl;3iKKFl5!FIy=ZK)AC%cYDUJRk|sJ?`E{oZz@qCOWXC<#Z~J&??PTRQ<` z0Ml724~G5EcGgSs?IozzL`E0rl7<#|aQX@5r|&D^0F!J^Rn$}z-a*S()7aA}9*7HC z85fmd|A4Oq@z*g1C8~N@#B{tbFg@HEb+U)mM7n?mgLd6Rj)n<<@C$~JNwqI->Xp@v z(Nl%D*!o)`y(tl-P-&M~bD<$>UP~~mW*^Mz`$R!)0jvbxbY?dgPC=KWD1XNMr$Aj< z4+AWEXo`Q~zc~WrV~0B~f!W4}>PK%~XSOv(6xx+c>ObuY0H0q^e#%%Uj7uGrUurm} z+1-^5;HV#4=5B z*4%zY&!)@PS!ddVi9|d#aR*G=LYjoVT94U!mgi7xmeEdqkHQ9;{ANjZ`QlV(wG-D= zRu3-tni8nmgWy%$PY|ma0^Y{e_=a|7#>DPE<5IVR65idIrdCskraSN)8##MILo2JOSgR(b(<| zZVpe(&>Rg=+aC)1jLMJw_7`CB5CpqWp{r7mp;}9nbQSPU%=OfM2=|=j2kQO}M*uLO z&h2sC-8Nc-gq*z%Utrp5t@vp-Jo2WH@u(>ZHau_O%TrnVOup#mH%YJ5#ponw3aLwY zEc05TjXmx3un1B6!yWb{>1N-W1s$)Sv6wH@xV!R9@q4pJPdC>;Mo+@+5agQ{hLpNH zSOBq8m&r5I1z)Z10Pnl8^ePJQkvXRL8Q*v!kSBf>hGggn4w226l?C~8SDkft-o(tGt;B%M-0c|9a({WfY)>CDQJ=6*AcNJd#&KX4epGd%x z24b2iV222%OE6x=jr4XpfMcK;6tvMDk9PINhu~GmOtuGB5wPw6S1h2cbAL4>k##CF zW5O@NPq2}xkT{XsSL_8CzDc8$(nf4O{?xKp!sqyrv6bksN~hg=JiV5sh1N7oPdm*E zs+1OQj$(5u$_YSXypI<-kV#YLsHstu(o%|uzUA!0LFh6?`t-e ztrXuu$@{>8rqByrTZjdILAt;Ld&!}sV}wwHXubM%5FGpOqM>ggYRrw;8kb9cSh#Y$ zx^w?b;V$dWhU=)tpe0BB^e~mg_;7CNryDcd*s%=Ck9{yZ(SK|)@mB4N?{0^hk4`#s zV0JuRcl6PgSi^K7Fec-d)@;a|i3stLs+Q~|o~c)))K8{ISCY*)gUJOLcygq`mL&Aw zQ*${9nT3;U9C}L+-7Fkn#M*CJ$@TL;QArc%V>l%wmqN@h>MI>cYYD4dRZj)`a9}et zP4x%)E!yCo=Q#Z zzx=I3SS3nWyZYk>4|Or=BUvo2DhZsP*qiLaf3;K|Xl5g2V4z8c9kvRMcM6cJuB2!& zm0N=u;U!b<*Tz+KF_CMVKCUR-&E3 zkeR!89jT|O#a$$0&uV=qo3(&JiuLTw=QZ2TIy5vqz}}(q3^XKy)?Cx7`uTQwLu?#; zD!gMlmVE@XrGlRIXaxd6F3`0G^Gz>w7}oHVYW=sKDo-Uee-C?;MI)8dEKGN0E5Bs~ z;=i$UHM#49k=ij`m*FTu-C zQWDr6WhE|X@i;Q}d05+3;4C9XuTc3q6IrSd!SDrqgEpwhS`!o5=@-#u%Rka3_uvA! z1S>wp?-R58Uvh>2V!TPnn@uEIV4f|g7Cy~{8uVn#CLqiA*m#E}YCk zhniJ^T+?XMvfBA!ym5>QJS3o}$wQTB;;Zs3Mw!h8%tx#Yia#P`ayf;`q?-Y@pleNo z5FsIWLc1#bg+D5i56}JLch2UA$Xmgb64Ua}9YiutdOHQtObau+gVk!-4ZlAX{u}qZ z6rq1);eJ^R35G(P?cp;*zd3%JST8iK=mb6Ygu*v>*RSqT=RRS@T%nGi=XYmcV>s_r z0+bY~rbBK(^0$V1$Li_NYgGo@A+c2PTcy7LVshR`5n<=&(hQ2cDioX;&0!vvz3+Qp z%CCGV@@$orCMdW!41t`2(j6=8OdKy*QrrP7T#4QcsyGdiM;PRKQqX>6%PeGa{^}K{ zvWljollRN7hy1Z7(Hq~dJ5$9Gz540{0LuiXNWcQ=0@V171c{)zuf~4bCFAkaIR|ec z)YyFoR8I$;1XYtVJtyjudT?O)g1!M#w(fo}VEnx6`^h|w2N3F-VXla5|F8Ze1-yXQ8EDxuw| zhs8eRa`i)bHw|yc8f)pO_#UAV5W=2x85mN#&)?@5BT^$9j*r=0=)Ip5>-5}v7$2iu zeKBYlG$a|K+mGh6AFWamEBt*mkvY`tg`x`!c;u`s22=D@R|N#Jkhl42i?TU?oOSgP z5SU^CLvSZf^09@j9qfPrg#Sy6v&x~9%|AyHr|~*$;dQo7;1}er^7hV~NzPMu(i{tk z%V5r^^-yUn^oR?Cy)Pz<;6@)z*Xnvp(y!VZ_Q1+ygu)&PBEH+!y&Y(wc9{I0d-B#L zyVZC)($Hm(dVFi4CWcr@w==1{WMnO5eRyFD!8rC8;6{znau=7uHW?Yul8d{cjwwFQ7kNVTSy_r}+zbks6mr%Gta_LU-@ z?XN!CT{_cLY1}?ugsKz!l@|BY>okTNk_871GanSLA^H1O5X4zMF2Y{2#v1#SLAxpCoB*jmhP{YCE#ML-OeKJLUl^ zpU-ch!>~||SeKv&$7X-J{+4>PNckfLwL&igph0NK3cPsL%HnpPN0f@!75yyzWLpG6 ze8+B9V#!gPwogi*vxmLGraZRK+L83?tG4oD^lMAB`>Q;~U#V@zcY)E2*H$d~AoYhm z8nj}{YS`M4#J zZvQU?yh7VXkq%KmOZfQONXCM!#_Lv$}AXEzSoecw(Tt6vSOgvIsyjVSSlv6%{_hG#4TM zHg3H-#XcEu5lvY1~Uy}uB>6l1(8i(=n&T7Pafc=ZRbzQ?$K`ya})KU&k zB&qN+1oeyH97U^PiB(JMT}FXF37y1981xAM21Q%mLxgXd2UKwcg{(G0YiY?qwi@#x z?D(PUkU`lnWhM?wJX#HRXCUt(Co_x!_f?()_?>}LfK4$e;&s`!veF|M(W8MOp$p} zuF$4441r4NG{OyMNcN^H)WMt|xTVwh@xV`0`bRNQCN!@kw7y<9E=4%tP z-{{qhyK7C%c=u`lj{4I*1YDZEKTH{hy2S zfW#p`tBm0I<*mz>U1qfixGO#!wNTG9g#%p&3+lZTpWu>{6F2?@$!x4tCi#?NcZ=Tg z<&n6-nx(U*Xq|EflB7|${U;j&M>{fR=*8&s2bG_`wqT!W+;GXBSok!(o5X%$_8-q7 zlIa!S!r!uEEN~*J02l&w-SXNN-;KxFwh6OD3t-*7cC5W9O$6%4WARY+NQLQ&Ce&(7UQ=^3G9Px<5ILd`Fh|vefZXVmp6W!Q`N9sk1a)P1mx6Aba=4 zi-U2m@)X(e<~B2xZ%m9-edC*)vS;NtSZ z%{Q$k`jIY#@Iu*wlm#upP?CU~+gmCwQrQ1vA-+~%ABE0J%p!he(71xO_ArACAH`Or zrHVCY)0t~2HZLz@2)y=WNs}o~&N-q+l#N~cceUkuaslQaUsMWmxe*Z=a}2J(KU(!$ z8yY;dnQv4h43*WB!*4a#!{mA)*@F6wg+wIGq|p-y%$JsqS!GBkH2uI(eeveJ%E~uW zUVp6RBBxK=rg?aZzvFbSqla?9>7FVd{*+%t_;vUH#5Qo$Fdr$EL#&qkA;$}0v`AvlxKTaQr0 ze3Z&4YcenQ5Nk6E0NXhKtFAbSZ53(yBL7YP8ujS%+(Lm&-Y92hf$G_}S&rV_rt+m9 zOAa!AM2lgG)9!e`)QJhZ9!{s_N%(3}{1QNqjymG)OVprfv|l%6>wRoKYOn?y07{gb zFxmqepQJ7JI3)WujP6RHS`HZ!fDu%N(IQ;Or1N(**;6+HyP- zenwNL>&H2741ULnObv_MemF6-!zA@vI$1bu=^e>lZ16!hB6KHRU}%9rRx^ZJSDVKS z^9_IN{*(`bkJLS_rf6gPtvoMtlBi}x(nhj=aOF3LXWKkJxQ`UQhI8(t4871;bp&6j zf|0?j;=(D!3}q36=Mz~^-A(^oxj6lF<%R4`gobSf@Ftt`FgzXOzxr;NWQ+grbg3;0 z1nS#I;cl4E3ZmunVWvzX-0ulloWg@u{AFE6k8w)I96n7tOHDrLhKu75qu4W+a%1x{ z*}Tt55l$XbN*G5AWp~4s_L8( ze@4vtx3J18KK&hGf9=~QBx|;XIC{I{@&8BEIXKp_eqTRL(x7qDByDV?v28c%iEZ1q zZQHg_8aB3V+y0&2d%y4dAI!{|dG_9Gebzp@Z|!ByZI2L3Va#KWO9!D#HAl$*`>b8K zK0RsLbP+_9m2=4}36oQu#mUhZ)3L~9LWqTZSgV_RKV@qCT9=3p9Gp}MJ;V@1y=pD! zsUa}0sA9{)j6b+W<)bREn3^yL$uX{FzFnfP=r^#9`rmKEWU>d5!s>mf1mK4F@{a|9T0K_BsyUyG(s&g2^+-r|Hd8>rIi~drl$FA_wGY1nwCIJtEIA zEpp?jbD>_P+Rr;R#3h)Uc($iKAD4#DH&Y@e2~@Xec_>vEmF=X(gOpBASj1*uA8MEo z?Fqn&jFOhCFWm*E_=4PzSCgbuY0|6Tk>7vHpcXEnzp>6knS1-cdNdBOp0(&&_0y|p zRd%?EmvmXa5fVqnRLyh(Udji9xPU%9(YJ zx7-h0o2R#r5aM)vmhliB5Ori)5ZK^OOvN1jw`JQBG(@(+{UkxEl&}$*biq-D?R9gG z?Qq_VJ)ki?NQ(ZCt#xgcSQGBTYY z{8HV8jQ@bN0nwolyX0+C2dRTgWHO&UDxso1yTo zqwfRT^#w#3lP}P2%cpP_M^Gfo8-su<>-bqQ)EnZK`r2=J)16m3fQ;jx$#g+Lb5Y$; zHoi7D`yJ!HG&foFRJ-X%(3rfR?5}mj<3Znyg0G56(Vzv*Zh`_5{%L!H(*?R+!S#mQ zKYURB98a)mOYP(bAJKP(ycgbftMo#0IEfdsy4rP-51zkL=;K}iyLnx|u8oA8z8=cd z?x*8-uxcIbkI}X*npkNu?5x|5{()fMF(VFZXhjLfoifly$?s7qXEe6VtQp}0b3*?n zk4mcziP5Z&`M>ictb1Ircek}@)W$z91bA9?+pBfuR;4F90$uTD=uE0b@iA9Ne*`im zaJR2Kt_5)|>>ZQz?<%p!*ezRVI10v2&0n*`$H(if2F#bMRsuCmjWDG4Fl@4KU@Sr_UDns;` z>PWNLZo&XCWO*k<^2F)}?&IUJM?Zoz<%Lg;xot0h+Bf4ANZQ?WB{i>lI@2^!ffE z5l^mAQ=gCZggr$@Gc{>l8dWEfxj_QEm#F%4CX`)?Fey)OSj&Gt_&*-MBh;tmvEZQW z=R}#x7rszW__OMinc#fP;Ey0UK^DHk*l% zkKwfCtj^~bsMX99S7nzYcf97jA2!C`xag`~@MDQ;Exqqe8q%2X2$da^Dim3q1hMD% z99dc&H)&8s^RUU-MJm~+TpIMaXwCi0YOHfZUnBFnYqTtg$mfIjH|8Z(#gpPa6Ay|w9RZdIVdcLQzJ zeXCpO4&F6C(z8wRbrILveQZ&h zDbx}EaCNEYNNA69tDBXl0kYyZs8fsS51&GzHe!bmk?=~&L)()(bM&GbLOJhX8~iTK z^DOpIzq#MgeqG>;8yKt&Dh;D?1hN!I$?>u4f9SR3DfR^yW@C z8GEsw0VY`z?;Nyh^kG1D_=CON?P7lO%QK|PX<4S{eZL;9{^23gC*0C#L`Tnz(NWyL zYVvNpwsGl|%gH0}R9ounIyabl;&3;5%io#FP-Gi#T-BXsKu7bV{wN8i+oJ7m{g|qM z008{s{`hpL5j@hAIeB;WKR<6>)F)8E3;$O((|T@_ns#bAWS3fcqq1yjGffEDhiwps zpDOacH;yxL{h>zZ!(F<%)6CRPQ1$se@tv~b{y^GdEVCF6PJJoS#iOim(Bz6d5pVbg zYyFaA9Y3)ojv3!#Vok&Si)GZj%5ry;DR?Ep>+?)Bewqv)u9IvkFMU4d!Y;B10-;pC zOg4d20-soFkOVjSe!Z(0!ycA^)ll%4!;^j~lZ&NwhY7iALLsraTVWe?kfsNg{@vK{ zsRYL>^KN8l8Z$uqTSwc75per~ima!SMGU2mgVIwmuv$CQChN7`si-ymK4$k|uNz)_ z-(rS(zM^z5c{@Sg@x3Rq(bUw-k`m1RP2vhA;p(;8bjX1iuWvOw;y~_ngqv@*!St)TU!cy@!>OF;vD9^!5!#{a243v3 z1JR2DPi4)MATd`_+Lt(Hu`$?zK0)?m1XFTs(RIfSVze;Og7w(_=W+cnhCbxR1 zL|T5zkHMMC5Dc!JiU%=|qyrT#3zJLBw*hmo`Qw0=He2ipezJ=Xma&n8Q1 z!v!{2H@zo^#Y{yCe1&ssc74^#7 zh<3&5jE6i`JvEyeaj#!Q`-`sMa`BDVM}|UPS{rZ%%sy zq)IvE`wN!h!^kHo6R~7p=6h%eO802S-!PE{;>y*cPeEc)?q z927tRB$s<6X9Ag!0`p!OxS@LS~m18aGFNR}9N*eQ^adQRMbN1r{WAB)^mf5X9p8V*LGMswx zf5iL*83A0k7y2-b0{V##AgLZ34X+rppTjD|a9n$YGowd5z|<-G`C+1J;o_uAVC^Uf zcCSzHNKO=S>Ilx+2!lDTYqCyeX<~#;n$a@VPzv|NQZRy{dY9%x)_GRJTYHPlXyP@Z zRp#pgs!ivml)&u{K2mX-zxJ_`n6)>^+!WmqWGms;ZhdK*nfGiJ=7l&K%y*YxK9Ew% z3~bnv{+z#i((~;;?Xuw+z{qHPlcQh=hEp0sdOY8EaSo+d??6g5H>g zVD4tvKbwVLb=Nt@layTbxWVcOOwq=t1Chz57c&L5U24QDKy3t`J7VHxgK7N-<~iX` z^XtzOMM~U=i1)wW$6xK5X*8l)dNf!w|I~HtI=;L<{C-Me%c#wi9X=t8fc+cZuK^hM02M(!jSXFng%bbzh#*BREWJ zJ^~rG);(unLdBBW0aH#6Y0+4y%eOUz-;O5c-Q$ay?9NfS$FMc2Sz%}*X@ zEO`46VO&0cLez=zTn>p_RC;{L9*~{|&xHx8WO0?SH>gBtn#(SHVu4;IK(BckjF%`D zOT&maIY}a$cyeaY%KA)I=BQ8!o7^}Lz}m$0t)5UVt2(Lxl+yYKNZ?v8UT;7jXq>Mv zMOR%YCWDV)?+0=G3t=*ofX=Bz!}sSr9z|7F&v!cd)WzGU`4ZbZtXIIX0_~;N(hcJ_ zJR;omFVq7lFFG#_p6rfT_P{Ud3ALnvQ6IBm(9|WBa54>Q4d=`UpZOa`p8|spy9F9g z(X~2^eOv1kTj#k!)zD3s!S&=tJu83fLS1(J@#Tx66P&)&YEh#g|FO;q&ff;ikYX)i zbHzY@suEn?5mM$pDx6@F4${fcxWorVv&4NtRz_6VD7U%svo;igX;k0GJYE7Y+G{Zb zU*(vSOBV5HB9Nu)mFOU@Bu{mjin}Xj6MUHdRQh{hq1MZF!lr(Q+QdF+Oh;^L*~Iy$ zq=E3b8s?J^Hx1#-sWc8rq4+q(!A9+7ctAcE#pKpI>SDLQ)<`PL+(54~6TU z^gsC@!Z~qK#q_KsbN0a0l|@yMX#a2uatpQ!+HE~xO@z`mcRan(_FOD2YmALCAT;j} z0n2}7NKf}v-?88{NAOdp@HAWSw_rndq=Er=c_n!(irKAV2p<~ zB7;BwLK!tQzsM@8x{*>FMsn|8fO8-{a2@ds3@FI~DDs5hWB&3(iQa5@sway^g z{};XaDWzkat9w0~re=F$DhY#b^D4VbB%kg4wZKd_bb`lYDbsB$0aNa&bJ%d`ibaih z^g@G|9^78KfpCg$y;gg{{M5g3R76D72*bUnwUU)YyOIqEblvhD$r=^xOyoH6H}d*( zlgOIx%2mcD0AirfY;0g5HDq*#uI3VAXC&BOZl*+^KLH-L#NCtps!Y5@2%L7k0Q0el zx>iiJ(Uz6;+WKfmVBSKHw7k|8Q8%vE)E3}$B3Y3*`v1l$HZ>s)e$P}-!+as-uss`# zlSyl>`I+WL7rc+Jl^b1*C|+UALN=943YJuuh&lKi63P&;A@{(Bcb6)Yr3%B83SUdu zw-IGnj3@Di6pDCOiIVMlsc(KMdHPpm8(*g9dSG#?rmghWWv(57DBO#xrL1I0_pa=DJFhj#s5vwqT$tA* ztE^!K1sY+;d;-e|CQ-rR6o5IMFOgt!Qbik*x$jY8k`@xc`*U<%De0WkIp;e^*aj93 zvx>DM2yle01$AQ4>*G#Clt`aFj?;fpYZvHG@8bu~Kg0}qRSDj^qF{j*ZKFlg8Y*B$ zwC8^{Dp=75pogu?CBJ>qT*r|AT1S?{3#Ip<)%7RO*73Dr|dR0?7qaz z@L!TaWHTr1d?HX0v(NxrP}ly|wEs3W@PK@$hz*Ot*3MvUyACpK*NZf9`wyWgRG8cY z{@%>pmq99&_#;`Cpw@b(9(pz^vNaryk@PR=-1ae&Ud|V+FE7i{L6*_J-1zQyHl3~@ zZHF}))6Ad09`-VPCv$DZf%8+B>s4ob+FDS#HL`C!u%b2Oes1oKeH!oOx~?>dSUz%z zkDX}r&qMlYV9~_(4)sx%F!cqD*zBi&(6dV#@H}}#K8>l6u%Ox4)DRF9n{Si~p|aE# zP20ZcJ-Zy-kj&`5KV2Ct@ls-hk(uAADmqDKI><<{uM^U00PQEir?T!}QhAYqc)PfKEbYhZsR>7d`bT`HCEl2y!oW0rmIiCGzX zAx#c+z}%#6AtIY#_$Dt{FSvTTKV0T-kZR3UxUZDixtn>pMn!eBwBi9dH-!MJN$fTEUpEOOylI|x^}&!!pqg^qUVc~ zPk-cL-1w$&CxiU12Ug?5lWLjFlIC#_5<~`RUv;zA!WmJcDrX}YI-uBV)6(wK&!#H^ zL&Ds`OjX|Oq^7`^6Oi9psqf2!Y{TK3x%B<^t;=lfoQp4e_tLC%1X{0H-~=--^Z3k- zgQSIZ;X;1Jmt)z{hW0cAu+h4^_Y?!!hOBsq!5X>pFLAIM;^WbLlUHRTD8jUi)oUOy zJm0P*ohycTzpz&K1E7Pr$ z^_$LjdIb7GY9l-v;RZScD)kRQLm(?W$bP&^1_Q4Y->jAEniHt13OSOE4{jGTBUYa= z1ggtX;M{8y@fu^eCQrnq*eIvBO(T4~$Nci}{>6vN5O^PO;Lq@{>&Pz3U*WcRT}B(Q zF(69%OVkxz#Nj36cbl5R**4<~31~c>3}u?@^Qss(@f2w>l3_dO+;d_#DTG;!j4Us` zu?QPOsanY=DeV}pa7`jI+QMm=`3|%+Ev~h6SKnu0rIX`D+FFRu#v3@Fhh$INYT6Om zJ+Y5{w_Ewvj9uOlUS3cp29hwy>Hpy@uLm#tTHuAg*Mz?Gpla$H$s%TL~bTLL& z)_$vC)T(PXmdePwM>^xzkl5zQ>MG0Wq;Nrw$nu(|ImbXBl>=g11GE>eh0O_aWL(cx zTKww5W&YCoNF<2%=2UgZC-eRlxEUPRy2icHocWXP(%B+#Hq3*QP!%6z6BIO$aqnBP z?AG5ar6q0x!~*_%RX_>E+!;|XXMj08F0NoecIiAs!lYSx*z@WAT36v)syut_h0`G;xV%z_lQ zp{715fL_ESn^Ag#)XRGvS=x6i&e+20R<)ZGN(R;2U7lTtp6Xz33iz2f z*oeo$l?1ot%LyDV<_tG&r;KMPsFjGSL)rSPKAkV0-h(!(>x~r8`?Lpo_yVq#6O8Gu zuRH1nMy@pqAi@7WByRRDx+?r#?{LVJNX6yUF7xUVo$15Gp!I`Kh z>kx04J&=0d+;1W2;W^OvG=(eVCCHRil}pN{c*i%wA^6x>;QLP)6GWKSg{)-t(5Ii( zU5=1}T(fH~i9E^`$>?2pP200AcaB4Jb4iXvW&41QOfwG+GK%oF%+JuFGZ01;e5*~U zUXBN>ylNvG3!(t_J#g^LC&y31%u}|orxfCa?tqE>#-S&yo=(0mMX*BoA)lGw2kE~$ z({-3p7WJKT%bwKUom=icqcahooMbuC2r!3zF%R5W_W547JzhR$v7ULm$is==4?^^r z8L5QDP!C$&6gTPR+?_ydIA{=KPWFK`+unesTylNBzXz56@k*I&k>24Qb5oQpRS&OL z9%Fy4P@uKo;vR{74DE{Lk;>UxwzjT;Au^v)>4w^S*EDYvh};;fHh0ZyL=^TBTAkpk ziR)UzD=}IN#B2A0?BumofF_#cRYbQm;a?b6(sGoC{$@TtZ2J+m)DWm$C8()+`WzkL{i!c?!&SFp9>5`f$J$^(jO)mJ|I9n{Qf*R9)@9V8Bpm z4G~>R%>jVs>(%+oO@R*6=8%at`#ZuUK;_m2C{}!;28u+qC6TM$@Pk`;Ie<{L=nMb` z$k*jy4sV2JqCk%z)fhE93l>}Vy63Y6IgyTaZCSB#7r9~bkyibG z?xZ8u#{`)Ra6@#iY9<$Nyk&0sEKL>j6%zmhTpd@YdLJ5!Zv{b?G4GN6l!=SOM+R3M zwA`F|cTJ$`9H^R?iYP( zxRG_Wxiw_Tfh!&;UcvLGUQb!%JLiSLdc}u{_84v)ZKS}<4{kGzHO^J>8wV219&3FM z+kedU)kJ=;Qj~^T9)C^Mx?1?@qc7gtrd)&;E$ci6x_LA`M9`gefC`zc!md}m%^ZNG z{3GsY;Ad+}i9y*TP1ZfptH1pX&kl!fLZx^&ql(`irafqnW=w}zIv4b_c3!a+&We(< zhCRcltEao0>M82KhNe+HF{bm}={}8riZpIaiGP+Yv3XrH^D@Q(3=P$}^xRzLokzu6 z@)PMu>DPDHx%}JHKiX5EnVj0!rR`}C8%HUBB}oY`(kTG$Uvt(>lb)U#S$z(z1jxY>1>e@oJ5238`=1nb1Hy;N03vMCFvF1VxG;Xvl z+;p2P`}&Y*9aNDHwRh@vbd3M7%ydZHc{)063_TemCqNT8Yh=sG>WeJ2k zeUWU!ttSD`{T3M&qw=hI*cg!Y?9d|W+qv{+Rm^61+gO4H@q$AqqRqeQ>P@G=IV4K9 zg_=j(VDX709~E)OLSS_R?Nlo1UMyti_6aJ3>am&?2nr*c_{(E8R%!${R;4#XPd-P! zH|*W&j;t&j;Jm+{nTDhIuw86Vlok|LJ>3PylpoNcg%}*HAJ|3c82h1o78hb}v5HY87NJ1)Jl% zH=D;!13vt`H~q@Yun7oTyH%T}vv>bkZ&DS5zc^nRDh=7{VBBbL*1gY&gVEyeC^Eg_#uy*MGfo&-?n$_=-PTjN7EU#hZ`KjmMlZzInCZ;#=Gwf5($HnhV z=I8A2(9dB6eA;Idze5j`Pno3wswdd9mF2Px>lpgN_;;`&stjU)F8u=CXBz67J9AEF zl206u=Q6HCf2krAU_k37%XsTZkxG(?R>Z4acB@0BR%{n2fBbyeReIZ(*-1fEA)4*( zu?Ge6qa##Qym!js*ZPD)d)0R713;BT4*cbxJMMz)E8N#BL}5??=WCq_v2KjdnpmnG zwx)sANo4Q)V-@3YS};b&K|qpB?XFei&s~IxF=ITO7X}A>SyDUh-Own;OnER)L^Tgw zDZ5lrxfunbY+bDUn$?^^x_)g7B38RX1@FjfwwUbTk>TCDOZnX@||J8d$pXS3U~gq6*$@60Jf& zOBRMeslWF0`0_?aFz9X-ZqGVi=b4~u$PIY9Ptpu6s^?Uu!i&5xk z2IhhQNlDKTe=SXXI6ens_HDespQ#BPc!$ZzoMq`+p!fl_WL$~dBKN7I@?EF2uozU!SWT>`Sy0c^9MwLc!px*n~{xOl)^0d|T z)bdv`p{sFpvaGtm{RPFWuLDa|kixy>Q!nh8YwK@o4VxqTWt4Jb%-+B2e%9`;tDBy` z^_G@@2{JK#o~H5_%8x`v9;1g&**@r@uC6FNNK^rQ1w9D*Rm^G+;dHZ538sTZ!+P^5 zcslCGRq~gpzZ!w{L(&Oh%_cRy4o2KlmK93i?U+7a?%)9S5tfH1-LyX&7WKw>RJyxe?wca(2tDp9UiHe3q zjTF?M{n-Z+_?da-1FEvnorVU{Sf3-|BO`HP?>Z9Z@J@sYZ zJ*g<|iittK9Q#wcwtLg(2Nv(2Y9C!~a8t-PpP<_%c}#>Y_mxe}5h_yL-`*yPurL6^ zz)V!pohpVYWlyMPla`DJc9L0SFO=Id5ei$Tzs+m|=O^3Pp;4x;X3}HZa_3VrT=7}s zp$ft(sqGQ!-c?l>@r$q_Cg^^>2PTZ9;)pVQi`(KUw#h@6UKZ1;FmOv;eyP=@0!x6! z*n8LTGkkwNNLsn8I0&3-OYuo5s{{M}smeh4ihKcUnP>hUmuf8MDHxE_=z%)LM%In& zqrpwWd;t0B3cJqBm2)bgOXRgXpEir3`o4}}l$4EHT}JaXoa zhVOB2^u7C)t=|i@c{h9VcNvLwuHoALDJV`XZ#020)#P1!}KKj!{ zTD2h#!+N9Id%<>c7({C)&1EiQ)*FA&1xYk^SXW}UWt!ozHBy2Kr)3y+W-In53Hv^9 zNblOZqcPCv&|q-g+wj=NH?tbBal#KrQ!~n^2h-g#K~@oGJ-)U-i!wlLBu3;hJQ4$; zt5S?+Mu#M&=z`)zyRzwnW$k4j=X81`8~mkyEP4Kddg`U(t`Ys!??X$)f7DyC`LM6H#)G_&F|AtC>BM1z4_O8T%w;SfwQT55TTFrcGc)B!cod*7~)LmH6MR~HCE%WQ9T zI2|DD%h&2Kx-Vw-35LysQGZGaPw7mf)+^S8t*IG`O>7nChc|{MDH@$>EE-EXyM(&{ zjC9QwOBc~KyuN^UX8&?Yn~tIuvr#sRXce@^e%S<;f-*2O;nZ>iGS(Goy$5^NwZMZE}lME`n3QZO|GS7Qz6oR(QB>(_binrZn2$+tVEks;VWA!ZIdMX`t_0{i9_ zI$+*b(=HnA`~qdv!$-Kiqbh%^vXjYB<$k1ke7{3%=TDp~ps8z{{y6)QC)yqs-(lRp zH>`Y?m)S%Jl4mX1^D)^-3A!>jPFelF*3L%(2x;NGuXsUr>2dZVlv9Nqv)Mq@ik! zoL`fL;$RU;X}saAssWOw*`6UF@>&7EE@y4-AkM3_t80%-Ez2$h1aB%jRMR1NcJeW*yS^uGPur&f^@n7X$Zj(?sgB>vn!jaK!B4AcgZk`E7YxTS4D z1%i5nsg1w4$D_swUIcVNz#RPSm6QHsort66BNe#lFvwvWn3& z3Dnd~q|$iNh|qtRO}Fg-D3KGc+;3)m z6HG?{!~J(J>mmt(t;k~@O@-ZROdjgafwNfTS8g)yaf|0sze30wJw7!jP1M{JMPi;V zy-u%(>%4lNkL96Q{jrIy;LdH+dlMlQUb~|6X1`WJCMOb}#8(Ct{u!0(kgW$mDa zvL|$M;5#Om;HQd2U2LZ(D=8cZNat0Dp(K>C$fsqRc*$e3h9XJ+K_Q@SrOl=(L_7Ks z{U$uc-?)a49j+_mJZyv*P4r~;%U&WTMRh~iD@X!K7rVD@QZK$hJF~U2%-V_zlPq58 ziTv+JAqL$}iwMdgyOp?O;?)gTLHo%K2yVEoT!~BJl~OWx^`>8~RfG;QaIW7)GrTPg z+xyM?JA-#pYb($;3i0jd>DT8mL+7a!fdM}UtJqq0iwiHYs(P|CZ=}nVwCAxyarKfJ z-oM`%lJo7cC?w8Zr|ymM{l^|rniyM}XfSpdjWBo94Z54biq!42?}O>;B+QtAba5J- zh{8|t6%W2@PWwyvdlppCbbSN<$5DTz@ren45V3R?1H5Acuh98o*+I-~59eRdw!0Ii zW|I0MfUpy9~kT{qCJUZv|vaPrStU5qG?uhGei~8&)-pm)v>PHl%c~1{tEOt z{-}>2C{(qG<2dKrZ3D=0s;qo`P|>S|wKe+fx?IcUqxH%@*m3y^TE`%pt0y925>opk zBg0bI+^L9gYE#Ss&KFqC1h#gH2LrUa=l_w>L`=Yo^Yk?E#Ujr?bhsa<=}a^%%K~Nd zPgDk{xPn40*dN|*06a4WQ%2}B4Tw*_s{!M<@o#K1*K%w^3>d&{(Na=Ei0l+K^-;!& zxUYk`d|;41pgX}W8%)ngEN*`Mcfo=QANcmc$a}E{jb_i3eyp1PHBld50LPp-&(B4dH(a?iue{Go;K8T2% zOqhg)Z&M$VGRe0vdOe&*!@f>E7t>a%hX(NuxS7J~)3kQdeW4y0G-tr90q$C0=zC-jhc& z^hZ9_UwP{r0Y%o|U0MgcToJ3EA?@rD@FhHATfvI3nQhHy+0USHP;?<%V!FRFGGn+c zvAM|H8*vz}OA?^YZ-xyq7t5JZ{Sj^W;zml!S#w6rBehW%33ENuj_ef(^A9IJ=>;jx zg5G8?D?tvfDJExc@WxE@SqJgU04d?A-HDkje!aVbu*wJsk_MBchTm!?7CUl!7HFA@ z1Sz+Nx;hk=Qa+xx$D`4HD^2#d^Jb8EzxF3Odm=H{noW=Ho`-1Ir1Jn7(s{S`9CuZf zL(6&59E*u5(o(lWZs@d#?BFdaT+PM%0OrY}@jy2R5Q%}2-$(aPUFCO2Mm8tHxD?J2 zO3M$D>rnvP|0-4$k^P&2LD0-!ixih@sm309JY?x6wO;g+26lX}yRu|E2ohc$?8G-Q zyI^9$BA;0GfWpl$tj38XeLy-@cuG_n{bZRP>Qgi+!M+$S%T&i8ZwndJIQSljq&H>_ocp4=#-4epYHT}zn#Su>U@-@N-LLOg?j91fBb z+xIB`>CCQ4rEPYBYReYqN%r6ES~uNgt)-}Do!S0PbAa-|kCvzOw=RCK9&2VA=ZxPl zd@jPW4yU(n6?&}2O9&f^47kW&hkZ!m{qmOtc6lw1}gyj4m))M;PRZL3rDnQ^ngs&^B!*#tKS1Ad<_rN0v_HV-FlR2O7E_UA~rwb9&q zWwgo`i|gGO-hbC>yh!OXn%G^+bO{9;FU{)n_OwpA;Xqh3wfYA4_ib0B=hm!i%9Z_!fG&ytv%r4);e+v&zbVp13cLxum#W=8$p?$>0kKt6|@eLtz`+h?Y zp(?B1dT2NMkeBhx=Ad_}F|CIj*xmuHQaU=xpbE8r`D@JleV55GVEg1P7`3Le*r6l7 z|CNuqGkuQyP);TDu5LWy>LW!Y3VvC%7mq9((4pQ+Vi06}_buSS!ZrLOGTz|pzMKtu zm~4z>(vf9hzq&qP4XLPodpymLKIU=c588gN>g)o|%h&WO$Mu19gBHXl?q?WK(av1-S88qa!!d*RoS_sYZ=@DC{|&FyhZVecw&h-%savLD%qfhHN)9 zdEaBuX_>8QerrU$T+b(ybp?j7Ua$JOQ$eAcO4na(8o@TUa*A&m($|Twn-`~_G-!xP zOYrO(My*qEynTh5$e&TxDYqb>yp&{C+?~YK*99z|cn}4@J{l#R+Zz$pr>=K|3+>jEJg#T#$g$;`Z<;zOaE$0<+`~IaLTRUGA6SJ@$G+& zlbknDj2RVL0}FW?!d7(o(e}_mjOtJsWXwE&FIXr_w=?(_N1E4`S6+n}?YN$W zV=X~N`}~?CX>ULzw5q(UJwy5Yn2DZnv~K_VkiT0O9On+;rzzn>5Ns_f5>3!8XeCU) zqi*3ZX`%gV9Rxxl<&_w(=RIO#&TOB(G)|82iHm;NGgxwGiJXqyycXEvG4P!TKs|m# zVT8VC8qFfLNp|GH`IiuC;Q7?Vza7Wk;JxC!{@XpFe`_QkoF#N$Mbt1UbN;~qeb&rt ztNQH9TLxh?E1W5s@^>l@*F+|{+FK#yX*p5UBKwU$J!Q*^8;|RF_z0!n-_;7at6k^f z(%`P4-l);BEX%p?0~@VeQQ!5!|3Wp^q%qpRmlQ(`bf?i~4qaF3zfVs@J7sQ`tVHcu z?a`oQ2*%V4XH+v!lM39NJb9xdFat@AmnNFwM36A>wY{-;Q*K_Vv;#c~IHw6Nx27PS zFmMX;^mgR3$D)6BB!j)E-G%*V0Cw?WDxdno8_*0X}`thgdp&jLRh&xj2+b zszeHQ@V*?pU)1=;7F+Ezq0%>E! zL--cKo>H+3gMw<8;piPjs;G>8kqA9(n{XDWTx6`{WLKP_xLn#9r(O~``0d(GZ0mjFd+b-AWPdpQfWonV1cPKP zX8*j6Aujmfi#SR98}!EvNE16x7x&D(V59J>tr=e?-LEhkQf3v zVuamv6DlcRt#KaKI1%Ce9+G*o8Yh01p9K31JSGUn{4N33nOE!uPmRBX%- zH3ldpnZ9cO91td56I_rgBR1ZmyK4b0e9~=IDGP-oxgU^W%S%rSyvD}g5Z4!F`uy4ZG6At2@d|@|U@Jbq>sHk?^-qwY}^pb<*2o3YLH-zb7 zN~psda_#f|>k2KgI4w^uKLrI0V-ui+yr?0z9n^>$8De4(TPK%v!%(qo$G^U@j5zL# zWd7=183g6DX(4l9(m$;A6Wmx>WK#%gvG6x;jl{GSDOT z9)1!Or$MA|^Kxj%-F9dHq4H4$BVg8UbC(j2vBySii11eL@{NbNJZs*P+x)Pb6Piji zF5TX@%i>t-2ER336vkOHy0vwBY92-h9qFpeFu7X1&3FFNWiPgN3zljumSZ6gfH?c@ z>o?lm*8wqo+hB@%B^Ru($k$UlD4P-#P~ZUJ`s;R1b81gjhm@jwe5r{@?d6?8Du_$a zW|0b54n>vH7E+S_SoaHRdPJmfSY_O!kgM0>ETH{uhr;nLY3WDzyIG#j6N@dCPi!^w|szjyi(|JuNF*lC-b`+UWw zFukWsOE#ieQYEIbTwNVyGDL+oVjbP0J3WMmQewtTv63FRbNR?sOvI49WnYI5xfXd*uDedH_JRiH}ev`4dxT>!Jm@3 z2tQDR=^cy1`;9_Y&npuisI-s?b4o1%7~4BAFJG)n-D$6PH64+G84bY`d%XGV>xdly zxcl@hlc{V(V-N-duI1yibUiXm7v$KQ8-xt4p%MZsm4q3g6tsx|v)O8(>OFf|#Ju|s zoBtSNu@>Uv%^xpmnUbU}xFTz;sD4D}8jEl-$7co1am(OVNNHPhImwuB2)v|XCyE*% z5mh|Ts=R$V*K}xLNE0PNYn?tzfKBDnhX~_Y;5F9GS9Qf zyEbwg&~C=jYl?2Z7IH6Z*@W2LG}iYr4Jo#36e|(HxL|?1NC@eWN<~bQ1fF%S@>G6I z;|p|SfQNMY6BtE@o}|2eN1I(;1v4`_vEj|Gg{EQxgZq~rm# z?At9VCirO;V=hP}bjNf|`F7p!m4X?JkHaD(P}7O9w;=6=ZlgR16Abi?Wo$U#6uEPYH+;kj6GW^1U^6dupO~|&Gfz0(Zu>Zk$ZHw}YBCKbV$7kHOZ&FC~DE-Z!9j^NcAveJvB%dVZoGKj3Piy{UGktxb(6^RG75#@Bk)1xi<;)V9m({do^H=Lcb z?-?8nTCzgG`05cFIr(S~v>y7po+YzK!+TR8{5;$9nz9i08k{_h>ZogmZ@3&3G9HSh zZWE1j6Ze`YQrZuLLq)-4^jXl8m3~&n9yoV*QxM<8;@{c!y_%)Cn3gk%`JX9m@}|)W;7OkCJ-p+n(MUW5XvO^0&PVwNsAUMRt*b zHp1T|x)bl55`q8p(B`}@szdEort7rJk=G}Kol?g1O&sCD_%v>-rdPnF*L*Q6bMG$2I~o>mY}WTvGtq*Qkxj!gKvw4FzKd)ghZ5_ylL;?91VK(0Bi*@@hCOp z3#YrsYihWB^|J+u8-I{%U)%12zC{vmFhUl@q~QB`zO2peB-oLqlMOa2SzNt<|F)Jj z@*Phjn0Nmi3h-#CdyUJrFtSjUtM2SfTYq_EdOqBdJDl)V6)=xZT4dW?wovOdi~rsF zO@_&;D>R8lUE04i!zq)obzM2}XA=q(gY}W8!{BZD;Yu`!IUFglR~AJJn#TE>EiUAL zd=%dZt=`Dt@n}F3v?DQ4ilB@n*-V)BZ*oRU>ahnZ|F_A{Z52i-Tt zhG(f~*h#ff@9Sx)P^{MrJNR7W^Hbnn(9Me;czg3^L++SzUDc zVB5#-{pgYn%(!QL)0wC?)gPBFjBxR)#=X!riZ$}YD9_zl$BT2F)}pt(?aDVvO?843jD4Rez$&CPODcAq7}H??I921i4&yvS{cpf+YHL>>r0@_Fe|zHNqHObf;5N9f-< zaQEVgFlU^%l80hNTmLeRSbwI8hmz|GxB9*-7ij8k>YSE)2^F1ZJuXvVugcYV`uZ$)#ng<>KJk8PjnKPeMJqZ9t?{5SDsiLPa38(pmNF|H|S9Gl3 zW;rWXh=AHfw`F)0C?(Xhxr2Cfc?(VL$dERqAoIkMl)w5>Jnwgld&fSDO`Y-;C zzRI3lPdeHSr^PP=UoelOhE0Sgbn>C!=ElRXbL}a)9`V1+St$lLJR^uz8FEHmeU!{! zU-dTez`}G9+G7k_N~Xlz zM8;sKR!Vgd%XUxpaUngSvSzCX?wFn}TQlO8A3}@~ntt}}fxoqjE0$AYv+Ec}diX{e zItxpC=Id-t@9QEzSs0k>)eqm zT;(H5StmVR)9>>uY3TWQyt_-&;6_x;GNnIec`GpF2RpP?`%e>Uu94UCu5kVC+nJab zoX`crbgJa1`FiEG~>&;WDrQf=d!{n}S8H)+oN z%<1Y5RxQWyW&-0VUAUP>EQ0iJQHi7gJ$ zG#;>Y8S=2JYw5WSR*smrU{kE2j63441Kf~sJK^J7;Ha*;cDB3!ImSoq_kdeY2DLpF zM~4%8K|NRg`ydyfM#zh=Ey5qB2@xYmfXIyB=xb*dCEaOSwrm=iR)A#OJUu|@h0VfG z@6_Y7hUQPZ#8rK^y1Iuwu|-n>jt#hz>EVh|G7Dw|n;5uMl)&Yb)re93kyOtBs?{6q z+~TJF3_b104SRmM35mb4!a))$8T@6A*T7A_1_P-?s2y=rFZ>`uJ5&8vDI`$VLB)S~ z=k~q~zG6RRni9?l7YY|SSRN-0oPwRH4>`DHk!Uw434Le+Me*QR8b^*yhmDCNa<`28 ztMS`66_a&Cp0Y~Cw9k8(ztb{c@W6|Z6BvJ#*Z!Iy3Ox?2E#=*6rFb%(+^9(#5C}WsACBX(Z7H4ae(Gg8iDZ=3RJiky?ak;TLA(xRni;8(eU8HmC=UykpW2HmP#ryCdZYmm@+ankV_APdNqqIqbDwL!49)O%r)7eR9p!qcu9w&T!>Gq(TWaHeA zYh%-f1)Yx20V_{7TnW1jeZap2SCz_SQ|s*p(LG?M(MJbfFnlVWs?jxkUt%#hxUE|H z74tJ3>cTL#*iww_2y}QSxEuIZQFq^G|Lb7^0@#SHUB|~3W^50GdKchyYc-5f|?{)q)53k9f$nx2Zg+k~5b!DGyY z-mhwau8BuCYEw$olruDZ+gnZRDLMdT1C;t%j2wgxZ8%A*Tnn z?UXyxNO&Chzbq|y1e{P8RpA2=M|Mgy?JBbPUXtK@YB_~Ef*@May8WfFGTkrW&odZn zcN({4YcFwlvR3|DFP|kTOL&mxDV=ohB*cn;?6WwuV)g&V%57PY9=ULL@$jK)Vm zFr>KF=QYV4z;#nby4%<=ETSdv5Hf3VksEUO^>KSk0$QLmBBl47-?$&omXZ&%%H^>A z)cU3WkWz%ih2`TU{VdZ~t_K7%!CHRY8ip9C0SlyPK7|B>?SsKVbZ-n9>2`q%J)e$p z>L0twk{#CM^*A_oSEJ*Df%PDJMOF926x&qKBHlF8z2hmb&TzuITgmS-gDHE;F^)in zNi|9bHlS7$kJ4-_I#v)zgY7F-FVpt57};4-$=$GuMvG4#ejE{XGCZs_{iig z{F(v)EW0bO-@}sxc&7WjV0!J@9uuNKsJn_S;Brtq-%^oP%geOHF>51pcMPNb$_QlswV zx6>CAx^;(#Y$!s<4g)7S`NP6cfTWM!=sWy^!|Dx*y^=VEFY|3$a{tP>m}`kta-MbA zrjSrH9bbmswL0s3@RPIBsce=NMwY_K{rDGCS1CLwxT)zC!;tm7qGzebhO1p@v?yqg zxa!*m=1lWq6mE;73p(kVUGl1n4HVu(foTOUm8jx*Sp*@fm< z?YjS@u$q5g`S;A~erfi##6E&5z%1k-rAnoAShywe_R-| zcR!6gmq51nbFx;Rx=@3U7d@eu6C2zl9v@ksarb9yNCm4wVPqf{9S~I`OL9Mj;<_xn zaMDcFB(Fh&kBsm!@^PJ6SlUV@yM9M=_|E1}nnz{p;tlp6r6>RkQGNgBlwnS-UCH!B z)T^G?A=%2wwCD4pBsO&V3j(}8e-3?#T%H@1y+`q*A-G={`Cyz*M9qS?E9W`qq$zsK zXeh^@*}Yplg@Y{NQ-fp9y0j=3Q}U@uhR6q6$w`a-V&(%|IIHK=)mj%QlQ#1qm!YJqV(172$UENlYU+7o4MZ9%q23?a9k6MB( zhj$WmjNm_iL`3oHikontp4I~njNg2B8&O_5Dh3gt5wx(AEDRn#i;wyq-wb(oDIGBm zWHqNX>lbLz;BzNAg_gKTkn4GlP+woCEZn9qNEMGX!_oXUGv3&7F0fULUgU)L#vHCZ zRG!xkDHG4_bZ<)Lemg`tc{L$#@4Pg3*&8vFI&8e|8$szzoZ zxF_^^KeuRqZNXMP`(n6X&cyXB&U+;R?ca!{@2AQ@;x22GW3IoVA2taDp}jKz@L0S@J%3&X_ed}^GgEiIkl=PG((P+PTXja?!udRq?j{qjhVxSI zG&0(fyJx^Ck??#?ixTPCVmaVleJzip6Z&xG@s-oaZh(5cbQRd28tl24Fp?A0KjYBl zSb(zt@nW;3WS5;6!7q@qU+5^Vw6`OGf&vW<$(OZ9+%+tsvw*=8-1pt_0V(r=hqL-o=Nbu0Ky71%vO`skKOo5e^Nvi(GS@hZDR_NYT zj~ITz*&6%&Per}F$YNU!sXPO|EWIzw)ui2v8k?5K^lQ$sbJF)ja&42ktUp9K*4|cmu}{`N;nMbMs0k<$){u zfxlP{VNoJsU9)YS*?{LEVne^Q!hY+yA`pWBk$~`2fKE}VsoK^;QyBt`t|)tXrZ{2A z!@*azUXa~jt=Nx1iLQT!c-1mhUe_MCs;mWoA z6KpoBajfl>D9iT7;82Gqm~175@P_B_Yy5$r-#}M_D#BNxuOP2?~lEq0)m%D?n@*@$b_4S>Wq5Wxu)| zc4(IVm@_M0b=HNewsxFmO<=wBq+9a_QPsq^LbhSX?l?j`T9b4Bk8yGth-LGM7P-5# zfs2=%T^Dmw?$eu4io%~-bx(iUT2Z5=gJaCO`N!_7#6y#p z3kl}w+-5=dxPMG3(z)!y--;xq6! zagqpY>SNT~IPacTn~F+m(9x!{d3!iD78LAlsY?P|TuID8;*v&u;e|o`ogeKZA3Vy1 z2lQgfwd34iH2%jBdXE9H;p0^2s~!oG`-5Zew%_fwZ+uXS=t+IlavOssxc>MyY<`5T zXKRKuW_JXCTU{$)3OAj$rR?igR9J)Rk3w@X{QPhxIQtqyMm@b#NsL<^4M&p$Yj&CB z-_KzioNb?MY@UnnWqoVzi3g}{@<>j`_M>k~LO`r%brmHujuaC&Elnw%byS3~oS2YA zEtm6ZTh|fwiXP!)Ld$LhFULc|M#a&$``NIdDdfyYI$A8W)9e0x{&x=52|k@W7hY_B zY^Vjdl{=S4Qk(RKhyj` z?CG=L#6g(0pvZl>Lh6a4*TR=XIX8vyd=kWL=dQFJZb|jx^{`-vUxXk1_>YdiEY3_?uKc zuJB^&MDAhS4XEND9_{*iGcEO$;#x$%f$=Yg_g790wW|2J1Rpw3j(=17D`Rig3udTg z<0`tA#V5?rtv6Pzg?OSXTn=8B^n2i9^HYFEt&qn@3+s_F_)8Mhu5ot-H zw14xc;7veEmK+61(*VftNUia^7Lds!}wMCc+z%+)_MRUae5!t#gE>N%Lp zI!{#689~NI|G>0!`9<^Y33vj0UyMv$iaZU85@~IPmWy) zty)j!qGuU(?IAyH#Iu(0Q>N}>1MzAt$bd_=am>1V$j`{WQ+VOQGiNB)KsqFU zp;nhgKhaHfiG?dB%Q;SfYw#KaMKQ3)NA!a=nC;CKW84)(%O&G@O-?m}*wcTnVA6Qm zdeJhub2C+(QvY_FVdiyW|0wg;ef&t(aA3$YX`ecB&W(O}x%=~GTZSDs6g;E|ST)WM zkTO7)7`M|OdTkkv^k%`A<`UnaGnSEvV>vuk_h!z4JCFJ27`-nv-1P4&LzrU8pv(`& zU$;Yil@q1uyk+aXb~GndQT-`=!ITy@t?9#v(r~hwi_Vm!i@_MC3yXf|qX$J{l;)a? zV*T}1D|yj1^YZbsw_F^Q^&HaJadQA7^YGFaic@KaLpsyRo`_9;^O?V=H6BDPhu6_< z#A755c_Shmvp^_`<}k)3215$0x`fDMQ-3N>8eFBDj&wg%X@S8tQ^x6S|c2;l+bRw z&eF%2!Xi*qPnUp1cGMq?$m#J*>@SzGJ&-SmhXq>H*m6gVh#|xror9>o9*TBQ5g8mS@x%j>BJ`78)w-t<2j_r$AE;a0k z2N|)%m6i`rbyB~7#m_o$m_yzaDI$E`iU(`FdbUP>?%QR8*tk3WOe4ziMF@21iY1=O4octck#OSSHPjHYMTVJeRMOKg7$FqQMZ?I5g1xgg zKFXEQRn<~VOJ$vY&fR~gC*Q-rpZJ?En2CHwfb!vOB>X4(FvgXy72xsmajNNFWHQZO z+4V$BfOO2VaT#68*iT7hfx{qz3U%5TaiBJ zI#eweclnfGz5Y-b*df6Ba!VlU%=fBTE9+9U|ITuX_LhcPR#)^_oE^hM`%{0HZ_N&w z^VRs*To$#7&tL*BWw^xp8iR$A-GJwQ&d;hIchPpm&xt94G7|0|Za8N_;zWCmP@a`e z{zM^0cPRLLQh=V*vo$Wq`%gi1VQIyil zr%*c06zFW`QE86s_HlSkl%mkbF)vIf&Tk_Wwlpm3FmmLz;+bg^bwp1rLdTHtC_fXq zb2pT#@yjB*=2OWxNun*dz4igMpw_lBysBabo$$CN&Em63fU&;G&#B+IPNmNw<&#?^ ziAO%4=;cGKJtcZho+mNn-v>Zn=fo8fC?ZgE627w9lW3R})ilrdxNkLo#DQ9ONPlis zh)dAYwO)H;#%M$ExX#_iKAG*aAAYw_%f0~QbX@PHL~n3sGJhPgVgEuP@pt&zwD*mnB-^$YI&|rIzc@fsUqOUtnoILp=ZkX(>Qh78xNhLN&#Bx%z`!3lEuIT-WCF z7plR$4R;CyCvJbGZxiEa7;zdNzbQYK;qn$gd)}>x)S{uOb_J{*?g*VujXAff;WoZP z!Q57(BGQP`ZBkwY-U%t>k}1}91ayM&eyUJ*rrJ165bSapDj$tgjjU9G)(DlIQ@asp z9~75F?Fa1Z8+ZDo^Ee(|mem!|XW64o;dn1xX?MJ8bn#ygYD~%1s{#e#M4X#5er;mg z{VEXDD$Ky4$TZji*=u|MQFqgF(LEWcJ2S(kO$P~BE?c-Q_WM7JFQA|9z ztSLLxk~V-4?k#gpbL04zGEkOf=_~(wM+qQr z#S$|;j|SAgABwZv(h^Rs{q2FM>}k231vrV4nVuvd0Q;;c#7n)t1)PU^+Yq2!=Loit z?|Wl7VjIy9y5n--Ou7tlrM&G`0U^v@f>~y`}4na=bb4Z#^V8&u_~N5v#Rv) zm*N8H63n(ya!fn7&0n zz;M-VRfb$((xqFWtr}6}9jCa=t*f9PR%%4bP>z%Al{lkLeXVMs>DaY#3MBOSg8AoK zXT=e7;>A;=6n!1w;>G`Zx5v%g0bxU0_3SbG`C3bl$X@~b)_*jnF@;?#?h3vu$IV+R zi(izjTRvULmJCJ~$T_PL^(kxMA&-HLPxRgCzvSAz>bqJSOj3SM+&p41?6O6)^id{Z ze#YNAI|$a=nz~-M|2Uy~DfGxp8uj(7>r_}V%k|hr3S=hYMzcSxfNM9HD`P3p;dxp1 zI@M7ktJ6F}5mOtt%`jlR(1MbXIHV9*WMPvQJ;>hk7CM}K% z09AUdUJG}R^Y78sgb0&CuZa?GVM`KG%-U=Py52ckt#NPHBYF+ zznAHNKqF8B9B%y$BPc)_<<6KoBNjZ;ut(UnLCMG$UH4Mv;R)`v_||T_kwvNwbNBUh z6juom@vH#Xe8pc0`Bi!S_f^v04sfhH_vz@#T|YN3ePMff#_y`=+M`1tUI1^M6s3eD zRa=434>fY;1sPsKrM&MZqxoCQxOrd?4EwJGAp2)fbmaU-&B4o%r zcl?6b?sa(3l=LknAnvvA2TNGJlF3*?7 zgWEi!>v{Kf<126W7G3520vziW4Gc*~=tMOX=f5vcZpO_kejys>YiJb%Ox>Y4Yq;E* zuVAGh$jU5hJyT;vS(F!>Ne$*#9wmxKCnen;3ouY9n-<^L2N#1glTb{6ipGi9O4F30 zUAf9w%sbtpNqnotBU;JG#V#gyOm$ezHJK0I^I|YHP9;%-=~It&eD=Jphw+GD4GA0^ z6#hF&HOKSm8)|Z$>&}r~o7NxO3@t3Lr^#pG(*o4Fwk%E{JWsSod2oLD_W9H&L5{czEgf9QYx!qus0VNvz^y4gcATyCyxM##ls z*-}omokU0o{L+UP7S75EfovJ~Y@}H(Frc3*AGUMAKrCtD5U~%mH@e4RG-+H&Al{^a zYQ)Fr3l%oM6H|cDVl7Jea`|Y7`40T0}RnuXzRu78G(KI4LS^y z>vg2)VXD_xe$5Lf%TxH8`A2X?+ zWUy>_)n8oSw0LsTwFD<;PqOQ-$7t)W#EV6#&ud*zY_lyYY1znuMZzQL8kVSBH&M8^ zI;~%Oh^S2Xk`V$iAVXDCYLIBi@I&CsCx5JN{b|rvkJ-)6o)6f55A!U5e1J16#m^80 z9h>421Tg7#l7<@`{jlQxC`Geq03`p6w4kLfq|bm zjaJ#pn_2#d4W{fo9ln)e;3a2mpQnE!dR@i$GV*fVrJ+e;8VL-eoVk$@en$NMm@U8o z3Y%MO#z_2@0TEsc`+|uq%SeHwprAPth{T*U+!qLOT=a5H*(7!S0ZiE-J|U=X+^Sy< zI~AkVT!g|8JN=bL?a_!*BP^^YQIg9{x*>CVlpoNc0W!NvTEay^)3@qFbVdZk+bsXk z)BQXVOWG58gshsf@5Z6AqDOgo$02iKINdP54+g~Z^f7Mr$9 z(V3NK5(}cf@L%C*{uJRHG6Qngc~IRnr;?nU5QlPecU6n>;;RxlH?j?*RIYRvj8cGv zD&4E>{F!wdy_8&D5p*UrOxcvN#$#V4>!G#e(RDCv8C`u3(JWsK>RMpY!m~T~)yQNd z6W*LE6xP`LzLOZZE-8g@>T3oAuu6K+s?A)iKQwYm3S3C6It%)=G*==^e zn^V!E0;2YOUH))g`OM$>sXIIJAevBQ8x_O-g_;lqBzKG*Z?|P0@4gPqx1)t14HKW> zGI5g3&c*h=tDXlG`Dg}hOwvi5J93|EMy`^oqJ^4bbfy`4*&V;f{T5Mv8}aKyiz=Xf zdJ?i9RUxQq6Zn2*f1H*p4&L?9K83i`cSvTHL?F}{ z`wb|*9{v#)^Cn-!+V%F4{A295JYOTD{qlLnh5F=`@Qm5(FZ&QgeJJ;K4Rt4=DQo-9M(8~(h$-?gcL6tH#Ct&h``&j5#R9a zq3At9DHk)9aD%z4>E`EoB$lYWrqgQiqE}M;76VJ^SE~4^#!RO5PCLRty*wI^gwm^Y zszR&_YW++Ab&hb1?-WI=P-8r&A|W!C=}U!a{sMH>hk_J#oama@d?k1^{rnB-w;q$! zwIf#r*WxUv`b>6MfQ0H9(&97R>Plz@frMJ;n|2dVPEV~1S7uPX8E?qLnO28z zCGKmNO<8;TVZW-_;V+8yV~3Sh7V~spDC(=ghJVpp@Ae!Ss$9>5>F(c}T+L9Vo(B?W zzk72QliQUl@jDl?_jJsbym}5n=o7ioCOSS$VK^37ZzeZ~P@S%pOR2n|Rk9=xb{?Jc z>o|^TwC+Zqzk(Vna7=(;6>kVSR?$@WeaKCzE9&X$tn$Mc{j}-)u9Ma&ye*4e9BllE z>lS8$MBW0VX0Qw@;2-fPgX&wGSR)b#dg;K{ukZndv-tV{_?zo7Zx5+hFh#PaiQn!v zGU=FTI!)sP=i;U?qmT$@@Jx3R%I6621*M|8 z9_|cRF6=Zn6_I)dLUkSXY(6fhp*bnbf6M25VerO9grYvrX`GdL^L*ptYQ+qxL?yw^ zdHBH1rO4a(eDOi_b%iRmlTwgnaiy&{3B+5*H9XDt@V|&2&N~$RNIss^dX^-7N8dDd z-|COKnjnr$rz-xochG9A&FUbYa_9lwfPtG8yJz1TN*-Z6;|(3_ir(2ZS^*A%)Ge(P z%}1e{UN7g3`{3i1iYgK|yN$%}va^db(YLE3;VZ9jH0PxIcCN_MU9xu+8h-p(K7}Vp z5eDdj>br+iKU;DB+)PI{NG!>&H?-Cdn@@!Rb+I`2P5RqiC`nCNo=Pd=68_|NlI9xA zP!V)}Glzcx=k^#+2eBiZck(YW*}xgcl;t;7sOL+TS?=dAk7p}~0x$_Mu#(WK6rI?` zsr$&DYXa{5SbkU=iThXKYQ{Yv%Il3qi8^YzP4^Y&+?L!E9)e|Eib_=-jURgarDQs6 zyvtZgOnc!I*8Bh<*AIq&{P~hs1C%tp&S@y?D5r`CCQA4ZC@chXJgi_|eUKoOkD5pb zaVHNKmpVErTF+C=79aUYOn?bJdJysD$#mx~D3Q7|O%*Qt^XTO$;w+}NI7b@hksqIn zKCQG*CZHyo^nAs78QCngy9#Hyz7A(C7L4C3;O1RRLKCYsJa{0Q{HEd>$D2*JDP6LQ zJ3VVY=m^{+6zCQ-$k0nQD=V({H-&=g5VJhNqSCw>)4Q0Bg-IHgx+Q5Z+lNv!S7s=y z;K!lYwbE-4XOzYR`9UoGO&(TpaG6F%Gj1gcITE5QolFqkAK5U+j_G1_Oo`B=FWE3a zM|R*RR`^iQo~++%WE4_tek?i1i%G16@D}ii!+G2E{82K0F>o{;;H+X)?j$k+l1Fy1 zzb+{06FV7#Wsr}{ZCiSa{VU`HsS^ytnz=%$Cv|mnaC(i73(QX8G1XpK-AOf06^b#{ zqf(Q127rzp=mCr};U(pO_-9+rB(!6Dm-Q+r=yC}P=YbfwKc{?IDi7tk)AH>xxGcIq zNagiqh49$X`L766_^0%q!(NElofPrxq@?l{3W*>vMVdz6Rjz_o-XWfKuS_Ksx4C{r z>zKf-adzej%XYg-5g7T(ATo#MPNjH~N~f?@w5b0xH7{_`tOL(2o#d0gQi&E|T(YU1mz^-7_{N4S8)ILcFb~I1klq4d zS#RYqq(bI?NsgQMxtcH)a=5t7LRU7v5|^X>S;HPPF;8#)JDV=}0~U_HtZOm!`5#aT zuXxK5DMQdR=>hkzs2Q!TUZFI=STK`al(6eJyy6cxGqO z{B=ynyQ(;?ri99Pj>sGapVJ`kP*^qP6@N*pu<{5MAhpBKg)ph%Hbakg|hnff&I zZxiQf;<)z~V=jr1Yk{h`J>m6mrmt~weqKb-g^`J}NN#9PgGshn0k{KLkPXMvu<=C# zxYWaY`b$hGUN>J(iw6@us@0hsrA!PMPq9h%Uo}&>68^O`s(D5)!q@Pqy=gV}L02Fo zao^uTUVTt@)8eTY;xepCc{91ZGg=sIU2&|^h_fwqR)AkEtvjVo z6h-H0tHQXkGoW=1aqmVvX&78G-cyR6{`W8SOW^Fp(H-4s6{_Y<-!>w>2pk;a>(d52 zv$HO3c(_SB)v`{$?E8g(2hViSvIw6`Y;0jA3*wkS(;XC@CjtY1EJYllFJO)=B_kqg z>*L=RFjdQ*iN77@lUSDgqZmcY=zLxJQQkfakYtdvYaC>P#d4L{r{K;h;fev352TGN)#{g|pdO+&r{pSn^j_?c1oHE^e0i5gGsa@=~)?Bg40BTeb zTDI(x$TJpIR0V|@mWj>Z!Pi^?WROO%0!I*TFDfrMX^F%PkHy$V6Z0=1p^geh5|~B6_XuIF&g)78F#Gk$b9&XE$d&qnII;W zi`(lK^_&%9BnX}@0-;7jD%&5jQST73)1)QAYtFupt**nrQUjpEDwIyW@Gmj6#RGL}X+0?d}P)tzHnieRN7UIa+2v z`Qq!5m<^pc%bneV1z2>6AseaM{y5hsX%s)S3SfxpyaO0aXohUk$p@XtZ45`T)XAVL z6cNg4#kEve@f@$x?2vk_`o@9nqqbRB3yEDjVNL{PPt?>D1&82<^l=NO9>~NqO0|f) zF}mr~{vnc+$od_}*4z;Ks$^6yNmN}77K{n-9w};2t>jIyqUbfaaAi_%J=Oz=aCjjT zYF9sBDnI@5W2Nr+xhbVrAdcFX`1fE#&B1vbd|ZgG)g(~fx(3&d$o2JnDi1unU}E=nMvieLcklRlzmd}LN_LWyfuLu%2lzz&kgS8 z<|r@E-?inkCrHMs>Pq*lj_&q_!j(PTXj#VYORV1Pv!v)S{oYv8bG2o;Jq2Xby`M~y zcMrhjS7ja=9iNv*hE)38482xMmfVBWf`6C_krvNQbP38VT*jZKKM=CTz<_>Yc-I`N z7S58#H-Cnt(fmGv5f|3eyy!zu2fo#*XvY_Ern$gKjw^6r&G-ovhk3>fo$M1_EC%D} zP+xh)5(kuGX}M~okIm_Ns@5I>DWD#Tdr0VshNfxTt?$zX0m0psP|wDO&xw0idmrGe zJE|TfBLWfgwz^AOgAjd=nD*XW&3gFBiDt45ch5VTNmv{Z5pyqSXo!=1v}~^n*XLYc-3uXf-}DgU5ca^ zOs+EW9h{VHhW;oJCM_<=y65K|CVMwxK3;`HKoGES1r>BNuz8t=R-o!c=B%2ix~Xuj z6j3gJqs>1XpJt+9{jl(k-zqYyEKtU6(FT3M4q+Fp9|7vjC0x7d<@l-9jp2B z>2bd<%+dZbvJH2#8@%5CHd1?tJt#sCEH^L8Ke(XFqYqM0g#$m`h4zdCHba-&G*BA| zvm*wC=;9L9mAc|gVZ1H)y{^*xg$^=b<7yXHrftCHA3HNM$O3Kb+UU`7QJ+w-w24fZ zmwm!r5$4osmiv+R^Qq=-$fi0%IGfPBqe4PbG%|$Qb}x_1bR;4tDI*iUEpL`l<>UW{ zpwdFTC^{wmrA+Z|11~}_LQyEHPP^`jn($Dwa11IdGpn>J zmgOTuccyS=qbq`Y^g9|H>j7L0_x$rN$top!RQ|`quc^@}1?}foEkB3jrPpIKW~8U0uYL8SBAv$fr(?dXK@jF#NN|FB4+gO=a}n*w=~bm z3+k7}JoZshdGNkFP%LZ;(VYJ0{7ZPMYMF&uatzf@ScDYD&Kz_MUTaWt0E08ue6WrL z0m>Ghb*EO|(a&?82_lWp<~1M2e}ta%3#7tq`vJEW4$9&eZ`_(g*MQ$1L9$kzTz9RM z_1B!bItHqa>8wc{E^bqwDR$c8@U)Nl^55X>5TSY=KP#S0KORie?aXYxF5{o4;oups zEMq^7Sb%XYY`#eqvV8$e%Bkj&E$&qWlQwBUOoCC9z`!rl9Y7x?e)K<1#ss$lh=G!WR2W1{@IKqXo;(S^Z4WN&_!Vwjc4C^n;RfOjMKMo)VSZt z1BQLA4IQNkM?^w%VBz4t>p@6YpVOfE0>~pLi%aj*%$9@+v%$P#-o_ItN$`mL)zyn= znHotYL%^+F`k0lbgj%?9d8;#<@I%yPRi~HAa(qeP={8W> zU4UND=H|HVAY)GEAR1U2I}WI%{kzb%oW(J!*6EXzySR_6JIAsb8Zkvfu0JKG@m}`) zl^G6fTuT11=GRyc+^5fw6>@a@d<=xhvY_a*_C>P!B!H!L)Axb9>fHP3Z8Dm5Nx@5r zfUCrXwn>lRC$q4vszme<<$yOgw#>uv?yXlLc181P7jrYPrA7d z!>3FSlwDAEc44XJn(Wt9rYH1mH2?2{V?*}dZ8~-;=@Gs>=9;lPQpNy^l&R_I#v)5z z3h$ri9q=09L<{8gSxsSa*@StmSgb|4z_W?O)6lZ=C1QREJ4Z4ra=wbG}8nY(b*`92;Qh+I(Gt!NoIs zR7FIc=YN^sGW_jcAmrrg7p@yIGUbXrQ678^i3df7SJ6cqd2BfH+g}_JI<^>F4F5#s z$DYnpS}1y;f|$cA{#`tE!Os1FOJ>5QBN!mYb z6_~G?A*-O$u`H`9>saQ(h;N8bq^sK5poCBzZ0>Qb++aPXH z@^|#Ygwplv?TaH{dpb-mU7`X!!Uq;@Z3@^L) zRF6x|5>&6x{|?wyJ8M<#F)oH<$!Cffui+hr>c-^=qdopxkAM2m>gUud63zXa_xl`Y zIo1L`M&kA~C*vjL=^|Opov#zj`Yv^^ver>Q%;EDM*3zY-^E?NKq(g@0*Y58W?3OKY zU)DENrdJ$-ON1n>`WX2!*ghEVt=fcK@*FS@(=TtJz(PAxz^Px~o}CETBS~EEUw@1F zsRa}2_Wpl(Z)v?RNX9KCTOJaHf980-Y@alnaV5zSh86Y)M;up#*4Au;M31N`oYm2} zxQ}!e9oSPX9=2fXUiVcJH~SG#Hf58KO~%ii5*rReV_O#!rJDiNyuw9v6o_*2mE^LB__nr=QB_?y&X$k%_ z0B}%1{h?;}qYzl^9mKGE%@;ME6*zCmyPn%_Ur|)=dN&|@MRbIos425$cq9O%tLH;P zx#QIE)pz@@{z#Es9u6OsQ3h1hG`&XBMugHYuS?UAG}OFq@9+qeH(y_eWNAn|OBIb2 zCs^`n8_DJqdv`2Qpxsa)=O&=O7Od{y6EU}BqJmRng|Pb_O^e5tMUCx=?7-zcG=*Ud z+U$1=A4O>q^IAJyS^HFq_kg5w5;e~RgtRK(#Ls?m>YN=mxx=McQ)PcWO=k>T3?s<~ ztoUs|E9oe@@eSTs&bg;%ybb(jHxchO{^YxXQ1(5GgO*m5j{%Xg<{nUD387xz* zT>Pbi{{JVFn~Y^8HE>J~GBQN>Ct;`y{Qevwm6y$Tb3LRwrfJl{)DwtIuI9P>U1Np5 zf~jmKK${ecJ{4&55O=g>d-u5zTdvOZ2m1QE>kj&qAo%>UhO`v#-&AVuXx9#mWx6Qe z4xBlMwiyRFH%1S#L`{cq0)USS$dPb|breh5P?5I}1V!Lpo%m{Vt-rTg2;~Jruizr3j57Qku7c1Y z|G96BV}{X>rTx~+$&r#_`+x8o7S*cb(zMB72J_G4A}zaOJ|WP7E=;oT207nH4%XPz z<+m+}nljhR==>L#^STG?4{Uw%HOV!%KQ$ZH4T*~RjfyGFWu*a>x^D4~W89l70K`9*#OC*3_#Pa_+k~ zN6=X4{*$<(K&yL0=;~Et&7t{~8{yr=FWB4K@+-(RJYL!lJ|MziA>3q2dH%B8Z4;|@ z_l+||QP0$%#9+DMci0j2;wn4c8oTH)d+=|>P8tXlx+R4-y{t+lT|osL2z-C9t9UJh zLf6yZIsIFy2Rxon?Cm!yWJnoqpq1}~nFv$2lc&rQi<4k!mK)K}DVPB_*``LS1 zzA}d5iyxM%XPGyP+d3!FoDx|uxOUqTAm)f?VofmFo1Fs;+KNJg=6O>Z0K}wHBego9MZ_% zzH(2?u9@A8qMCl~^`WnR(HB;Ov6ZTEj)ej@vD9T^LcDz1wvArP7oBOu(j_AUt6c>3 z6A|puVD>S$hj%EO{YWw_f@qzB>|jM2f@SEiZcZv~r=hwny&CiEIEaRdF`AUR)9l7w z=hY0})C87;jr1LQ^9sWmp&1D0<8l(P+0eb*FEq=WZjW3c|FC)Zq2>7!Lw?{=LG4r~ zV?g6+x*9MUW5}0~uRoeDc3|ju*?78i8FEw@Xg>5bhMNh?Qtdx)Ro1Ujo{gl@VLmEed-o2!J@~ER-nWn?d_s_cna6;S_dyo@AoU5L> zJ8g5KIeGYvY!Ap~{uae>k)vJ8Z^7Nwyf}e$J%j((($k{CLvMw{<5#Na$H_Hs@4I)Z zb91vaXwntC3~Z>r8G-aWNn z(SNfyM78qXxl>j0kq9}f&&}uzRBi%dWi9G+MY3#=7op~vBl`;39B(5H&qgl>fqXa(SeOkR5D+K zow8q2ZW`tCak5k@-$qdIbwz+}EJMOvV|(4YR*g+Fus**9RUMQf7OA;h3FrYRmzAyU=+I{}K_yz3*Oy&C|pbq*o* zkStw}u`(}_UC;cXk5BVtEv->F-q~!MzYl`lE~w+S{)8Pllim37y$BZ;f9B#7!pp&S zphMvrF9u;t32WF0<?fGLX)v8Q!J;y5ZX}2Z$V&9oThKKa4j;Qn%|OL0TsW`U26)2t zor?!A9aVyLMdw@jg5g5qh?Y{@cxm6xv1b54P`IVQb{w2PGf(auv&4UBlEbQPFSb$XAx}9HnlJNBBc-OxS2by%GoZW7f#c!|Zp;y9xpBM@}9NXNTh% z42*Y8U)HH$O#4g8Ji^|y2B+H^eKWpJaeHWQK>TtaqCL*W>lR0+5N*vIYaTe>n}p}l z5PmAH0;bhs*Rq{-^w*qLh3x6Mk&iQsU5Jy$&ytlf(TxG$WCVbegQTtiZ$b<`Z&gx9w!Uv;eQT!Gzsp+>#F1ct!>zj zxv(-4gqOmam0(XwUr{LhD4?rC%L}fR?c6f&HC!KK1Mb@>3$TDE)=Tt_+@mZb9+pQC z1w+E~lwBoB$J56{wt~K&F*-K4R?R9iEG#UC*X0#7k8`;l@$Bh+B9Q}*GPQI6U>>WB zFx0Nj@HX;j)6CijT`XeAjc|lcY3imYz|dW8un@DiARkSyCC#YKb_5AKwNPZBj&z%30l#*ZX17FNMc9Bfl1_A z8PBgt=gZq%prZ`K0PS?qm{mT&&IOIg0n)bE5 zj;G@ZlI@^4i+cU$E0``@avo%kfc(A8FglUQKaG_DgQlFJ@;411uFv+C3K5}ekX2{ zC7EhUNkq7KA~g8h0O-yMAWB6HcpkXvlh1)D@GhE9%^A}<$27*+m8AU zoY(x`Kso0}ryw%jkjgZz$;hcIuuJxsm#Bu!+6>GSo>0kT9y~bkHCSdzK(Lsz5yZp{ zs#kkiyCUQ`+WLL7XQrT)YiDXae>YC3FwEPqwSB6G{6 zr64z>W(Dme1Icb3;xwzmFm5+J!o5)@O2Y&h+{tfI49xejtpkqA!*OQDULJ=v#d z>J=sG%`Xn=rgpy#E9ZUQ@%S*?}nD(8SY>%dYuyMxPOU%IMg#L2Q1}>d& z-@~;JnV70D)tn7NBd@^?LLGW=tJB|CVN*jjNpjr}E+{8|K5EmTslij%vX6Y{*-|nK#G=IP2qi2k8X$Pa(!~5h@<=Zr- zPi@get=S;}t#Lu*9~bAeT9QUN`x%cIgxw5lm zJAIbn0L!1lNkf%T0vXNd$ahsxZquv4k*rk?SQ^GSNE(_R*G$g8rm?}rXfFY5 zb5oGXnmfA7;)_O>^i+op928usg*5*WByg}At7(>>H_qzjZ5vRgWKA*)a&Ff6WQR#7 zy-!?zPHN#VJdF;x_#CKtddiK=C$ro+vo!=V+N+u_Y&_T1gb82L6F@nc6GZ6ar!Jmn*ydnJ8hJIF>b%!{n zV-IHI2@joLP!3aul2*q+5&zZZZ9ie|iY{DfR~*x97}c&~Lw~xuj6@@;N-us(rF~bL zs`l*_!H@w0h_C8-8K0`XA%#Iz9SPeoN(NuD3+ivM0>3?TNtHruhiRs)>pcx6&<1XQ zD!LY#2IsINjlXkL#{v%5RlfFC493y) zfwz>K74N!>$FW7Sq64k48F_(xoC3>C-tj#6Ffk8hi)!VGnU76!C+OOQsR#~p42FE?nm?^fLV`ED&`Ua*=Ocd zN4r%{NcbROkqDT5gj@g2WAWFM0?5gQ2$re{NKX>ror@{d`cei*{B$`zvS#*V zHs@BC;y0Zj$^&6`cy!q;*o~)yGMCCkRt$<=#H5AolCsbEy;gS@@Z`5#*{;V<3!E2Kg{Ki5TRMh)V9CuFS=)hyrC zKx)2|;&89Yrhj%@22(T4>glL_2?|)1v$$4CbK*EQf4DAhbzFcO0*=m^N`QhuPL)}8 zZ)OM*@!I5orDDFJeUBKA%?0N-ln+;+>?Au*b6@5MLi@jFG%7e_J13vHaT=Mz0L`&l z5>@oBqdbf0G5n=bt;vhgt%=a~Xz0fR_9Z2#B)7D@j9B)aco4`8)?wT5b9|Vi!Jt!U z;BjHWe$fRy+uUm$o6{TTnkN#kRkNNsRCBNif2R;sb|m`n1q9B0IIdCX@E!w7xCXZE zf}Mgb(Mw4hw2Y*D%em=`4+i$!CWFFOP4E#$Pe@|(O)iTDV9aux1#UP1Oef&kCk2WSL}+t-9w&Oh*N@mI?!ym<+}rXfN;EwY~|`juA%LSAqMw;pzU zfVHqDM4Pu4Von}_D^y;#zz%-pOwYEpJ_f}Fwfp0|z-VSQFn$gdj`;zT!Gj>C_@yuc zWnnNd<-N0UnafXI$4-ozSRiW}y zt6)O8)_~=CWe%EC-aT=)Y`fNyAo;MiGI*3w&YM5~!G7MzM0+XaBf|0nqoo`AusEKW zYW?eeh0<;4-Yb%h&J|GjkUQoP);Ba!4=W>fo5W&K`GZ)09r* zRkyAM2e`?C5vXxLcT;3Iozo+jl;XzA(i@GZ@?2tUG0&rK)DqS0+w2Ihn+&xqsFe$t z7Z_NC&F+ssl<)*{cHT9Wl4_!Awo`??SmWzKeIYqsaCdO^^^3m-LZH;?M(lyAr%{P_ zafJ;j2eIyd2-rb89{85|wA7ph;Zl zZo61@K)P(>Mj0=R84G&bbCSffnjz(uI{3&4hA4eHJ`S?yaF{JI9Wz49BrXc7+j{iW zk;HpHXlsO>%C>Esa|9}et)^jGPZwvWMrH29AK$8l#a8K?2v0-}c>+6uI{f(jSoL@% zXx~tLv13|%tEwx-fRRy_)-|*>vO!pGA>Hz3oDssHI5%&{DJwCqgJXZ3P$PXGoTjb) zRM-IAxS#+oYL6%)119ln0!%5VXZ*W zoj^9g`Jq~W?^Si)McH6Irb0Kde?0DHet%3SNIWIIyACQCANnCvNG4(YWnH6FfY&8Y zQ{QtMGewk8MqnXt8v4t*tRv;AgZ)G=+(&G9Y?PV8JxLByld%vY)y(uQK}P{yK^vUr!v(QW3s7=^}@=_Biw)`ZD<&A(=>2 zwJz$f&0da_MANP&u+gJebh95)u8)i%fF!lXG{{xUHd~6;fzTa|o+5xPLGw)XuLZyv z;yFsuiq{K~rF0~&?Ab4#GdUWfK>?Z)4<1;kv(NQUxuwA4Dhxs?Ho%HX>|v+DQSJHzG%MD#k>f6Tukm}6(0pIV$Z*a1-^J=kHMKBZ_#&V3m!pFM zpYuWG^N}6vw(CU&QoUlUR5~t}h|K=NDZS`O)~*NWqcqz;BbFP4wbqJy&7d-h$3?`z zy&X#Th+xVVFW2#s5935L=QxKt*TPYi%eIgYK1ih#NfuB(DJ@c;rYvS%)OBwE>C`YW z`I0R7!{5Q9rk2hqM*o6@q778e@Vkm%=wo#SLWkU)Zq7(p@RaykHmEFP^)a-9@)pl_ z5!Z$ak6HU}()h9Fu-1p|nC5^}SJ&wh!26i z$`*3vCMuG`1K_VY6(FPPBbcoj$!-KsWNyrA4TElWu)f+N)$E^5l{7q*Oxl_qypH4P z?$-0Z^%oF;f|fBMj_y#Sv1qDR*^haY@hcYfdfuaHe}q_mDF`Wz|7T%3W>Esr z{KODo_&rSCPxrbwuUHvjVZ=Xwr{;$#AGldNy*UW%0;H}n@igFV02xHIs}54Gf^$?x zyfvJ*@aG+F#_}FPY(6Mf39^yyZ^P91^}0zrMS$veWZF0i1$u)rq7QJsWGlTrb%6oj znG-S*@tS|Qeh}}fA0Bil5G$4ar2Y?%~Z}|8P-5on5AYuPqk}hpxKnx zAiFCi#A0O`f?E{xP+QzrI`_~kRylw02m;7x9{FgT&_Lb+#ni7q_v~GX((r_DSSjxJ5pm_O zZ_q-9mtxvbtvW0b_og>YaI$d7Mjlz=!s5UN?nkVzt?_rX4iVktrIe&?_^>QsH zyt9800Y&~RsKtd0+{%5$I@DNO3nQZ4qkBdq*l=;*4F2(qxnIae3$s6s;#q8^6v2d_u)PV9(vIeoVq*c12c>k(A*QonJ+F3C(Irl2GB)GD5XDdu z`jHZyl6K>oy(&?-41S&QB1DH$ratt+l?W83{Sapy*dJ`YhKLCmZ1y{qRt2ga?b5N>Q0_WI;hdf(Uyd86JS=<*cbEC#*V-EG&f zeEp=@01iq}wV;^}QKC~SDA}-kC$|Bb>Di77YLjtau8}}`n&#J}DiEYSN$^|{+ril( zn+@t#5LBh>%~ZqzD^&u}gK)t^W;|dn_UE)VqyOyn5-`6XpbIUhC9A(sO6@)@duTOU zR1n`Tn&$`7>l**iK0vzBC2QtPTI9XQ;2be)Vr);4#qRY^yy6AjoVWD3yPDI8ktRUi z_gZ#YB`SwDP+|s33XY;h)gTTg2^~}<$xhiZTN4*1K9v?Xm)6f|+!uSYJPk649(|Q> z#3ghGF{xm#i53%!i~1(Ju(adCOi1wNZDdxo;9A8`pID1pQ2Jx`7W_m^K!|P-bdz6C z;Wy5A=a|sAZcG%0<~rv%&QPeX(4}z2b~-oy_0Zt4l1xe?Iy;pQ)Nv3J1N79xCvt%K z1f=a)*kXo~{LT!@HXz(~ta4$M>yYUAOAQmK3U?+urCbW2u9Hz5Y?h=Ap3j()Or`OQ z% z#^}S)3Ko3YGVdS-jwk1Xg}EO+G1 z1<;Ac>A$}O`?B8}r>=6*NNq;iuET?bKq`pj6WYc(lTv0AaHygq`KylFA~ptJ2d-8* z9FIs53FE`Y`=8waP}R$;mz~1jP-;JjoIrW2{+I+#coo+OkV8sqH!fey`}pl2zNb6b z8RcGV zl@7h4(`vhnzPg0}rO(Eov*&SXzU}8n$UV1w|Ht8>o6KnD`C}@dp^U-Vulb&7t0!nR zPipEfI?o|tmRX(<%F!&zd2Aur8`6oNv}erk8sMW+W=fnHyH{Gsv2-CoyPbmKw<>04 z@$4@h)jAifgYbzCXt`A#wp2l%4DEhs|6M`Q_y$fBk?CxByeGWh$gJhvb`1+`ZFUU{ zJQ%oydKp&mp>igHfrFtCnIDU6`=}qx7$CbzlCWp2U8z^f-k&^@)tEb3$>ca(G<{ey zeOr>qw^Nc5b48L2$TK&TIP@<*ACJ64Fma3M!GI>ilI0gfk>zirYNV=fb(g}=qbfi_ z`AyYAPCavY4bXP!#-LYabX+ewZ$?-0L2KVk*8XYVkoH35>2PR9Qk&@^VRcl}{R{tuaa_ zVBW3omf{PJv31;B(xs>`GxHyk3;i;|Q4QU%7UpVni?j;)?}Vh1CG1~twEt+@;9K3= znd`grh9ZDnPkuk}H95!<)W)!%tnPv&KQN$r97T#Dp|L^G*jIWgdmv;T_~K}P+@v;T zeA*L-iM&|#)H3CBDB}r^4r2iJzBPTRp$PYNv|5nCjik7ak}u*4v^%`ZUf4e=o3n5h z!W7D|uw{dKD>keSI%tn;Wq^h|RlMd^+o3PG-Prn>k5w-+MO$2Q#=%bo=J~tYBFin_ zS|b{Kv%io8|HK;sp{AriTpPi%sYb--t>!oF*DTE)IbhXj%Cj*~S{mm7dc=ma_6}EV zJZgsRDfz+RWYSK-5de=C-7K%aeiyaPBtRH@?E#sQ%Hov^^)y(mF69^C-8&czR!h?Fk$8T4|P~gY?t+GrRb)wb0_SA z6nM!E6)0*}!G{Bp=ogvJ(sWHprR<~XC;R83U6JiH^E?7T+x%%{33nsSTJndN(o+z$^A-lM1S8^bZ&Ar3nj6oY04 zwrj;Zs1mT3R0wAX2M~>>vH_0?jvgYpg<2j<_{oJRNW5%(X zS%tEB28&M&i!ny|`!8*IkrBaaERCy3OuvFimGUl^$4@JgY-I8PqN-N^ zAP9hOI?7HPS2ak}Z=I@xLcz7BM|2)KD^^x&yIyj*v?;A^cX zTiXGyUbQ0MVe`RQfUzu?K6NP~SsG$2h4fFzdx8^Dw?(Xmo2w}WgM;R^3Z;}C7Th*o z*>&tuPm#H!hMHLwlSSOIm2(t_v)*OG-z>5ol8eQ{($vc$C%ne>@lU?3c z)f_C0m2RDa(7b*cQAYZrIa|7dMjjb+LH%Sp8QxQ#6Jz@$8xNm!~@-H~Dy_3YK!ZTC<4 zrj=dzx36Cn)0(trdnwrIR`KcvM1{yA1&%*>JTK@id ziraoJLd`e$4fHOk^R%`;w6*0FVI|kHFRz3ZYW6&5W@J055qJ(0S<|fq%rUnn_*-vT1 zM;!v_N!Sict*#F`7k24)tOsl{(No`g2MvC#5Hx))=R2ZTg28bcgV`fi?AjeA{5GBY zBU1|1dO#jV+#|f{67_O@Z5IpRIr~LNQu{j7*U|Hz!JZ|ou9-5&#=p)b53MiY&+lmT zT%}#K`hCU5<$xSR$1|?a<_fB175@$zLBq5CwCy-P67tO#~Wr44r?Uoc(#D{++87fC8m&7`W_rWz!!@~RXV8_xG zN-0a>zXNO20SP0%gyhaEn|OkMBTfT6vKe@PdMaq-F3e`oPkQ+clzW}Q8&mPZFx(wO zpeg@lvGtEKY;9r^Q@^woBmIW8b^F;W{Ay*EINufs)Y#Y0ZEI|K2|+&DppO|ip(aSk z2x$=f17;xBteK!Vt%v5u1KQO>H=k3kg3ngAfQbE0qxQlL zM^-0uH7Hp|=L98QNz-095c$?)TN-@cAwc|tShX8GvpzJBhoQ*7v@(WA=!- z(!JgtGsr+vg6hh<6y`wCJDJG$eZN|zNTV4%tXEU`JoeiHr<5N&Z8B17_5EK$2{n4c z0iq%`7N&N$>dwYT+8_R%;g~eo>iQgj?6%g0ykrHkB30dgDF9=RGs-IChOUV$kAr^Q z3F7Q-5MDRPfyiJUE=vQV0Z=@i6*wKxoXDCv5gl~e`la%b*&}hw^=-*eF2J#EmP^%I z?h#W65$}5pq2AaVL`Fge$aYKi*p?SxbVW(`6*~D;b;RLh)Rd`p1buabP2M20L;Gg9IDOD0s z>!q%jOjC*BoT&4mi|iiRc+(>qKXBBU_Yb?I6a88f*`TVkJ=$`9b?5cHU<}#hpWeHB zbP4`mzh_Gk4vgqYKJE^}e3a)a5r1oyh&0W6C36;tC%~eo_$pqnLgOR@{>x*yv>8b- zF-SHDa>&VsRjF9h>qpg^lcKs~vIZTyPDhlq^!SQ6Jx#c>?3b6*a^ls${2Tt^Ze=QFj%gRTp^kkiP-0Pp@-IVuqh`2kFJ-~F@x?h} zcM_#`{w7Fyu&p{UXZ1xPW?iNmk&qbnreodS&%V}W&(uKcCXW8@@+Xl8!_?{Q|| z{kwCf45&8K#LqJ2Pd%-Doa!TANL<^|%+;zAwvX)Wz!{m#naJTV4dCpmXO}Aksy?gy zRmyMk`nz6OOkRF1u?BfIwTt)+>a=I|X0!UXQdN#iJ}ksdarWFwr`!1r7%Gx}|AUnO zuJzeR7`(WL5)RK6m|>wzWZGJs}N+X+yJ)k*)|8Uw?t0et)A%@W&KNwz=KjEnpXSQ%`0I9N7f=QCJD$OWv{46`02 zPFfXp*I*;rTU-L5yy@-?((GC#N3Q);gA5m8&8W(b z|NDzytK+jfMU4NOsl8;$L5ANH3kS8te{u7hAn07da$i-`WR@=c=Ov#PAN^rSmXh%cFHxBfHX;MsOPwB(qaruaI8z(p+2MM?tE~ZSfr9TLHh~1_GL4 zG4*YAaStt_FKU5xR>qeYPwP(D10yfkxuZ{*10zr5CBjB}11hV}X}8bhR`uRBURT~> z;Q%E3xU&1(#qz^5%A}G8udICewc0=9u3RC$fRg4M1Y7_Rm6zj&V}wmLTwi?Rfv@;0 zbaU$ET-azI0FUm`I-`Za6}VlQdMM53=Xq>b3K%I~A!%-X8P(>)u1i)JNuL_!DE06{ zu085XiIk@(tq+N|ln|(L`+C~|6aiM;$8nu2j(iEe<&ZJXr|6--naNxm4?cMwb|nv2 z_L_ipY32ImTYy&dc&mpOy&1O@wD9~z0 z3=4x++uM5&1KpBRpjsY09YWo@Zo$VWmacts(^4N;bDS5mz*XuIu)J8i(H!YgHR!S; zP98WzAXHcgYN{;uvm*(>_`ji^bTe%TJsugBX&?IT?WWCKavjy(?g=PTmtX zDb4FN@P5JLM3HWk+xz^!Z*LcG?zz3U&RVV#t}obIMDvdWi&^ZZ?+b2qO~e~d$0?h zeY*<0&P;aU0qp-7?eoY*1-`87PN`Oux!g1R|19X}ktAP7r~>wt%XW`{Q@I@^*08&J z`sDX4{$!$!hM8Ls&Lg#j3^dDKe?FAeR>#Pac@-$qI(4uHM!vkWSOCRA;e_?PGQb2E&G?tOT zMfLvCxq-DRmGX`1tA+$JkZaxiHW9)W!^L6h`CBAxhpRJn^59g}%w`IpJxGdYWSRE_U~UBL_C z$cSpspwPwKYY-*X_%G5z>U!@XENRC7`WESzu#a4?>skqOlQcMVYu%S_EcN@(t z|BLQ*VVG{OZzq_Kg3bc-Dp{YI^c2CO&u!?wC^=s>IQ_@GVi&q~m4^r13*Q8^I(#D> zHPER?W?jo{Mc#>0T45z1K^^%P;&VJ&$884 zOq-q?!c;qTD!J!*sK$)e?BeN&oU2?rC$o_Ob^gMURG;N#@K6f#2sw<#L_P#;Ipu3@uIBHQ%vF7f-Grn!V~?=Ex~l23wD9S@%QF#-&&jJ zWNx@7AfZnEQyfX%3O?nt29}{HN^SPmw_|fJ>DOp z2^8+D^-^=gl;0*sKlhzXjN5&yNgxBViLKGDI>v3=r{6lFJaYVzTJ!_Zo!!c=K5#%9 z@48-lETo1!eaK55LiKvkCNT)-5p!R87$J=>E?7#Y;8h_ZFT6x0>J_n(Nn4qUEqNAh zA%{zeYDeOzm$|Bx)FSUU<%?0ZkN2HTX%4w=beBrQ#fFj~U|%iCy68%%hT$8C{NrLH zI$IMdX3nP`P06+Tu{yT(LJVgV;VucJFZNyrarpJfU?ssSS?~;vNE|GeLNC-WH0hx) z_t6@XVh?WO8fhH_x=-`Vy#td>B10 zd#hcmT>fKvO%rq)V%RjfCo2z+R^>X$d;omaap_ za+@q|+n|9Ra-H{R8Z?Mzy(19*&Sw-U{(;KCSImjY=3+xu*#+sEobOxEklp++V5BhB zR;fSu!?oXmR$yHP$&x0t5`{-$AH}->Qhs?ol5$d{?R$*Id4c^zvp;(Y( zTq54|)<3#@){gIR!DFj2`%(V>X+hMCbF*u}BR=FDQ$dYquWGjMnU^=;%Wb%wlBC8| zJK>mDbvIc{Jneb&8PImM!~VlaIvU$I&idgy{#HKC@@J+L`?y+HcsC)h7r3+2yjHzT z-RHyYyMDjac2uLz1-2dHZuj@H_PmBs=j4<5lEak;5i-azH-S+LQ@BSjv?~rC4ZF`F zZH1$qv46ZUu9tTMeH*l6fo_@MdWnH{w_hX|H}7p?fy`exJDMjO4`iz9!I_n4JA%oL z27&VDjgEmELbJuN%KR@V34^Hww8hmrc=GHSrh-=vi#Nmw0i2n;*Tu zvOQi`fUt(2nZN~D+WaQ<_E7m@J$K(kSXjS&fTJms16u^xJI z6q^}T!MFyv2_59fhxu&Z2I3ed7WS!F-wPfUk29tX+nu%IU7P9;sn3fZKYErJ?JHpC zZkRcw!gFb12v4GLPf@+>s2JDFEy4{wP51L zZ$9pY$C;3}$XWE*b+90sVoHB4x_ZC4Ek3NGhWBzQc_0EQ#@jnTL&L4t5}e8O%qf&( z5gKO-TAH|+>CLm1QHIUyT%DLk>6BQTZt1Y74>&KEEH$`CVI~RZg5~OuxAcA4U?#x* z=*q2icQbOwW0H!qg#$?Us(w=WS>7kCk|!S(i=fC~z&{IiHl6+BgMfT(4{qqqkeJWa z#beCwxC27{FO!04JlI6f8ZKE5lWhwJLPL>Js?!GAV*5RP286H_I()m+qk;pPPhKPx zv5AMp74{o@0fzBRm+m?K$Xa~L?p7aa6h3bb6VP}o3bogWIzBp?aQ8@y=<$)$T6;Vv zH6}ldkN#{lpXFOVKDxal;t@0a?R;a)|ECjpE2|uve(@bdSW)oRFZB{fi%{ofi_`o* zu=E+XBc~V--;VF&IXL`W*te_reYbm zfyuKnWp+d(FFX2ASNMNysKpnq4EnC??=lXN2XI2rH+LP9C~cZWvn9l1+h#{M*jCGJ zgcyO~HAmDPi;oH!jA&QG*S4{YIO1_TeZz@AeizA&a;tj(u4fvavONcE-;17cmzF57 zsavm~#Ue74q$#mQKlB=0%Bs-usa_@Co<|hvin=3+oAb7}2HGu)o9RV5NLIPbGddth z)qXbiDm|}1$4cDg)27BqFSGe@V|GVd)h{#7>Smc@S%z3Ie!!jdyyp4Tyoacd&4=i5 z`y&#s=wWnr&cce%A>(O4Ghcv*^<@c5H4e9e)(NT*+{tC8_VIKo1OLZiDn~fsuU^_s zZoo`8NyO@NFOT}>6AQGB`XZot`a=N=jy^s9q7_Sa^DTZ2>%sqQ>;=!|uSh9Oujky&~Eu&2AE*8*u z#Kdzyd`j1F`?4vkScvc?t834aoom}kz*~DXwdP9Yx+TS=7yF~&99b{@0`cSZfFd&y zbjUN+sryIo0O91eZ`y-Q2PVUCt8t#J8x$m}GhbhF&QeR|h1fNjE@*^-w<!g0xDTD(?neeA^=*|))oR`sq~nk@nCFD`ip#~#4_#u?*P z54Y@-*-19O&1#@qc4`zn=HrA%$b*B;fEgS|L)cNLqqpu#TrdA)wh4XiP+7fl>i}Rx zog+N=R_)jA01zZoH}bzpeVmE3nNKEDamAxR7=8h)ZX3M+O8Xk4NeuJ( z8;JaGFOOFX%$XzLF{8`=UpMN%u4ckKD3Ax%{<8e+KU`82W0)WjaF^ZS{Lcp{Qy?$a zuZDXSr~mHYuXk!M2ozOop|F07Gyl)eU`#-5eO-2=;r`$LEikw%L6DnSJGu_(KOaC= zgW5{7;KfM!Pj~JEaobugy_kmI+k+GJwCo-DBsywg`$GA_!h@*cSBJc;!nnh#^C~lh z5$?!9jkNOA*VP9edB{_^8~Ss2Yxl?5Ze!#XwX&_hgY^1}$$;zp+=Al@t@4q1^$1(U!k#E$mDd_GI=p?>tS>Omf3`$ewLfwV7v;F}5)6^OU z%>F5G%>pO(b>RGaFbeo{fnlU)ii6$mxMe2;+}tFLrIXtNjh#@BK4?#Rr%f8BbV|lIs!T`hSxy;uX1D6hM|pCGngIAC(sQ6}=a?W7;Wo0}JaC0nAdD zIXhAwES&VkO8EOH`fpQTTK(wMg>{>@!9IPlp^k02pzWH$>j;g6H&*P-`c8SYz z;VG$5uG1V-L=cuOEie(FYn~7GJ{`S0G2y{{o4BK}%Nt!~}@rLi*JTRj7v>~_Ne3z>`MyGd|v zohStO?}gQQZp`Ka3irA~&4sJl*oavm<~Jqw>J)!Ts*j5P#xpe2z_qS%h}PZYM9j3nO zfS765l#l;Z*2$Hd&|hhUAp!{<(Nug997wjjBB;Xeui#Zu^1*otUC(ivu6`tN%j;&3 zGSLz6J+wQXxix$$68bUH$p>6RMDN)@WLa$r-88ydqjPN+C{pDh_^HNv&8{_#l*?ts zN&?5hY?NWO@P#HJD$KH#6&jTaUi^IvaN$y`)z7+o_o$j%Vz`iD1y>vi=Pd}t)(w9Ho(h6)3Y#ozMR>EfjG3_M>-exsEfFD zztZAv)5;Ik9}lPwWmFx$x_*k{fHqa4?_|z5(ZrNWeUfw429jfpwmwk-t~5_&SJkk= zl*Qdf1o|BjS2V)Yo0Wjq@Cq?e z3##CK6=0z}Pp*>acZ!<91wA6Jbg_FX$C8z*$msHBlnaN^<@z!%;yIv_KK)htJj?g) zjaDlp@Qss!_tHjbH#&%FrAeK_%2R%15}~5s+94f@oqOqky**u>OAcrTy9HJ|@oJ-^ z#IvM>5+>M`GlP1f!`_NoK*UoQ0Wj3{I>AF|2W&3z>eW zr^%E~-u-m;XLopq&sLf?VyU;O~H;<~D$Jy0OoKclTak-Hi3T^r=7U74EfF(Nai z7Fwu4DK8aDd@wu7PT7HV?qf}ks+iFS<}yvt=F{#wGaVptM&cj0a8mjdvm!b;n@EY0PHx*KLs;-%W~R~})Nmv97RH^J5#cc;1S6ZY z71aViOb^#N72(lNR$SqcdVFkSjXv%TSim53BUUO+K{mbaom0sOOT$S#INzIsFis^l zDb#Ex{CWAKYf3dt=vYYJhMAtn@TO0w*&14rC7GfR(XVp70$^V98BM=)yOI?%GMBzx z917O>XXN-=0&ZET{I`|3DsH)oCrRNN5zWqb^>p>mu{Z%3i>4tiEf)p(vo1oJgya~z zqt$5|lJs$(b9Ewb`aJ&G{?aKx8zs7WjQwj2lYtoOYVADZsY_ON(XFmm>``?Ba%&)Z z?SYyux_>!wn!(_H_km*fse_V&p3_^X-}>UnnL;E6RSgypA_%Xh$#OTHqsPsvML?@a z(mA4zS#}!Fbv7-ObZv7`E5Ryv`?8nE6TEs{;8!hbk(R9#lg248I1h*odaTLVQ$tIj zHm#D@XR6qG*ji*?kO_ORCAaQM2re|(m7$-#&EaAB=-k6*AEu)=%1z1g^?>qc*ztE+ zVGOlF>TBezj%4%Z=?{H0Lq7XS4e&K;k+R|Ag8{HS$}fLI@#rvWU|N^BRg%5luc0Uy zIxuzJbii`)AyNUoqc**14c2&a@IZsGPsnWp-}qsH)~L!0c*UlKRUcx36v&i>otLKV z42~x)(n!iQ-^`F8Ur~}HG4&&|Y?tU#t7IV8Y~88L+-IVJVvZ>C?jON1TeOC}K5?2I zpTVc;)Em!bOIZ^h-40m*Wz@x78D z97w6S!xYzIBZ*Ln$r;vZe{r ziG)U9+zl4%&APZLAU56gnD)hR$=CKtlXXADN|*RqoQ5;0W}ss0Y}FZ2UO6I!sm!;W zZj`2s9BS+NNx+sCH`6&ry-#tzM;w|sZQo8zD*Y!uM?|!C`p7{_)g$&6K;$=uk1t+l z$~bHdo$*frOKW8*o>}>&@A0!~#a0{I%rWnbQOzQ2`#OIXKDVYs{Lx(RzL*@F;HI*# z&+*4l+rU(zU#iPOB9*lkxOTP5L~0wt`xXGsZ%`qSSA&EC)@q9_S}n@{FYE`JnvkTL zmJOdFEGA;XHA%{0VbSWF-q!DU<5rSR&bbV-2y?w;rs{`9G~gEEmvz^v!aJ_Mb56f0?3f4DcY>($4#}o&N|} z%INHYtW%_B#@hdhGkDwGI%zU)WR Hj`#loKtPNN literal 0 HcmV?d00001 diff --git a/docs/reference/search/aggregations/reducers/images/double_0.7beta.png b/docs/reference/search/aggregations/reducers/images/double_0.7beta.png new file mode 100644 index 0000000000000000000000000000000000000000..b9f530227d9271a7a889f7e2ae453b44e76f9bea GIT binary patch literal 73869 zcmZTuV|ZoD(het@*tW4_+Y{TE*tTs?Y}>Xb$;1=ew(T$HeisM(=^uOb>aOapF1^(& zOio4&78(m02nY!Fo4Bw75D;h=5D;(#K7lHhuqvkYrwuVh^PCA z$9SQe9zZ!uYeGYMK;PXFIeZM&hR}5Q>4`B3A%W1kgF0R4573-fdZ$-7o8~zo=fQzw zMTp`$fpgZcC$P89qaJhN1eOm2xIF_9=7hm6oE`|>fO^)04^QE~eqH~01Bqt$0?)Vp z5hL5&&HIhyqTAJkW(B}Z7ocx60f1qNhJgEjmkd0WnEAy z!Yab9v9;fKcQC%IyT$p9pTxXd!Oq=x2dhF(u^gadKsmk;-xk1nhqUri`rghOybaWm z+*Z)0#8O{^F_+$dS?_*7*?5;n%*u6V^{I&|jytl+9u@%J_qikm=z0-@5yu6$p`RI? z)bt_Gp>0{%M1MC2dkB7BNqL>ekGP*=_{bN6YhK@`@!bRm)OFoO!;N8KJez3M6mXfP zgHrpm^m$ssx>)LoO~{GO@U3!lJKZ_6_uyp-ICpdOi94EUel$r#L)E-i!s}jtrLS2} zHF;Dq4IIZ`d8a4oaoz{^z7S(e@%L=wOzD^63k*H!15H0C5L>oDDGT4h9W|E`lWmHTM@+7s#(-h z^DzJ(5>B{2{~>Q~vS>&VxwOvIs9^-3ODB!5Q~$1}O--nRSLGG}?-J+)q%hp`&^`KI z+wa+PtG90Og<&T}wFk`XPWV^H0R7>`&}r{(Q}V_J{PVB)kAV#I6hFIunU9CVRfLlz zK2B4@z(x=a?8@j%v6lSQsZ)Ls+L%aTm`cA$Y2dhb^1jP%n)lREpNHmw3%%4j$8be{H!xkvJ zbgk*CDjK4|zLhs#2ak_>onF>LV>};2;zLiBG%!E{FZk;8_HatZ#Cl#^_^AkWY)eW& zEe25a) zPXayD7=&ccNDZ7NFjt=rJ!W)};ucN|WL4lvT%$K|OrOIgG**N$|4klg>?5=AO77ZJ z$YkxI;$g%gqa#Zj>>J1%98b)95TQOh!#0L;B;qf`ov5&W5UDmmJbujTl;>S$%>JGAPzg*2cvOsG<3qUJF!$8A_Q-nK)BZa$%3% zkAcS!%Op58C?zTdG-W3Bf*zisy3V%NwAQt*phl_gww|WmzQ(-Hy{@QEr53KPs_v@p zsP?JesAjGvsxE(NV6k$McnMJ3Tt8t)Z(m~9;!t3}x`nYNY!_~);efYAvwym+wY$B~ zxCy=0)-yN4GB`d~Iea#lIj}awGuqcvpFEgM9#I^2Kny^xMzaC3p|ioh;KnDbYSjMOe(F4MQMSx7BD<-&|FAu=ZL^h)zKKYW%uXdoNKYhASVD71?5a~EWUDT& zHY9LZAQv?mNnc(QXWL*Kbcv0a7aBZ77)KNbEMHMlSJG|jXIhORAtf*-Mk8AxhC^CK zaZas6u-=SauW95md7ih{hopq0frN;JigZDeN-9mdLux_NEe<8lA!#J;B>wh8B^EB0 zF3y`)2w*^i3{V8zXsbxgYqQEa^G8z@16-u4RL;{oS~`kfNuQFTzd{>BlcQ>&u%aZQ z42LI%*Gqs%R7pHZ^o>N0B#jg&WS4wbYEX_YWiF{Gp(+b5U6+3=Bg@^dcGk8sj=98} z;34%On{_FTpRF#5x!7AfX?wU{w}a({{UI9n%R|~*8h%`1+>BP9mah_9L${%@ad`Qm zG1*DUN#8NeNlzPeO=-=V$DN0(Gs4~Ax&5~Dj{5ch7UgR+1~az)SLv@zUqhl=B3GkE zqV}T_BMl-~q)MbLrRdT+*ie~B)92H3$0ElW$JbL}(jZb7)PP`!BcvkaCG|#ylHe<9 zRFYKORPZZ|Dq=KT7U>sp8d|OE4a=?4tzVX9`sr4y2C{yw4|J`1F9|GcW%RY=Cm2NT zFp)cx>lSO6WYl*$l-_Lfc6u$n6ltMo{m=@&hq>VE_&5T-2L*<}!zJ$L)8$FRx z9ep9PVdEX_9ppXwj`qIr$_9!9QU-G6SLNpeRH3Oa9 zc3^K{oK8=Fofxjg)1C-J1QHxNkbnZJ}ufuy2kBM^gGW0F&gs(!7 zUVcQEkEo7HE=kTvuvLL;V*~2xET~nf0NuaccsfCp5p@_m`u0RGq}S3L6Oj^C%Tmgx zt^G8&nt9iqJ9!)jcV$Mk_u3O5z%KSSOj@!vV3oy`Cl(%OBNsU5?&mNU8fVMrHhc)4 zHZ?$Xh1P|NLXM&WfA#+wFFGt5*pJ%o_C^hndesYR9P#WboyxhdGoRszBb1r!PD4D)p_UT@r?Dt@{Hd(>2>AW zW4gJN^Yncqc5QN0FL~=K84qBh1K#n>io{TN&v_2JFMUjLX8IfBKy6BMs#$p_d(17o@0i4W{;_k3AX;Zz7zk!E8`jE+RTdXh-vNM6+onW z?lt$i%yPlbcNg_)b@Nh+Z^>6hI|OK;(V&Z_1*fZ|Eq*tAQE$rYn65pp;p{|fjnMFuFb&H{RSgh76Nh_D1!M3%|Yw*?eIE3Y7t(Cvd~@b0`0J*8XlveFoHn_al%u zcX<=60ag^5I?`aUUf~j9mU6uGHO!>+#B~3V8zjE&c13-43E^c4GMOepYdPRTiu?v$4&_3PyEVMLwrd^L zD3tLP4I9VHws80JXA@X+*uh9bWMD;5CcYcT2#+K{rS~%Z%E*%XA_QlKbA_Y({;Tyy z$LRHYV?a+LzrI7f?ZS1k@R)#)!NSB3H(zG1T(k}oCM@OR4sRb)IRJ$h&L=Y zv`r*M#27aYHY&oIWB*}^d!$0eHbWwNnK9Xo&OGkxliG9fE7;4$GqR^XS41{F7IJ!!Ow8O`_l(<# z`;g8BpkaU@wwZiP19Jmp8`+rhC?9v#$8~p*tdg{bbgk5p=b9IxnTEUD-LWwB(xu@T zqip~E-F-AZRWG{_iHE!Qj=k3q8?o+sPoGy}FGAC}hTWk4sa^Az;NAWG`{x^>W;yQ~ z8t#CusIWfH?b$0Nl%HUFBic&S!pi2@G`?@FvDBFlC2n5O*edK74Wx_q+g+0I?5V z2r(EE3*Q>cBIG8x@bgQbjVOso8}59>k7zXZpH2z1!IVk&825_`t1mm5SIx+E$i%Xz zvW&c?>6U4N5cp2Hb9gLnjc<1;5ZAB} z_*tejR_3PGo{pV+mKIW)h90Xsx6%IO1F`_8EH|`uqZ_R}>@}WghhnzMiRz8xpmpE% zoVnN)+sDy~aqe`_-$=evj7|c;w1>aK#W_QyE2R_%W0TyqCk5 zVT0cX4x7ob)r2>M8$%n!-omuPbkB5{G&3|CZ>0}6OW`*o2XpIAWl?cwcjypSu~4^2 zdr$^jjt%T>5Xe!9KYwpmZF^A^P^ds=CIl(P30s(}W0|LsE~`3@-|l@)jbxBoADgE+ zuE?{LvOGCu!g&F zkisnSg;A~K@x+LD7iBs5eyV#4BWkX)n$ivhTDhA=;__%^{1wg0xT>T4!gAUIp>h~U zpF`;rcua2kF@|eqG-f=uVU|YvzSPDvay1O~It}jz^M>CISGCIZ#LH1j8;6SNAVuN28d z17FmvIuN6IEcXpC5Dgd*O&HK(8PM#ogfC+WSnNkw_X9*ANtP?gf#DZ!ev~2N)FMh- zQO1Kl0`Or98(+~$cwS$pP&}j{=D1gS6wavKfU`+3Xu+%826?=ac=4o#7>%GOJtlT6 zx`2==*9Iz11no%gpxnU}0!(#kw4dhsJ2kZIUw>n`pgkZ>guwSK$Zt~MP>fKhKp6*8 z2O3E@j4)C$Ct@p8%g3e&z6x4NuK5~rVmA2RBbEyS zZ;M7F30IC_L7UF8^3{Ve(JIU{_-3&p08f$jjvB;1EGDN>l#jgiLvZ!4h`ZwHma_T||jkC?lGe~jHP z*Z~vLv@A8w)WkZj#KePM%e?*R_4Abp8p$XUCyy>)SyD|;ZV7@PRKEF*nQIv6CV?yIhBOb{Y zV|2RSR=_HLvi|Tvzo9+COe3Jfry_P-3`j12%qrYYJ2JYaqAp>!>OIK>iG{}`a3<=O zd#dKI6IWU{E~pYq#aHa*pOqNol;@FGS6hDn?m9^^{}}!)=R!o$f!_POGT;bjzj@rx*!_E&4TCvjt_ zIrs?3ZZrn_6xI|1 zBo(XVC{VbAYu~nz-h2v##X6-O40O zxp-fge*Eld&OJ&-CT6lhTNk{5M2=JhnF+4vaFEbd^huzDsBo+%t!lQWdDU3b@jH$Ujxz*hs2+QHiVmd+L;Zan-*{2aMl3W6L+??3?w?GXqiDy5jvq<;9>D`rpT z@dmmENk@|-(!*>fFa{*d(UgYNb$YQ{+j__PEIkK^g&eh8H^GX2-h})I(^RJbWqaJ~)m8G=gZ_8 z2>`gbxX`&U(b?FW02nwpH~{pF07gdI&k?i^Zq|-^uC&$;#Q#s^KXinR91QHuY#q&P ztO@?0tEX?{s#r*i+%%PD8( zYGkP0glSpGcoPjCJ`lN<2I2L806|CiRk zdOyX*3(XDqujqN9b(}_Sfq?jdz6lE`xdNYNLbxU<&ks%%{1D{iN#N&)?1Utu0zs>h zr-8h03isEXwOPu`J2|f_KWXzPy8i-+Rs#a!pGX#(FbR?ygrp!YPUzI-aqe}IK5VE! zmYOO>_VL0!*0DFg5$JZXIzyu(4+##&5A@fTBM)V`yUTj~*u~wF4FJGdfC3``{k=hU zJ!1c(6XDVf1i{nmd1qkz??i#X2p+d>{>JI^v8Nb{|MKoA20h}RTzoepy8q7ib0yhi zK)nmad^$96e_i({H3A^We=<&k^P7&myFJf4GlVTmsb`^x-1dOahY7jMfh=#_4o;7$ z@LsxIJGq!%?~rjEX!tfuI_Y4*RnH9B^?OoK`|n#fH3Zo!c9|@*>8|3803=U)Fvq^X zo+Hn4HFO+%v1wh6@`*}zy(ZOFPBHJUpLD8R29?%h^Ljc$fe--wrJWOfhhL2 zz|-{WU&wt@j}q#srpcmfd($}9)sa!v_M*1RbfD&Wu_A1IyaHXmddZz-yTm#Bcy@B% zN}Nj=5G?TF*7lruyWK?nmQZ`&eY*h8ReF{KVLBghptuxPeHhm5EexJnm0xTuT`=U& z!+cS@lz>E^QN-ubt(dmTm>cKqbM9k0P*sm4wW6~ynd#FK@+_7-6IT5pDeGe!`K&da ze%<{X=}q}ioO2m*2>;{0rm-`(IIQlLFm$~{5Y(-lW z%i@IU<-wRNV;ra$83nK#FpZ9(3F0~gJn#_)tzXlD7Lq;ho&ydyT;BYg6Q1RlN_iZ~ z9uVFx2a+4A6@551wma}m?~$p)SUR|LGbDU8Yy6<|+or+MN#jUes5`F@wQr@{@G~E` zV{T8cVH4KR_1pH|HBIH*$gGe~p1ScFab&7X|>6(|#bHi^>_VR=&nnz^eH=%>{`ObHt7wO$q~cSg_>#7idx?WdC3 zc$=4IVcp%uN=#dFqGIcZ8K(=Y=p%&757aL8XJD_Ua_~f_BVv}CZtvPq-lr`T^^yF; z5j|Wg+O^|k8GPOk=C_NxaRnjGqoVO|dMB(XW6LHM7Xk}(Bpf8wmuibFGuSu#Zb9yYE4%FGIO6j{KMsUt6_i(dFR6z^6f56^^}ER ziEMcQfF4X2nFsFgPA2CFp+@7nL>Mwq3pz;m4Ey6D<2K^1`94=h57jKl{;R`bPCi7z zcPSzYw824%mP4wg`!RS6v+i#EsFnlkv^2-)@aD?GY=^hfj&Teu1nnt!)%#ICkrVG# z(ey4gDK3e?*vw?HRKdk6il4tUP$bQ|+$fC(8QgI644Bq_4hLjTY(uR@sBLvTWS zA9*MBI*h3X6p7$_NmcW{ptn8zZa;k9igWyK@(BgnGL5@m@Wy@Y(6spoGAi~;nI8)A z%~5`~+N5wYfusE6l!(A+f@>`9xP$2-h_@Tp^WavUQnG6llT;8 zNhA_(2M$zd#zVlWt1@1oBc%;@wry3fsLx{nIdc~=N7WVUySA1xr5ZLW*?J1$P(Tbd5a$*)B3zCeFzt{i@oCG#l##{EqqrgkeXZK2*Tr!S6*_J31)ZCMNr1 zY_B72(>A&(ge>(q!V$!P`fsMm}}kKB(^%Jsin-EP$62W*Wx1J=@i;@Z|fsAw)L0Z@n1SU6Z3=qT|I1h zfL)-vsyP7sTJOJ6YcMGE{Q%x4gMhHNOc66n9XHiF4pNd8MJ_iRk#e}prFO)y}{ zE}0%d<$v%*2S)TA*4Acw@g{s^ec=QCOl|)%=JE@OA29GbJ#2OcD41^ct6D9^U)}@x z6^RnikOb!3IZYG?JBprfh3ou%)jeh)Mn-!Hx@zL2g@uNVY8losU!p&g6{j(9)xVDg z0`m+6lFIbj4d`e5yKevj!$J1lJV}A6^8c&GeJbE1C`!-TVIj zAkEVFS9NQ_Xt)7?o9_DpmBHZ<%=>n$c)Z%mr0srGc#c0AhRABYDo#mB$!s=F0*xqN zem45B;UlZrY+YzD7(J|s%juZo2MRGg6i0kk&U)Bk$I=zlezb?qTc8^eTA{>0y8pYF zl#&!y0|D#r4EY_1di*b|0o6MP>Tyf3Fr$yNDN%w05x-mE^fMjD`{laH1ozZ2zE1Zvg3=SZT5{rHCr=ph&i;ESlt?35(`zNz_Ja!k~ULMHL10>DOsc<>$1DBg^WZkW=gU3e!-LHZc z^A!x$tDZOGD5$6+Hl259XDiJyT`%Wc+in>??@z{=!8pumo9Rv4Ld$3csf9KFs$swgrbS@k5fMjACfS9jrjF?c+O>mC<{ zpTe2)dB4HGY2$6#iRL=&xcOb)wDvOyMta6@ZU3aYyJ!&KC!_1_Ui105y!zJY>0BIR zqtnBs&;tgICgDbxTn%a-c%1!%25pP!=CJ@s#>utw1w9p*!Jx&>x;9=%zJXg@? zkLzr`HoP8|Hh%oPSZ#GU>hS|rKA(H=A7trL;rw<1SeLxo!Oyw*`7py+2YJ_=*5w0} z7f;=l6F-m8X8Amq@Lj9=Vteu*seKwUaQU2uS!)H$-p}`jSJ~v9!I+s=hl7IHw3HNa znz6c8MwqTuG3m1wcr@#fub4zDwLqf+71uX6g|Vai!W~Xqdgmda-)kvZV5XKGmy6Up;d$sD(Jr3D;GUpX_9qTzHr$rCZO2z#LW`uyBum-P z7K7TdIK`=XS4I15Fy+kjAmH&T9IZX3KTl#s5$7NFQcs){B;ILzpT5Uvw=JxndUU!} z@A!Oc79JB*{HbW89&T=K)5UDGw9;7)_$-Yptq#Q%#kvi#qk1YydKW}4gZEb_|nTDC*>tTM3!bX=@;qcAP1fMLeMnf#t z_o$G@?&xKn>mlN{@TV%`Z}syjFIyDj89HI$daSBV=)YRGCUb?%W{RZuQ##%6eh6N4 zxZM;?j)F9lgJ@p4;-Pz%ihwzpqW2oaH+F!EsJSRAk)v6|>>7@Q3r}w+p-)$K-p_#O z-FK8PjL){)#nTP9ZvO+pc4PvHY0eK!OvpZ8=y*TvHJHy)F@fATEoh5<`l`eaHGX4Q zIx!DpMI*FW^;jchBe65~X-4#OO?2AD>laXXZhk<=CCJ-lAH2e5RCII_Vvdvc+wYxv zp9_oXPO_klr<;S(=ZyQ%p!JQ{eYV+J12OH+-U~FnZtW$=j$Uo|F3|Z$m15ZYS4(=L zzvLL79^Vxm7>yz#JiI_ODL~auPcJ2(VZ*i$cDllKDo4z z#%%umxVEhPw}$yW`FgicH*<_JvZ+jSzkdrg44AI!w6N|!0*5f9El??k!@kn=P+ii) zr;=C_QJ1_h6F$yoqMH9b1o=)Ney^l1#tDy%Oz>{yZRW2fn0R@7CK9QD`a8Hzqwq^f zNg?CBX_IaLN!)sFC5Qa?A)ih>?dxYmlitt&Dg4hEc`O9vZ{t;x3;TDW`0~dS0`)cz zdn%<+K(fnz-?~RRz|IB#%dkg`KSJfbpFRgc4%FCMON6!Cjbh!HwjiZC$1~&lpSgLa z@G~S%C)JnUj;9)qP*PuRC2}_Z0q3s-tq@F+!0MpGO@D=F&uS?~D?AkI7H?E(ZIJ9o zfBUm=*kyCZG-T@GX!AE|KXM2?*7F!%_;N}rKk~5eJ7kI_9WpN_P4#FuWMtQ0FeFR1nJN06RWxH%Aw67X(G>-EM`ZL8leHBhNGkJ|> z(KhbA%DY#0{_wzxK)|X<->oh(oCGgX_d2J%4(HduHEh4~GM^WcN?BX@CM3MkJ3|fU# zKr;IUzj8I3wj83dI~LFTnr!)Q_G9XLhgLYV^|nVbdCEa{T^xeuCU#Ad&3f1cXF0U zoZfeC@46V-xb@$nijoFn0;K3l_BC$&uvFO7qx4Hw$CLw5nCVjrWX z_xZ#@Ma_YM8bSJ^Qbpfp-i4F1Hv+bif9F^2sf-tPPwlbs zO2tIuNolv==fGvYQ9KR-k5t$>KCB0p)_XkVxggk5`{j$EPNgM7Atx*Im2}eFpJ>ZH zM&NktmqsxYI>%}r`2GH~d0@r#3=mG;8tI%NW~_V>ndclv(o4^9JmRm{af|UrF=yRjqi9sI0A|V1EK{`{-jb=0iaf?nO1hN=D7H!kLyq7 z?fZbx!dRzS8Q;oSfhz(YIHv&^GB5PPrXEXwtCNxI5_x>k_&p^S(=CB*P+dU&&Xv7C zku9nEB}u`ow#7_U;ebzi)W=Hbf%mPVgf07>lP2Z$Cc)QIbONH`2yv-l%=Ma))y0I-%ij0;m9NBI%@h|1M&OiwF1pS^NsD((f-!&Ay(}_%UT1=* zmylWZ(@0kIdDmmh z(8=82jFz~(a)MC4g#y7NsM)K4*8luI6FX0_GW1%+^>NakDiW=?yx)1bp(JaTmtFKd z9cMiiP0UWGkzXU;c^S0658*FTCYjWJCF5Ry5O}^H)6pQ$<2Fe^olZ&8KKfCDH_O!E zm07Hx>+Cp_y+0{U3a+GvBu?o-&Zg8sSl&r`F_5mkh|dkCc3qNe6#&Fq3aa)tbEOwir7K6 zCaj#Qu!VzmnSz0rgP|M40w%`R%|9Vp&oF+At!-z6^4x3PfCW?x2|`vz8xbFxuc{el zz%APn1M(#Z?oN8ZEetSHnb`5XzrOf=L0Htb6>N&84I`4^D04p^q5S#n`7SzKLSdnw z+QM7@FW@Jq70GvVMT^g^qJ0>7G1W(+N^h7Im2C61mnbb8k@y{O7ObkR+s^28=1WMT zTIomqXDJeF#(x&tpLYktMOYv})C)&u-8QPJ>TXu2(0|-Mphk472;bKOaMm#Ynlz;m z13x~p4$9(M+(Usx+`u7cUXoKGh5cM<&dbHsn8^eNH52ds77kg0^}9>g7Uc{MLGMWN zjY9JA=~p1uv0iqLpU7XzDg1X}Iaifgu;;$M3rIsO4V?*9_fO0*w704h~4UA4YpT-bBGobe0W8@Xw)D`O@8Qmv0UB9xysWLsDZiK7eA%bLP z{p20>2KC(&p#;KjE~q&0mudkcl9A}cW7`8liBx912q*EM>UXwDJ%A^wo+9sNtLz_% zLJ?hS<{nUkQT#b24UsAZIE4?*#gj*#xikWCUzDZI(DRApAAa7>;Eq$DG+NcpA z98ljb8sU#7Ibgs)lZ^A1Zmtqt@xAqEnrC#LZo&YNfLW)HP{?ZlWI==vSNyN0$`Y6; z8*msLKmK?E+q^|csYG0ggLDi{HvA1Z0iqUVE~8^9h-lWZM*ScEmF~I-f14(z;<7#K zTm}wIJC{qr&`pJ)T2ktFZpR+M3y}*zF~Db8hw%1#(el)VK8mxuB_(=yUzIEU2U1>+ z@1*$!SC_nCI=UA>bxe)pS+M;*#B(N+zUg{?1jU4P2`jp;L>~Z0 z<`Y8~*BT5e2reUzjExoW-zhFtz(!Svfj|&UGDDtHvgeYe6^|u_ILmQ(TBLh<)*yKP z$tC0fgB7$VPI>Pv1u(Z5`Ez48+jlOV1n(R%H|plOMT0&dyV}jrEw?zy-hmn!&L|6F zcifzx;LPh0GMt4xF~1a6CTN_C5l(>~-t)=k3kY`W7k<@vUUg`&?;fJkht0){_Yv~J zP?q@cGKD*XMM+y%Et*0m_Lh`g!=pWZlbu7(uL-_ymR%RPdy{0pJ_<3U#pIWj!WXH{ z>+Y(*K78l%k~ncFp4?+~&)$60V|J^ntx(jRnH~^uJlmO=#;`V2%(-gx^Akcag$A=9 zfew?w*e5yDobwn_zr3kr<6;K;Pw9@}hdLP;#I?{YShjz<(${DWi5Ik3)bDs<`tt2EvKB828rc-eK?MFTlAX89Wwrvp& zplE<-67}UNTC+t^hPl*+h!n=*fF;$W(_(x4JFBMKO>UEdl2p2gam*I(ic+b%f@SWu zb@OyQn=Zk{+tozz;rkNb)IC%8^oW<=to2AGbCHWh-F;WOjP(<-*WDXo8m|HOotphq#vC0 zblzU$H6$d5(8N^B=ioKxj$-!z24I>{uXr#1V2~t-Ko{!@iiR3aa zCg6PoT*Y($7wY(VAR}X>a@H_cOz)2?>+Uk_&=sUgazF8))Tx3| z)o?Kb!KTvV^PkM6!eoAyI43yz9Rg>d*V?Xd(PPQQQ z*?M{T^|4;$!WPD_D$3#V9Z-mlCUIsnni7qIuE7tn&>=uA2{Ke&P-j<{C^5uGl(anX zOQGVdyXgvLqw4yVOV<%R@r!l7DxXXea8uH_t`sn-R|yw&CA_!v#BcXZXKX0R*L$l1 z#Uwc_mJ)Y1epAmo@?aMPQxLEu<1<42)<(6|SQ1u5#RHDPg{jQSkQq0i`bz_JE589IEs3Lq5DN;iFG-Oolszx~n zHeLRB7^~$YPAf2k+FVl;H+aQ&+j#?6Ldmb^0>?u7@jIn26lc}dGd_4L6=g0RDetbD zd6TzDP8X;$8$NGHJXuVc@9Z35UAnJSJRS0@mobGmoX=D1xEE7x4gp6BLPdA^GR*H) z?Mi%&oxgMiy1QMG-yTjrra*PeIkX=Z+CzP$lTeD}My|XZCBXWRNq;URgnCLWbI2=87~{@iBj`}>7$g&wTn-EK=7&+f-+ z$#;A$+Bmq+JGc$52f*&S!nW&#V4E8chr(?461EtV67ZI}a%s1(*v#K_D02P%k)UB4 zhmWwiBH!t4FcQ&FSIlgWt}<%PtqsByYr$V{2rJV&xKro8@j%71ZM_nkq*H zCNw`JZfq(!i+k4Lr(ad6-62oC0G;{{I(Eg|kpBP59(+}i7k?y|?Wd+C z%tzEA;6qulFlp3J@K8BB*J}WX@aqB{_ZR5g=Db2QU+SvagOXFQCTGbrH&9Wj1x{kr+brH7^rI>jlcQz*`^?kZrJ3tt~u3ws8Ly5qF=$266eFD&CO3KX zR9Q1^`ZoPm%L08l{q~yP4dD_Oq}&0%bfBPgZQd@d>7Ch0)1-ToVj0%}tdUbL#M8#< zq-j~h1@}>S9oyPW`lR__@)M6Tod}*->>}MAp2qY zSr05HVoy=WhNvO~imEOT%i}|^1qGhzX48C4<{`!*7q05~1TqUY1>qf=jZGiS2Gr7P zZ)BaGp3;?0XVkVUxJ2Ffs3MckfWYA~=e}N{vCL6bSAu2YK8}A!?G&)AXJ?qo<%Wp; ziWMS7&{Q_8D}s6{tFDlj#+>Zv;_}o6y2516$O0cSsk|iMZFQR)uDt(w#FhAt+R*IA4i3ugntODku54zyJisFe3kG5x*U;#*nAVMcQg zf-}S^>?NhA_6#AJs6qedonV^_mR9{LlWXB~(5Zn#Zuip>>$++V_u|FUEP2d-8=+@5 zkfzAEMw<++(r$mJ*%F&httqCV;#eT4Car`#*2WWPbs@w`OeuFa9POT)gREJD>5j~> z-pG&uv%#tDlg*P5n|!%&-!q+jAPHL0c0wj~V^<>K_SoDfu53%Acm@S#L!f{2h=J6_ zlJ{n7aOo30s<6+PyXT+o;*NmL&T4~u_I{RCSmEO6VDhRxsjzfjDC7xUPMjLP+$qC0 z^*jkjzWaZ?n}!JN%^Krjkg9c+lKbu2YrM#CPw_+=Cz89f`-rP2Lm9C3LM<7Ka42l{ zn=)!KIk~?G{D57D-7bMy)g;~t?!1-#qGs3Ov*uvxMG7_1@%bIuSCdE(5Llub-QU)$c^uq_v3w_A z#LO!B|Dx<9H)IXzd`%E_VNb#=Tbzn4u9(JSW(iAARG@!ltKk(4(Y*MmezFFGpqNLr zJoeoeyHAr;r0AFgz z%K*{m5j-W{Is^Cqg?7y&kG@J+mz6YU{fGVR66WYPx59L1@?{jNvry=e3HXcqUWR_G z?{rBrRKTm(C=U>eb?I0#|1RqFSU`;VOL&8Qb%M-qO ziIf*2NwuS1@Bk;IozyWXzS_%CtpkhT&&VV3kf$Q!AcXH6TdMH%IO<@3_V{hQ?VH_p zNQ2v_;aqqlm_VxI$rX@HNORKW>O%VokgN8>*7_D(MsSv(b)(jv5L8cxSS7U01_UKpy1ebs0;6G_y4(;A1xfh!iicILAAHEbahOO$ zD-w-IH_n_jSm7iwdA8Q5q@NPo7P)AD?kb-&;zBbv0MRTJJ8w%89rFaGbGB`$Jrk*w zwKb6Vhn4*xdj|QmGIkXb=IY?#%anK6h5Kui6|pjH!|mOhITYMs^)EUIPWF)yIxKVJ zqg*fvl(VJzBV(!KjYtMWdvB0eH3|E_w**6DAQO3|qV9m$$p3rCcL@S=?KadKD@+4l zc{Vq*eM5cFfEn~&+Sqv

F*ZI&Yc5}NkCYVV=_6D zR6w#m?Y~BMU=b0s@0y{Po%gr>V+VZLUA-HHWxi_n0=TZlDU#(?Q`flOT}Yrh|MnCd ze$s<7k|5u3HMvM~$HOh^=Is|J2hiE@hO6ze_HBbP9UWFS-|L6E#|p)K5Np*F%;b6I zin<|D+)v_Aua`qZ={^_fxH(m26{x_E(&G2+tF@co<_TKGAqrRqd5_)&E)Ndc1v1u# zrBAH`#^iS<6I?5X2x2B^X&Q7fG8W5lVqy-MoAfMB76IJk0#Hrd#s8z}8vNt#o^Fz+ zVPjj3ZKr9_*x1;%Z8eS4*o|#(Y}?$RvF-HTKK;G_z<%aC_s*S}bI#0G$`%f-q*>p= ztDpoy+76_P_2Bpr_2l9Icf$h|Fdj#yZ^b3(Z!}&8()XvLO@w@!R0p0VCsotP23<;Q z_A3s`83$+-e-~pF773>VqnSIZw>iG$WuB1+CH`0Y}H#S{ZEKJw{os z_Bz!6iklqjnqj!dN8@h!=cV}SvI;=@7sMMQrWmf#at?>9kCuT{&$2Rrsf+ScDD9(d zqc^pH@{`rl)&w- z?>pXTtagJ!X%E%}K!bCT@BjH1H|T=Jq=}e;l&s22f|qk@v4Nc~O9DheUS;Se2g4cu zM(}qLOc5tiSl$o_R;2YQ=+}5J1y;%zlzrb!d{_J6H~>rE^Pp41lC%TcqT%d{;$W^! zvm*Dg_5FYDCd%Y_Sywa$-nnXaAXabe~ zz^e`)ZF5zc;|DNNjc9E6kYSf#;bRio1kgWlm)Bicfr9}W{bB74*)amEpLIWBZUhSJ z!q>RX*FC`leGX(e>{iUnG;3J?aE5@iLt#?y!`JO0_gvOElNyU$iSmoQyowj&tEVS@ zu+)?@K!>kwW>$q(e2amVG-_$08+HY(b(Nb;l9uT!-&>ksA8%F39`pa#fd<54ux!NS zePTDy#ml6IBGc_Uki$dLVD=YjmcOoHT@PU!o3GE3&yIJ-UF*otRxf-A!L|zK|6HM? zXD?41ZD#xBmqr7&omHL=*?6YySV_s}XO$txrj*~c?^&*vLk^yur`_>%t`51|;Ae>| zlgtYu&BUP9(!vz4|GM%~6-rvG!q z304DAIF0av}O4@{NivEIy)W%xyGN%9nZD%9Gmc>NVSBqY{ zQp(+agpx;1O-YdM=Pz!nGb!)VE9fNU&f=F$o1<1y28rPdiG6IEok`6)?6gyhhE1_F zqSQKqP+L6>>uoEYzC~WPS}El{}#G17nT#+V^KkA`LBd zkU`ZWA7TH6RHsz;{MH~s54YAE@z%s~R|5eP&g=G(?S5~9mrL?c_U#8w-)NbBst)!6 zxWNS)5g_}G%hwI&hNfoGz|)D%@zx@;hS02rmbtEp5M_vg#=nU$ncC|^ARijb+{ZID zs6B9z=wf{kSa+5pLb^X3-FA+ra8h}6ogP!6Q#_ef!a>zkSYb=Bq6u3vE5xVs^+K#D z8>47>0n-wgMNJ&8*=wSG`c2^sz*&=I=Otx=<2j7~TVm>hBKE+h0@Vj34_MXf>=97> z9~T4GpGi(+H+4oZao5vcdR0fObs;T-0B!H;OH?C++BJ`+^Mk~_VxotML5(cc4zKN;vIIK@Yy`Y<#T(3MP zK!JR#sh)?I|NpOpg-cCC(n^|bC>iaE(>NK64{T)^dlm0Qv95;FtCGSvvd?7TzVH); zS=4bjrl!`$Ux-Yk)Y2uO{t@lNq#5e!fLvu=tf!%1)xb;V9>?+#N2!&UsG#0dD`b`B za9Z7R0f$KCC@%91_O6_I%Lxpn;WapIYu`5i=@gl2zruUVtRe@l$oHgoW^DTJA}Jkk zK7)EMXd0u2r9U>A3!h@FfKdLpz1XKly z+#<#}9bGs0Ga@eX)!q1^i5Y6G@;%urZIbb3FJ52&K$a}@^*8<4hGUtXP)6*)na1nP zzR|o$I@R*4`Sffe>m`&V0o5xMg)KvC?Yf2l`Tvu{76x?ZELEMYv}MTUMI$qJKW*g& z{n6<9$|pivXh!olVcr(Xl*~Gj7egxn?qF@RaFVmULl6ol7dp4=aX->f(noFEyNqMs zxzl#z5}lwX8)q}Y=lUrWtialq!!AVEFaQp!L+e&X`|l5{nZz=?_U$oDF+%eCT`!y~ zULUoNa68^M`S=JoQi^1OaYpi|dG12Xv?_|;ylE_qHvXq-)F(G6!ir+b zC)s8K*^Zh*q&F-{FjJC7W6Ww>_t;o1BhyUBzEsTn8lRf-+YDy`I5vEvlEMm1i@uUygas>5g$g+28K%C^=dO zOv?Kj3D+bnli~2cSkh3soy&1YM-~Pov>lPsHg+z25w`O0^1E9f7c8H(%~L2u74 zapg<$HgU}jv6bxU^Maw&<960gx@`2kd_0wYX&GAPu&y>aS6b|+CuVKdE@B5wJKRw; z_oE+RQ_FPfTs^HZ3OR<<=(a`v^FX5_O?1{h>4x7Xp5b|W-2454CEf9I){advE}E$N zm?odiD`<>QO;){&-KAX-z7x`~*m6VLex_*Ljl=U=afSi`?hzXj?=97N1HmvM#J-%? zC&Ki)PsB=nAg1EpdBLp5E`M6@O(PsXzbC{I@)? z7f#%osFr+eV{-2b^|erKql=5X)}+77CrXp0y0>m0l#gr^{Rc911K=c8_MD}~IqF51 ztWqe0b$-Yhx=HLDY<~>PxyU4fC2V44XIx%yY|JhjaUCi8;px$&+M@r|lu^~HZjMqA z%T;U>f!U=mrX{^rfjfC$sN0Udo2c{K#(TaxEg(Wn#hIx1LTj`FlK}MNXN3FAxpCKQ z)wSNdcH}Zz$Kh-XiRA`t_4KCVmr-U^q3%Ue-mtDmZ{F4&L_SNT+_55kIrvfZdBQ(i zJ;D2|$ilx$(c`Gbzg4Z#zR@>ycnP}q->L=`7{<&C7LX4oR_)F{IFRW?IFYM#tlf7w zNFYpTG>GU%pawaL!3nPLChfqu70_ewkDdT3-lGlywc=_L?B*L?l_Q+`ZAAdsbTraV z6c14Y0F_{?b!Ek>my5Qvy?e+#b~Xp+01&19f0|HX6)FRQBB z0d7lvh9CoKr^n;{BD3Z1Z)z#l&Nm*WFq^12ocA|-aGNV%J@LslbtQ!(^Q(^P4q1HL z4S3J;)j59ujI@}|T2e6|ExK@vtl2cXD!#FAk34TbHo|v%w*c;@pZ_+G&bud5uL}ol zB**BMXkYSbUFAA$acBGpuF`Gedo|aF9U|R2gEqO{r!;xm@c-SVTP}tg=2Dxz z1(D8bB`N-<6>}T>b!vC0*CcPm-16}eH=XlJc?VNLXV#xb=JV`l+fi|?e66@0uK)Tg zVGN2IC0Ji8O{!VYZJFaeAM@#2v48wsk^-Z>Vu}gp#y9R60+2I@In%bZMrEvj_35o+WvZG` zKb}@6EXEHZmZ7$mg*GXjJgU5mP#OA^h|$a`_0nh5>dc~W&M=*2(}<6HOnfNJ zhz0iqQ*|P`dQl*z)vgvMITNDzj-auKH;KKr>W);)!rsIhEs{Ta6JpJbk)^L0vY;XF zzQ3<~qaw7fU@p$b&;FEicpY`uC8#DtGn}?>G&f6kw{uM@CRZoY2`~dQwNsneu6o5- z)LL@|8Q(|MYBpRo;~xz-u0?>2fN&1)O-jF;ynuje)HuJg?+YZ2n8$<+QJn2w#i}n0 zd?ouoWt^zFzFByE5YwWpHKx6p#uKbhW5>xhj~ecC_XX5qWYIKqzA4Nlj$K=fzRi&5 z1Wpg?X^W8PxRB-!{a5r(qCeW?pu^Z7FXqb2nIkxxGufOoOk`-5V7?b3iaSvHg{^z3 zw!%BcNy;P9s6a0XH|t9N)Mp2LWvjLpu_4l;$Wuv60Iw zm%9nrm`U0ST(D4veJO9dBXSCglG?IrHPhtfP7-J&YB^gMTm7VlJDD2^^iG%>PrJ`4 z*A)cec7EK0!eGYR5KWKJeWG6AaKDjEtE7JU4+i$3#hM%#lST>f2 z%oUGOs6)<7YWgU0)T!@q+qL)Fh9&-Xt+KDgX)gQ3l(zP(gjH2(>3R!J?{#gSO*%R# z0vn!0pX-Fn*U#E&J>)!1>y+n;UmCMg&VCrZ1*LUndUS~U6FyXFw(WwxgCN1ud_^d# zmo)=q59WE(j5Q%1_t#%DaKqul$g{XB3gL3P4Xu%Hf`IIkb|3!^j`-}A<_w-#yTy>1 zMAvh|c(e*bx(osWQ-xWMx(E2O3jtSQq{Z8FR1`-h3?o>43bpnt(OG{7db%~IbYi{6 zjpC1>h8>$mhgzAbeaof{BrGw9e^I%G2wi|zx4qyFhy~iZt!D5~@pQ#v`mAu$wpugY zMZ^@Wz+)f@jG15_9)wVNu)-Fj@Ac(`!k=aD_Ri9$Vx#a#k>GM#dl{eAF)S)xsW(qg z5(QJ3V)#`JgqCPfrU|8vE#1zx{X9l!^1t& zK>Br0tiP8>Xc6M6(?-Wrr>xUAWD&-=`!}YkOfA#wDbSyfKieTP*04J--WVIIA?`iv zg371bWH(GWGV!>7t(HtgO5j6Xp&4qO^@(2$-=H^sNk(DDk@-8_9cU)Em9wU5gBamB zS)f+jZWx%zm|tQ}FmPo7(XnLvbDd)^TE(x*$#Z`gp%lI-5AON>prh6V=gD$ zd|LOHSSD(|vN8oc2feJdR-jQx*Q)29Ut=1^0mAzfrB$=%B#@Cq#=@`lb?}-MirGKzgH0fAT|Gw3o>t%63NWZQ##{FMS3?jW0U1V*H^F2&)+nACf+IVs-3vX8jJy%Q%qSFYsM%9NX1C?7DL@#Uo30BJGam{bRRT+6p zglqSttW+Y|RFS{ZI@QO!)oV>%G{b8lw}Zr`qa*s4|9=<27tmqZ3YX5bYyCM?kbV98 zW37h*+SZobX@clbqHk9|$G7bC$TJ?TX)7Ge(pS4EQar-L2r)ll38iA&HSSxDVs)d5 z>ejxaUn-W^8`5cLY404!$DDUEHuse>@_4eD9EkswDgn&sk3Db^E-FI^hK)=o^Omt1 zq*>X|@Jj6t3J4A1nc|d~F9)qI1HD*mL2Mb~8OdtojHGAkS|e`{F*sUAxmS$$`E59M zq?0>}x-({zeKP2(SZfu?3!LanjU&w3Y*LjROA}=tgA3CQmK(Eym3d>9Mp3JA4g*j| zJV~ZwK+E5{ta{EzAA;TmpK{)kD0-aQ0=D(Clz%PJkMW7L6z|@pAj& z93sG|Baki$@JzQ@{W%gYPR$yP7rC_nEcnxEq*ae=yXV4a=JalAjS~HLW+bGfT{rAW zcyj}lr^x~HgAGkH;X>7R8g z^?>pXI7e_tvShmLlBps96~A%8&ljIQxybW0pUHpA*+*RIynE&1-q9`vQ5%yd*raUy z+7&Vs@n#6I#9RVvI#0B+50RkDP(7X=@lB~3RMK##@D6;c*66Z;TS3S!hSurqx?iU!EjCLN5!YnnC5aeEZuYTIJLUiy~H(tqktECG51Dq+K zz8w4*9&N{Cis1U_rdkcOUR_U{!|s^#MUXpAll+U7yyUc`m_=b_jPzPXe%$I}9>atx zer)vz_Y>7oYF%IXpv5tCi^!#&4HzV}^kd}mCJJPn7VL40oga@shCewta>P{4&Tn?1 z$CsJ@piN45C2E+%I*H4PrRNhNNdgaXR)%~4<7Zk>uhSlfk1(QZ^y5(>inO6JOUS#b z_C->);e_Lqi(TS{_v}olz16VenVyRBmi;~4$LJj`-a`&x(u%g2Zej@_?-F90H7jMk z>uc((0sF7s`v_SoHK86!1SLu}C^j1@(z5Y*IU#o1>7oPIsZR^i zt32Dgzv>aWG}Ua0bn4NvfNuQl4Ll_5uF&1bESSV}%iS@u+Zk+5I>>0StP!wYWj3p+ zDq#cmg_=#uiu(!45yS!}iG!*EehjIJ)u@jGH#k^Qj(}91pUoEDwog`Hw0&(CDT6Hs zOURz*D=*slz06PeQh6ga8*Iq9kXvwhQ5Jq&w4aXypad_jtr!Jf!v2TWJ`x9d4&9fD ziZU=U!^J7t7|fqb~`?kU|cD-gm#%YFJA!S z#kJc(mWPrP+Lp9a_kVodBRXVjspsM&JaKX7#YuhVYIe@6K=WkF4=UaDSRm4=4sCRv zrR9S9$Cw6g_a;u~;mX9`?g?{oBEMY;TZC*H z1{2r*8cn^4wzIi5`++jro2HpJs^I((L)&OKQcL&EbD9bdrxUAOllUM!AcOa$(DH=Y zH|8M>Jh;2&5OxR&O^6;x3t{rv{0cOV=;KW!udHM8Wvxh&ocE7ION7Irb$pgwJtJDd zXYm4W9v`vSiEy|~l#Z@QhN;d2l^)^rUJ(BNe`_7iKZA|LqIt-=i6U%1C#(i`VrbFW z-f+I{nqX-=A0U~dj962U1E;}LLnTZ$<3$Uh>H38RjAb=p+8L5lE1(*PBpW}*8-yi+ z2JLgd>?_3D!893h1NtSf(Iaa^1~W@ndj3#WZKdyLGw9I5zwn`E$31f*@(yiR_55%x zk&JDNlouGLc2m5=nlK39zym{5&5~2L5;!Q2p?E{O%JtX*C>YF?>E*@0VWQk%lgNhNjWRu%$GQRW#V>T z9Bcht7b(==76~W3`%f=z1MlG1xd6*9h5&hGaARa!EuUq~?_Mnwqpn0r05m?ewO;qQ z|COoyHB1_ZjKc9A?Mu|RNLYG_JxnDQ&Bwb!4%E$iJOiP><@y#aRM+nu@Q(Eb1qb$_ z%3_#iykZ=Cn#iAc9^VA5C$Z*w&uIJACD~|_YDBbZ_ITo3CcoC<8|~XvsT4!hrAg?b zG(K@IElo`Tn`|vQ-a>cScgfj|UMY3~@hMVSp!uqL;CUFp@@PNIFT*(n)hFYoIJU7Z zy1QAn><7;l-i@@JFC1+~S$v}$Q!3^nTgD35_icL|Q7&3|6GDQQ$O(g7^$t+%{7WxGnv6}KjuDoTGuL={;s_F?KuMXeI{({MAQ?) z)7I%B2PJ0zhCW*6RMJajM@KkUS}K>bOQB1e*>;oiWc%N2!LK8<$5}}vF?i|SRSkLr z0Cm%=(b@UO4b>_Tr^qh<7tPl&G|E3Al8y5aw$)sP$wCwBWM78FSU-r4)B`(~!Jr%> z_M)I>43_EGHqS|~v2B&QDC|?~#CRI41s$sfW>nD?LQk`$k9=xj3Wh;F={@(hKjCYD zpVG{C^CHec@JVLrP_nO#?l4e`HOvbHXX3%~#CtYw) zWTp6%XG`ga&QX7%-L4Z_)%)HT3MWIQbiCVCPpR-kI3_vN3!M?_`hK?X5Up^skkj|H z;J{j`0pFSF+VlE0Q!9$Zi0@Zk+HYX_C@r`I-c&GEJ18k68Q@N;&-;$#y8I4>v~rXD z>b{|nLu+gQYrwj(gX9+Y-3jLXhn?+Nt4~MO|8w z@=Lk_Nv%=Ny4(de+y|b-Aa{7jSfxGK)%$5J!|j>ft2R7cj;Ywm>oCTiw8WhEoy^Mv zIaV&^I`0$gEe>KZfndFn{O$Z@8DG7aGDUhRw-44tJuF=3#EtV%nXa(GS(*vZ;$Vz} z(KhQeI{4_#d1PGy&cJM#;dVCI(|HeEEKU58-w`Jxh!0tqd&z zMLM_=98U~0h2+b5a25-36_|gynw(&_gCB@W@8fu(M)gZ0Z2T%|`mqtmr3`suz~M>) zlrga1h9B!VOe@TAs2Y)=Lhqy!ajj+ss2%*&U! zA}$-ZdTLAjM&6%dt@@E0O7Vea;E7KypR*)6Lr*!Y7*-LDViwBBDk=Vrf_=RWlSU9~ zmMSliYn3S8xD3ZnvuUnqUZcmeV)e*y8tF^jc(37orvEXY?b*Wd;B#6I=k>A0vvf8` z9&e=uq+xb(+zQlzGjS2Y*J_)SrMqRHm_4H8C;Ph_+xUCODr1QX3xhG5_l^Z2o-lCt zET(awxUhOj;Ab$P;Cfo=w{T14(tdd*H>sSgv03A&)V06?QU||V7v`}^1kgq2ehH5` zzJ_w?dn*rg=*?@YTX3N|JawQlA(WXY1^+^vlyc?`O$_G31(l(d`N7bfL2)du$nz2ZavOYGi_T_~=Ruf&@^|op(!WKX z2Rmfzie&+tbX4?ZMCocefAyr`=YxXAMx->-FGB?iRfi$IE)&C(@@8f=oX%#Z{eAZK z>T}0hbQATwTrk$YfYUFlaYu`-XX&!JXgCAfTujFTj;f!rA_+)^f2E-tB;n9sIAphiA zD)R%`SKRl5m;tKs(u)I*|7sU|ju5}aZ{a%I@c*$Y)t^bZ`J+|8HFeSBJJ&VsPqO(} zr-BI-c&4=3{?zOSzBA0)%jc;OsWJuX98i#nJHUeIFlhV$BfexMH$0?M6?C9CI2S~JLBeU2#YNjCSeMEI*r2vvZ0b|HYY ztsDPvnR4A?dNQ8rNV&lL=)RUs@CYSU#LFu;=;?nL9M_?^zPwMP2q8bWlK> z`PotWDY|6(^4P_zDmAB)OY_|?0sv*L%gzqYb{QEPK^q@y&wk%Nx)UP)s;|N)n;L?r z7jVZ`sp0a8+~CE2b`$(7Z!?b|&On7^F9oLF#Mo@JTcv_bHre$M>RwIx%x7_1^R%&< zQp~1F&iP4i)@C5lqTodAcYRiC-<{TT1PE8sLO1au)u$_5M?Y=4UGeGmHL`Xb_S_pE zmdXQ0)-uWSfOXraWUeO#Y#`zcjgBAK8CkVyxPQX8dgCc+fjY$YRv}0bye$6{W@VDs z20guj6!Uy8gr|g0ho7M(DPb)yO!L_b}5n+-|8=Kh%nZaH)m-BZBBUkz#YQk z6tEzsa)Q??=Qp(kLM5|3RXE5ArHyAgDXO25s`xEwParz8KW0D5dQ%1>sQST=D>V%k z?(p*VdhES~ja%+GLu158{2PafTqcX{4_`}o!S~5;Ng(-D+OKmbfhZ$paB~MV%4rWw z(!6HQORnEftF84Lrs=D}bz#mL_Sr+_9I9hCyB79WemBh7S&SrKe^>q|;VG`puz?S2 zc=fa~3{tkzrhv|$t5VKA{~h8ZL!dMHT`*0saUukG@pUF%{`k)c^U6pncX1bmmAM1W zHmL)q&6G-ViX{$c^XX|vMxOL~yFSa1TS5R1oxk?E`4gkDsk`%{tz#smYDM^3t~2V? zA+i44yyKM2-vx-XC@^!^pCE~{{hB1hSuF6%q;WqVZJv_%Pe)#$NbvPo;lU1w20- zA0Z4Ou#2<}P zys1nX1xG-*n`I!+jn#=|4m21ldefKoLFJ9|&e#ZK$7R3Y`;^9Btl&l{)(7X~crcVP z9$>Dp;lQdypPR(kG-_?b`YRN~$xF^mX6@ZfTYm<>5oMs}s`GvQ%&rtX6d7B%REt&i zuax7UgJd4twjK+b$Xd}IS_s`$wg`NNrj0B!iBB#m>g^#p$WjWrXsngKq@+l%lmEck z@9xxlFSEz)RSXxjR|bOR3xj^ zYMq@(OYK?&u+zKd0cLPs$YYe0$x;n6_4uPS-H)3bz5_-ZoSd;H^RfE;}-YyxpyY1DuQGO9y0h z2gCTLO5+wO*PodG@iLp=0_HYHE__*88QVSA2sb!$$iiB0fIAef_#zp+W9B6mhTj6Y z@K%3)}&QqWi^0x?nTh4n8hvAMW+=PQwh)e9;G3cUt#^to{39dGuvJa_vSq zoGX%a&00BYG2PYS1%P?PcvnwDT0N$nxUIz3!sQSx*xz3KzP^-s?9a6HbVR7-vvSl5 zE|;TRg_UYDF~rfcpTVz@?N1T{sxt&A&pnY{s?lfT<1R>8u~fn}O}&$vw~uwNukPu< z7rCD!b9-ue7f#3{`Fdfkm5zA!#Gj|xgyFpN8n~UFfUDz6_mB^7Q{H-^bUgI$!Fqo=0S zGwK)3=}p`^v6^Tm{_tNh0RdG-c4=H>xJhuNTN{ z@or3G2Q2o~c!^1jHa(kudy&~AGnX>xGSF>6InVT!xGjX~BAMWNj;$2<4&xq$EC419 zs)rPfN$n{dDu$Om1sP;D#kVacV+Uf@A#p>(Vnh$?pT-Xp3kx{A`J661$)pdjnm9Sz zo*dF?m2@ok%ilV2?1X4v%s`tb{C7!0-+aDWzaWZ;OH&&A95Z!FGmjM`uF`cXCtDS`&92j2YW(!l+R&1&dE-;x|FrL zKxh|Sbr7$IJ*Lu+d&3XDIPhDS<1z}C{W$Lj|*{WUc>8WnJ+X4+E4-cEmeIKOwyPBhTIH51v+Q*^Ihf@k}|s~xX20^>@0eo8d=TEBb|PvQ|t+Xz`uPOc%w!~ zaKI;Z2a&>`+YqAr=f7A5vDM9}WfT3fDxOTYQzE%sIm@;y(cpEaI~xgg5ALlGRb33q z-z55D(PM~dIe_<&3_yZaKR*@)EFm#=3A`fhSC;C+OVB(Czb+%Uda_%Nmdx3zeFoJ- zFCaCNo)gp<8-xGv_!iKXnS%`VQh=&OgY0q+b4gp?QuKzEbFz^xZ!^i#Y3AjM%Rh^Q zBv_pPMBrkraDQ zz=-=DFcuo)8WZwpOw$ND6;7$mKdAvn0lItMmZ);$0bZQ zftsROUzEBYT;Z*V4^|%!!CSkWA^wpYwiw-kDQ5kt#5Yh|PeAT;u-55JJw@@l5DOKd z=s%PZ^W&H1|M*)y@otVD!D5|qpctn@G_yYqz@g{^1*{}byGnJQ?nXh;>LD4GSgJ=ZhZI;ao$-4pOsXwPxM?=g zGC@<~t?sFeLxIySU5iU1$#LQgx*_c;<|knivPc1n(OJd#)l&D2|KyjiiPRy(gh}*q zoWPh-RIVlTPzvD_nrXh(hWzJqAojVr&9YcIiAq(t-ceO+p8nqiXdw?h&O>fA!;EQI^tTj9C3sg&5#%5Ut12-l9 z*4MsNsqxhCnQHi;{?v5mx?0Z@o!v-PYeGR@xhS=?04~n`29*)Nm%RxEltT5Y-@oxP z7{{EH3)8k}KnqFPWYW7Y!PBTGw-S7ZYOc2HUK^JwTF@%y%|_o2{zic!Gn7cGw{_0& z!{h+!%kDTQTshAwUxfvlDFg%GYLX^j^8IT0{A$6C%smg>VsbT`=^HbxKD4ZU zvjgUGL$1#Y0zdvY{NMl?=uf;-;Ps|Iv82 ztP~CVt7I0fvJ{%Fg(XtYO4>cJYH__8#U{Hog%eia^ZV8bea`2_ZE%d%=W;`Y#+LYs z9USwOGwI5?-Z^dvLVHLo$?|hv8|JaF%6EqP{W$^L5j&XoNafjFC224a4=SOJ`nQB_ zy0Fk=`?Yc;QwmpfUEVCvMt}N~k6#q6{l6qZ+D-e=Qqx77@-`H`6^!(sN9MZCCiT^8&MQ|YknNgFGwoI z)XqoDr6iH&%?MTlen7rxG=RoWC)ve}>B@w%i~jQUUyJcs>?b%Yp62hgyha9Tv-3k_ zdG#1M0S{`STPMCb@Mgauq1wbb^!W1MU#$^hF9J&q;BZh=d4F{sQ!>VVD`u!%{QZh zp{H9gEuuHF5+n9BbD8M*l7Oj1e}U9YZFt?$eh zc*zqb`&lcZ7sY>6_pC3g^^tsXgC5h8j>dtid2$Jrc5z{^NW< z$7#CBcRH`~(vD-5=B#ItFF3=bLPHYK97j2Wvig24A=%#Va$fFh^3NVU zPSP`91AGhzG(NytKz&070b!bECoa#kpvP}Ti&d@Y7!QwE>;$|ONoG|fk~4-*F~)<~ zvJjG&j(0%i1YKZ;Tyx5QRW|2WHTS4@1zX>0O4b}rpRwY)%E`on7^qF+I=ID15SU=^Bl<)uAR7dcy;d;J6ox}_n{-jH=o`P$B9%7xbW~vn>o6O1kY_%igqsw$S0QBw%*<--Qu6q z&RHo5bSR(;4)soF(rC}2uJ~4u!jbDv2G0%5U#_Ce{@kG#%Ranw;zRiRL%;XN25$dO zDs-Q~pHARFa)=ww=`t0KFez_K5IPV+mQKjzQ_=r4 zu0mk?Mo>wi%3q5Xw4oEa^Jm2yqor)p+SenHCI|{)CxGYIuYf^bqj7^MJjH~Pw1rQ$ zvzt_v4R5QP{%3#OSUMiK?oZ_GyW&*Wip*H7IOOuu`b(79c_gAgSP6f{BaFjCL;bC7 z-SM)K?aTv<{M~sgvrnswg|jEV{k4f(a36S}O57A2Flr=ulk z933l&jZHapr$xQoKfACm@_tozwTh?g`X&9s6(`JIYbhTKHmrPnG88LR0S<~d zkP5B=3Nm=vO3Z$wcnM*?y_2SxpR~EO9Cu0uA5Lp~uYF?qWsN-S8h2RmF)46v87fmz z5IsPrY&z6(#CDkBs64NwRrMHFpD6fN`#&f9i**}NW6Q@E;Q;8Hv=N!oVCzRj^dj&O zE<4uM$9WQ^*!cm?F&{+z`8g7Ae|Jvc09;9e;|YBokxho`Z?EI32A}U({Ai)Rf%aN6p;eqV=Cs zNrbz7h==t2|2zseuQf-)uu`Jw%z%DI^S8b&nrJ%AZ#T()IW|=fx7KTH=$-H|8O|&=IImH(_{0E{8Gwcc?L!P%RMZvXsV8LwTcWS|OawKO{`=SmwSrl_ zYTTK8r(*EwHKtna6PlJcPGM9X-@Y}$kKDnI^~fRyNFCFv+eSwjf3RV~sF&ZMudP_W zVTDpra^}}-AQI`9(y~TuNz)VD6w3t+Hb|`VlWhPrZSTZFb~j5X#9};u)9Fc!!$1Ze zuP0B&?{3C|Pgq+i%FcgyOMpkuS4?UAqQqlQ#2r{N`XWIghzYGFY#?JvC7cF4GZzbO zRMbL#p@WL;C*XZxshrt~{O*C9FIwG}*3o`|%i!tHe*tP?Spa;eeZvOdKC!Ly1-zhO z(k(T(-}8x1{uav0?0k`p(^r(zjRtn|IXMXB2XgK@ElCoxcCqIXxta=M4XV*pL%(fM zzu7og_5MnhPuSIq0b58Q80B=&)s0i#30W?*3`62~$@F?P=bX9b?viQ5adUko`2Zbg zhNhwB1cjw9Zc|8mn<4lW`5Al_well*WMrPal^WDs?zU|twey7%&1L0b{5|5+YR7UZ z1r}W>kf7~{MNlstp@&td4M6tWRr%E%c<^YjwwvCc*y`fabjAMqS&2(a*Wf(nLZ#lcgye{WUyP6B;dE z6tR(sn~aEN&!@PI{C0y(?zRo`lP?`oSJo7r%eQyb7y6)zr4h7@lx?a2gZCO zZJxhOF5vJcpCF%p^v||&J|_1@I{dL+aUObHlQ90*&MB?k)e+7H*y^dXdSxG0L+tgY zElrR6PHob?Ff^9^Fam>nQN-;b_V17D-35I?0gO;3VfJzs}Acqx4u6U-hHX3Lp1lmV)@eVCB2m>g=TF1$}z&^~yO`<6Ah!q4Xv z?TxPXMQI2FJ2e&0q)`3GCnt4_!!(NO6;yg;(xFJt&n-T+fA;%XOv!r_l|P$B&~Das zO)s(30Ke0bE=>z53N@u*f$G;b1OI4x!%gCx2JMZ3g4{a=5LTXWa!J3SxY@tq&g#G6 z#?ae;B_YUm0id?TbOW$jAyU_p`p54^wd*b=ptA{ys>4FhCt+jmRL>_kUAJPTnUP#% z9hd{M!v=7MXvyIo5ux}B;i3=3IK%Yude0|Jm!2eM2ad~guZ}>856$)NF$yTXaJ5hS zB6ujG_1G`BsXi??0<5xcu}6LREGn|RxEP*rPN|sa+8?=Sn7_h`n>NRibw}8{&U`Ex z<40*+pj31zM2L_Xt9T!!yV+_tl)S9Hl5+nDi4kaOawUF3@+0kJL4c0_Jq%*#+^uiL zP1ahq?gSkrR+9a7pkrnGE{`>zhduwS2AP)>&I1jK@-8AV4CvDE+`hs-*TJjBddIxt z*DXLr-}wVV!OY~KNBLjgs0YB<%8Z2lw{M)Ky2mWeh~+d_CcSb%^+-ksIBfok*F{UFkZQ>U{n* z-_}_-EmaNKd6Q&Shw|RTb@{F4X<3^VHou1cQyVFpP$~?xFCqb0psh;{$^PZD_6d98 z!5R8#|3&xhm%b6a&bwK-l&!f1lHN_kMpRVd52X(qhzZZDA?Sddu;wM(;ELJu>B=R= z7XPnwpJ4!fVJQF+-Q&4S$#x*CHVBwW2DdC>U@wlmxpmyh=G?x@Oy~Uu7kDte!CTov zSl7udktqBHSjCno9~Q99C7r(r4Jfx1iPU)>S(RaW|G`u5Ns9UI4PsUD@x8q#C5Fiw z0&j^LwjRKtzNo9BsSc6wMhi=l^2FALYw%yoCW7~FP+qP}nHXGY+oHVv=+l_61yU%;hAJ}`%thsez7URoD zzZ3~xGBdgmHco-TG5x5ugRM=*`F5BuMv=kv!1F+L7wp~wJ3n~Ju=a{NY?hiyZ{+aa z&B=%6uUQA||a=lEc9g`ok?ZRk|iwHXIXyi3w_dNBf4LbRw1 zUW*j`xmvI^UHO_uDmqfqH9FPR`s1)5c!5A8>xKiUMdGBcq#_O}Pi=e4Jd-bGm~JWY z3E|m0m$2Emx@C2zzrN0A?0fAVNUbEX3$X8^Kn-UnH5tj- zAWCDS!o)HDwzJiHdg$SUisX&h4g6a=-gKn~|UR;hU>s%i7 zbWo7Mbi@PrGLhqJ6-dE7$D;)S6nR`(ZI0wJ$n=jBYdrUYo;}?dk7VoNkyKdMQL3PT z79xilIoTZLnC)VGD<^YlBCnfYWU<*E5W1!%m`O?kz3E?@zB&7&8^gq(jxoLW+}&pC zu1d)2yo@=p^n#^!`l;jCBqj>00o)(VXOVl2@Iz5+SY4u&I{mpC$l60-;)Ujs)GM0Z zWEY0}^6yosJL`8!9mdun5amDWyhp9ffkK4NocTj2#8dl9mn!%8`Kf}|1)=jf7FFhe zpu)$Dy#=P?Qf~I6*msBU$Y#&uHXLgc^LvR`zt*kmT0DSw-cC%a6h#mn;DMtWJ*lnX zX}Umv7cs>mEt6Qr=zuCu-+6&SxUkHdh8#>1N0F=EzQsvHf(M!*j@GK~kR=EbF>j>ePz$lF%$KH09F!6wNY8#j)hfDspKlj=^KOB|FeDW z-E1A38Iz5K_LpU)U!VIoMOX$UgBq_mZpqaApxR964`m0;tFt7?r$==UuBBFvVfA

    YBkaA;`w5@bYq8yUrf)dIcb$R3(Bdh4PupD#;*> zqfX%ijsU-mCJ&vqQsZifZizNWHc5O%XhrB==&)#JX*S<}Jl-xx+>Rd3Z@834$DiM0 zK-$DX-y!cq8*Mu`a`TD*~kB_T?BSeXb{fr zRw9qolq|G3M|^QiJ9Q#CGSN*%USWXhfx?8EyS(;Cry{NV?Gi~vj0(Z3R#klUaba-< zZIN&Vth3*d%qapE55qX)4GTI8KKn1$CWij>rVMg*OpSU?-$u*E$;RtCl?IZP=;cjk zjuZ2bNJrGBGh|_`7re;j%KN$8bP!{hoWcUn2}N3<1SAINmeXQcO&r;h0u&K zb;Na2!&>ooeKdp3?wgvW(-o1qPx41EL8BS}F&c`z7hmR0bCJ5#I9%8AO`nPI_nz!6Se= zLXuuWX)nrj*iQ)li^9%dbQ*!r-z5wmIhZB>l>wD2dN1&N8VpA0I=@i?zbsKaWieJW z_}PG&1Dh@|bjH1riVIOE$~QQFIE@fXgBrch(r~wymgCDLrW^Vr@>D27@1nvM1un%X zg(|dZ5Ot7=q|+!96-zRX3bjI9n$W9|jnuloF&9>&{{!MVX*P-uLKokgFoa2nrKrk~ z>Q}20=mUFn8Y%b+L@U}%&ebnoOrc+f!OVzKaauSY<#L8Qm%Z^j_MYorYrrpHN?^gE zGQ+|mk-r@dLJSg|b(11d9}^c7Pf+mE-)cX!Ye#g(ey=MiLQec$a$}avIPcpu$$Nfc zaiuYr&dg7HeR4@}Mu@%hcs^=PW;R{58TFelx~i zL_KV$*XEWN@>yva_$9i-%5}Ua7SNUB$9ES8L>a`Z`Pk8Q@qToF1fHIan!VJpcL#zW zlO^v@B+t#SGzHv4;tci(u_ZT8A(#W$Aw7G6gZycxflPbB!M>}<0ZR(t9RVxpgBpeu z5gLuaO8Q6SiA{T-S6uHNb1ZMsT-LO``dO-FSC^D&-Sq{O4VBuZyUxAZR)>Xep|@JaK`7z!`R zRLyNpx24xX>4Ntm4}|NdNJy_{DrSyk_4VDSDHfh0BtnlnIYWbUiuoN& zO_sA-CYs0Ud(Ni!o+dO5^s>foLDwMrwY}D!*1LG>c`vL|%)r%My2?2B4V2h6xk zPqWlI@={7w^AssO!FBFhNehy8SNa`WU`lWblh0c`FK9MWe{gC(RtA(Pw4|>ouCT4< zzun2EO1t@9nt$~5w&WkDqL47#p>GIXLZL({fy{<9a5_oqDfuPQK~_4~kX5(X(!6Re z>-wSA1CD{%GSNuaDCKN+Om>vZ4919zHI~+xzQG_?XW!u5kYnIvw0MBHYrLnui@jgH&pC`U zV%9E|k*fkkT}%8Vb5wDK5D&*_=@77)J{PR!e9+~f$UbIx!fpOXvvqN>H8K3+W< zl`mQ;odo0G@kQAwTIbmOvTJaUx^TZ@xl4CnG=41B9rhi%QjmMb3%H6`dgU7V+a^d)$wFUo3n6Zdck9`Z4`h*Ja zH5WBn#lU>r{5xxK(^`|=sp#pqQ!AWi9Cp8-J-B}BOF?YsY&CXOer2gyXAk$l3KpRL zSgUKVL|{M{xIj2qnVDAKGBa@o#p|yj{15!IKU|wHu{D8iHf{4|>wdQSeGK2|0ffih zH3dk5KF<%bEtJ)r)n%l)jO=Xa42_3kbfPi@1xjtWQOq>k}-EFLGow(e2 ziT|es*XR2mpXrGS|EGzw6)&;6j69*RoudgMD;*0R12G>AAt51;qp>NMqKN2!*gyaA z5}P|a+jG&=yScg1xiQn(IhxTka&mIgGceIJG0}dupmp-FbvAIPwRIx--%kG9kBEtr zk)wsZvxS{4;UE1P7}~iw^AZ#PG0>mC|CQ6k-Qxd7vUU28TAvEi|3RT=q+_7}v+qw< zoOaSgVUz*qGQleU8D$$;QC*KkffT`M(kW!m06poXngYf93oOG#|=4?7BY6nlk0NCp1>$e)A38{=0$aL(;Ri!AOB zI@tWP#V3P4v^-EV>67f=Sw9K#rJt1QSps?tv47G2|703i0qsr%f{y$9GY1dy44JYu zpU&ThqQ;^pr)}hn%Ya-Oa~3TUTY4mlM&MC^uoH=)JNQzV?eHGtaI`|`XcXGc0@It1 zY~+JX2>iFF%oHRTpthYj#ZSi>$$7cMnia<1<4FKPiki}?2d-Ao;JP!xnY)`;3NG7z zT#ro)Gz-=a;|sh*_D`{t$$;fR)$UI%{AY*XKP&ia#VmBdPT(FfjP59Gw&nv10t0=gAXhsS zO{u!SFqc5Mzh;Ps zsxSW_z3&J07h8p}b%GB_gzh>N&?#L1%C<`*6N4{18Cfx#ABXTx1PU7Jx0B3I!>3Yl z)K0e3cOtJR-Q4!7Euzz2)Vl5v^piS<3%lVqG4VZ}pdaL?&?WMqxL?Q?i2l9^gaH}i zgj5LNVv%3j&PqOn6J9<>?@2s*Q!vQ4yDebj1312L7y<3vxNVIcUJTM+S_@?g2F7_D zAu95}AZ8X8#ywBqT2GD-F!v(+L$0Z85wKb*jz3)o0#ZD~!Az@gWdo?WUB|4eXKq zfEdnj8~#=LC^ol^=Ke5=bA8-xe)fJv$HR4rb7tiRQQh^d`Gr<`m`P)b0HYK#Cu&p0 z(*Mo>DhW*7|8I{aHLzUZ*CAX^5>xVKTdkO9f-}&9U590(n%x`Qx)JV6zt;C|_PF~| z>B@FPV&4T)1Oi-{pKe+Yg{y9vCd<&G?z(lp{Oy(MCJfHc&t-AnEzjMpu$#Y$k(58u zy5Dc#BE=Ry?YZq<|Bj)3y6j_lg@=>wsXJVY#jxcCZnMhmhkNu5N(%(UCw`FV>Y+xP z%Za_dKDtFNv>@u)0g4-=`zoI+t_7416T>I@f{s)Vjn2+n^xd_!oR0`swh3Wadj$my z(f;N@pZ-(^2WAL=fws*IV~M?9DlE)M4@~s!e1=WKX(~|S6 zZwvkVHRCQj&MsEiWOU?jB$Ia2-wxbtSio4wJW{*nF5GX#vk%id-1 zI2?QQg6t56^J*u|zgQ^D#j3_ku0&3Czu=GYWd*Bv5L*-QMtq*!MMf;?N`;S3-RMf{ zG|l3Yzbq-TJ3DcB?CeA>Xi^WjU`EjGOyMT7Bf51lz%p-o;!N>+BG|qRMz@|NnJy(I zgxD5V50lh%hgitT4w(pN^eVN2%kg%#N*k=n27R4Q0o#=LyWNTBr67hm2J6b*5%KuW z7V|Z@jz{{vw46>;wIliZY68DElOoitKX?`CtP=eAcmrxP%)#|~zsE$?3VU5B-P-9h zyY_VkEj;I~Qe(fvV}w%Aw0t?^XfAqo5=*F?41i-EqCUN zltFsT++TC68B-Z(ShG7C5F=;(!T*ZEp1m?;E`ia_HVl<;KG_K_x5>8D7)50r zFk9_5o>!QW6|e!vW6pO*^Qh~jeP;Gv$zA~4zS|Nj$CRN7*IXQ*F|Yh`)3X`vThK*o z?(#L~`ILVA%_ov?AZxG#32hy2k?#go)T21O>jcsC;WVn1v|f5b;RGosdyebviVvWQW_TzL$w*VlihmN;lsR8->A zCC0t|!+sPrG)FHsitJBCelK6a|9gt@hy_rNxb6%bDgP;r07x;)rxaW`8~@B+pAk?P zi~?veA0bidpR|A#$*r^}UNbW@wc|T1qYlaIdZ&L*QJE)5C_G-D*JW2Iolg7KS;JKI z3$QSz!SU~6aT%GxR)^hb@d9l?Dt*r@GPP=D?_@UjL=`+9*VX`v+<=Bz_P$=z@5A{T zV7_MbDV}%!*Emr3xaeg$nf}Zl9?80x-2aMmFG2#~TPhW*+YM8E!h(W=(V7nlWKvge zkDDa)HAPwMb{jk-t(Qx6pxNARqAj-TQG~BQAqloV=?DO>auv`@kW0WUI??(p(D2$+ z*_JynQ@xW(K3re7x~up<5u;1IfV6a!hN`wUZUIPv>#Snt9N%!1=@ZYG{8b0tqE0^e2tk2mzl5b0su|R;yVb z@NjOv+0A>lHV&7YnoO-`xZM}LL9M3af|kK<7n7>zi-p5x-HV~;4V*oEwcX$4Lggi2 z1!BMQ`mqgY9STFwXWu06N+$ohU>?ZtVFRH$kmbKxgB1`oTAg(MudsrTJTfscSQyI} z5eSLkskYhSX1C@%E3?$$M*n_a!WzBG)tde7J18pH0`?ccXQQ-0x53T||I3o+))T@M5X$@IyGK zXsIB`**y9AyqG~{6vqOgQy}nq6R0%a(x~Ocu z^%K17zbF7}1>1iVXzHTX`@Nm&0XCV0u9W0>gJm+=l3Uf%Vdo2gAM_D)bDQw-=-a@3 zGOP!zMZz`LG(@F^^`~AD>V{{b4rO1V+uIhY8^oK*{^^Y`LcP1!s9NQg3Qc7_Rqspn zR*Q7F{Jv{Y6}(;#K`>30LzN)44+^T>=fc5jiLzY38kGc919ePHOt1{pCQ|0JG{<@a zK5-qj)6VVhMhhhpmI?-2)`68+h*3opE3}d-i6*%QNj%j^psbD;E9+?V%tM5d?Mp5n zPVJ{TzX0f6q-6Ti?Dhh{#zfjj&!xj?lW+vB{q4cT;G~aSyW8c3*XX}OSeYAGC4&V< zwQrLqUDusr*|_uG2-4YR4*?#x>p(33yZHqkujky;Ye{i&y3=`Kr7I;RWl4@tU0*d! z4IDO0@5OpYG-B0TnJI_-@q9(<^{vXR8SP<~6MV=QED*HuT%QLGb4^lS1hXILz#QGf zZBP3u!^U_88xvZQsT;b{KQZ(|5^>OW@R0nq5ZZti8#TW7bYH>gs;{H4!7IMjbmC801StvN#{fdq1wb$>*-_?HSW(HjbpBZ+aYN zW3rgXF$TXs_IyN#z~Nv=*A5o0YzdrSwC=)d4FYUQTw0%zc-0Ka`@cD!w~&fa)Ko%@YF^8W3|eO3htwQ=a*YOlh)QRtniR zr^^e*kn-DlQdOU}Ak7Zqdb;#fFyy_Uu%$oW1phs@`-53B0Y^_-OF26?USINFcHUn7 zBF}XvVbk-z9yDKVu6kAw)L?DL#?zEZQ;}B48W~+cnwZ%~Jro)&=OI?>BrB>lt?t=3S{(`*GgW8JWoC;&JX&GFx9m|&{Ql|M z@={Rdc|RzXN>FE~%NHKM!_t+JQA62ed?xVS!IgitK7=YmS5-n|wOSZ|I$vl5Z-*_% zC)jG2Ogtt2rweL|6H-$CK%eBgqr~u_4c$ng{iof0!h^hpKeDzJAC>ICiWu=crq67r z7J|Q;@b}96vw)q$20`_=nh+Zd|2IuJ1Sn8n<^O|C0FERs(z}wY9yA?%n4!`_jU`f| z%z(MF`}lJkg6{!0{hyQHCz{u*6xL(wMSxKB0flA!9q=qWH@;Xdux$1pKu~EhM=e!zc;ku?XS4H_K>EX!`vx&G z2b7zLyBv6@=sRS`^@Wk#R$(-x%zf)a>COY7X5a+!JD=|*&VINmmDp$A z)Gm>B_F`}t2gd$c#%*g-FE2{-dvJ4$BNm zm+{<>I|sulBx;ihvN*90Bq7^8*SEYWIe{0!1O_gaT$)*4Ty$rwc$<4U(YGOw5Wi!0 zJEcwr<%XE`l9@1g!xq*qkGT(Y3sa3F#;fhNNLGU!*g2p*7UC7;gSG% za`)aYSYP`U_EZ6giGeSktG@cPF?lb>WPe+J!L2A!T82Y&*S1I-oW;sDE)Tqh*{R5sl6=Fq#9!NX>Q_BRS0wjz>#>e{G0>tc7u+1CN7NOUSVTaofcn^T! zF2?{yw#pciOkM>zHzZjA5Eu{VMr0&>74z_Ar4;z@7*J^NUb%Yv3lFhQ7x%w!3a0de zPk1i%D{j)qr7=wy;iP(QZiL;s*BV&FFFQF8*&2Tf|FD~L=ybVsQqS-Pz5qR6$2WH}XE1$^p>xmAL9F&nz092xnD8aYFb`JDO5z~6z#3C{) z%di?##tyiAk*nlng?b<+@>UE6I6rKZn3+J`hARH*clR;9WWUIXzm_PZpXTiCnyP6I zQjl&1DP;hS<@DVpec4)a9e+K{4Eql8b5z(?88zdpi_ln*wYBaOG2dw9H|HE|F>bPt zqp#Nd_FtY0MCrt>!<&CWbF^-w@bv6)!0g`2ub<$;^gbl+a6``lf$U~F0~hQ@fW>lhwb_0zLJ{!$tUq(M8!$PKOd_Ry$6`IfGd zJxSbb<%ymKcm>buW!~Jg&DQt?ps7C4Lyo&{64e?Onij;b#~$TaD5~3 zo40zBjzZ6TJd13#xxKo2vn!aF8!B{IFnm34UbTPhF)cLsNV(dh9kv>ag zpI%q32_QD@aspZ+i1cXDp$vfF3buhi`G-F6GNtmRI( z^ne;k9T#1;n0;1(!XOA=l^c8M?vj<$;riRoVPCZP?Cj{JAh1Ls%1z#f8kAp75I-YG zZDPKG6eDsZ&}*40W`r+~g44bjp2Y3|D*9^7{VwrmhF4DxTub#;XO~5Rbla^6!1d{| z@c+8mNfEHH!{+v_0_mXiw+J?AUVP7)6_-kWYndB06ow8Zmn7o$B05@7MaMK)dpkdH zj2wuNelZSkyPXiXGu;sojmsN5#QqjWmE1KG_Izg#)L_hv=u2Hep2h~uKlgYjl7U!z ziwm~lki&2TSUF|gpT5q(%{

    252v5o}@+kz9CPG72MbTRlLu4<(yBpWU~=`@x0$) zs^K*uDUf{Vo12Y30tH~$9M6~+NQsBY#Z8G?Hlst?WVY9TwVux$DRBImFjFT(+{6iq z!4}NUU5A#F!_sqW7=~f(+A%Z8yp?aek5GK2`CJf1){N@ zfdYL_^FChYD0MO$m&J^HxRxM0+?a-2o8;*wTfTz%i_ro>CkgsvZ!v9Mp+k#&?`^kM z1Euj8YKl5OE@Eph+Lu54zIobquEnVa&xBA*fSvQVd@Ydvhrr>X>=Jn=(8iL!Y)bIl z-CZPHjdV`adubsi8;3G~iE*C*BcS(_0rH@)f)tSvM9++GXX!&)8(*T$#XApaYBk@5$r zCHv`yT=o?NHs&0>9v9C3qsVlg1r}cFzjY@f9mpk~dO+F!<92xyeMp7WgD?mw;@8j7 z`oR*`;{?7z;ZC}}`7vktpJCkFgXkyEtF?sC`m@DA->u1TkCu;m{TZL;1qf*$^RQeN zW^CpexI#&R3CVkaZk61&?g^CY_{SzVgcw4AW(lt`0>H}Sdxwm?vStuCvA1qO^ey&f zuji1}cEyJB|5Hp7l7ejFEq^zV=?I<+NtVbf0K0*OWqha#PHXUni#w!{MkMQUIrX>c zdBWl%XM5M8$+QDceS)u;(beS@DEBx>y zv}(8zaq(UG^(B^lD@e51jqNcYe93#chrvPLyH|1!2y&n=k`xgOgxkqD^r`Gs<@37S z4h|tD(UspEazi*ZOX>mxhCMV(=YJE%yv&xkZ~V|i=0$Bg`QY^_4=|dqoSzT=f%ltH z{8dH8!^qSDi?fF#dNaW!RkCo7H$ily48^_g7LJEH-z}VXqfYS|6@?i3ZmiNqUyiPt z(at%$zQBv2beGfDm>0Nx!LPorp1`_*MLc&qHGIe0`!Sa-Avk+|x=u+DBqDw3qL@~I zJl-ktd9eNWqTd!F z9rC&TC_O#AdT9WdhKv>uTr5u)h!#xYOxYl!_QxSKW=0v)9dIXL$>_AN~-`4DFXD4FNWNnS4Tdm3;!ewXD>k0DGm}(6qi5- zT>HQYkWchV^jsT-c$wj33_Y@N?wIq!8ObnnmL=1NC`yW595 z>bN2My<$j;R$gYZ#r)ORky;Rk?&IVn3IqO(VA&tHjkr zS=ZRPMzQzM3je6*Qs3sbnEMIc#JY!KLw$S2aIo{%=NAp`{1>jiJdr(kyI~yowxOYr z>wFKW85chJm1_4WD$3gp3Bw-3^xO?IsIrNb}Kdy`4H=^8s3nBHy{cWDqL(mVuVa_|HcbA6FG zPJc1BbGrAwTyADw-Gy9#D7p~g`2Q9??$Ap|Q-^GpMmhJ`cL@Qto$l`PDqxxsOk~s@ zYa1Qp&r9%txtP)WyhRJpE;X1AIF$2?L-jf$9GQVwj`N97#%mcbZgjYXDj}{HtN>;z zy2izvK-tSWd5MvevNodu8$UII&+ukQWIOwvYOKv1=FD(?zJi@HP|OOw>2C$Nq8Ji* zFX|9`0fH?={W>b>pv2x4>u1oWRXB{zZj~dQ7MB$|sZ|WmRYa|`(O*z?;1z#vR7Z)Q znG$-(wgKoME->oYxxI9Jc^ zW{&5J+~E5Bggj&c;z$hdf}vSGV1?Yu0-#wGeYZIvMW(pbzbWOG@6#DKe?xi#o{FgO zBD4Rq%mOy#wcPBpG}}Q>2a`_BnthL`2Lic;VmD6Hk3LAUY*JAxkyZdn&W{Xnef5r% zCC^2;wB&l|8mWOp6=J9lz01kg0=*XhfSNPxf;fMN4_~C+@n9ZfvqK`HpXB8BH{!3p zC*pf%W+YRIKy}g&De*mCxZm)*0vXP;c*5qHu*t-A$tZ`5YD5RoIyRbr;@)*#(5JcT z{NiNAw5KlNRqKS*8BU(+QhzPj#QLv}h))+`ryo{e`UrFHLL}qTp{j5Nco|yZ+9RanF!*fp;oX8Wc zr3LtQ0W}&Ym5*%xgDAn4e0UunbWnglL_Y)c=mXbR>`#mb->znd4K5c3k02N;udLA2 z)7)9_SIoP|{a~icjXa(lqX1|feT{PGT9&0I91Ew z1Q#DCko?D?$$l8;Cmto$fAxE8M-ZHO|4MdbYadn|Oaeroq;0FB;N8@-(oc!(aXq=SQjGbviTJHg`fS z0NmO>;vdg-EMku!lV6D~wAkTGDBpEjKxV7MZhn@gr zlvqMc{CO~i$OD}C;Z~KFc!8?7k%#)u9PD?^Y4;d zF%sufL*TTyz)uzZT7hQ)^MA(XBmv8iffMy@4iVX&2jaZI24LT-)}U&WfM?r$N#JEd z^(M+lkw3!P=a!h`0Iok+1PTLpHspSfZVD-QYW9}=t6vwdg~%`0Y6N>u5C7<94eV;= zDyO(XOTS!Ow}^0k&T^nb1`b$QWCqc;E&7xphQ%R9?GidIdy;pu-|lWp7j1`; zFF$*hZsXYOeFzU2z}Q#*GA}McPmzv5r%%#mPQtIZ_@ZpS>9lasXEMVpeg7@=s6iQ< zUH88LUk~KqIg4t_eEG`P>5TJW0gZNrTc!TTIg6^psQXpWfH?%IV2Vad`S4 zbc>HA#>L8qCRRiI>Vv8C1bbaC1(+}BiA@L3H`)6P`1U=4a}G4xx~zX}JHt(L8o!a* zw+euWw;fC+k|K(7j5b_U-}DhE#c!t~TZ~-?7Sy3lRHeu`Fzj-h6?d=bYjhcL3$c!A z;`3OQd3=od#~k2gR(yS|o#oW~PX(5jg0h4Ii#rAG>D)IE4;n#RQZsZd?g*kmS0(l% z4&9V%lM;1E7}B?wzj+E+Qam{MR3DI(t=mN%Th8!ylX zwqQwBkbNVcs;?^C zOxEtb#$*kQSIUl4&U*RYlW+6d&D>`uo0gkwsS&z)9xi|MT3W|nYUs7A-}<_+m80tA~5 z*Z*83oZ|t(UULvyF>AG6mtqx#VoQSO=IbU|ZJ`6M(#e8ygTsSkh*?B{yDAW_eqjF< zu60u?YvgYe4VMWCaevEng1Fd+i*R&yH5=`Njts^AIknqgN8e9qfqkG-(M*<~4sDVA z2r1>V&&{p-p{f*PCgrtmMsa^|OZ1I_uc# zae-UR91X+on_^R)`E3zt5-svYqTAiJli#Q(3MTDl7CS#o0!{F1BAI63fsAjsm^VpxwPWDdx!Q_A=YOxE8W7f= z)sg^JoW{WX^da!XQ*h%C7o_hF-SllNnmbg+41r$?UKbxi5(~62G6$=yoKmDL=9L0% z{t=AfGm$ICdTHnQDx2OjP-(mZcj!5hd;fjeJk{-urUw5^J&#wG*}Ol3ew#!9n8TZ9 zS&5fC5BNvp5y5$=YB3>Tc>QVll)%%+k(|~6R2j}e5n2Tzl@)0ve)k2tE!(0h5u>dF3xLt8HBx1Cs-fVFoJtKSo_n- zmFlJ`&wuWv1k|ETA4aFxdFe$0|39j(`JvAC|7R|1xh-2e*=u3hu9IyoYprGLWZUj! zW7)3NvhBWS&+~kK`2GhM?(2QMt{2`np&r$Guy=!CW(e+*i?7rA!uyAP>sX9@DoaE; zB{Vawf|@+)+lfs@_4ws)6Chy)sbjkioJbGHMy&y=P=m`fQF#%j4cCm7Atmf|ZV}o3 zf?rI=+v`oF(b5!}f7vzaeorbzbm%<6M7}Eb4ooRWKpSDOl<%XQxx0Irg$eb=%Y;|L zl?anLN&6c6`b8|eWz_RwC+{suyz!#$X?(b$94U0Op8}R{0n@W#kjI^$(v|JlLo+x{fcOf*)Z5AkKne_BnCln5Yvw}pDyz-QjW?dvlNvyYR* z+C<;1x-zC^i|OmauAmX?VE?j=Of6*>FKxd;WXxWn7zkWqhnQHuJ3;XC!TD0=%wy)W z;6b4ylRN^C^jbweRcHavST-6;$M;l-frky&)cq#8YgTsfo%PIs=I~7#{3m>T&@a87 z`{;CzQRo?73|d-#zs0gJRb?@O_4nMrBG#(1;(v$^63vQ5xeD<EC({7ET#0N@oc? zEs1lZlmGq|AU!_~z?k=5YEcAKRp^YQn450fxD%*J^48P{D7NxnHLE}8K9BN^DYbq6 z?oMy@S{~?F%m8}Vp$nnTi`kkJ7?0* zU@NzqJC(kwFS!Ef?V76HwJV`C9}^lG&q1sqY5iV^>LSh`4kxa_ov{}J?-!ft7S(4o zmuyd2l&Y~!S#lG7H=P$~molWjHX@bTU7K&LcD`QK?=D!78{577xx!m#fV`&wfvosL zAeG-+X!5@+x?%qVzepGXyp@0Eq~%%-N6g1LPbMf4`FYJue=Z#`UX)v#5trg?=1EI3 zw~{N_3%`Ffx#LS4rbUzrP%Y<7Ly@juaBW!327)%+ZwpV%QZ;UmHFupVj*^hA<_99v zln2N9M&WwW3~3dqVX6@k&!-}iFBaYe!aLjVg*w*u#CxtX+RZn#+g^W%EAM#-uy3Q< z#EN_)GcmqZnK?Hr^_DjO5TGsI^&FsawWT(Gb}E2;%qB}Vq^;gpWO#6*5qhgp->vKk zaFX@Y`Te7_wKEg=cl&q*=N*lV1ExWI z^__cB=sAm+t+#B=YUeuiDr|hGV)P0rAh5kqkI!$w8)wnzn7{xgi_3AEHI)A5GyQn) zcqc~Y{~q6_NcWF#zHuKP+h=d_m_XpGJ>H4mKK*b9FMq^G zbRIh2S2*6=mvJLIh$VzUD*G$kCcwTU;LlT><1ikl)9&-U7WZaM#x(Z@xBwJoN_EFqyDH=_v7(@Au+h|oG;t$n~Y<%}-*skJgj z>h@Gnzjt|r(Ivs*OVl7~w|@N!103z!72YQ51^fK8-m`Z;$?M@ykG=l^r0?#1h_<1xPMk&k34}uD>S1QeGtcvq%;eWhJ2Kp8W zLre)j*zA__&-zNjmT$n1Ff(VEJL|~>e>&Rd3ZmfC`5jZ7&ALJO$=2ATzu8urI^HlZ zFONq`t_n;H|L0=!PHqr zZWuxEyRJmzI4rO^GuP)&7RmzbH4?P3}?Atz+R1qFlz%oYS-yhcf06DK!pWJMs{*1ep0)3R8Xt2;6hc{#F>B+$Md{?aT)P36~( zUtG?-Da=_3udYi!;lKt>CjS z&O45;KnvA-!7Jgu-$9XVkMmjCX;1Ebw_uMeGq}f^JIKspX@D@m6HHEedLqH4rbXe+ zTafez6NR4p4o*pLlYFW~V|`md6Vd8t_?zM6%*fNCui51RzhwlN$W#V02xiO6E#rKe zUt++>JZEg5PT~L)9fkH7!vst;Q%3l{e~uM`4wDoAcl`;#bi8(29z-3D?W6arpqIcW zclh~dU%U>Vel=0m2V* z{))3SQ0kz%ApUN&1$)H18`yd44$%^u42QO&KYTFN_Op8;FOODsH?MCQ^W&hkD@abYUe-{t#}p}POe3N)!wK)101X_KK`DC?aGq+le}5? z1)^UD6s#4mq0D-63d`%$rL)UN;ih$ZGQHcm1sL6L3oQ3>W>JJL+w+y2`Bo=KS~ahS zH#SRZ9jn!-U77dzLo0RgXWkzTN()Fd>u`1Q@+Rq9ux_i~(8;UD{fiv`-3 zO5}6kI~G7wp-Dy03O8S+Xl^XD=dPiVwY2%-0`f0DXUnhIEMa>5Y2s|wjNRu6r z%UrJTKxA1$I-&;qwPly%vVx72U7L@^sh)JEz(Abmk$lTpO-kj%NB7X^zO0vSR(0TH z_}PcU07x9llkS%$HxJBs)9vOVf*K=A=>71bZ_4eu?z5|Hg5`|o(EI0l+fVa8HE;%d z)SJ)HFKl)eGO~Vec+YB9;&_b%&B&BHbi{l)fh0=@mjB=6NpYOpno4Bw*Fa%bA&tXn zu*7S(Fb%hSel~dz4aQhh{4V?E`#j;((4rH?PXx49{$sF^se^OuC#6`)OL)XKvzGDl zdC=J=F?-zy-Xs1=Lt74F3*p{Ra{Kh5hQE+&J?E|AXf{Z%^AmG&me8fcyhYxBA7m^g zq%7*}Hn>cahq6|IxSt4Ihm|;&8$41K7s!ut^KM+OZU^MBp_IenV#EzujI_zPDn;$N z?uo2-Y70+A!zZW}6D`cClzJk_Rl@rp*ld0DBfg{@!3xx-lXP)qY=KA)nnT3HlLVR= zR^jn@m(72HJS|pxO`IMDfwz|@W}rWR-fj%S*SgrU+-uqRT;3BQ^QF*DzxvGeInSN4 z5Y(OWmaMP~$^}+hvT?tC>|9~mqQ!KB)+}1AAEaA-cv9kF2?#JE8tC5PK8!P8w4?=N%OHdg`d0zOB0{v&oq4|&iJU`--2=w9tf2YJD#XFWn& zYv-0S?LsraesjJ~;?N9f#@`)hc;Ltu))t$4wIa3`x0JRAI%643=0d0l*ZTd_=UFZt z*{?{E>zW#5$DpLG#`*~~ew40p^D<5c($^w85B>=Z`W{$C({CB7;29vLtu6N0imM zd)jpdE9im=(S}0M;w^5(8V4~sscwk*cq>ADa;N#>L1DwX!K5IpCRTW-Oe^(sl zDy(s2;t8GKY+Gx&QLtl1?;L4}QK10k(X_MElA*_~jM74R2{CutHt_Nd6m}ZUnrCqL zK58#`iW_{J_&|8ySaoVVx6hjVFuqogqhR&@BEx96@}YE;A#T*OUXwH8WQx#gPEq6g zOKp-a>0PNfR3oY-U5|Y#H5Zv&sSAQ>)OZ?|q9x2xR47#2!UJ{3I4e}riSCO(0HCCX z&-m-n8u&6uGS^1ladL#zeXH*wR#&n2bQ(#=Od$7axFpz_Ul4E9)08xM6oGjO|JhUAB|4Vh#7W!+l{T(7jb*&77rY)O{;xs&sj zQamk4D|zx;4QO4XJBY2zPK|oNVz0YZUjg&71;|YjDkX$?`QJY_UrRgv@`5anvWK;0 z{D$_kUnd6NTb1R`Rc_ifNIK60b*e{3s~9v*q^fEyYyh{~&wsZS|1L|Qe~^k_%0p;0 zcTKI%;fA17svPRty|8|xf~bCnj~y>8%k{VM&d1X5u8jqsqkN375vT`-1mh3ONRu?$ zM!ckDn?x*}HcyJvAI{`aY!GY)tWJ@-P-mhOIKj)#D^~3%1D{V)dCkB6b!#-F{RO|S zb~`&F(ACEbk%>_@(}!jiR?%rvk2#E7<7`cHe4|!`bCHtpn;~v29Cx2wqmww(Tmdle zJ;Op>t!T`CVv^7S^HJu?WTBX`%=G-!W<)Vr=XG0IzVLFTZP(tWfBvn&Os)E6Fe9bU z^p^hPw+yvuBoZi=$*QnzSh9MIC&{{gQX-Fd$YjaS>Eogm*>+Z`KjT?|?9EHsdE4P% zF~xI$Bqs`6lM387s$nVh`YnA5tkb7q38Ot+S^UD`5V+JGb^qy?f2V~W?n_JGI}qfS zL{nA%?aSN-BtPbJ_3NzFZYGic$D(Y71`Sd9oV21xo#^#FTVOL%nEATI1G1?ZdbFYUH7FmBI5cJn1$`$|Fn+F(48hlD%)9ep*FR55T$yWxs@N?XbgWU5ytP5EK|U~#y$1HK6n zVoaj%BNKzVa;I~Ln?0Es7PU5_^RpXyY zUnsExJxmJ9u7q=2KbE%|@q4R-jS|58Y%Ja2j4P+eNDFhHAj1i%v+mTH^zB2bZ`VtF zh+!mBSBVhKaN(7*J_FJB!$bWHhfpa!w%_-lno4)#5LR2oeMcCjFAHEUAZV%@qvG*5nTNc(5?n-qEmkbxJ+QR8f*gD0Q>>3P0vW}@(#{I)H`P;kU`suFO=!YH2 z_Z&?LVst*@w`J2I(PGt^Q{&uqgIw1Y3ROJ3>E7y3Zdis8DwFbEdheVs42Z9r3h7rO zVn)jKY=Ur#sHIUEbZ4Q6h%@`lI=vTy5JrVn%2&-F^)P4m-`!}~Mn=j74G)qt{xx$Z z$aO8e-L*pK>=9G=uFI1X2sXu)$xm!9b5>%Y+IaG`fi=rmY__e@nb)|B6RluZ36z|` z<=iJsj=G%p@M=nVuEY&zqn{bX)4Nlr`y(=_G*)O=p1InAS}4^{kG*&_j4ETPh{{ry z)qsx{#p~X_{Z>XaH-by-Qs>1lX_Ly~hkQ+2{BdLE{dWdB<(pD$_)X6nN9UR?iu3cR zM1cHL*->qaM{mtQ_1)BBB%EIFG=Zu~hWci^c8BZ%L9*6Is7|HWsKp=u5UdmyFdd*T zd5MYpZQ3KK{-%hN*;;v2)6O1W;JJ_PtdFz})-rAT&NO|*`t4QoV0Eq*7n1Cpb#q(Y zk?19DXEY@(<{jIP92QImD{arnP09f(LI#0harLL?#BZd0zpQU_ zYCufU52spk68`)7uL76-1LyqaO>ILSCqv`A665zIZXfFrU422k)n#rvvXveys|C}u zf&K3GYKJ8vSpsKMfUB?Ts1rp$mdRcuZ$;awO#7K$2+~4p$8rjWx?Z1)vF0e-l7nWQ zV-Byn!SdgAcD6qj`bO57$DvoljQk$PuFY*4ayXHGbj^-!%u1b9J#x*>8o5ocK#XcD zJ1L*%W?K+AS<{|0I%Ty^x&C}+JJAaHcX3@{EL#xeC|%2xIu%p`k^J!vrK}K6SebO> zY|XmapPhiLGk;950{xz*uvgjU*!u^v)$!ayYbybV>IMB^A!l_jeSA`<`c5KQn~g19 z+OJLbtDeD6Us?J`OX6Otc_G)%?im#FcahD|39isol}`w0C)!(xdPu$SdH~+Sp$LY3 ziP%aybz|*WoAO5dG#CiU_Y!E-LY~C>93~&wAvFGul*zx#I0Y-zI>EY3C)fqyy zeN64c@6qxvc{&=6_s*p@el`k_Blo}$NjzYGvzxo?Dx{;B4|4bQw2P_us4^xDlr)h* zvxE3i$I`=?cVq$+X0Ln4J(#^4=wmq0`;B%IwBBQ%rMOn$)$#mGo(Ggt0)xdH7__(T z@^3G$9?DCnaK7x%$3ZyD6q-Bo*_re#M`iNOT*|Hot>*G83g-$A)_Dy#L-^mO#nCPs z02Qo;XQ6#ZsxX8zKFhVyX0sES`^~Pe0&%An{&NeSe<@fLjob3{hq^Pq1YuOv}{e?MO+%@h`~qJ&sOluA`Hm9+5royWuxR+-1% z$h)=gZ@L^%7D`AP_0oTC)aHGNk*vqYC$r+Y9&2wqUl%iLe;}{4CzQ7rR;5Zd{r=5- zCT46@9GoC@M^H%0cGq)3_;7`>0<);p4v9fGulru>exrlS)Na+DzBdWKZNg;D0#GFs zs;Mty{6;mSB>dPRTw{8#YAx)3Nn zseh?NzW#}x$X&&3+KinHz>C9o4W4qGtD3V5vx+GUOQ4yGdWzFaa~HIuC|7)ZhNxBc zH&R;Erc#rP%pjemOJ44pGUVUz3kv+Ju_=H_d{kuBB*^-XV4FPgDF7j6kCf|5oMFt~ zD@RK(=mXS~giqtH7T~6r4P#*b!ORo1UEPRp9T;L;N$7r6h#mR~!5LqCV70u_X}j&8 zx=;&e1J!d}{=SGY6pD0MYB>4Rk=ek@ZL#KS^B14M22~fFWrUSlYt9Tc3d|&wV3oCJ z@N!5-@&PLh>t5r2G~n}TSCfx@-^5CK;6sUBmyA2}R!ZPpt$jp{8$XG&895*sGFs5n zl7rcttcZ_=@7;s>`>X3Tqf-!~%S87YW#2{-0=_kwDr}KiVLmVJ)Z`RY9zJ3>2l!gD zA45R$yGN)^%k_eBq#BglfDz=S%nM%kCGQc`S-%DMsQe=)1cYIq55yJ+B2Ai(NzYJP zdYHa_wFhh@2(O1H3KUCGl%MJ5tXdfYgng*O=5$vy$shH^$FhW70N3#hsjqaCMDJZV zF!A(J6CneI7_xlZDb9~86{fZ_iZ1%V8@xw&TLeWUAwE#^xJa2>7#~l*Z_FheBnV<$ z#B+R+1zTCOgRi}e#;yUi1)wrCQpVRVBciO2ydUDpz0o;+Wp7x{AR(}e$pG@Uz5n76 zNE!Z-T~#odkuf&#{gYm0ny7No`CnfA)wU1$X$`{5!*29vOFsrw3kfxkyErI-Rs8pP zw!?`-MKsl&Pau6@Zuiaz`Zy_cq|r%SjX4ueOLjZq(} zv)&yzhTUdpiBl9(M{MX#J_Y<*gCVjfG4}1o?|e2w3ok2JqJ)e8PX)IB%Mwj+%)&y+ zw>cuOUAdc>oKAw^>R4vUM$0ff;U#Jhral2XFTP5j13}#*a2OAJJlUyNz~H~qcn)SY z*8LoaZW=4wXRqYJc5YVlYK_d55?g1+V#70sq)f4si**KR)(YDvrwo8bjhPhL;jJ8R z_nuX$_Qsu*6N! zpl|2bY8h+~({<1nGK}g}_Scg9+_>Prf7>Vogkk-3fwS1q(%%G$`cV-rB-jl8`UVu! z*XNh;Qw{d_{s~T{26d`y+xzJD0VLq5{Sx|5Jf_ugzHHNsswYKmvXm!-#&> zF~2jkMiRx!6U6m%EE6>78s-nmva;i=+O15V^MGY6kF6Kf`n|W*f%n%;atz-u)zis4wgbI(b1}u{P)=qg~9$)=okhO{i@&80Q+hdEqX-h}W zjvzlbi1SXSp)!%47VPsV z;rQcdqs?aT)d`%L(a|&80%7z$nxI=s`7q~Cb!lI+LNB&BMKNDA*Ik;N zCUT}*|G55ppq-FR@OdVp@>-EgsC%%oGek`dbYTQ6Gj*5;-qfCLZK=$4XlL(~#-37X zI+W6F&x`Hd60-SY={qNA_Ti;BHJi0rxzaDy$R9U1im=sy58ipRldQkLTo{nR(@ye zC$4BLXTP7(+dDs0)Z2Mv{0ncSV8EQzltK7ea#DL@JNm4yfC|3c#})NP3n*qrRh{sL z_!a(#$Wa;MVzZ&$RLa2n&1N!9I??xMen&DuXG$ETG6v_}(_g$6NtskvP5mi0*4Z-T zjPzct>1S#!l@8VG<|ZID>o>P__pj$`;BtPf7W6fNPUrY3Q%w_*{F)v)w?#3`g3)8 z#_U8Uuxv+;z-D+Ia1 zEX|mvb}Ymt&GX({dW|02#il*oo#wx?buiV!8?kIUL0z~_LbeStG7Qfe2RbdYYkWzF>e!>}?q+HCa%YM%CSfzB z5}|xB`~%MZ+z>K$ZY+Hj`Ql7{lO?v;DbqW&KCD)T!wH~A~P4!ZL!FECoLNUX(u+gXdCkW+$R4%lWBDSI`I@kS$ zTflacmN=!P_Db=}l=-l=pr05?25z|)#&9XU50vXT{ahudV}D-4?8tKvIWg8C)!uec zC=A*Z+AFitwO1S|CGvln_kirw(X*tkFoA^%&EgyZDvKfOV_ObYrQ2dsGo;uIg2{W0#uBlPBFuf=d*ZZ7K!?y%FqB1 z?R-U!i1RDI&Erbw?+(pM!W`I*Ox)o$T6~?y>(u%P-jdwQjD+ z3rSJzPH=o1_myti1Cdp|&QX8cFN{wgTbMieW_55lBF_r+dfO{j*RFE}msqRt!rN$u zhO2Ln>q2Z3Lxn$};DN6nXQg4>+$TY!_Xx>I^_wf5ng_b4D>2I(BK|!-V|X9;Lg-1S z8K06iGnwrX1xFLLrhFg0NT@9eJv zF5*K4G-d>*;ovQSW3}bjRJTig!5wMf#MH1`Zo z0=p|sBzCvPNV6RT-}O80fyxNEXbrtD{GwQ9Ry(Z%*94}ffFX`dZ`wRG3VV zn%5m?)UN%d>lFRaE(eXJW9q(Rl zezS`OG$+~S(fxI=Q42(E9;N70^;Co;4HAJOTiIV>*VwH+`7&+%VVY? zkkcV#ZKG=X=2&jJLXSp$KgY56G(M!@~E7leoE zNbdEtksGf#C!HT((rW+f*4Fn~y>YI&{a{MYVCD9-5}owz#p+j(sQ0HS0dixoYgJav z$2O}pvYdKEui}}SW$3wUgT_Mcw{7h5zu$+B;;;bD-2R2BFG-L3ptp zFNxbaX`M>@a$Tn8wbR(k1zqMXfNx6fkNwm7&poP97zk8`e3F|Zlg2AVGF>}1iA$ER zOU^nxSi}I0Oq9-Hh0ajgJ)cN@j1K=R^IwDctm-^R`|;sIcGZPM;A%cLc7OsmhMRPJ zHpXo&j5l1JF5{||BKF*E8JoA~2~nCHbJmwEyn$uc`9|!W#C6ahkZRhVN!I zU?jqWZz(-STtr?}b#Y1+lB6Aw1T~-wR5GWK=21oFs->*gO~+=Qz?RrH%iqJ~E!S$+ z3J;dBQ?E`sIzNe3&z&zzWpD-mP_Kuc{(ZI01~GIOR#>?S`n8b>3mVAQ-MAYXBMb~l zNY5s4`zJ;rxUc^qNNLUKct2y$jj3+cx+7`wFMElU(S`M}!bIH?TZW9@9zwKXB$jI& z-1t1C&bg4)dcjdxzN~6f@S4YsMdh_Gz3n7|U@I zWcdPU{vFgGbH~08Q<|GaMjTvFv01hZkkc5ZC%(d%4u9w*=Oddwe+P6l z>0=*~5XJ@*IRA6>cHe;Q5mh*-mM+a{NWwmyNBLL7Gubg-Y8l!O!59vXUoEuv>c|JpI=%R{ z2fr<12?|dzN|+h{tNeg+ShhZql$uE`&G-+CAz7_ zkHpZYG?gBiAaK6{i~T|VeJ_3OQrUz~sQc7WR#ol`KP%#|Z;EZM2j)ZupTp7Y9tyOl zrAxlP8E`@wNKwc_06+*b6}Y4YPBx}Y5c@_$_WLyLGy0KyIxudrEav%FW$;i0_7z1< z@!@;cKoprotZMp@@~%?YNV3(@2n|9`i?TJegy^Xq!c4{oENajFM`N#Fnr6>8A8}<; zy{9axr}gz#c2&y>oe<6LNW+~GL!%cSw0Mft>{U8HvS$2FgO($mX$SCUup;9fgxLNy zFJud0PV%5loIcKiEf4gr3@^+Re-rNydsRh|>=#z}?V?yS0|3-><(Jz*N544H=*NF_ z2{f2678IkVIKo?bygz$Bm31Lp977rdcMf%ZHmG1NP}@(o6&b}MrA|UXMV^KE3+K8+ z`sja1ba4;GqKH5ozq2mcG-A@o7nA;IRX6X=E7y{4d3R0t_*2zIP$3#|<+}g_bQieI zFpy}P#TZ0w5~h92et#IB@Vs9p(&{aJU-`iX_uPH@OIo#pThF#JyXAeCa*?0(XwJRK zrtLubGQtC0gOdlhE;;aKjzi4DbJ&P8r0 zB~R&ssJhGY--~LjS2?PjRXD;Lrk*jCnJfquLFGwavn~jl8y$Tp>{N)V6s?NyH?=6% zI@EU*P67eR!7Kxr6{Nmv#Nk|Z+!(4SJUBL4>p*o z$OAO|W|LOxwyV9%tp+cgQ7J-FhAzX;DCxKCqINzccc zvJbLwK>7gL?=FQI@N_@-^&q|!nwXGfaHCfBJCOt|U&F;E|JV2s5R$5V23DWT6 z`>kA5^==R_$XjxMbS$?bx6SC~R{o9FMj8f-24d#!>EhL$!>JaVT4CIY2E+ZN8xxn3 zGZS`|%K-$V%j_Uo;L7Nnx{Sq`zwY02FCBHaJ?(nUKbGn>(40NCur`~$%OrQEnbiEq zM;;-%VZO)HJsFovXrg2X}!4v{tGyy_A$w!fdf1$#BN zMXG=sTrN4nF_emTyThq9!;)^%v{M=fj+fT16}L3PLD3ClORFTtA(Y;k90E>RO$*=o zM%cM^;zgksAK32yk!4MBstX$zk1YIIovwZ-t(D$x2zCZx*hZ}AXy9Za4fZ*? zgOL-cp2`iXZ2`vFxL$Bj-|0GKMPq;uc{yZYd%BQI{a25|4$yIUCi`TaTN~ z%pb!?b}`XFs1p1okjmf=6^~G-Z%@_9-fO%Gx85{iDz?tg@TZ~A{L=+3#&(TtYgpeY z1qBBGK8FH8@?D_kWe;s=%!f*h>UO2U4OeIVApX*cO5|I*BY;hxfG&4jBqlKo>>EX_ z1w^PmVmyeu>UHCeQ5RfFL4xKjn|JtI+3peh4ua*o;C>-}Fv_=IU!0UQ4p&67Ffm;V zzJ2F6DXuZ!=zgXbwtkXPFRrpG5Ym0!G9_o7-yzp-=Dh>QA20Gj{px5XWNGvLjwW2T zB-wbGxHjd6Z;&7t=Zx!R&cL3;o>bZrcj zwcaq)=I7-C>DaP6wVNNv%5K}2pgr=dB9Svdzl5HqK=$y`q1;f8wZW{udd zM01R)$&?&;bq;8RG=ZG0$@B>E2^t-PIPF#pa_nsKPw$#|@#Sv%KedZhFBVJu4V_U6 zw}-j?pi zZXF~y$-1F5dfzKHuPU{DNfAGrI*UIS*@#6v(aH(P@0lHa<+17swsSMUD^9Q^WKthK0nQ)>X*#kKx6>i;5)wAMPf5uZe{LV zItkTXXW+^qW!J{)q47j)S)52CB`)FP2Ab@k%H6rhA%5z2F3&$@R9#rp^J6=xU31a;)U?}6n|+Op@4|J;XbPWZT?)lY zQO1wsA@{Qzj%M-+9M^1TDi=9RZF*H{UVPg8X}=oq_K?dY7-(Bb+(P~A!8lok{Ct4W zQ#yM!ixRPe+md3Pzq-f5Z!W>3SAGv#H}k>SwQ90(X~ZVQwy;OHYwjh~!5ea@ zmPJJ1h5LS1(-fF18`-8fC5~Ne*=+--DT58a_q`8o*#e1B$~mFdLGFJ_MKP#x?M@W0 z+}z36@1pyf``s6XLF{&lD!2p3J@a9IMk~#wT2>FKfS-O4o>rO&qNXTitTOU94qFx% z+EwB&K0>cZ#Se&UBX|Ix1XjT_5S&)DGgHIdFO8}M8K3DMLFH{bh{w+<6_rOr zI`lpQxdkJCOtbuIK*{}*46pp)9*-;vBlc0>TdAW{xqdodCU=4VH2CwUy=b;@+$l zf9vD353~}H^Zo)C@nhPa2=H>&w&7xZUBZnMXpKPnm7=^}R}$*f&RA!5Khenv(uYh0 z8M@i$&O}5uYa{CwU0G0xr`AfS(63oZdk_d@MNS(Rg8`(7d`(KZ+~Mn-zX1Rx&|bfd zq=TeAwzd&%V1XsTawS%Svu{yNFXTJltiG1=W(hd?-+9UjvJu&(>gu-JMUevXsm?n&^kR+4sLZJr}9nIsyP{VERlkEZX#eXm^HennX zp=30;GIEiJOUM?@IJP#wtBw(oqlt-S73%Ub9>aZ*Y@zW}rrtA?U7y9^8Gfj{K2lJJ z*%&aMU-j0#JN+18!`7A02Achb>3gvotywn$$u2R|J~|J_mOgxS9*kzjjjyxHWDoh9 ztPZgH5aTEt4~^5`;+S*bX}J8F9kLi3e=r;A)?p357K109jetdWu|gaSm+rT1 zKWW=zmCicU;iEJ70}F?e|RK6Z&& zDxx6vVri1;cUH?gsq!?S+YS2018ZA-1FLl}pObLu>jED$)jV75u^r6FsrPv1c^^?= z43&2Yp+#Su$A=bPz8oE?=pnn$@6&4Ke~y z6ECy!pIQerjNte}M5wj>f&-N(rK?le@|^E)Tr4w?7~ijzDz*f(QSJS#g2?G?7}!Mv zp{IJhLFAJ8&qm9^^?RBA8xyS+MPS>DoBlzBBr?2Ng;{A?F)8VrMEF)1Y!yeu8Iux2YjqI!Qnyio+j|`;Cke#g#IAHe~EiUbBIN zktB;3O&FWC@fbDQ>3r_X$xI| z-)IY`+QLNG;#3XBBgsq3r4&au1*;k!X0Wt&(nr)Q-)G1*secJGWheZ>U+ML3S*eDm z(VTjR<=JWaT~Y00X~zuTx<>7R*Zt(|NB>%zpQQS1F#im!UJMBNXKPAsNXjd(Za&^8 zt~EklF@~;@wYK(w89PM_Y!MgX2O;Y!rYvJP62bs36bDn}npTgVb2?e+Kr!}i<2;iH zJ&D+)p3mu9@RXaHNZ#79q4+>v#^DP{fY6@lMDAUIn3$3y@au=h`VP(nVlw}SsJ|=2 zxypP>30WBz0+^!GbJD0>8X<1I>~~a9<6YORe1|6CIX(zK$)4b(a%*lZTS%iQ8g4;! zy}bVX-@{OOcwlU$s>LLKmya)%ozHD-1g;iuZDJHSY%#Qc0orZeOEI4n24RGO_k58Q z$2GT-R2a=UBrzP&ji~OxDksrA1Ck6~r3uPE=Q=p6;70-I*=^DI!u>7&84qr)HX8;b zuekHLRJ~=N&AVqlW9zx43$P7JinIt%gcI%FXSnu|{qT~3=P}x3ZkBrK1kG&GvCMu& zJJX*Nj9|vjJTb$|iS+AH|Jw z8oNU2kS(BQq)4msOB{ZQ0Q?nOSh-0^NvIVO|RLJe9-}uilblhvuDkveG`V>ZZQg=ocUe{{)j#^K&j4K%NxHoANS|M^1ILLp5a#+D;c3dkckHE zco)vQmlE%pWvE3|eX_xOD$?;^eTPbBclW`e1&UUmPW<~?)(N_&jXf_RKZCfh@W})t zm)F=V@$?I?b~q`QPK*K@jJ&`9DGX_9JyKG_V^`OtrxXYuece!W^Ju9YV~gfnH}K&t zhkP4Nq6-Ui(I6H^@dR2i%x}HjkWj~;bgNNw4bf>-xl&7=`*$|pf$=uB_I%*W&g%9b zoPm+5>9LH6E8q`fa#*h|K0yJ0?!U`8(mehZO?9JQU zv%k^yqb~@ZuY^LYznets@2C|WhqfWp6xu#V+DEBw^5f|PYHE+~*>|6!aswxy{Q}4% zE*q_&#uVRqHF>`qL_O^8^+Tc(bP$$Bo{1c*9b10)&Hf2miV=+L14FslZ7Pkqt%u5Q z;DJaKrw-Yr>5REjbO-7tF_D;togqnHN3lbO>*GnBE?xAk0lq5J6=AAVt~R*uk7+a#69MnzHujO$3*Z^2t_174k3-=E2Po{4oT!wWeMs*u3!r+jY5WzOId@2TQF!I% zBbiK}yc}}b-s1~BQ>+3g$ZtE^7k)Celik`Zqm_*1ZB7+jeQ2eJZL}9+VVPH5fGcRY z9Gqw^b7poI^Jl{UXvVC<(9ZZfZ_V;vK%Mpr#SnQrp427E4k$UoAC@9U$AHFbN@llo zygdG?mGK%+AajcN+6jHq{50^0A{&>9t`sZCDmr4=Rbs7%&a(CS_XB=(Pl%XP=W~m_z&4S{x`lF>OE)}80UTU9QGaM1xelUWMe{XdxHut zDJUg`Rv(J;3Erh~lMjR#UjcF)c=Dj_4hgz&Wuz8#BHx5NIPF5Fg1kR?J0a;l<{~9< z0_%zhl1hRnHkM$D1P6QTMhjugfgDgV-0(2@&jY&4RBKtI0tHW3_jD2=SPq(S`^G^u zHl^(s3+QV%IhD|~&xcFa-G*l=Uam}4q6}?SM$a#t9*F&Qk7QyZ&s2t}_Lf#u2agQG&pkj!yVD6qE9b3ePZt^5O1yNEqm{CX!^uJz zav&TDNdT9EqfhVwfez;<(LA$y3wv3z;!>IlZz8-!qs2N?)>cuCo-xBZioWbPM4)~X zaz~8QIqChC7I|?+GqmU(Q&*mHnQ5Q)goL6IiEoy_Y_-p5*`F{Eor=CJk5D5q5feSMWF+zfc=t~P!u-lNLueWU((FN zEIoV)NS`^(EX2^HWcAbYX?-L(i-!Z}L&nXLefr-V^p;DuEbfFV*|7p)rxQmrPW@ba zQ21=;J{+_DerMrq+jVdMC1h&g@EZP02fgW`cfLtklw~J0`ZJu zplPF%6K3<1U8w)9j4_*=YN+$A57JxvLDu=WWLEDJxepBS+a2?wC)pY~Q|zPt*2@@p z6XknPLaKXD94&bumLft3%Ww+1RnPYS@$?RkaX;VpaCWn?ZQFLz*l29CNg6ih#%ydm zX~U+m?S_pS+ic9=_Wk)j&p)uQotdk1?zuBE7$to7!L}cT;gKBMq4V^Ej#M12`$~yO zE4+3d%`n)<+EG2;cRpt`#Ssf&D!n!`jKCS<25HP^1}B6uV>5N0_q!ozoxYvcd#*IX z;?|VL7c7l&h+pqels5D=!j2ZZAb>PKZ?1D9hds&s9Fs;(vuJnKXgO~m5X<#KUnFEC zr?rA+?pG~f8)p-^lCxZdnfasqST)FzY1O>?aqQ#A4RuRZawoLGj@R?Y8|_6rV{$KQ zD#d3OvA(Jba?K1qd;pzvK{fE@4DetB%ZN?y|MVzsDymnqS!v{4)wH9=y92lW>m>8C z_h2KsZ;)X$VSXGs9OhYUW>2H;o|17kZ#_d30q4w(z$U?;Q+WXYUfSM?im}HG>mtNz zBOBg0YzXX#0t0iZX;-42Awz8-BE81>TW ze%$MPU<3q6yJDSj!JEU(G4-?w=R{7Xo=@3s7(t=4$}=vK3hrTAcwc(I0{PUway9Toib(Ck+1B)TyZczjw=1|MZ)^55t%FZb!A~d-}a8cq)v)n{>xY0 zzeNY=s~>I%VNLJi&IhP2Sq7fdwC58<-HFr0BgpRU8D=!pqu#;;Z>ZH_+N z&q-qxc&THWd(!}O@KCp$&Z5wdl8)Vxj9aM0bMkj%PWL!wId3lLSfedE4hcXYTWsUD zKsI^RCU#3sO{E8cP^12P3)r&cm;E-!!IMl<{QxbI#fEb3F3{sh6yCVOW|EP3&os(Y zVuka}@3_iYDoqU2UTK2&cFw%&Zlm6;6G7O2PVtoF+G$g|_?M!#PI{T8(-?knOp&6q zUY@O=C{|1decA^1M`A70Pr=W2`P$acQG{w9uN?#=3tPZgFck&+I+3J;s@#??35zIq z+#a5wI$kTAV{A`TG9USt=tj^+pfeQIXiaGB3Cn7c?aCBVYIWi@zh0EC4=O;MFf4ts z`nDxkUlk@ymfN23{Cy>HM`TglzQ&NUn=R99iSV+jmdi#gr$WhbdYtEe|MhT7oN{T4 zK}0K&8fboR13+Sfz<6HPY2!q$&7ju3%{`hZ_}Jmar6o{6qHEL41MvkHFjsoht8`yK z1c#3t<=y^_TMU=$s*kPb;KIPI*W$!U=+hO0jhN{Tlr747{xGhoO^Zp*s;qPcXmJ#% z!?sFHC+j61?a3<4Q$M&Z$&l3ZOrAw@71Gkys@#UHGc=MoWPMt%;$NKcE0o0->Nuf- z8UBo@w0W~Og0077e|1mJ$f=v{PYzNPBw{ZN z7*9EeaQJ+x2{wP_1OxPkz9;V$ws+vi;r2iuyc;xdtcZy<>_;c|jy-?2!xGGzv|z}H zo}Jk(w}(IPKtyvcG{BXj6b(_GzbM$!&`QC!^ov`b@6i*yrhIKJUs72!dSfP`dtDh+ z@pG5Tq4jrSvY$x6aMqJ!VEma_r3fQ4J2iHm2{+W+>o3I-4ppS~xzl|67fmf%wsyE69&wktng`~bfsBoTnXS5aFGS%$d=Yp;XZ6z# zZolft`18EUOD%kW5Y_Y$%%91{9SBWC0*HRsVl}jvppD+S>9-U4#0v=YK}$Ay&Q!U?ap{~^WRJ7bw=&QU|JNOsZngGV&}y? z3b$7Q97T7Ei*do~Zucae$j)PM1O-+b|T)%_jth(0pbRq+j-d(-!_aNL;`s88IC& zm#0&>=x0G7&f>@3N+J)*rs~>(GY+5(e8t1Y1k>wgh$Xej_*F29bQw#Jf?hcDH{qA< zJFnseuXGG&RM0>Kg9pz~R2EWtmkAJy^QQC^WrBKqcQ4O%GC7B#%2>t;hO)jy!d=jW zch#?6%f+2BbJ2$Q1}LV!qI{LqLh+dopQLFh_Y#wSJX_VHuBiRK(9@~vQKh9b#TF)+C{WYDqh^bO8DToiqFOKz)rQ90|A20{>msf5=bl0&r_1N zM!0@YSB?+6#YT0n#cS722NZJY-Nlmv>gSF96b0LX@%ao9&7z_qnju|KE6>5hWV}t8 z)wtX*%lP$iB}}{F#qDu|s}(Jhv0?}4a>R_OUzx1#8mHZAbPWHjp_5+|(WfKI874F^ zX7!VgAkk6Ew_c}zvs?1*C04%^Vr9Nt@@u(=N{rV-H`!`eYpr?S5y50wq%2W!&A7U{8_Y`f)7h$grzkA9Om)xX*zWN-7z&JQ#$n+4PPvdNE zs){C_=#+{us!RoQ>2!yU>-^j`kwM&Dbq%L*A8{hY5xtO{7g~ zH#OY+D~_&i0unXbNdtvh_^&GuK1%ihA$V=rG2KnFG6xKuyq)6LZchu*2lZHRCkA9i z)0v7}&uFU%KMK{yNjL*ACt4jSCo(ACF=^KUYU=NGp7|~Qd}3mrwv#eNs41mbMW(~i zwopjl2S2oaQB#1Ufs0{3Q={ON^zj8A6lhdj{vp61ACE=16rq!!{&2^irry&VLn;Qw z_X|r=gefT|u40TM3AW^+?K=+2E_)4_zKC)S4AA}(sNI=KfKI_HQasa+xW^^bT6wUM zvPCSR)~6}BpdTbn=Ymn9jQHK(*pm(T5haV2wW8|j%L`RoGqfL%OaT$(Y)so0HY>ay zCX-;;Sv~;HNl0J^=3Ogm*^tS&2=*sQ;rh7)VjU0hO!@{fRIj=dzOw$J$8L5L!Z5oX zepSu+F(e#rkkBG2KZfxQpH2n8U*Hg%zQe;9bV-#MDog?4eImrb`xYh5%w-358!-BV zKV-QuwBuI=dCbHS4{&psanGmZ46Xe(H2p5!eu10cUbNb8bVBLP#$!UvH|KlRz3Yb% zk#Vk*8;TL|+mVBb<}t`UZMHttlBm63_yE>TL55I4+K6@dAG-_D)xBZYpAJZuzC17! z3%&4kG<#!x7EU<<_U}>fBLXSk(pU2t_F&5#3R-AfJ26Dt2yKV$Sls(h4yJ&+MSYv| z$VAE3{A1`XWU=6Rcwq(PYI<9~niCG+1RYa%U&BOdvHa{) zp$%$OE^|!1i*c4D`O?qY^S2IO@gx(1w9*IDq{9A&Occv3zMCpe=9Sbl2v1q$!~9kw zLK=Q?7~55Me&+T-E!k#mFxy0KW&{GbGzp+qOWtXbmDp z`bX3+P&`n#1ALDVLS&^j+f};`qd%TUB)esVVH{EdeO{cJlsxkJ3u%5l^?~KU^nQBx zy|P#)3lNHCQ}$UEK$|(;{0q5RxK~l8#~TCwm@XTFV-S;sJvol*y00LlV^I`0~iJy`7>z1;vpuBalf{A*VyV z`Lf_txX|n9_`Y}2$uMwWAl@eZTNy?1U+8BvgVpUvyRRU0x&b{O(jYi)PG&KuQjV@F zsQVX^Ez%`I2$6o-IN|5=RVeKps;iqCApq^2Cv>+Xp*> zAYGWYSa9Tf^-$=2sa#7q+1vO~^Mv8Yqj+4?l=Wi!UH61m^^q2z$lLTI^Id>3CvY+b z5^YC|{@Nq*T)>_c$Ip%OLvGg@&AVX!fO22b3D?jh1A?wC1JkQF#r?rR?$7inzvXu# z1ioxEZ?e@ofScJrRt{H-MUqU`Fsymn8{R8WzVQt!B2Q1hW}?FD5V^Is9vLA(@9h4B zhgQ>*Y5s&Wwc9pwIkE}^@R0bF2EL=+WwbW^+hTH%07l z4h~+OPc)rPvgtQu-QYut52oBIiivr+LVk3Rb6k+0=@(;Pi%*pbG)SKEJY0sHDB|AV zW?Xkye^9SVBQ;U@} zj~)t_bYg;kHmfltLf9r+8Blve#=uJ!J-1phq#fmtGgUGMA={m;LD-`s#+t^MA$vdb zCbYS2x=Ka*j+LitXf&bz?=FDuZ$zB6rg20K18-bVZ*Rtg9UUj|)Gz)v5z%l#EGY!n zBNXEC#3Hmtq#W(U`z(**FE5tIQnW5yNU6U*p0*ca>}$#4KHljj<6v>OwraGACwZc9 zGH0Bu3c<+sreMC2LGBJd8YMz)P9G;jZbt`;k^Lx z9csfzKA4yX$^;`rdW$aXAzLj7fVj?vosZSG}FI0Y(rb zP(HSsnx=Dn&`CfK7vjn!1rBZsItmP_S-6zH^X84cRUk7wuY~>0G)D>a!aHKEH_WO= z<@OpK7Z7%34Y5Onh~<6T|9qJ-N4{ftSYGk=X*cFOLbAPhQE6gbqs=c~y-YODe~m&@ z7$7Z<>5Pg=AU-JvaZz|!p->DC%NHD(9uOqS@M*QlROg8dN+Ila&wi*#)}4m-j@iFJSBA+?)L_UU#6ROmP;- zvfiJ*$h|TQnXJbUBN{hv0`;HXoa_8gRu)tBptM`^pM0Mp3)9qD?Uh|r*k<^N+K51W zZR-$5EsfX+mr{03{14-vfVAVe;D!v`QGyh+z#D6nO+VBMmX_XAUOh1Xg9&9EOXeT* z*HY+hk+|f=%^0DIif(fmga!GA$BgmQZ=~$ij?lhXx^4ebgD%E?dzl^_bT3Y~Bz&0*)6xAl9v#NM8g^b! zLg+ykCo%g>)Bdxep!dt4l2Ceb>QQ5k``={;QyCncR6gPNxM?b>Ed)GRwR7Z0xD;X7 zl_K370%oiAG*3-2&bZ`K$Oog19)zIA!^^|QIhq#RRaC_2PgmMQ&4X!yOsS|05A$c% zM!U^8@~$6z%24()f`CferiJu2V-Fh#Wy=od zo|QBnaV3-IHYH6 z2zA(sHLlHbME`n8FzBj|)9qt(MyA~70-z_OcDh$d|I2WY_>(x%XVvH`osWi~c%7z` zz`zw>i3VYRad@c*#WWMaI=g8%@D_fc0I5;2+Qc9kDw- zQE!(gCF;bGH8~j<@%fImi=A9O=Yw7@f(eUq66?t)n1Ii+ip;g&KTZuSBry|c`5VNx^xx?j;keK`p2) zQGQ0W>m&?k6wnBYzS13gV#3_ID#m@~tI7=d09+@EtfWihz%lVl-$|X_Uq|pwPMT<& ziXVwEUrpn!gNJF=X)k}nIVvhA5t?77`ch6hsQ`(xLKb1twlzE$E0qSAN5SNku2C++Ko)-Ug8_3ql^yq{B$lT-RhPq$maxdpi`>bUMPr>D40S0^I z&V4O^vF9vbj74Gx4aE!|NO7@|4rbbCv5+1w*4(Be(mi|~*b`Ta$E6fW>KZ1B1)q?M zC5up^pr9a#qU+_r0qXB+m}ETFsHycM&QI+Tlf?a|Jt5PmYyNw?((KgY&y>yRtR+^> z0kn~uc9tRo1u=hB&;o@)5qguvsg+SE1+2 zx@|6&m+wsKm8qj+l%x;x-y5L)J&K?hLfK(!0g{^FY1E_d{Ko7;kT+|h4;~GErmIp| z&{q$}G7dnKL%5S057OD60xtyxTrOBsH?~EMu_1KTz8~~gUM{*oIhE$Z@l##g&*~+c z+4~TR0fwmf?t*zV8WHgH6ipaEpsD}q)_+0=rGJ)GZi63rZ3F#hJEOPs zN-}t4V-jH_l|O|*rCtBr#I*{)2`Z+Xp~ICj)-u{)IZe56Y$U7n$#MzOj-{N4XL4*K zjpVd1a0Bo#UVp!G)plZM*Ik*?m1ZqTUcUzb^Y&p*aM# zW~X-=1NJhR6PE03fHH917yxmq%uZAD>A8;n4Ncj+Rm3#oDVr#=`RIigSyOD!)5cw1 ztq+)azZDc?Xyfbt39aH66V0J)8jlpl5@XXpG-QkJd_D!0%11G|!7V#{7JyMC9nD2& z)ePGC9bzI*gP)J39-JK*73x42`QC$NoW-Y{@_&JTEeN8yS?Z6seiJskD@Nu9QISp=C*`N zG`K#ghy3NNa~)KIP9u%>Wp<|JWAo^fA|eb1vWN(^8d*zTv6iU0j|EyAXLktmuWYou zG#H6zakqwUWB!3DXhxs{d1tD4bty}pS9-5!fJ5n1J z@fPO7;<{&-`q&Yz!W5*W@$r4SDw+MB_t5uQc18Ub0mj_lz9BFi-||T_QY)r#YJFYI zl>)CXmfqxsn+HArWQMPs4J;wIqoO?stAI*Q7)*VOEcE8l58kqiyJTPb!7Q&7IngPK zedov)gjHTMW(_3TS_SZ(H67L@6149&T{4A9xbP<`iOEPDUn80XKFbP zm&a)DsEpj&TbjQ4fs4#*|M>x?RghziU+fbOqm8Rf zq(sY@ddYD8iieS^RkBdosE%+_MZ%a<(#EcXxWI1^3Zq%9hinaL2AE^4e}vobekcCj zjk~`?o&Zq5%{EPhv%Dfg!Bbxi^dnyTfMxOuRb7HVtcV$lJ173IB}$1W8Rv*#fA+mD z?GoMfE8PWDzcaet4=Y>#qb1h{tCM!ZEfc3+ zK9E(_BTB&8KF_g3GU(#`QGYxgM)~kG&>Dlf>-wW+717rGH;dTHs}KK0Tn}kT#Le** zXYA~v8tRL!FunYg;zCRM>(!K+soSHIKxC$cvL`ys>;aI*V0f$p&}w-G&Oml?_irq?lA_@HTd*e!YBirBgQPhDZiFlZ zujYzrtExM~1nH4@JjcA%Rh%-X2*+yk#QIWG(Av0);Eu5+>GoPO(|?Tmdjd<_Ge&X0 zWEeylL)56eYgoxC3ZB}@$M})|oc1f^;ww{-XN_ggX)l!&S(P6BUe0Ql&a)hzwj?E+ z{U5g*!4mGFlFy&!l(XOiSbX%dcbbF?XsZgt4X2zW`V!bBY)M17r~;d@3bWk!l)%&urgS zT+WYRHX+@=N`<0F2rf^*ftOl{K840;!Nj8O`7GC#xhuB??RM zr4l{@-zuvG9Q~uaX$zMwbg1@&MD>r$4VRfa3evER!^byx`?#7{B7U`ePqfxmy{10O z__o?!4?ZgO(kT|IzaF5JPBC};7-#HwM^K_E!&qbVLq6Ma?M6+#BikuZTvUGHOI9p~ zL-=@q&c13y1RtcfD#%8MAn^NZ={*?NtKCZb__)vz@J`)ESnU)~7-z0l$QhFeB9(E+hXm%V!ZiDJhuwB2 zZ*ff^QdV{28#GZ5`4JTX{wYMkQshcLD=lrnEyU*R03jG}az->unw7k_RQv@+&kfiq ztuqjRaQC;7D&L^DghK^*&XYb7p3Cw}X^CZ3Tmj!5aV+MGSbd;|`UcqMIzB2xx$t2# z%u|YWb|<>}EJFP|Bm12mynefpUK~Xf_UPY3#wy{gh1-09y$4-?=ZdIIe-&IT;!Sf5 zQOl)ZMHxB|HZ_R`DeSe_u;N}^M>8|dLaLnf+uEh?1Y`H_?Hqs$|F>jj?7g_EPLg}E zGHyRnYZ&2_HhG;YhnX6h#eL5IB(L=U`fMmPi}L~%%#tSstb5KR0_f(q{_D;jAo{(; z1c|Kqo@^p6kpHc#%UdySTA){fKMcOnx?JSz?;X*NUIJUC89>+NLLshe`o<(F)!1b) z!~zAypl}$Xk1SkJLBE~oFkTW$RJ)8*Xe?yyn0go(n~{H)x99~y9E>Dl<> zrOEI2)Zog2liFq`KGwADT{m!Sqnu9J_cUe)!6JHd70?T6(L0}GEok(@FUG3jrZ#b3 z(G(va3+Fu4z+uDMI4UIx=tsJA(ydf6tMj_)v}}Ug%jj4<56@D}vrU*;DHJprhIO|c zLrr!$D`x+d=D>eTUoYP8xrQ4nTt1voo`4tD{rmf8ff4IA2wruJs$h9nT{FOixAk#Y zv3FFOg#Gx$;|V)pVhH-w52if6)u9V6@sF^c}DP@38|Cx z_QmGlVdE*L1xBB(e=q$($R_}tL=!~2%p@x*n9rYF&O$Np`W%H|Pi+*l@Dd>eP-Uux z*0g)~-8GM~OTM;ed|NCUvqntBRN|N$py}&_ApKY|Pb$-r{UQ{~du+cOTiDg(l_yON z3(<_%kO(t{acUdS`_x(+2ncuzCic$Xkx`_?7Wd*nHdYnL3Pf`n5{PhvKA1;@4l4?g)mc&l<**jW+>AY? zjdC}8!S%IDjP{VNxZYX2eog1+L5P~B@UL9d!7`K9i7f7Xs1I%JmR{X-}q$M$5ovo_jTRd-K zTYge*(RZ09HPfA}yFeWWTm8JwbTEfn^(fSHiTunMP`MVUS6`dR9a zwo8s-g2OkjXc?haa-@Rl=YrW2Rlo0Jiq8aPlCILrv6kl4F~M|5G$y41o-8~K2e+wBI^@Ys;c`qEX)*6av?$ zdW%PjqDTuT`K1<<-&HF+$PFC#1+Og}Ozy&vuwa7EqZF7q^iTkkA>|u%q2TmP!5YcW zI)au90cW>unqQfbx!XUpc`~%YLPW53@DJX$APN5lG(#Pt0;I1I*c6!CVsb1!Zrq13 z(E2ZIB#SIoDP$z?+C{fj=6zEv9(<*|Ir6`~~QK7e8rb-xRN4rtu`fW^bu%r)UmS_g)*g?jCgJ>84) z#;?vI|Dk{!Vt_KV9f7pP0*2RLF;=fn96hAUx2metJGuG_S7|?~$jvjach5|O0#ZH@ zzYg-3;BJ0rtCHrcfee@^i+&_JsNEj#9MbW-saE)ueR0XR_45&C1(9Rj48$zeBReTy zmTt#j#auNs2x$e36Z*b+U5Ke-g)+YB+)9+d_N^Y8m_AW7N4iK=(y!WqWj;D2t-?gD zeqc?t4cSMHgizRjqG>;J$!L*2@j!wtk`w$^R?@*S3UPc<`#D1i#9&};sk`OCFWFk+zBN; zr?b*09pJV}MCjSg@nr^PoxS%%C(ASDJl#GeeKHLXEr^0Sfyx<|ZPdGQ?o8&k=UE%*wv z#N-Mq%-&4g(Qz1Yhs({o8r&7o%Pvfb%nw=;PMCBHH;^X_F=1L1;R*j_$-5T8 zoJeXRQTUjBQb%CZo{ICJE1pNn*YR}cr4_dLuP%iFVJE5%D#upZxO@VEd_UYdf652o zzcc9tX}#1Qx3trD*>xoP!;wF~Hzydx@0#N0BR)!txt!J%RUCQeC+@1XbS4MKJ6@Ct z7tjykA|@3I*5NAzyi>KLp?!L*4oJo-B&)aO1JXa@>83_hKZ62Kqo|} zxoKvY$agK3ofU5+orw4t+dA$SakBjviA_OO5(TL+{3iqDb#K)>(?Yzk+pkQf@8u>v z!_TOFqM}PMa6kFq78yfH>SjfwC&%zu?i`LnD|kg>@1>F0$0Vsr2lH18QeW5BUE&o<{jM@69GpSK~dT5+{ zL8mFTba6_YwPks^8NP$N8dxFNs2RDYLOvr^QKy!N-x5n5=7l-<1y}=>;$yXVjJ;9D zb$mx=56vx)mxm;}7rc+TrOXqWIZ^|;+372lb_ zW8rp7S8pyAHwkXfzexEuZ=UnTWo!%%u0`T{>>r=a6oV$T0FHC_hr1b+cDi z6Mcawi3MTupvOy*@Y=+E3+!=A-A4G;$R}S1Lr);p2-3gR5(WkN?>ad~$Ly;$65_*s zms{tcUUz>pC*fmOs#^0FUvS2(kLz|ND+BeNLX{l0B4&F_W3`bLClop5+$rR+>!Mq5 zVw7M2vM7}dct0R!1>TC#d%NHW-+toWSCo+UbVMmx{~Kxi_x8{Vs`%=?o6Oow42erG zDAmxuCjtIzgKJ*uQM{%>hxNbE3&wzgw7h-zGnsAp;iT3Pe$4A^&HggZcp8D*_q#W1 zp&Ed8yk!hcKH?N*hCUF6_lxvKO1HTPKj7NH%MB=pvf2BnFn)eqazq;?0DGVK^N~9w z_vO^04f7vo40Qt!;_aj2a9|mp3_-k4!Ej~<(dVMd>yu>S=n>-tji(5agn3~?#Cp+Z z1BHjzC;OOjW?a;z_)Lo0Q;mNw(4YS{o#NWtm^66!PT}@*c=+-W@2q9eM(s*zrRmNE z=jF1^9uQoN0%v( z1cd@uk(E^=lH)qbf!fT`VIP)tV6NtW1!0-`$gxjk32Jg{>aR?jg&6j@pcgFLSXF!d zc*s!=Ud$1MQ;q!jcip(o!`j}%X8w!F0okPKF!D_ZWC}3h++g=ymb0waO2cOa0O8aR zjX{SyI+3+$qVr&{8eiW&9ecE(D^_Fj2 z1xk3-D*Q*m*uI#GYwKG0iLSZpdVYlF>Xs^>>QVD!AMtcQILi=mLsVdb^3+^Wfpvfva`eg^NU1-8sNM`X5AS8|H$_=*B3dc9lv92AHd6gdxFV$>kMn90ZwBE*6ijTCN*(wT&lxTgL>Sj_l&XB2+?ifzbNXmbF!HlkjK zHT&v4G_dY`eD?Ly#Tcti@>zTUy@9x%9|h<;rY>!wgOY{%jpHLAr+3-HwfOqpv7g;m zn7;Qo3T_^mx3#ge9k}+hYy<29=p7pL&WG${TQt<3x|NGANG=hBl*cQd_J)9l+3mMp z7P)1j<5p;hTxx`UFO+iZ;Gsan+W^c#xSYo6JtD0&Sh_g8S%|cBQ~kIkLor)wnTG;v zt91)WET5Tu2Iy@90Rr_vIMWJDZ=)G94C^qyo-fI?nH7T({}*TlW5AApIG@cA_&Xt@ z^;w}xnMhc^JQCW4!nmA!q^mkM-e-sKs_T6yd+LBZ5`3ge&mst~ih0`6tu#5&%oqL; zFgwOHtN#gRkar(W``}OI)WJWK_yS*H`29O>;^Ys0RGV#iqk_GC1HG@o^Xw~f(ayTED9%aruLi;8C{|;9o%8YY@gBt(xU|aF6BGkvp1pYWC zO(GVQ$L>oAX>W(Q^f%%+qGs;`3x5h z4ckW+`^YpwWk{$CU%5>der4ksa}r1wTHOtV|KFslDKtnQ+q9Zv11eCVu_g*nYHfj6 zIZklgepuSHcK^-qhJXDRllmi#%)vKF7OKxP^2(PX`y#}&7U#0HYq0G$+uG)GNjZ477qgDBf^+;d^wbg(Enxr zf;o7l>r^wi>5?DSo^3ESrfeYW{J!MFH>LXS5``+P41oTo7?R{Ro%`ih0}E8h_`rGl zZrJWXQWn!Yn-D~1D@iR;d2gZJK=ikAjIdVcDIPn5bV`7HVG1qJ(J=J>KSCbjYhWm* z9wVqJwaVxW8p<=dzgR{}IfOPp_7 zw#E8({KE@{(ag&;#G_Evt8GHZA{hF5LVA81VEkp_&S4iy{lIga*!r13pVVR*F5)54M&4jfWdE&hwcpKa!A>Fw-kx% z?|ay&K~?@35sFfc5dBj>@!U?d<$tViO*CYiSiNlSwX6%TV;Cw46MB8+Dzp|%f(J8B zqpQ1FAL=d>h+%`EKFV|Jx+-em6PMYXuO&v85dpzo8$sr|#zlLdQ1@?S9rBPv>M|`3 z1*ivz(u1xu*RPe^g!iQsd;MLERA>zd-&f|hj*b%Q3hDaGJR1dSrv6tbnTA5XF9YU( zX5BNz=DN(hnGbTb(bfLZw))Jg;|#)AFPRk2iXNN49cOR?8v_1r+3f`qSP zd=9(6$Di37;$K9B%Ua87<4`ZkNcpF%G?ykC!x0}EC^bBCQnrJX&7ZMs8i9BUU8;&Z zfetRi^dw1?Mn9!}SO3>*j1@xyRzWqJZebb(PD;-ryDl;g5ZEjEHheZrncv~G%(XnJ zoo9EI{+9%$;1unXDfN#vt?*d<3WQ-)?7pqF`9@~CCk@Ce!++OZuvG%j?`uolr6z-( z$JE{K%*7A6Z_4P~c|@9(Nn}5WVU07E!df+*#9>lsR135B1 zNb|d}+U&VR!Gz)0f<zF3_t|krIa2w-VuI2fjet4`|P6Knu!%+73VHw{D&c1+Zg$`H>GpY zG(jpU!i)W2#@zwV?ZcAs_y(GfpOa9hfJVp$5M~xA%oa^2@+T5B)-KlpK8++B$TFeZa z2ME>9TOJm_mOh{8;P)%c=6>$+AT3hP=F4HH!L^_%L6&riEtZyd0po8%gskj)*J-Fm z$3N|-_qxL2X*#P^=qlJZdH(NMcF6x7i*Mp0IgY8|t=Ek9y z4v&2YTABZ#`a|vfcCBSN)Cr)*4|C z`2U}!Q2b{pW!E3R<2{nqduGzl|BV7DE((fXBpljo*bw8GCX9) z1)`idiyQyT^Pkn$F#$^|RPhlCzv41Ek*jaI*$ZQtj{43-{Totc z_E)VHPew3Oo+;;B82t(nkQKp}KD39D!tCxzE6s-^3gJAI>>~SUjeab7^qYdQ{>SY? z*hMXL_GtwNzLJ!)i{7X{Q{9Kl5a&g(-_rA)@u$J(!+A7wr)&^H@=*T`0hXDr1%syc zZBU`VDnX5XU*;X`iu+J>TBtAf0?`f^yz$or1~4Rhn@TG-)k=_su~`Me0&Lz5hF(df z71bCft97exDrnzVS#5(us6l;9iTI~L?BAJ9l^S!8Oet#Ao@ z=H*#@y*y9sGFp22wrYC_cb`bAfX?_AaFUFW>-g;iN8P?QN=7?OIn*7w51vr-6-?73J33W8f^6m{nt}!fdcaCfJIcbl=UCE8S4h?Y{reg>1O&JM3OMe$!KE)Y8c|H|vSEgPbYk^Fx_!ZMQ(S@C)PQAg&XxVpayN?Dco@e8C23|C=` zl=n42SE0xD2I$TCj(u;delnQxR1c7+UwqZjKbHLSzOPSEwd9Qku`4>9z2jXp5FR}d9FV1Q^-Rz*+tCguOsSq|{e$UW=B`g&3MbAKB z#JC^%)e#IjgGj>7UjhOG)8mh9(}Q2fXdipAlt{$om+EDkmZp&wUZ|uUJ^v@@$sib@c$DH^!(7TAKOzA*C)fvjsPgOtXaD2*^A$d6L%H|!JJJBYp zg8x_Q8L z|KbRQUHM!KMb$h1obB0*10Tk~brDMQ@dJpe02$TjyDissjERl7#Rw?l(=-y=OVbrT zE}*#M#PnWtvgsR_>#iL8?P6I$Djcirl{f@4Z7#3L?#8e~?RfR&--cDQ^mX^g+8tZ;YAoZDXQdg?sp1*dv()tA&C(fU_l@>+6&OPzKfZSzO-7%vVpY_?9dD?9kNm z?s;0{gc+_G#)DdA5tVBu6-E(V{-XKf-BC{x{^34cDc-C+u2V14Uu-Y^+LZPD3tiRQ zC6Y@iB>(F@He&6sGE+^@!6ToBnBi%UwTJP$9GDg@<%m{~G*sApH`J`9NRWU`9?Uln z$J|0xSCKhAgN1U|+%iLljv}7YIX@SJ5$f9FsxBQN8-EuuuHcT6aNJ&_khWVyaR1?P>wJV(ILx;$4P7C~?ODC! z@?57b0*=WLzs7L6bpL6R6l z?t$%d;8m#gql0i((O{48(*-7O?i|Yzenb^>+Wk+MCCXDJ9L#kT{~7n(fuVZ^-al8@ zb$6$O4-L-VvCrMPmjd4C1u-J6!e1DB<6L$z|Jx9LAs{bZFQwRv%&%~hm(RSv=jTIg zOq^5+{4GfJVC-?s{mp+URsF%Tr<~S*h$+9dNR`l2Js$w5%5KSzvXM7al&u$|Cc2JAkDUns;=F~VC_v6MnpIW;2;uKeuS;@=b_{D6*vOu_V3VC>A2-#^VHqG2%$iB>WKrX(>;HjzMvx`4% zkDWV_U^kZ-74*#Z|6}i;;xlWSK43J)#7<^nn-d!o+jb_lZA@(2ww=kuwry)-eOGed z&-=W;ckh#Zv`@aXt6KR9 zg!`Zn>e|Cp6F_*v#q0F$M7GmVBLv!M>mJqHwtkBb9ZR#HpfS;Yg(8;bk1ZEVn0~V9 zSfwSVcCUA7(MSQkP5OTDf_SwqR+x3j^erx@E)D_|@E38U1J!&gz{{-O#Ri z5kZ+L#pL*b&(QYl;lq8l{(Nz)LtU#r$T?wx?n`27N%8T+_($`BgE%SqWWe#SF;`7$ zIIK;sNvSE2Cr0oPzcb(aV6WR?eX}q}MsJ1KBMqq4zU+Im?rUJ7%bLK`9lw*c^~UnG z0>F|L+pM5f&c%dfV@lrXjY=<04eV1=kcwvPgo$P{SY*GcW7DzOkxUQYsHHKf1NuDJ zD7$wdB23LKjvy+a;sCc53Ijpm8QK`FpHhPy<4~%$*D$K-v?;(s(;l^!-DN{!NYUr0 zB+zg}w%SX2(0nQ}OyCnlUGPPqCIB>LSB0pBAQzh(G~{V1M(av!&d57!FpCdC14^zD zH#PulJ^G%zhQKrBGbPZ%_*-Rqo3CbYq(80E4^wGR3(%Sk0ob6t(J$<5XeKFC_nPIR zp^l0F%~x05sw+hsthr0U4(WP@;86O@=;2urGx zN%`%)q>%;Uz>aD*IuJ8?a&)a_u}4w?iP+t}HEiR(c0{|0Pw>VD3rrQ5r8!|+Fk;(V z%to25IIA9P1QaO#>n|ZSwU7XC@4Vc==VDp;SLZ zdM;Y{W!D_kA-#3rC+p%C2T{s%CuJtewC`39oaZ6&A^HwyM-UluDEj3mT~6#L@Q>MVTJt=HSWYgsm$~>rPiW zg=2SQLBWl#h(hftPVqeQzXE=Elt;!ZZ+i3kO2!K3dLI&`%mHG@bq{-zwj${^eBK}T z*-rV%j5?+MLR^j7|D2{E?lZ+ z@FzBVAktldwmJK4VG{t{wYt|nY)(N{qepyQ-#E&(>~1bv;qSQIRPEIe0awZwu5)*yStyCp)X+plpdo8Irr@jCz#$g`Bhe2dpVAS2-@`g1ImX* zQMAdpkF1)J9%4QsnQmOJQdjUI>6~wNV@5E#;`Zt1XitcBLQgzl4ZTUO92@I4emxdobaa(m| zu^#3WGA8x?S@-`~g+ySh>cBjCePD8u7XTolnl39|F@Sh-gL;K=44>+Rj+<VWzUPrTHHH^fx$U)~e%U*;=9Ae+mC8IyCb zCD6AVIcubI!UflF*8(lKqt_0Xc5@g(tbXws>X=UbJ&fq0An&it?az)TEL+VohU^D! zarvU6ph$k0E*{~4M>(+Dm#dlo9^En0Tw=A9RY-9-EKosPO2xZ04ZezeZ(RBVX8ceZd>3sdL!~ z{HDx<3r3)z2*n5^iKn)0_98V)QB{KmT3ajLcr&3wW28*cb$6%{v^wDb0k{B)02gdk z8Cdz;`#V)RHK8mm+Gbl&P)dhCVbIuSd1er(adNLM%IVW=sxFVQ!ul=mhilW_)&vsU@doj{!ivyZNKQ!Q)DUD!jAhAeYK&=+yRk zdxW&X>&b>Pn-4d@2f|@2&~1sPOV~|FfJ{~UClsL;r2cpADAVve6?|hIBA$-Ro4R0N z_1{XJRALFT+25@Ji%w?4QB!ewNhd~txKoK{iet!6|L`iZudQC}Y>8BPeh`Gz_FCLq z&SIVzmyuopsKS#341J7L47Kt+T}=@K*-m55bBWV%Ar;{!BS|Um0965qv;mUm7+)vY zN1*{pB_?;tf?|}HFRH`SvvT`i^Ed1WmO1s_$Vs~ygAMOzqeAQlD(O>31OVN`fycYlES-w~ z>fza&##NtJLZ^_EUctgpH5xfe7U0Q0E4&u?m8&Mr*=kfNY3mr-Dxa0Tk)Nd7DFkwr z0KGeux>jM$1#PM8L3qW+=umJ38I>Wmyf)`f?C2aOqifz;bi^ z6a-i0RvCVEb#D!SXHMATzksp+glM{!cV(p-EoQ9M5O5CT?ET|!$Jn~@>j1Xo|Kh56#@Z&;8p`DEHjJQq(MCcU_lGLV8 z*%iEt+6t+4!OBEUh2%Aw3i$*bv>I^E5{dpfD|HavzJEYwq)|cQ?DQ@oBS8DPNrbi; z8Pdz~H7`@JMZHzY*-5@W?{4k3-7CwFr1H;T`&k*I58YGmx}eKN42U2D%L)h)a}T4j zk0Gv%*a-Sq>d3Wjn7Q4zf6rBsQ9gi*9GK=p$Vco#)hsBsXhZv)=ZGic)cPI}VcWrS zS0sDR06bb>XN*H*IT7E!rk*~@03f+Z!n)B};lM$)ji#RQ<~LQrV@zSb*H&@?sz=45 z*4*A`xbK%hu&*mTJ_gA@#SB7ORzxvmcIWf#{E>x(lv3McJ&^o#TyXl)M>fWszeN7- zyeSwYCfDj*m`KYBRd1hP>w*+rFzWkjZVM_5PL(?8m^RH2lPxf=ZvfU#GPU%yhMAX1 z*tF%w-i@`w9fvAG{U#HQACZZ$g{~8vc;HmawdfMRiv2Hm>raU9uZ>f0e|MZH-o&s* z1nZ}>M!Q7L5QtR6rol8*6;g9(RJMvtQK(u|v22dLN(=a|nGHmA9yB>KLZ1YUB3@6^ z(mn1c;Jj-Gs_^RiS7Sk3_A2BaXj@RWORqwDMVAavQ?Z*+r*(e)cufRI7OABfCLzWK zPZh-;9u4p4shz`=Ll){6vq0jxgx9j6FLar!@iOWkNyk)ubnMBLQq4vju&4Xjdn^y5 zP&&Eqh+-4{n{_tmww4sY;_u> ze03#K`!aEuZX8MjfPs+-McE354FlYEF1$Yjbv2X@J{5ep4+&K9tQp1a9%2!N&QSr4 zvaY>t!>p63Wrs9SL3!>6^5y#8B&hur%}kzgr{9!V;h>`@ks%2uBC5RWcWwkqCIzt*wW0<{{%VQI)G&3=7+8G{@ zC!sJ5b$2;ju|J9`FlM^r&G@QaLOWgEL(#m21^8IarXUS>Rg-qg9S3u}BP!}Yg8*Z! zpWXk@0YgQ&n+9zYreR@^do2D4Bi6fml5(VK%O7b;1$jYMa@{<6xtMz-T#p+NXZHad zBLc^$!y!bAh;7T(1LbK_#8r0a7iIJ<3l`x6+eg0}BD5yvwsUep`>~#(qO0(c?(FM$ z%8;jUVrl}k73QZ92#hy1uZxX z@i<1uVaCr3M7Qc-e zl@8K;zJ>_rzNI9$uDDBObe67>0Ay;S{C{>0?OKm`Mj8r*J^Tynya9VoKa9eW6m#7}cF?FUx#fAllxZ zmPj`!8sFbGNFX4d95Eq&#jU2hyuD;i-l=W%>sf&g6V$zif?1wo3v6zUDI@c=_3A0% zpIRww0=puuw;0@3gJUUA)d1H5kR(RaK5z*XmeR$nl}{16@hM$`nIJ%jzbI#u(cBiUx2<5Tfzm^f#f@+gIN#GG+nvL?P&_l0Rki?-Q8~J=&>KQ8 zbgtl=b;if7%33~D|HdY=(R>W|f(WF%?9eTEddVYh|NJi?kBTiYhTZ0Y4X`X#1NjuI zg}W7l|2E@<r>5pUo8=lD3 zrYX4?jSO2vFWkO~iI}5g9I#B9(`<9y|5PSyH+0pN!5=g7nbN=63e_A40!VKuss+MZym0ss-Xc*r}wVvm76MgKLBK$r)t75+e4Y4UruEHE7_s%2E$kgJGFn{Ivsrglo zOBhQf4#-U@fvIo%G$IZNOqzv0PP{uvsz<_2tmtbLs4aT~8}Km0q!SD!geZ_EuD``J zHm95P@?xTqp$e9@u<7<}@5D0YqF%A_Wk{9~m+zs^?hz&u@STqn&ovoB#oE%y`~8aZ zZ%4;RAuEk~=x%@*5dc!u5IPkm-2QFbahv8)(?!UmUheuC+yStY)}c&vR4U;>kWytO z7lzyE_(ypZ+``PTnc4(f?jjDR7(k>$y8)59%>i9%+r2slex$6@g7;G1d)HwGs8^N3 zU-lFG>U>EbU{5f6ul%v(E}Nppdk=!^ws>>ze=k8`k+JtUBmT+p^Gw@-l4Tg`ccfVs zk>`s&kS_eyP=YGLDLmQ)ZR|(*TKZQ20q;@}ywRYnCa-^jmm)=UpV zYYj}A#KLcqyCx)fZ|oFow?&o~+z-|K1%zi>KT(@7B|AYWLH}FA=!|HUvW$6#Kr#1T z((&8EXdBQ|&Ir9U6wm#++M;?axJFrBz{-(-3f@O{!ZHaV;T-QEQ30xMihkC-3=lPJ zOrn{;pwJFk1yzYNT4Jk-TmJwUT z8Ro{d!zCfw9EB?W&IRz^?9qQOE7ecMK&Op3G&>tz;b2yr>i6(~1KYyu)AYiIO#?xP z7T`rID=9b4Farc**`Q3p@4u2TL<*uqS~kmiq6>*HR^s#4R=?tq6#Wp4mAO7Wa$Oo?}59kWE@Lknv_-i@iIe9L)etuZEnENyd z+O@Giz{N-pno4}i zsE~l*3=E8xX-GTH#iII#$M?(du5nkMj9K`GqSJrrbqTq<8TL~;tUf6wJfK@w4Q0sv zPH^Ikht`y`ma}?kdO*MgA|2v0z8RiKT?-W_i%rsUUpZIam6kMt;p*7N(e632&%b=p zNr$FqM%{$ao-o&f`n?o^vQJ8s8CgEZA(*o!mF;jrrXN`0D|r-eJA4?=6Krf)tq^ZG zMjA|HNV9SG;zoktIk5fJZ2zsx1_v0bdy77kbydO&))zfgu< zRww{`2h!aP^}p!$n+3Bz)wwh3Ij(5yqtT8qS$;H1zvKpL>-F z^8=f=P-rF6c*~Sq#*+`oN@kU=N9bepQaP{h^5D67WdUcTly&Dnf_K*MBTdI#K$xeK zinUrZQmVcQlb}Ddez%xmwe$4iI`Cn8>nkRiHku!HIcABt0gi(rzq3OtDjTp&Bd4M6 z{aUhgr8&ZdG@I|d;g;?)6#}@iEqQDOtlcBz^RUzOqq_i27YzpNz%M`oZ_Vh5+>PBd zXl307_d{v1@qh?E4@O6%yGd>dU(zXw#lbEp{t;Ri#pWq>|L#@z%+!tyyl05}pc&zy%Ez1B9AcF^b{W zZ{zUc{&*KWgsHVGnd-Ox3yDP80Lsf4Mb&S*M*ED{LFD4rF&Ov$NMSQZt)zL3`B~W_ z{d)$56abJ8z|xWjYL-Da{a%1p0DO=4GI*Cyp^HMX9T@nO_hJto#6;vRds*#cfXUq1lo!hg_R1#Psh_ zNyC#cWQI`TrP35xqVqp!(Pnfo^@|0sk(xay$;8~mn9Kp)ZsfA3*jf3;4k8%0AJ zpaA4v@Zp!$0*ckW-OZQuEL#T1d}>^PeGU#%Spl?=3*xb^u_{XFTvL#it-*!EB?=gp z4HTH4=k5ijrZCLUpLkP60BodAY}L9A6gJjahu6DtP2WMjfE#cF;BRP&bl;u^&T^Zk zN3nggJ#7h3vApIZdg4X_*GIEV)fs`?q<9i>$O07N?KZ#FZnaZ?ABL?2@i4=4vp9k8 z=vOCpjDw2otx3+>E^vdOzJ}7)f3t@N)Zpa%qHpRMN1Tz(zXC- zjcM$TNdKAm`C++30VgT@B-}dBytIV}jkSGBARFyJ)S;dCuA4|jRQa>-eDB(|I(+-x z)3Z%Ewm`|*zD)9ko&Tejp=PK~m^Q>Dmny(ngDSc$&@7uTKG$@N|6aJy5=IfJo4xDj zqA>y|DWoPl5A)VSrJ_i}Vo5lzU&9L$b%&RuBh=S+0vXN{;KwDaei>z&3H46RL!^0X z00@ZdYto;t+Bzcml}lOx>Cfn)%^;lFn;G7{`{B6wfOC9(%&EG+|C#DodKy9C{8;$h zAiOEONV)37<5~na{jgF?V-ki5(J)e78pO1NF zwPfAzOYa+zGA+e1(8}dOM~bc9o2;@<>H^7WBFcgpnjY@GmW2it9)Sp%lnzz#e9@Xy zDjI)8_iTN0l+FmvfF(8u)vMr4$|G&={_+AbOvVTJFn1c zU)0(_>3J;9L)e>A65lLa?LA9>BcJe6CIu9%54~gsC+7qH(hTEp0pt=5{?1kRhJ<-e za=`WRGahm>z2vtSGf5NPsy#RPMf{4B>eHzuM^) z{6q#8f1WYy&`TJ_6qJae^aVAgwDDVH%_|2~2+4381$*$&k z;D$I~Wh4;Iv%ES|LQiG1YSFptP+Asf!gK-GGyCS50vRX($TyNL-|)2l-hSmmYMr=&akpnt&{fq=395(o zyw%n2EX#5dcss?cx*-9gZPZFLk*a+DtVH_g{yg)WV%hvHALx@p7sSJP;LBfn6J=zP zxWi*VP_u!+J;XO78`Q_F&HW)94RdDaTgVSyla`C0+>SOH=f1m2;Dn2+A#d1WERcS4 z+#Gc?il$0Qr#UJin2uHDO-7DS_OK!${XtrMaw)O^)!ejwyeFV${aSsJ9LtCrTzBKk zdCjUu?QPwsjsWV&X?pv5R$lY)7=YAXoC7(u_t8Ij_7{f}pv9S2ey;ORdgQtvMTu`S zMd{G-nMq+W5?;k%s;5iK7W9vomrKht?M2LE(ooO}N8K#Vnu})jhOd0 zndKmZO5O(qHQOf#tkn_7g530H5a7RVPpIO1vGq0(qwfZL`k6N|qQ~4Z_c7!Cm8DL7 zk%{vg`rhOft(I`#X%obo6jP=N8{AXyzT;W0t-of<@6|{S&u-qy?hs|9N2STDY$8-a zQ~&zv&uyodt9d?SHM09cO}@66k>!sU6)!i%F89$t5co2|WCvuA86*^I4uCJZV892S zvBM_)SR_TW)a-3HZrt9tpn9I#yu}I6ZzCsd#%;9kGpq}!rmS5`pQCI6YsEA3cqJ$1%etq9zSt8S?ceI|HFNvZXi|2ps~>UNWC5d_1j7Wnq4WJueTld?(MEtkFagH7gRm(vA#r}1)1+9PM-`_}g;%@-&XJ=Gnd zU(TK7pF{>{omh1JmCEb6PtWl&)o8s-XDKBoX?vgV@q&kfh@X@6CGA;%{5iUN=Ct)( zP$W#P;6t(`fg6Fo-89pyeDtOWTyWbY29unml^*p)zE%Idr1Qu9!9W^(kdoXxTUwJ0 zDkyWqb@>_?S6t@yUb>z0np*c##@eH5*}0~2V#FtVI8=OqZ4RCo9X}@f4B4i!-O1?0 zTCl%hrG$}2`mBm(n1XK{=Zw8;*l#9*@k>4IQ@Zm*;?vud5!2kp3%_q9@`Kkd{q2-e z+;0d+#aPMU&sIANKwy2k3LrE43W31GX|n6vog7c96b6^h{x+7N+LM~y6LvY3n^MuA zCpXL$+&b&OV;&T~NcqAeJzV^X+RmHBXsc%x3Puh1^&x`Ud`U3$K+-V;mxaJ1{)j?lb59iAFy@}K>Q4w6hwckn)iDZ7byj!kh^xZ9^*v#f9b~n zhUlin^bq@Oatu5kxFtPGP<{i4oZpJH`dpL6i<`A%>um$gf9YvOGwM@meSAzG3)IH< z?Z||l?t=^uzD?s_wmw}rQ|Uf?{a~<<95^P>x7$H_osVu{sp%K?cyvk9 z!s3wO(lvhmBi|n{n*bVoO!}b02jre6dmR#6YXgLuNT6t-7W|omWn;Uu7;awPinD>}(|a!=ONLhn_Gj@5o%vy0 zDvBdi8Q27Wj&_p8SFeW=QCR>7kur+^i(Ph+j_eA{uKIsj0Rx^YT!0fF1M`jN_f;-A zwbmPE;ccfcPMK5oD9xF@xysgHgeX^yKGsj+^*`s#Z6w4H7R)TGC8drUX?35?=o0(p z zMIAS|{%t*hb`0SD{vW|P7s>7g)g=knx~xkg_Mz#yYv9cP=D>ao2#1f7sFXwx`-+&Q z_KH3}ONU2)01aE>eRiP|?s;Lnsj7yo{FdHR1%spWp~LCJyZ>s7sRNyo(e$MMcN_>% z4lY2a)OADqe2N}~(`I!xSIcUKO9~9DOIyq36=*#AxL)jZSPL>1f1cZT27c+u^ins) zpjoYi3W(6}cfOEA%vYUAveMB_JjQl?TBF%8;PM6Fswwe_y3+qMBi$Ha=>(<&dvkW+ zY4aYw5+VH$M+!BkkTSYJ;IQReE7;sfT*e0TDJ&UH@ zzn`(}sYiklNw_f)5Kyd(YMk{avYgXq@G!^!%yF^dCUzO^X}Qy&dWi@TWz$CqWHWi{ z=4#YFZ%gMd+WJr|lJI}5kpJNu5IC;vM+YdsuMT%gF1s$-wOw!F+G{%aK$ZhlS0@mf zCUASUn3{BBIsNc-I6#`KA>Rp3Ue5k%5!0$tYvtp}J{hi1EAcI&deAYep2263(DeRy zRz3vcQveVMb?pZK;zQThTS`UzI6s05cti25XjV}R>z<58pEor{h`6A*hoI8R`~N~A z7qr7NzUuI}AMt~w>$S+lr1Y4;NZUc%|4hn1TPXQu1=8Uq?c~N+cJ%nqP4$0`(nH(_ zxLO8>_GjY%eX>ji*r7V?RwMuKga7xF|CeL`{}hnl~S2aemk0+T{!eRMi&js%5_u`ppi^99yMO{C~Fef8T&W(FL-o2K{~dUt{DSV)+gLHv9k2pCn^5I!aGVCj8&^NUn?h zVbUI`(v$t)l|T6lptm`ZlEf7L-3tEMbpU$b7HqUDVTf%>Pr998le{&A8PAfx|IL8S zz6Hc{1|qk~j^YnsC+B&=OuNJB-C9Nd124sI_!^gsL*7%!Yn1J=EBfc&Bz0uCyaI4x z7EL$~6Ofg43Dx{=R(G2@(`8>y+{2SH;&f17@rf39HTZ?q^A{26%s!QY@$H`%dZLc! zlSUXnlJ2%Mx-5Igi4J0%=}?Jc^K>h{y9zv7i1nhAlnu{T{gklWYn~TL;sgE=R#+FO zHofd*ZDriH)o1`NNw3Be?h9hxZ+U@zdsxTHmloxh-H~ULjg)zRA{f8N{k0V%jg*A8 zTcF*#^vu(B94h z6#uRQf?UxzmyA{$O> ztQO>kF5m+GAP8#~mk=PRov7%gnUV|lxh%T&s`tB3Wx9z{1Zul#lm-7iZgOW2zIqeU zDDdJSx>dl3ka*pFVz(crc+`5{A{%6L(L5fbEMbK-xh*W;)VM{IO>e-s@NS1&LC6$w zQbUwi$erU*tLl`6b2uj0>j=Xtt3I~_BZZ5URdIaZVqfcjvROEiAdh{raS2z(V_93b z=gu$RVP7lM_~umUf0EPafk%#hew#e-G$iP`uFk!iS*f>1V&LW&wcZjcpP0zAQ!&p? zW3eIMI_#Aoc7a`7@M5O%^|`3&gg_}zjeA}dp(QJcd2XgEn5$^T!F(J`#)qc6pOZ8r zl?d`n%~P-E1F0^QGIiQ7O!g8DfWxrZAG~dSCA2&hjH|^0UViwx`e#J#YUn9w+tvEX zH`|)ZR@Zeix~&GB1zno5ct>u@J1Q6J8{5@0UbqP^jxT7gkXQ5TX=c^7PqWO)as68{ zDRK0k$1bo@cZfVmSp#WVMnn$3iCnBqDXp?LA`oh6YV(w@_Xw!8I*NDy{iX8JL})qxJRT;gwTFR&9pso*m0P zTFu}`%a5x|`lEaeJ2z04bjrTNfz0kuXaX~y$XS++&g)h5@(Jpm5Rs(q5;p$;BHM}^nLQ`i4;a>bGDUG%A>#V;4X)#-M61{NU4H;Q zL2@*iy?E;Nim+*;1{RFq_13SmRvXO0#}BRaEs@n&-L@@y*R_s@2pD#xGj zssgVih1qaix7%o|jwLQ=LE581fb8pewy^++ABn+y ze-K$po$f)YR(0TU)7#u`I)6zXowg7v$XDXMAo? zwjj6WV+*QIkiVEA#)TvXeAlY5^_;Q>a?*E}G`nR?;44{9L##ile@yf{Pj55WE$ip6 zoHnazhoz&iSBpsZ)lK_lvb@qc+h<*o98HPHuU z6ghP9#0AIcB;uPa8>q73V>6%}80^LVz&y_(B_F2>+~{6XWh6!yER6E$N>*7hw>b`f z;U3itz#4*1?z8|Vlqz8J?AzIkx-)Q`P89D>weLmHgTd|l0}P$q;K4Ck_+_PY~Ql@g!T7U|QdKoe=DS1bVq@R0YeH zDRi-)7PF*8a}c3{Lgepk@vy9Rs4c^eDr$t77a~%-*%8L(gL^sn;lPJo4{W;MMi(4X zzF62ktgkAT#1(A~8E$IJ{b)NT2+v7(7jmdn^ zY3Av2EiT67E^{jzF?mw3LJbT`dwA$3+JZT`eD7Q2CfVd=S2m$h8q8H-&UYO|eKg12 z*JA8AuB84H-js7bRY9aK-bB!5hqHRa9{tv$w9_c;wRh$wdf_2{(LTB+U4=U>vEb=G z{!OO4>1h;eV&dKol?(a(a>wuioLtMTwA%APNtI|IPv!MdqPh;L(YhQAB1zU1x_cHp zj9%VvEnJ1y0y$u9^&TmbO?ZdQUgB!}XA{tvQvcM0F?t3j@XV69Dy6vWp|QwT{~R0n zNh)?va?7@wWQ66{Uny}IJ389)t*0GL@8PEbQhWXO=zP!xis5sDczbP@0Q0a!;2He#M}ZeBcw&9IOhbLoE@Hqzi4LYw0s&TJIWM_X zQ_n6f{)P5oN#a`V^C)d|s{C=3Hu1H4k}Q`@;@jM5x(t+PT#cpQKlPj!ywA6-sgibG z(M0$a;Qs0~!2?_8e%_zSx-RS#0_t4$!tmTjFgBJyFiPi5GZIFtd%F>}Y1hL0G$s~* z3$kT?E4z6)a5+N!&5g34yft9L-dB64;H37u2WuIfdU0@kV$JJ?m#@1)ez54{fy;&< z9U57Fkwe~|L{VX}kn*o!4-`Oo%8=q#%9P~Ezl_mEZYKF2%ICw0nbVHvGlN~6wYX+C z_x$AtZsmJVVhR>DxYJ?_)o&b*GR-@M>8Q-SDkWxHOzVxUB2|%-8taPE_t>J9G4A7Z zyBfU@aSJB|T`o}v3atl5Rsn#$Ro>f`L&kn3y$%A^n-3&fyG4hM?6u>w0D8cm6nir$3p6>~Q;{1D#kVkWNHU} zhx7F%nrMlcO6n_ZT*(%#aRGtql|3#Dm2|~0-&JZ11PIxzlu>>Hs7AEyuzS{>KU%!uZu;Os!#d7RRC~3}sOo3JO+4SO zB)mg&xVw6a?2vw!V-9-$HRs_9LH0(mn5Q3gwaTndKXF$p*xPrpB*%3(^w~e?e65Sp zi~n;>ng%L&WrzI_9CK8gu{>#Z5lce@hxfSjVfUq)yRN7syz07IEA@4FF|Ba~5?!Z2 zOJauCC!3}7q(lk2(`{|Z1>&9%1Y?gi@1iPZwfdxqOyp8}hN@pYDsmGW9bz3v{kCtr zygSEUdql^!af51h)F*;hm%ogv+qT^?Fb1oT6j=)c?Ac0x(>w?@=tGHJAi;+t$+AV0bf&JO^4A3?|v>ZrVL(ZU_o+ z-5AACtP?A3w^Hid0LbpfZ{EPz8;#2aEmp)#7L+da=u$13^2TA65-i~HDV^O+Z6j^& zl6&2F&&_y;k=ud!h4pUZ;g+a$ve&n&m8%gl$YFd&ugj#z&(d0#h0Vhonqr*+om5qd+oX+B}VHe|=fscYBL)_6Pm&ciP zIl~ zha9>$`80Pc4;iCCYjaqTc5B2_?pdf`U$^7$EgE;0cdAljDupiWbP*QEdXgi1!X0&+R5SHaBrH zHf}zD`>We#4LEKm_w$k%1gv08?K@2WfKCJ7BL3|}c){%zZE_Qh>Ne;gGeB*gH(0B{ zy;i(e-ThrtWRJ$()R+l+UHjq7EC$3claL)OzeGYSFV^xq=4ZLw!?{=ublqrKl z>?joIc8)bIb9S*U$9K_AOa9sR(LZyNyjKn44%R?K3 zS@r$_9nC>mV~)ouVLA3!O0-wQ{ALUJBZ9SjF~JypcJlRL*=RztS3nkkLmw_WR(SkF z;fk?Ze#7g9&h}PPA%P}@#B=SR@b#T%z2y37O*R`aqh90O^L8-5LOyEjVn+(6r49le zO0sjTZ}53sUL0>%B}h=^a*_THh6&n8y~r!tmEF57E?aHG**10zx#xuIEAfGHjKzzB zde=WV&CTL!lTPx>V9WDYqp)uWH~VkLkax$YL6D7dYNfTdj4ak&!QYz!;599_66ghk z+_=@(h_t6!D!K*1&>h`p-=DV{t*f&gD-Mt?tS5+Q8o`1G`DNa+Uj%X?CAfF(G%2O= z@2{aP#&N=n;{0uFC%}_WkwR{Idb45GZ-ZUuh&!y zKqaS8%#Cl*NR}n65sxK@#coh;8;~XXMDH4D@jHh#>)%j=ZLeRq(p!}PC%XFpF*-z< z14=W5?_>cZ7c5zzEfzD0Nu({^EIMzTWF7X|i*G&m2|C7Z!jEeux0A7k4Y;YiidFTu z2MX7X>#l0`-jrgi~4liDQ7Q+V}6~oRG_gL3acn~W` zK7KXlc?WubYia+67*&KpOoUI5&NNZpCymvodwmCs9ZPn;HI0&9>=LO2+c>{O! zv(&Qdv<)S#!uT)vF!V$UNSTM;TDO@gv$HfP?jvxfR1#z6D`J)e78>FE8^Tse&Um4V zrPQ8)KMQ!SwZ+EM0{Wl9O{RPOC+7OTy#HL8&oi#Xk?RO2W+84fcHC)=M z)N6%X61Y7>_s%;FDhX`{Bs#CcTO_7|r$%Z4aka1JRc0##5jvq)92nxOv8UH+3b$yx zv#FCiPXsRqWx@3?Y#{U(MRc1v1rSkWayNSYrF9BP5<7>?de3_tJL#ytdRPw`QSC5d)|S)Uv;O&angr8O*Px?Z zYrLJrddbFQt*!VStlN6h;$>#~xg^-me^F&#g!{;0qsoi>I|lTUICt(` zupCk9yB3w{(VU>2vq#x&p}TfCE;EIn-6vsYvLv!wr~fRl@Ty zW|ke0<0cCvx^eRllr6oDN@35Vx!SGbBT-uF+5Q?iO=46fj3)8VZkB6rJasF3WzNbi zxZl^lvcvCZ_pd;=VwWl~O^V${|IN5m?P_T8su8ckN}Cn5x>YSwcVV^dc-rQ-&q=Dh zZYfScKb$}2?r;;OTZ0_4R zh3nS6ZFfV9s0*|Tez;2V9|zEX5L*|WY*Sv2i;FDxf5Vmj7iR481K^`kNp3;;KTtR7 zp&7se(((jr_Wr+5E<=Gpb*t#@|34AoIRGNu_5@|~{-2)z)6pgxpfPQ|ed+&$Z_8$v z0er;Hw@90BpYFf^g(P=V0UGQ0J(}^KHvR)I^QnA98ypjLzkUGE4~bBUyE=cS6b_=b zekw?KxUPI{zdrt^n&4P<18%X$M)v$hSq9{o(bZJBCU{2jvWMW^*Y)biIo!%Z2Dq^E zlb&uHkhh=_B&L5H?rZX!^FyACwsQf^KHp)_z#kxkvW=^;er@??Xln-_aqz5oV>)So zOb3%?ixgedoH%VpTRV{qvM#W-(xR*~{o7@v_1`;ILY-Sin&*X6Fc_}B`Rh2FG8Tf%2+FHT zFXX2}RqJHccYtA?+y7I5PZj$o_bn!A5NKrXjcVH-@inEHq0G+!VPf5(nIkqp$<-xs z4(=3~%Yg+}{kNNuDPC_3p7o?)`7kXlRyGdcC^{V<)ZMKPXQv!WQY55=1>2T-_`+(i zQOpgLgFBsWoHky8r+nSVy1GeTtva&sZA{ zzs-R5fjblP{`@PE;fq&n1QJ?zr~mq9J^S(i9z@`HENv41czP2olP-AiFr#`^k0~y6*4YQq><>$RyFvAje*gF@CNsbm!CN_Tq!I%GKlt#2yFEbWZiSW+0p$?* zR>x!X%4_&)pi>>6CKT5!XzH4%p>7KWZ+Tn2)5!87T`^0n-4<66+v@AUUXET*f4 zd^N{^3b^O9?1MY?Fo&@tc^O%>k%jXDMTw36YALX{=25Pe1T7+KKvk!vo+(%+vvqdH zZdSd-N&*c{psa{-u+wXqWN3`O8k61rSG^Og%ZYg-G8E)=#T}_hN}R%?m~yDU5R~=X zwP#s1)ICTq(y2}HMMoWh0*(I(f`$?W0K;01-(<~JsZN#7i9hO4K!(*>3S)ILEPI5$ zz$vUux?LhYrHBm!(^s%86+SPy87#X)fmu-WO9vuC+cyo;zLDSw^Hq?+Rh)a zt6WcvT9lJ$<`H#3h0DexdH!2WV71y4YdDJSd zQR?*waJ6~h%1b&ejSp4z!Sguf^}#8Ls}qdQsH8&SQmWO__RuPs-Qp^D_*7qG z73vjZ)5S^}O0FV}#&MiR3{z8$3C%`{ zwaU!Zq!g|@LvtlEuVr-PrR!~^c1((kyd}#^;*v!O*P?mbLN6xD`^%=-#EnL8GPLf{ zxplhFU(Y$uInVi>=lMO~-}gMv=Me=t8$uG+(osp*t6x3B!Ow}gD_Xhc{KrJXhuOJ% zGD#6_#bxO*75Jr?29=v5cn5!4oWPU4pww7aSs}~8gtJ`?{iK?QonRG@r{@0&pR_|y zMC*Dt)=p#T`cYtwF*z#9b6g<8n?yem+4sI&U=b{z!b1xn^-Jw^nB{8#|6vT8WG6(} zYuqde)G$*iHA@-4zzgKw*|>2xs0nmUaKyF6+N@Z%&!Om7#LBCnJDs_o^`bL}BpxeoU+h=3~JZE0AU@lizr3UBA_@Ou{`pEDe6)MJ-z z;vB7Ms@zvda7}ymXHmiBcDv6H4}5!tB!eZz$UQ}evr1L4tYtLBDxGg^cj{(^W|w#E zPhqw=Z}9 z{?7Jl_1zDFVV zkc{$jIQI?)cR?$*%J*WqPMUE)+_XEkwDmTA;hDim6EmbG_;^A&64bGJ6(ss2UC*F7 zv2#U5=Hm;Lyx#H~(>BLJf%(TIrL-hK&z^?kz$k<_lj4YXe%)aoJh+Dr99J_vs4-eZLs;-`}R^STJJX4#{}N<`yg26q<{9O4D<(RuXAh{qL!2OtaW zO`V+g_v1QnyVbaLTN{FSM_-5#L)+BjC$>xx2&AK($wtYv#S_`Ht`@b%{P$>V;ifdQ zB3YS|$Z*^*&e8izx?x+S`<^hEb<_k9yv+mK-Ih+*&=EhJ%* zm{jw>XL@-Bw#W@@>Y_$~ktQtP9&*c7nhv}DPJA+yz-J6?h9dFVoz#(s<)Ok;OwK(9RH2Ds_d zj3zyJQO6E(i<4?|BEO{0%NvArKlSpgId*oA>e9SV>*6C6o4oNd4)8884|~o7M3HDm zqUt06&p?A*CefsK79Ek!MI+~IWa1BO9CcF(8LTSjgprD2h9Ij$KMGO_HM0oM@t$$6 zEKQg3X@b#UT+Gb@k3qMQ#IMW47#-A-cJtxBD-9&$I2#K06bX}^mt0qc8r-4iNs;Ei zb<$&qipY0}E@v49H9Z^Rll6FxE%f@w0SCxKlaZCqHWbSOa{9v0iidcHXd9yH^a#^I;CdNzpEORyCU zVGJu^u?4sgw3&Co>f|XWp>`Jd?%7PcSc~VQy{a)xGMAEVlmOR5rd?dcwYYz9jbYVNa_lz~02c3m zqykw&Jw`vyj$V)ByV0ehAQJg%my2omwk+SLo0H%b3n}Q&M}HCC(snqpjfpj}ZBK05wtd2xm=oK!olI=owyiJEySvZsx3lNhId^wgcU4!{ zbyfAbf6B>-!9im~0|5cSeHRy200IK-1_A<(fc*0LghASo7YGQG++0XV?z@l>ft-V_ ziMf?A5Rh6>f;$wu^2Wjk@5AcQUdn;G3F8FG4fli)fOKU=d`apUfNF2UKYJ=!H{Pe_Fgpfe!ok8uc^atoJE8Wv8oDK7w zkn`X`vLZw=?Z8><*AqBf=Mj(D@B+(+0o+~zh;zbV7tRla?m%7Z!H1{tu(0brH<0M| zF9>|=A5pT6oxI;kE;`*zsaL=&PjIl2`9q)iWClKTKtOvwZmVI6m3WPWPRbdGZ&dd2 zt!jdb5mym+O>6*q9$5IJ;c0Q!7iP52dhHP(Hx+oKv{ke-xt8T2eopNdf(3K zd<@l*-B-}3#FAfvv6kNZZFcoeHs0ltGO|5beXF7hV~%VyhXjE4eJ@GrbiIkeh+~3V zFwP85s(Ml8(6=mYBlRr69)h1&l3wTW!|taTK5~WN8`rm~{WihrYC7&B;YYDCpG~!@ z^0>@XL8<&%dcCaRT&?uQCgj9s_*S{Oo$p-OyYMmuTsk>=#huJGKN_T=p{icX5p=I% z>8sY0O&?Xv0>|)I-swrYT=s!|F2oocR<{u7f&$f-&+UYA0pBKpTl_EriRa+JK>0RT zcvP7}yjIMoAV~PXtaqV^zy5lywp+A;63k#F!1fZ2CWm~pENovph9W|I>eoFQR|Ijh zZWOiBd<;Nk{_WLutG{meg<&U2wF}JrPPo6VkN)st@U(lkA#q~^;kiHVqc06331I(Q=HuaT z74c+=kJF4WupUGMr#$jftSL8n>XaXZCMsMQrW_D14IJ}M-h0_e{hmDH`_R~T!8h=z zDMKS#ewiAyqWjM2TmOD0a=^Dg2>QhQ)JT>FyAi~-DR!yY1Y7~!W{08$F-6*RF2scq8`u_&ya=5uK5s^I_W_=fgx`Tge7_!;RZePw3kmeDJB98U{$<1z(-s0ba?3Sl@dKKN+!x zZAl5J$?yzMCUP+*yAcnQpWg;vwChU~Fdw2oKM4#f0DBV5qYG0FNOKbmo}l&|6a;BL zW|M*)K9(Pw9j2yRL=G}k0ILb=+z)#a2oEr@iFF7bu*rgqqqvEZ1R<#h=TCqu9EeH4 z`U3(v@He5lIMy=Ekw8H-%Pv^6;JO?lD*V?VCDY=HCHwxQXLM3KOB zi$pNH;^~=2AtbwotKhAGxq5Zzu_A*Mw{V*vD*{hq>V1HtdL1vJvBOOGZ*oYZADM+$ zve%|UCaVt>55o=_omg7n-ay{qd7|Ef2o2a7wlS5%k@}H#B7XiR(RZNFPkKpmF%py3 z|5c?%&7^`^8h9#AUWR`Ud5_uvq#0!0f3Hto6~>Mm2}d5OIH0~M-bAm3d=7H{r2=Xp zDz)eBx8k<@WsRG68xt=ie$+&_?f~2N%hk*y3$zBb0JJhR3^YP0MW|CKa;QfrUg)<_ z7Lcz3Nm(*gq>o6EC=O5wfi68bJqQvK!`ktb@hb5h@%Hh}@r@Ljavix^**S&CxzpJW z+0ccYLe3E-$;W7O2t+Xy!m}itD7`;Sf9(BM*~YvgX92AzsYj}ZxMMixKDIdaIi_Pq zW%g&*V1_WuF?TRKpT3Ns41vXsfMqq zsJW^+s(z|9u9~ZgsL5UGTP$BBUZSgRtevo@cPO%Na?EpB-NM`wwhy(}aKzi9-ap;e z+TGq~+=Sk0>6#m685kQaA37UI?^_$>8R_k+O&mxh4=en6Kum{HiEayIOKXdB!HrLz zNvO-df!1NuQLo+8dg{`5QL@Z3EW4??|FAu=ZM&6;v57>F!cHkiNKYhASVVnD?50yC zWT!5!HYjkICl@gpPG4FTV^?PvbcutM^CNhWFoq}wSiY>Nrl`{lU{;AKAtf*>MlD+= zhD%yOaZaT}u-=GMt7+^yd7iV@i>!pKfsBNVhI~PiOe#&fLuyIVDGnvhA!#h`EdCa& z5)B_s8{m$u*3?$`O8S%t4GV1oO^&94 z%8HtRIux1^S}Oq}Q6cdp(K{SI{A;){KC?(qsZKevn7OE|h_WQOcwPRjge-f%(nZ_a zBSAy0q~+my-5!n?E><+A-&5L08evRf%$!D^hOZn)L$@x! zerWljKG9jp*}y5qSzjA;O=-=B$AgEfJMlNrYVRvMNGHYB1c zd^KV?Vm~4w+%SAasz}O8iZ-Q<4ULI3bv`wFG<=M4Y&{7k1tNJt4G4xfOe#!XQh!9~ z7eZN;$}bgn75p;evM3GLMfyeDx@MbNqf+Zso0nyo-?Xb0eHs1heI2VlO9Bg9X}wLk z@rL0$Oyn-)x`i61X|?T+#W&l$?cPf-1zMH(!!R!|y%U;YF)$|Ixm`jwc3!;0`U6I%Dx~MkKq;tE;tqO|jqWGdCv$w-i zu}QJjs5;y|E|1s4R?PAws%p71O{)=xcf+3p1>3OO!;vVFFZjNAbYFdGeF(b=vw35v z7-;Rc1G|5Qk{pvbOkaMlj|&<)G}_obM{SOW`PQkxqrf$jH{&opFWa1X%uVCS`Ea|B zRmrq=6nd06ZZPpB{p;}LAO)A0EA8{DaDlpsGrFyc#k}Bs_h})=HAm0^)Isk=)IrMr zybfZM-F?rqNM=}o`?9<8Q`*zFrx9>G@V7)}I%RYRv;wpWo%{CJ>(K6mW1{SwGy^Mp z;j0kjm)OYCVbu}ICCM2HwhC}<9J*RsODa`Ly3TQTo^}vrBpn9N-aXL^>9y4Q1mpzO zlBCjU8-V6kBk#IPJCD=AuFQz`UTeYw*u~z4X;Y>KoU)km#KPlj_yXtL{T$Xp{cP#n zhA+X>rUt04(7I4T$WcUK|L^{>g2RHo{pkH>vGpi24fuvO8)vJpX0;Y4Hf-j;*J>a3 zmLV!)GbnfDaI(so^Ujp6^W2JFDr@U!;6eWJtgUB8bq3Uh z7aPc%y}Sul2PcX`6>d0Ct8j@p`)#cF^`~j^iP`=k$srF`C>D8ApIVMu>Y_P2F1uut z&*kZs^seW)EV&+yo5o=>Bw_-}4KiP6tD=Frgz&NinM{MAjT~@3MQ)ughjPBg-5Op_ z%e9Vb1nSs|hON_OOQ^^BvniYf+(0-X3a}z56W@(fnCCCLa-U`TmEk4zMF`F`mog`h z{a2ffwvp@i`hczkegnriyM^njd+x)%BkAkvtF@hh6NINI2xf2{cnQdRsA}#!seQP< z5Fa=kXxng#uu*PY95lo;r{9M~9^ndQ+YAZpNoe!?nH)D8wSUI~(d!OaD%s=l#AYh;Km03{RQrghE z#jnLKd(@3puI*PFKv)h8$6Xcjq(^1eyYP6dPioJ_t>7#d&d8qnUJ==LS<2}{GBI;& z-7{_{>_fWbf&QcmVw=guGPE#6v6YP~jqvqQeO&hd$tX&xOVvsqey(~EnyI_H-5vd@ zUc59EWt{21zq^mlr|ND0A@Okc-nRD|Vk_2J>*f1O>`iDEQ@0!RdurF>C3ts#|Ni+# zs8PSKya`@Sw}9_lud3L*otGg=t(#HzPuj--PqHcc;itJt_YuN0wlwQ#Y_ zQ}eLrTo8|%b7VUswn+-G#?f-4lZP|k$qrl-tZ~F`_?gtIbigQl>VO7`T1J&bjZDM$ zdLH9Mi_AmBy~7=&ld@LXUJ>8U8^B<|QbnUUjZOOb8QufWO?#vp*Q5B0!qfQE-SYxO zJj6bDKEyysG(vMUi;%nELeH08TTv2`map?+v61NPJL?At4AapZ zb}-Acd%7PFujHuqd6UXku>NAzI-!2?O0P8dh{@p5Y4v$B-ZH1b^CxsCKE?~|o-&TvOxH@?xz!CB*(b}VEopQzk8 z4qEqH&zg%~v3nf(tx7ItpySy-g=LJNj#KLC>9Bg0{@e5Q1p%cz4qP#W*GxuH8etT5 z0`KMUWyo;+z;QD%x{~mQaAR<$CM zIvVO0c@N5P%c+jN1p*}^p=W%%V%wV{k3t19Jw8Y&M%dCq9or&>bXnDD>~;?}Ih;Xi zeRQ7sxGcv?%If5l3HL26LMvglkK4VC!%Mw&`Y8^K2<*F{M^9T1pGb=^OrKyMoa@c^ zED{sako*ksg%Pd9v4pTVS7kZ*-<0bm<*FDo=gg2+p%2sn+YG;xG0uItBuU4S(OHED+inUpQxyZLsr6`)+RF z+HuQ~Lw=pb^CA+a+C9jIu0e4yO$<>FqzOC_J^hIp7S1*5{HPyq$?SML7!#Tc#SmFd zR4qB6vBMbUIdDSugR~|iOA6#W3^{phW}#Akh6HhXEMEi{kHfx-$FcecY=GnK0iNmk zNZLr~`26q`Z>ATpF9VBhjo(?URrauP0u{7~LLU*Sm+F$lQ3cX4#jts_xI*Wa$Boh6 zSP4es{Kbz|=ML9%A6+=Zaz$;QH#^Y%Y_nW}IL(`Cu|y4Q&5Z_Y-u;tF_wBb^rGqn5 zA#I$s9G(}CiHC^x=&$TNkH(Ap#%qupC#Ukojk6a-OhPZ(cCw{K-6GB)IWh1OMqsFB>WhQz@k5Xc0ND^l4Q7%92kA!=0_bQ zPA>RnC(3xxO8`DZVe2P4iNNdU`~wd;h&kq!9+fjLX<|( zlRgtWHf=!2lv^DoC!%(^Pf+$i5&@Pv6?%__!A=zoJM1{7EBXWSLnTjI*K+zJMWv&7vo?HQRRLW zGRp#}eLHk&N%&GkOPW-U6>r|Q>A@N?(_7;wnc zA3ws7MGpGD^x>a&kRVYV5#Lw zjN!Lwh*JbSXBtB1*0)>4%epJht+mbC_x{}>g9Zolnl0%1~L6)D_BW!t8 z1Gc)&u35pKnU=n9oEwa6>w8=dZ3$jXM?N52KStHtmbRVeqvIpsfbhxaBAfa|4*O|D?fX9{EBGvxR^JgPBf(svKw2@M#O?Gz)_coeJA=H6*!=omIGN|a@58V5|ok8=TK<8 zl-4-bFjCWbI=TBermnA>HgW^H3el_Ox%#-)&RxTE;pyxt`&{<8`cK? zN!$c#4j~M(6P>{!iFKHo^KEbhGkY<~5M`hp#SDcjt|zwo8~My+VeEumUZcrxQ%;kU zG}YFuUxg}J3KSmT+P6(4Iq^Hoy$+4g1vt40XN?}`)a!}G92yU0fCBl(DSDy>BkDji^?4v{!VkTSkb-@crlyF6mnc!LuM+sd;-*{SxGN($?ibfmiSB)hd zU(}kHBOtbDAWs0=B?ton#43pPCb2N^PazXTtl^-P>?jJ8cv?zWM+4{YIF* zV)kMltD~)xbTU06J2`9>!+$Okt=Q~T2mMx zyeNs#Xsu&%PQIgrON%%Rf03d~fzVu5A6X7xdDvfF~uL%z;{Pd~<6z)^2M9h;aSp#FDL86UfBCW(UZSzVQ0w4r>gpOdcvu zDzwtybJ26}EJ5|F^|mLXCn6`7I1M;#zTKU;zH5tttY@s1w&lJ>iD{?zcfj)Ipng~@ ztFMG$KTQ-K_0CuM~iQxZOBEzginR=@YnFTiG~ry73VG zUk}dD?|(g}BO>^}E>4y_L~7D<1VXkB#sn<1%(V1GywC&$1l$frCY%bwqW>KJ`HP3h z%*n})la9{S)s@zjiPqM^l#YRegM*Hqk&cm(=CcQlqq~ihz8j5=Bk@0k{7a6ov7@1b zxt)`_tqs9na`g>tot=1yi2j=B@6SKhY3yeH@0n~I|Eboef^>hi&@s@`)BP>`b13&; zPdVkx-Hff&gw3suZ5%)6;N@Ut;Qqhza z1}uOm0){q;R_*7vy{sZ|c~NmbTwz>Y-gI1njVNCxUat?D2MHnxfh0%)6^QO32tq=5 z_cqb&YH~fT8=JUG%JFcczdoAI<$C_sacMHnGiFi<@=cH*5)zn@04&fC5fS7cKjikn z#z}RHt{G-;f!$_rp@v`oqw(K8lfZ-_S-%e!{ypyBO|xzwL|_Y#m1<-Z|0(#N9ms%@ z{H2kvzx|IUmJC10ZkBhdUr^A%g#VWJ&lP^>57dhb^!gu7{1M~?f_4!fp~n9ILm@rn zOGejS#Xl4!04sq2lsV#DJ-upIz3fIVoTKVAJ`3*f5I-YOx&-t!fPi)W#R_frlQMC)Kk= zbAOYqjowfau;Q7AQRMI)+%i|l;k0V$q$Q7UKxW#SbwDWbG>mMK6f+;?&&BPbTn@tb z`Ca+)KcwNpcJAq%{%FIneN74G{Lu7U^F0r+Sx-c&^ zxqAGu-P|m1wtT}NmF|XCBTL&2GvOUHtJ1jDS4=F==ZtUi?u>2F{;WTZ6ZK1?H&r_2 zn698R$k>yXl0uqmBb4ubkI>0BIv{T3BPbEyD7Tu^uknm&HNWJ!EoHOhRuNz*Jwpz=i$!8uUhO{#k!* zR7V!rW*Vz1CEQ(cJKy#O!Yg=dEqAoH2YxIqZvQjBZx29aZO)B}wtS!t!{>{oPwx8H zu*3r!K@B;~3Oq#Mm7uR~A4d?EBUlJ&uUA-Y)lMkm%&q7Ah`FTZ0%|Bdt zQ~h%O{ZVL5>ecQW^vce7&e^>Xv#~EJ|HYy&db#VV3Z%vcTagl=A<~-#MvR+V@8lAb z*Y}Le#Tn7-|j=%YS;2V7KR4YEQ97Lz$+0}~| z`tr^gRM*L|XtyiSG9Mf?a=+Q<@vM0zF6}Rtn`-xh617E{SztEhU-w%EeF}^VN+Jd7 zk2AiS?sxDsL*lpGWitI$bC%H2fMoWyYJ~*T%S+VF)!6e))gIW^eh1C9)XKU27v1**(l#c~(&^QGjw8G1`6ehYbt-EF z{>GE#A|~=kQgxaZ-O3ZMm0N{}Q6gZrn@f07{)1>lzJtuD?X4>maiDwU1ve9*G23^T z_sRBe+_b{_<5YB;nYO+^5WaUs(FI*pL3Yop5^(*G-4W@sX$_P@hHuV`!9N%C^;CSC z@>p)oQRcTP$ua6RT0U45ASDR^v9Px)ZvYK0wTzpu6b&zgsBk|N2>-0yho_fTpZSwJ zq`-6(U%cKCJRK(iyiAjlL~o{R4#KB~(opr4gsypOok4IeDlafx zb1iv$#owpkJk}lloZsCzOU&vFWQ%TmUWeM|Aag0 z;YxXaDvxoW#x9jv-Ju3p#fCil!BIyHw}b)m-^wY0g0Ko*)_am{orQ>ThO4DwZ#)mT zo9%9eyi^SJQ{gZ%99(r0_-KI{@Vx+I8I_zm&$v0=3=632_%zWRcza!JmDhDF1U8Bk z@~-){SC>pouE3-Snl8ZcTTXv^bKh3^RPNvkyAaHJ9?N|$&{TOyWWdCBBS6}A`@>{4 zw3M-Nr%*eif$S6@LHT*td4eRbT0*d z>}2ccDymo(W8JRI+}e7Vj&E?9OUsO>Upj3Wpx?PVKbFvx0o7vCNfll>)(L}OcLBS@ zHcNx)v3`=uehL(G!F<0s8_0d8XVXG)dcxI0F5BBQZ!$vwGTAMsT>m?(Qk;w$Ha zN)^)#Pi6i3_U7P-VM%Q2T)GOEuikPMk*r5$y%KY$b{T}{Sjj-QbM%A1&ijyf_51#$ zPN8qNXXPu-hM2Cf-eI|$ec8=awzUgovuixhA1L65SVReO5KE^tSZV~f&?6Y!u;m(Q~OX+ z(7!ne0oXq_DGT_~5`~gjzs5vW<@FAg zRn5RnjVqQBlK-Bh2p$aN^_5_GGxV*1 zY_qDCOu}Y44+{VQB(i8vTbNA8H+zC$AmQQrJG@_J+gn{P;LigjwX|>}@pwY_$5MN7 zQ#rS)p3WLKsx0OxOZPK8&jsY<5P6?=QE=JqfGig(z@v6bN-JuXn+ysif;JzGbqxw- zfw;vl#~j3cjUR3mG`7$j_w_>#(A*5H@E=UNTB+XQ#gXCr?*96GQmIyB^i#c@ z7Y1eY;e0LiV0E?G@#-))Y8wE7R|U4k<9RJaNKC{CZ5>}&P}OGhdbHDW9Ak028Dr}vTcS3rbx90b?|i?jEWt;wQAeE6 zRa+VHzbB63j{S&74lI<-j0}K4KxbE752c&}Td{}MrCyDiM2gBbT*IUej!@e5pv-Xg zQylU6{jC*0SgHlwI^y|SMycXYWSac~;?l}9!K|J-9Q0=Ul&LP0#ts-sq@x1Vd%xW4 zb^nr1QHR{;4Q-8l(GzI=bkSo@bQ2CB6$yz@WVfZjWa-jor?X{c@D2qC_*9R(<8R%` z;0}t4qi=SX)U1srfMw=Nm!`7SR%IeE)6*rtUOc0Z*Iw$jVa2R9r2@-^_obd;XR|M45qgQhGe}(5OS5k8Ev3?X%6f9I=2x&>vM3KXZFZV}*$fo@(zGmnsAsadAkMN}cl2@Jyd)bElJe>12_Q1HKP|bWZ1Zlf~b? z8e2UTp2x+7qh#}iSTQN}L#qg4p8N6A)Tp)J9eujf=uT$}5gM$P4AyEL55^^cGB_Ly zkp0w?T%6^oJ8VBSDMI;yJ_hpiUUg8%pXE*GuThGX4a=)cr!N91#t z(vpLW(Jhw3o2yRP4VW;_SEvi3M>3;$?lISWUaZS>J9VHs;jo$IV)Dis_Q^Lg-vE5C zmqMhF6Dcf`^#@84)Ic9cvc7^Nzi7txX=T`}o4;0*HBr#m??noqll>Fj z{tc*hd?3_X1%-uF(lihq+dcjaV$K~W6|M0{SFcYu33T+9KU0~_bCywE(mCaOf~CvU zsjSvZi}uUfO+I##k}!=0$4sWvdVU67hf)xMse&mV{h9sB!9m(x7JtSb?i7vr_jRwE zu{ph4W1JkwZsQ#?)jzqp1uXD}pRCjs&GCI>L|@M;le6mCJ}fq^0_A$2db$cd`y*fw<2P z8S57BSjqZFd+)>SBMXgn{|n&(3ZJmvgj$tXj94TNq8XXlw`+`Zz3LCX?)e>vb!0Tp z`>_22g%mc``OnhA%_nPc$w;U}1woV890(ZjyTPEgrcdR2Sm%>nn=#bo`tAG*V^W7% z6seB+i|D_0gZ~KX2cnjTi9--#5hBRVca+h-*)JRTOqs~$g}IUjmb+GbAq}-YT|5Gy zl)LNG#s7bZFQWOJyMkt;Zgl1is){TL#yr4QVDa@g&$9^xBS6j8s-tM4*#vAl>^}|3?@ZPOayetK zyVY0Ag$t&a5%X17mpgQ83V+9HK8UC=>3d}oug<$j2HmSLS;M%lUuM)YG@thRa@fgL z<6xy7r57w2k3C-tN!h??;lYl=yN@j0z=lbMuwY^xYXpCzqoxA@+# z|Bs=ai*LRJes$v_EE&wU&dn-SMED5AIvYS;?oSPe+gX_Csl(zzOx%ePOSKr_@Os%# zedO5Z9-8$zq<@xu9Lb`7eYg@oeJFTXcp%>lvV2FNi?roGTc0{Rq%CWH>-1fZx(`~f z-W|@}@P_g3OGw+X{%Es%XOYMj_v|wDa=>$? zIsC0u$L0Tvp%!d_xgiKiu*>xzp<$zzs^E2Am%P^olp|359z~Q3Q6}UPzQ+Npw7hcq0f#I~5_BJu^|O zkQa|1TX6~G*K4#WP=zvEyUmX52bUM9bLoW(dIPZuv4GRA;389{<$#nN01rwl!rmOX zr&A|ddri&NURX@h$5~+#BQZ9I_m`XmLsR&d94Hq*!dAhoVaO!jZaBIkJv9_ttTN_~ zIEXvZm(AxOcq&3o=I$W>7>WM~U9*B9nWTzR$*~eHoCeb}*>S!%AT&%&R)c1)bnosH zbH}SpAM6ogh1Ojac^g+Zz_!ddj&xsK&0#$gA?z()nAB>{>B*$=02P_EBByh%Ikw~o zB*qc_U%frH`l$k9enm%CrDax=vjSso+jZW4b-N3SiZ-A!6I`>rZhcf#)wQ%r&SG#r z2E#8UgTb2jIggC+_tnmM=SIn`@*#~DdAZ+(DX~7UZ$1-&95*KP6wH!U?=q7p zAW<@@!wMD><5zcu@>YOf!Z*2w}8pEe~2vXf$F4;pm?ELTtEkwy2Y z`AS$_Tc^PQKxwup%z|cNvwT5^Ca%CD#X~zZIUM+?9=0#yJi-1^ngIA49cG~Vw zetao}sk)rxU&Tmlc{ zC!~S{=mZ50P`yeNqy1+BS(FWCQ?U^lxcO*c>gveP1m+I@dECwog;wu(x>(?5-J`+ltd0ZRHC~MN5vI!G0a&R>QPM^EBDeH+XDw0bI)U8s^0$e;*4kE{ zQ722Sv=vM1$m-skHY}VKf$4QH7i|zQKkl-nNo75pcv(Mr2~;Op=6}*|Idd?TzBv97 zQVPvIAX@pNyOB42JxBEa+ATfd!Gz0OY8?*7=IpBbV(0hum42oh$JI?!9}0YfUF#? zfIP@j3;DGZQCwTihI4scir~47?e*-*%~&(O<%>Vw7n6xdde~_^Ec2Jsex4g9Qv_}9Z_4H$PhJ+yHB@~ zz5+mk^W3leWk@JT3sUYSRMmJBaM<=>Xk=Q5c6gH7Kx`)|{s3|piRPnUUS++1vnZax z>PcVW=|I%dgP^VJ1$H$t*01TiR+8G-hCZ>nkip!=?{?6l*68&mLpCGOR8&lFtq0-q zw+ZJKmVYD-PnGWjZdvmx%mU7JIYdw?_Ei^XOJ;0CHrv+3f=#z?VrRg>uM>u~`&Cdg z(FNmzKBD{Iz-a|~8tLb0{#OH;eFqoqdHpg53@wpwVi}Ork&QJ=#q26d3vWUp@PyRV zWi9Z!{o0aHmqLuM`AY;a$a+eQK2V)vPoNkfxg%m5gdS``fQ!^nEYKH2gPZO?mPE(D zafP20#m!*pi!iMr1Bm$J0twj5!Ix+Ew?kPq_bqxAC-1gcG1A6T8nAIAvYmyzH#MQD zoQW8E?j{|~3YcR#Gy1ZgWFsOL7L@n*cW@cg`~sqxbA%cN+5#hg>ZGlM%G&fidEI>< z28;x|l0$~QcKVCxoyuz zkN^QNxF)J_^CkZQVm~rxChq!{9O7>q`*|YiVHEUY#L|B|$AQ*OmC-kMo}czECSSA6 zV#Fqt3xoWW6V_psT=Ln`SMPTmSyH*QILa#dWHXRz?@IkF3ddQ{+U^aZ$lq>l#{^>X zEF@zoo_~>2S;cr(SM!`QRhC&2CXoP(03BHUT`C{^^Cm7U_Ak-jv-Vg83HSi6ji*z} zG)H2Y+s5oW2x)}~sfC=}F&RmPEky9Y`fy8s*yPLp3#(XCx@J4HXJ5y0voHzrfZwUT zV4oGDi6(P>agxO(he+uyuIQQ}{@TK%&-*nrShW@m5D}ywt5%TR){G;&2(SoWe{C9x z_E`kzzB+RSd)Hys2x`uI%G=lZ&I3$OoW;^lfnMK2-nt&{ z;!V^t)I>_GX?XJKDqj|+(<+qKP_;+I*%_fB#GvsN&XFl4Svs3cX8ff zROs!uZF`wF7dSf(P{jos2+MR`z7{Zy8tDCuncSH2f094IvUHaQ^L=T+efJ%ppA+Zk zcCHxG{Rp&7PqGU34Vt#Q+7EyHEto6OxBT5KV_o#8UOps1;`7zGz!M3~z*RxWSym8R zn}}&en}l56^gD?C{$s!}7e=jn*sfq~b}7o<>ZMQ`OP9&dS7Q{S6I#WBmGswC1L59` z{clgG{{)v!bTD>sd9QwVDJ$Nl;r`mC!B=|ryo^xDh=w{luYUX?Pe)?W((ah=xBFos zlcgDs3<4!qg0yW6zI^0N4gnrM=}j0^aWbGGX4QVf7J>XhC~EbRg_8WOH-}Qqt6#uH;-50)zujI^6jX84T0av>v5i%RnV+~(8ub(9 z1B*KHWg+b`jAtdBdhfPL8E-A6M@s+A_4yM}^d$3FcF=dO9(7BZ-{b_bzc(roiNmz9 z>qAU3D%KL-9Og$wfs6tr{>-2_!m6Mv!_sUZ+qp0me8oP&hk)so@9l{wbYBjBG2CAW zhZ>n{h& zOOBK&wS00%lUcm`Waod@8FrLKMt*P33g|5QGfV-?KG`Rho@8|CW~%7Fn(V#0Oi5s9p?H2EUYgcY-@ z#>jdY=u1p}Ls!BkyXPrD_FjdYm6vMt3LVQj_>k~tN`SDnEW%q?Y;m5UT1Yf!c`>7M zwsA)0S^NvN3I0kGiwr+&YeEhPBeA=X+}@>@YhEA3Y51@OvrG zlT;HdZ_4Q;T`Z_SOUst0s#GYBsvuLXaEfM(QQKMofR2%k*8H{rp#Jnoq{4+R%>I&( zF>ou^raru`za+4}Yruu(@V(lekNWGf?81i(QuE;S$xLl$!fZm?zlvva*GhJx>E9d?C^U&Ipw2urc zvv4}xoIc;w5OZ`&Z|?~Oc|&km+GgB8wrOfl*D)FhGATKyf=(jMKLwkvt?9mS(v5~$ zUqbE8sXSeqYoZc?tgzQDmKsclQhRa<7##SO0Wh8h;gYJ-_4ARlYIj@AW_5MF>dtM3 zs54#TleJzCh{`ok>MK7xp#V-D`^7o!9S?U+4)>>U}{NiJfrxrOdr$geEjb`RImI?zIEJ&MT?d(I(5;RmpXfiANlgK>>zy zyFEx%OA*k8<{blr(2)w_L2Xyf9^?#$d2P&9DFc5RJ4N z08Zb`NPSe4{f?+kp`8PhuKRNk+;+V@^l!auXB@4U6Cod=ETn@o-{niIUmJfju4X&X zArUq?xR0z^Z~#al;Akx4tfb5rb4NAwBH4#XU;KUi&=bgpyqb~(*hhaP^m)UT!k%44 zb!ZsAQM9Wi86Tsa-P&L$e^*R)^vLD?^>?hKB*+5Gcgk{IYeV{_PQc1iMaiyA8X?_G zc-8LJ?u4hu7&MOmTH{@X1bLhzGp#TaU?|N|BRsQ#pTjqFn=G9bk_^ml4oGLx1$&8e z@$(NUb_iXL&Bw8P&S#hP1K$MJrS6l(aRE-wtD}c8J>o=-n!li>t|hQ~sM^XIgS!MdjJB4afRguaCy)r~q@YSxZjR<%x91|p^j zeBowcVN^-!PX6^&pJje{!_Z~j{vAYjakQnb4H7hb*0p1=Z4Xw$rvx0&R z3j(e_WVb{IIotK@a$;y}w{z}b-#x8K1}=r_t2kALqbae!%I!PlgPGA5H(x&HmhF)@ z+BkxapH_2|Ubjtu2wH2rrzjkbHh~h_WUymKy=v zLkLN)0G40a9qj>Jfjy`?s(qPMzdkSuIvLGNnycKC4XblB@TBEHuFpqz1;<9!l2kzI zclv7R{)%0n|M%$%0*eo3`-N`IWbNj9^d+$E7TCmU&2UY?T<<%I=4|b-h1zIj{t`_= z#7?m=F%PnrqmuK2A6aDKT*n=C1TmAfp%=?s`{4CS#C#Y?Oz$&Q#KmxRFMYCMM6`;V z!iE;1TNBbF6K`-IIloj6+LuB$$A_=qs?Rds#JaF|Vuyz+3%6#Z#is5H__m6F630|= zEm-l{%-^W_5a77-yrO3K?25V#d>~|2a7O_i!bLAOt&G?X0bC)jVrx7XTkUfYQzo~W zm!pb1Q*hO4k4ww3Hrn3~J`IUF>JYcguZIT}{fdYu@s5E%!69X#Fg2;Uv&sboT*K_Lf5x<ldL*8raY5uO#90U zj$@hTuK^Mdyw8Nx;k9fok%=o+1`}QNu71a8!$ZGMhjqC#nbVh1=%pKdr70zSx6d67 zDTPrzNkdNz@UWKsR{iwT^LkW*2C=HyX_{2nDAv3#N%&w(ak@J)$&i#Kdyy7v5nK0l zgT)fCP|gb|vq9J_=JL!IdLT@?zvZ6>GWEBDMe-UmTV>d~Xu2wahqPWn(&um?=_kt) z7y!NOaMPgrm$x9k%AvyRMe&@5n0sF%+R zU4Z4dNktpdX0MwL@9`0T|Mv4x?G;p>jn>DqB7VFuYlJJN4g#5c|N>A8KiIJJK2@^gsq{_RDnpq)oG)5cM(&jeze83a4&87uYn1 zhsQv#l*30v6R#>(?93K4*rIb-mnh(e_H7wskk2*#O!dkPs1EP55v96`o6+Heo~JnR zqo{AiSOLB-JfA{Y(QI`cqn6>JA)r9N&%0BC@>wx9lH8j!ogef$xDAr0urgmZ zx+{XV5t_*TD9+0 zCRZF{49on!$MAE%PfN#&R=RqbjYvOlurn9E36V%;F0f*m?lIN7xI{)OhlZK-oO@oLjsSIqz78q{EIut`X250uFxE^q ziT9H{#(>y;%mquA=L48EWHlV)oeQScM0#LsU43vkEAB~RwJfpf_JPdpF`unyjhcI5 z0V5+3_B*HU$}n#kA|boS6o?fMia(O;MzAGLTG#bNV((RF!`FeUqcj%w=`5W7Re=s* z@`ue@aCwXKrNr7JG0DzARaV&MOhgJQ05=E;jqRAZE5s=NRk3CcL&bMr+#d_R3kAM5 z>K6$gEjo8j!lMZ%vHD?JKsd>Uss4vac0w>x7;rWMCL&6#Nd+3m6t0HmH8`edIL*tr ze0+!G%$))}yBv4G5S>XaF0C>iwoWhmDsP|skoGhE`iE<0y@L6O=iPDyf4iMgb*|?$ z5>PW1CYU*t5A&73GiLbN03z~B0mTX~&_znobb!;5&!3o35z7BQA)?i5om-ihm%}U> z`bWL2X*K4IHcZ2~)VM@~(_vncFc;`(_SkM|3($K?vygMfT5^EL9tZ7d*B4){sY==^ zW6ph1QV?@7hL4RfJy3{#qByfuxI2H%oNQl8`0{5+K2wiwVkT9ekrpRJv59=bu-Ty{ z0NfPwZ~OyoVCN{t;2nkGqMPJ;T z#vrw%xiFlfAr*lrM}bi?LSLpzJ4L6S4;DLSrBsYM%!qMYb;|HdB-yOjsaiZ4*Zz+R zi)3Z1T?@VF%C=dwl7TkhsaEIXT?h@SH zT>}Avy9X=W-GfuOyG!Bja4UQ7^V(~--G8ub&N)W!-{@=jo2a^>MikXh=1AB7^^wkK zMuxLt)3+V6WQMbO-mOl|zXVMO*DLNdrpeh6>peK;sIrTGVw`c;Q%Q5xxHceB!BY=?uY$5 zgd+d5MaTP{va9WJP{q278p?LwefE~i4_bqlLvr>uGj=GIh27*KlA)w#oI(XL(IvKK zlL?J8rmZ<)y3%-e6<6cK4rdnEJ~0fF<*(j6O<7%m<0+q}r>=;DdjH4>1yWE4Roymc zd-8sAk7Li0niZ)@9n!9N+T*j8whwVid{K%6Z@fO3k+D1g*fhmJLKio#@eeoHaG-AS zB!M#O7$3pxU+p5{f{DJfTN2ukVC|=+n%nsH%5!a_{nW6;W9b@biyKJ0MySsdC0F{z zMF5FEF!r;ZmN$$y`1j0$P8ywhYqC1$#}@88smX+1E3h#>p#_G}Z`B+tHC9bJ>0H&x z|35inpEM8ZJHF0VS(s%b_aTEM@j;k2%A92``{<};kA`e9OfnC2ECg_o8)v8mC(Ewa zJDiu3&bT`f{T9$-?#6$lo;6+^pP0X6gJmVCuSad~t^o5_hE)DdIYakzt5^c9tJRZ$ zEI!a_ulP=uoe?yv)2BG%eW<|5% z5E-5=$uS;;f4toAe`8=!wJhA{?N6Ye3=1RFa$-T^+ESv3F(#9Da&sXZ(9l|w$m4<-!Vm@~P z&rBWY=Sg|ZR2hf~7hHz`*(so^V)1k_#Q0t8i`Qzr$qHIvYfhN42Tc`j@NC@HNvo_A zM9kDn8)VCFoJRr2gz4D^R>YI{4xpW+ha|!NT_;y;O%<@fljMMVa z2XCxv?btD90~ryq5N#Fa1KW}Y`=5xXC1RlUd60dEO7oy{ezY#+{y`(cuUK<%pgY~Lho};-ng!i;=!=|(h z{?cXu@%@d&8d&hi`GVsc9JbySkEm`Sp;Qj*p(DIZ$7uN*hJpIj%BN?c{z+ zwxpPuA~Ab2&7N6Bi(bH`&tFmEt)}Z4%LWz3JF%Jm*1O*CR=>+zEG@S}Yr&MrDbhm= z#e^Gt)&NTOqC?rNnCe14Ea2L$i9lu)eRXvIaA(*|g!7qDmJ7IGtGKm}u<>r{O|N|U zJx_{d{6j2Tyqn-ZIA;i<2h_^O2QKwe9FUb?_t5jN_ea_0;e8b zfr~2}#s)g4C8mxH$>G5q>$i1|j&q%!%)ShqZrPD!F_y`h7I z@3OiAN6lvVVbA|a%=78ONhm#R%^&Im6Mf1A9c1TO*!{tcy|GX=pwJxCS*{D^_W1V3 z-$YNag#pwYy|Qv1gOU%IS zy%2q5GVbu_d=*t3c*0_uc6F(qeUTdEpqBmniOVR10+p|wA2;7HtRuD;iixEj{U|t3 z`H?dvznmW}qYZU`B(01O)+NR`4GH*ReO|CID=C4xd*0oc@E>L8jiYtL?n}@`40?r5 zLQ7HzCf{v~5yXM$xvB6)CuzVpD7b&BFbL@xBx!{-EckD=fr$tj^~u<%_Xg zui7G#=Ib=siwwRSm`f{O$cj#zRdUqkwJ&vp$&?CMKmMNQLZtpMUd*x3no-lQT)kQ3 z6-V)1E9$>zt{QPrbHHZ7CzEL%d+94G!p2_3%Juw)-t5j3)A!oKqnC63B1m&GJJ9gS zJTw*aD#ShNL12oljKK0c=9HKX8>8x>Nija2k6teFu!QHg;;FUa>v+oT)mx$}c=Up& zH_>#F+*?(K;`<`Y0W${Bi<#XkjQ3=LnyfNy$=Fh&9u+hK28%5XVh)X>e5ZTJ<*ni8 z?dyHl>#IA;XeWK%n2gY}LI0&YkKHH8;})@Uu`bAW34(xMp56u1P?mhR{`{#g26ppf zv1pF4P_{dKvBYTr_L0FevM|KnSF|QMZ#tu`+Mj>I%yNHt8!6f>!m27b4E||xU}0@Z z^hT@2Sz;gAfgN3H&UFUT{OZvJZG4r|4X&N$L+o=BJ(pZ zJ`1!7lx6;};(mF^sH3aftsAMzqD4U1>P=8E^7m;x?8GTo)8W;W5`G3xM>d_sszE0& zXH>@s;qeP`tJK5TcdMmpj>yoh*{;VQhOR+}S-TKPzvbSC2=g$E(~t0#I;fVeRvd-w zmIu%aaJh3z!(XB?-G6^h`)r>kg1cXTb~?x1hynWrWZal@nS!^Ed|1Mel!-X6bD+wd@3Y?IIRLEho6OCQMlq2^Kr4qfaT5dCYS!C z7Vn2m-IWLgYurIcXU8b8^+}5YjLtx%-U*89gO8V;8*|>MSG!f{&ya3+ljF@p*5T3( z*AARe*;l^V(_uO}@AlYp63hYs098 z9Byq{Qxux~3-!$1tEEBQWt|47Y6ZQYADLV=Td71an zMR9$rGYg&N@(d^?+rnSqJFBvdnGluIx>Fbevra9H=elcNKcRj!+Wua(C}&`>7a-)T zYL#4!w;fGKMZoM!g69w-Sh_1CN>^1qwzTxP8=!Qs5ec<4?~lHWq-avxXsfaq!DP-4 zGvoAMP;Fde?yZ#}r7Fp$Bo(Qt|MMpsjRlVXJJl4u)(c4dINV-_%y28nk*dJiOH)+# z8EV`W(e|02EK)ANOf3l~?M0)v_9&T8!JhZ;qwMP|O9d%O8{efL{G2PjrBJJ|?`oHg>m$1TQ; zQLn2iABonPTqxoqT!Dqbe1w<2DGz|)rly-~Ped=ud<$;YqtPL_5c8N3gVO@zSxrjA zv1l~u5Sb?8Rm)Wb2C2bhw-iFPPg@Dh6@fTp#Z*g2?QxH3C9%6|7FmHVbGLXeRwPww2kDC zraR_5jO;GgU?q(bMDwU{)P^{Y<6yE$`gWR{3tdbxWcr8m8!3AxmCL^@fBAh>HiN=@ zP;SP^QUbNddQW$@I;NpDkpzb^(MrgDkpVaT8mp45jp#4fjT$*qJhi|!gNH`h@YB+& zL-MQVPIHnfWkbrWNDmK~G3OhS)`5opLj1!cPb2CC&b5CBubcA}`L{i_IjcVS0cGQ&KebbQTS$!7dy5m>*Zw6Tq3pU+tRdk-Fk zmqCNPx<4@Y^$anSdlF02{?(F_aa<`?T&RXmmq(fG<;)}l* zAteFZJ2#N=qGU=~kd-hKYC2o++5O4z>dJQX?Hf9jm8S%JPC{eUM-`flT&63&!NK`X zR=>+a0@%UiHq%eHT0S;y7j+^iB}}}-X!u-Jo?%hq#=aZpTB6ZLpFITB6KuZ^P{0iw=pxO0}4tZ+3GFm>xow zcibJLBe^-c*S}??IU58+qBYDW!2pZO%a+11rH|B)zWYHWO;pimOepYaHzx>nymD=> zR(cY~U|}SIa#Ee)SNk4%#~0wJm<3qX%=I-clXJ+-EKpKUs;M>c`hvf90%}&)wC(aN z4W+-y$>~ae4dm&rMHly)uhOb<^fXn25eh)Uyt|MvP%OVH>xo44e0lEjf;|Kjq7jMx z+N1zGvcKT3Z05SbUU54JWk!*a0(}3BFMT$%u3YiQAR&JaavnY&r~9YV`)Loj0IMd} z3&_K_-2lS)gwfI4D4=j;0-d-H%7+DjqHpq9C8wLTd1cV3;ZtadbK{hb)$hjWZ- z#8Unvmre1^HAD}vC+uo0(7pz)+iKcb?z^d%K4>{>gj&+Y##gC`K>Fi3`J(v$R**eW zLug516Yt5@@CVhx>5p~4r6+>|JW)lvU2c(v^A;`fF@-R>=M&hDF%W&Tv;2P|2$||U zvMaqP(dmHg0V#_q4hWlaum^4IP%MiRcqb1wDMVFW}`#k5uWZHxI3$Y@nl{n-_{3(d)?i z&?l$fx?Y19jVAt`vZbNDj>C~HePhj0`VFz01A@fBuUu5I?t}qy%;KqoTIR*k=zZ!@k9N(x1=7Uv8RN%Jo%y+i@sG@_W9IE_(D7OIv16E2_q}8HT$$c4LH8_BSPV& zbPlY~&glKBs){}-f+CI4R{+mkr&1)NtcMA+Q|2D_q2e%gs0`B=fK8N2IrWRR2PfiO zdD&rjn;;8n_rJZ7`ojH)M{5A4CS?4$Pol&wh-{V|>DEohQ#~xEGL6ROb(|#SAlg^3 zzqS3Sn4XBuRyqJ12dckgn+Mgm?l=ghX-;jXpN%vpJ-zNdjk)TQykWldFVCbmVY4Ce zQ2xhqcY{v(Qg%G`wcK_^0~bBhS6wrZqxr{Ezj9hD|7JJpXBZw__DAN<*aq^sr(gqL9`GU#*Pz`98KX zldVWS>lu6TU%&ELFSgmay|A3Y@)2D%flRJiFPm!b=ZFPs2EHQlDZwYsX7^HZIJqZ8 z19Y5A(S-yX_l-x_7M>(v<72C~Td!CACNiU%m?Xp)zi2B6o6wo+ON{Ltw&jD-=U*|F z=_dw_wwoUCFlymCgD;~tyujE|N?@#SjQx;lY6%8=G#~?)bU0weSHt6Wws;XywSj5{ zra7L;8vP%)FrBKOS#13-4B(kXJ=i^IP#P2bdZo$#)1iNejx9f3`X$Ldt?}>li<=Q5 zyA-sf=N_q~8HWQvz9Kul92+tbW8FnN*%mHoLQijyvm;6{p7^Y+F9{uS? zfp`VgvRr;j!oPj6HocLgRsAqXOco(lqRX(DmYr)0H_wX#w)egHD$;A)nDUhgJR5sW zVx!3{^$;Jwd#&zi<5)-WpU-L=nv-yrXE~yg1&5&Au}ii3q^Pl%Jse)2maYyl3eVN% zdO@ItRT$B|*uAmkJR)8k6BlRPv|h3F4L->D%UWm1O0Mh*Xpu$>w|ln6ou?Zn%UW#^ z7<9vHrY`2_mv*pP%;SOA&hS*H1T;t7S3j6p2+H7vBW{>B(uU_9OF1yNHL*&V?kN*KM*_4!L0Bp$F0IBq{&%YfrcYc<$-HMK7!F_0DzO!-{$yb~o zEvW84s!E>#!>^C=p^95Cy$sFj1|UcE)E~bKS)H@WNV3B!+8Y$hZSts4W+v0BvLNU@ z?MxRfoN3^FuM(Vm&6TNzKrvVkD-cwDvU>ZrhE^|Z6|bFX3qy284VeQKn-;{awu`}; z@;S>lf54&)TFY7IUkR*)6@w?OV`8TmL}VCF;03Z`1tT=vKHqlphq(S)|}D7W4NX z={pLR%}d;;X^Ck9zUkS}!dsQgNS|)~N^+C#2d{I*lBK-Zub#?|7NiP+cWDD#AisxNBI=eG%0=r;FNoIBoQ@6Z}0R5hd!c z9*aSfk446yOUUQ7CbTv|==MoHXp|yA+v4Zj-rxz;WI>UZGDx9g?e|m^0p9;Mh1SSH zpQLq$q3~>56O(a-*z~mL3TO|r4-Yju4APkCKJ5G%YmV(rGd|z6$mM5*@Y3a{gS5~N z0(14c_}f+eQoKy};1n(uRPHCFI`;ItMc&#{BP^9k_A7BRBtpOvq2K2&XZ&P;vVwh~9LPSD3J5^?6G%;1 z#P-u!w3_m!o$_vOe;q9JCe>s{6G%)SA|Z3L+ATQ;HO&0a-tV!3qK``)KuM+Yf3E-R zaOhkN$>h`U^w2ez_`GDCX^1#pBIl8rIZU#|z@S#%6Q^d30Q9Zugw-}Zqq;fPB9a=j zHOx??e*_1@$=9EO_9WUxTBAzLp^T>qj6k%1Y0nmp%q&AXL6?E{eb!PoL?~ z?7#+JV-`bTWi2+YN^WYG#Y;4zR-UVRoLr$EV%yV^5c8WA1!`qCG_RS)f~~OU$H9p4 zys}peC)oFe3mNRJC_|{E@H9}n0jQu*ii%71OrrU$RPe^ZBbq3oO2O&WidLC=@3EIqcLEw|rrjipQAc#vFh7*GBrW$?Cu+Ec~*$4)^~+tfv?SCUo| zY@X>)f3HOdbSy>dHiG?9cf$T}uauxfE=PnM3n+~@kS4;K(-xpvntRf}Zryg}%*<_( zEaHwV0HZ#AFHTHR*IN4A>i68h2o9!_UDV#}`7^NSuUBPUnx}5dizDLS0eL*y;idHZ zK7^HzQ&r0s$4_nFcd+&bg!O`ht)b;;9&?!!A<^@GuPRfhN6hwwCi*E>4I`MA%T3#m z7lELB?mtMLHGa^@7-y-}TXBsCjY8&XECL0xIU|jE+gGtC6I~etc@`TZVAh?YlKUs7 zVh84u@6HXjJ1(zl950#t0qF>&R(V5mOnWQYU6#hfR&Utf3)<*>LMAau zhWmbWa&TuR9lGb+NMZEOO+f=&oGWFV=!g|MS~<2hkP`wW$@Ex4ug+l>{GkRUD~P{4I>Ne@UUT0jSX}OzC_S;3(~ogSMf#4 zWv-Oqm>Q=c{~tlK{}e)KPy?7sB8I{5XRoTw!~d0=U$3(Mk^u zHO}1q&7Jjb{(<;6-MczmqVxfJA-1k}i^@UK!A1yuyX{eEdRP>i3vsO(&K#N32|SHk zP9$sJUNp0tN|U%$@SzsIcupf?h7d);9)gp zj1Dlo%_!w_B~SdaWxv|mAOS7nv| zS4Yvk9@6T&&-~a4>S<*3rrQ$HuUtt``j1Qfbk{aHNV-CvdxYe1UMo;NE4gSn0d{>W zRZR@9t^rrMi9K0~^}jyRW(k*WmJDjo1t8N&$bHj4jtYAdME**b0`X$HYDJmhXXo+y zX2AMH-hCFz5Gt2j4ctIz?cCqZzF5;^bFoRc-Vnvi-EwgOpVQw`MK(D+aJ*Z) zn3$d@b-}(-0tmzuOz-AT_W3<=F(&{5)N^8c7`MIBsjQsRdXG01=0`~-9B6e`{tCkW ziHUJ<9N6b_SpY9%v` zqI@IAOr)Zk##Yon2ea$of!eKoQ75dW^)MM_ul4eq*DSpDC^zNBSa^>`)u76E@#B~A z>wWD?82={ik?5WSj)Ox-mv4^^xZ{NV2dxQ$%ObMLq(XP-a(!*>SXYSdsSX9+zeK^& zEmr~~G}&Kkj{oAV*Gc0%L*o#A?Hf7&qfLqSXmf-t%r*mk6D0)GrUF_0>!pgdKuFRN z#bO5QA^VMJ5U2~7yeI3e^P{5TaAz_pW&KG-QeU2_*(H(h$MV$VBuy>PlDvC-IA8L= ziGI@5Ks>j!fRcF=V`@fd*vzK|m&}Ez&hOu|=lLzD&id&^s~YsYmNFtwTGD7?=YtXn z=#5M>Qrr5zLAgom+j4H!gQnRsWxz};rM`;Q;Q%vuXk{^y;3t|MMVCZ+s5EH?#z?sc z{V>tnuO8F`=_glNsMBWPf!;w>H{PX;IJ{Xm1%}>Y3?86R#)aJyaI#R3jOMQfJiJFc z^vmbQ*2q=rXK%%@(h?k|zC-Z#4TtMZjx@9f>lnfuWJVH1=J}dMiG@qVmgq+C0rNlb znFy#d#WD{D%MH5Eu4p2?84QhA{KgO_0x@V4An5_J9- zxoHRY^s}J|2YxVI@BGnNLVk*z(L5eStuIA4tH2kTu8@XOIH>9{lmc8Lgx}%=NS=3I zNwM$a!SvNS)N=ThFxxdaOGNpaJtbfr7;y0f21xSR`>?AtBoFu zWMXMpS)ZWi&Azva#u^PdMH&7(q?u#EGXB7Hyvux4;I%4GMZoX{TmOe+i3&X( z(`Y>XACL0AXZXA)LuOX<4tP>kIqxawa%ctb)9V#9bpE!45XnJPnp74~DE*9QRl?+L zWKKJSAWp)yHo-pu*jOVINZ4)o`=4cqD+ykPJ4eH<)?m|Q<;=SyAFrj=Nvjdx$Bu*{ z(^uX~0@daDyhZNWE94sWz!fcJ8D~BqwH8I_2eG;;4<)teG}wh9rb;y4gmHuKMO7hXDKl!h*IxS8R-=~x2*E=tZjak>Nc_`~H#n&434E|-O#e5 zaYo2SAh}*U6hD~h+QETIdx4^$DXQrkF3B<{7hX6fLg*hRaQ47Wko2h$4v}M&w z#``Fxf!0stB}ttDynOLbUK1MRkJg^xExUX>l}eOJDsc9|@rSXJzfr>_gWE($uz-xb z8Ri!3VvmAEk~B)B_&ylUHi{8`%l?>-XA$tdlzkLXC=vslUZuJU(bI7h{n>IRR}D5z z{wp`3DHw#lS=Bpr9DTXOS~uuO12Vhr;aO{TsT8dQCqylx*Hj=4%Pro*rZAvpFxzlx zZ>`~egRE}s?5B+G0bhJHug!Ng>yfe-oB?9ZPas?XuFL9zQ*NGCY`{+n(8($31l)b> z;)ws*zvArdxy$C%)^Oe0GKjCpG^0PtDkj?zIeRI$-&>X4vJ&}#_Pp$Y&->!#?=@uX z1#kS!_oRMU077dc>bIG%W-kEcQ+dYa+X{pT`sS)4=mJcuZ}FpmFK=}=CM_4uB!O7o z+JadXIK|`MPp*nkr+n5KSnY>!h_Q6jk-UW8uSnzqwhY`T=gAUx#CO1kqVq9S&AzoO zzqTKq5YIL)VW%!^#p#R&^Fh;X(Kw2|R#nxaL<}eZ7o~ms=!ev~NZ$V!xO@cX>_wIB z4TmWBCe`TmmT0yli>`VmHL!DBUqi#mrl`sTE*Q~`>IH5h@Ql-2s-?SkGRAngsPe|! zb@01!_ed(nbERMS3I~Mmpp;V7(9T))Pr`O>0r)euu5UZrOuLCraFF6XaB_cBIEuaK z5LIhMLj5Na%f3roxn;95tg0v zRY$uJOs)4P=RLo5*`Y}i=6*pRzF)*_gxkvzHJ0Z%;yf?ToG=?x;D2c0wvUe|LmNE0 zMVvGJBL5uGz@$D(s2MBz?2H^l?d?8Gq&)id-p}Oh;_pF-{;Ze!g9uNYdVUg?Qz|$D z4bY9!8ejlbJY}=MM7e3iq#DZr8EexEjYqsB4|TmZ>jf+V+U(VX;5+_W13HdX=>Hqd#ZoZR9{-m1J!y*eQyeEIqvQ4;fZ`b&`dd&Nr>`6EZ1kaw*Cqf_ z3H6)Y5YsAyOE;9=9}ze;^q?i^8`=uk9-j7YzUEK#ib`}?oMD`ZMNe%HOV-#m6BP-H z52QVtsd&P_C2yx`vlSNvBs5=X#$W5N1aZ|xXa6)lmhahm!KG;<)-fNsO6xLV5fP5L z#1>lt&vndcCt+4>f|RGrP;7DR|6pU8Wf6Vw3%2vJ>Q;X59F^mknfS zer>Q@*VQhU3q!fvm4=w;uKjmv<l)Jq9dNQIIQ`NZI1(O~HFH+PP3pcb?v z8YOG?*;+od} zVm$5CM6WR1r5^^BPQ{<;h29op85;pVNxLVn9y}z1zs+ zJq6%y3%+OI@lxFx9K2mv?kjTpvz_i3K89Hu9x9cMGkaD>2C$3+|`whw2;pI zA{b4+4RM&j?#+Ah%U6+x12WxaQX`4*gE`uVM&xPJ6-6k= zD0!gDAVK8{dN3h?*~UXXYUHsP6duMnp|FloIpMS-a?`qB zbsR5ey@P9;@O!cV3~9kNy>JR2lyUpk??kp8wjKewPmM*#(1fzqyNSjgzCz`6ZE5zH z*6vw|?H@ILSvvJE71h~fzz8OIXu&fbdI(%hmrv`Igq2c4HdCd5cQh&=2Kr>>WG=gd z(|OWTyA+H+^+&ds4W)A5rdea_3DqoIi>4;d`N-wUkr^y<+~}1t}W6S7|jOScPpeo6F2< zYngXr%|N|yw!y{vkNDGKW^5{rt2@iUwZKBsQ;K_Y(g>0^#iQn8k!_(sR}4>m-E(#< zsswN6)p|n3)GwB}VUZ~-8D!Mjl3~eVS%8=O18{D@*kW7^M7SzTSrwKOdufihghEC; z5}AW9iYfvB{`iClP_Nwn*6YId?F|Mbo>~Q206!sHx)Yo9g3fHOpNG~|yP=io%RLD^ zz;6wrpdrN(>L0&3&mbOgy8OSVGR5ijWHaWaSG`Wk|Cai_Medy_$IUlCU?xzZkjfY^ z?U9(%y!Di$Jx9f-n2D~a*l@n#e)Q(4FyvkX`cYzzt`y9S*r;Yulx$;`p*oI5&Agg7 zA%Sul_Q6*XQs;~A{RygbjZH4#n`*pjV=}~xj!F-iQs{beR|Czz`3^`(dRA9Qfi?Tr zCQi@_hK0uN>G4g=@KY$Kre(4$%;4> zTmJ*vfFb7`CoWs_p5W7lExVo_t~m#_3eDT^papF+&2SoZh))$tI56dY#Z zMuzsC{uuBBrVRC}tb%D%4@g{shu7wBIzzQWd+5Es;F(_^J2g=}WsYYLJ+9*w&d^kP zIw9w7gIvgSo^fwwr7%KgJ!WRt^*kh@39za&XD$%6Y3yb=;FkF8q4F|N)J@AwLLA4M z$!IG1(+aF~wsVdkW4j!&CNQj;rEsRH=Erk-y3H8O#~0GxYRy1bD@18@qrRT26X{L; zWp1$zKyBOyAYcdEAd011EW=&K(Ia*N*k@3?svii|Bf`Y%v*eUnT06M^@DU5uN}nm( z2zE8{^vhDDdH zCU3l`Mwk~AYD@Pk5WAAA0lk>gGRREnH;(&Lap6h?z=v7T6U*-;tn95|X0tRAoV4KC zC-h95BGH`mmLAPwnO0EcdiYvjl%>;XW&iZ~Lj%?@_p5QLjKAv9{^3KJw@a@$QOlFW zYw-FjBA{;@+>?i0+PqY5uA5i}^;^QUB-SGMD%lKOOV;b*h?*M`rUfKcRyv)()JwuQ5%=aoOhsN-N_K^`GtgxdS)@&U3vO3Dzq8a3lm(?SV6(@RX8e*-nVpp~%3mXPoEQUU7uvCCqLyaBFeGQQw4(MQ@ zUT-SR8B+2l(g)q*EmNl&c2J+QjmEszsH>r9sYur?`^g{@;}p>$!hV8#&J4Bkw@>?} zpO)1Sg4C}L=lqyHe=VDQ&1eN@XxyyOp(-WR{ymp^Qoh-@QvE_TT8ni$GTI*#V>rF7{mP{HoW!D@ZlN;$h;!b8S zAMAONa7+vc!ND@n4~zv=(=A_nZSm^?%Y)+>n85MObS|fqk6$CU6F$s02LnG6#(&h| zaeOPBW{7HEUMrXu6OU1Lf<=eMLWg__nCMg&|Afpwg}e^D@E3H|)Ld_KyLjN#cROGH z#j_5AB;(Fr)uhyl`2KbX>%)C%NDJg`8#fEM`Xw9A@ddFrM6^?AONC>}zfKfK@v~ty zZtKRoQl?<0a=yH(<m4V`MjjjmLcXu@==5Z3PXr+k6$6BCCQ4!x_1XF zoE|~t;DtGDe?7h5aMP!pOAwBY*H)?Ulbye#O?aQXbWV#AIw0LGNJXc1FdpcHz6Ff` zZP7eWUZ+=3Z_ey8#m8Q}B2N8qT71vAE&gNHaEaZbQlk}}Pmr~tXD*^!s1w=vKGD5_ zNeKMUBjCzkZye|W05uDj=EqTN=2BGNxb^%m1jhKSBDvVh&O>HJ9TBa(nr#!toK|U{J&=r_i8kBi&R$ITWf+9R zf@aSy!K#9bFA{A$;YjKjg~YAJUGFv}qqNN}D+-Zjysto`QEl2=toqN95_qml2VGnR z2erTF#6BxiS*LbYj7aIh?3ugN&6P}3d`O6y#8jj*KYQY_t*8gEbZ}b55_M*Sqa)hq zjt&H@;HQr!@`KDN!0&E(s(B4@lB==DN%QEPX`L=?nM;!}pm{%U1`V;uWoW`iD|<}J z8H#>3B_$e_;^7lieN4n`-`g{_=Le-& z@ZFaEEcy!mQ)P6N0*$|Tg>g$IP)R3;JDny|Gr83^^YPOjw0mfnT`nby$E^}k!@*zi z=QRkC>L!7BP}tU0PTQX`X#ap-(#uEk70?D@KKTs{CZ8bjEn@-7Ysp8lD$=o!k+ zLgTZv$<`;o_o)qvW_FJ=kHhRF%wYx1i>LDKi*65FYUwnF8X#?_DVa#ZirD{z79!t# z^k)@bXj+x+6yiN5c;@Ry$wsqamza)yf=+t!4QN7==9lb8=}-vZeQ0cpV$Kke9ZGJ8 zcnub~l)pNoB7=Upe{HRNa5lAxs@r~a^0(fB4KU!y8R3$#pp6|wl~d7wdRd^DD@tolb_^PIp!8)EYf^ybF}Lo7uVvu~y-=|siN4NZ5pH_x3SX_29bY>o&AeLy z9^uK4s)dcRu)sEjzAFh#rEjcaH*`f5h_tUphM7~IK8i$<4uXgirhI=&XZ7F*Q`;=v zD2+A2iOX_e4Yv+=xRF9KFP5nsg?Un6YbIuY83Rnfl{2o zm#|HzyfyD4Zb3*g56Wb}P9w$?^3BPM3AlcnJQE7sJ-SoL;qRQ^otC0z(kX&D@q1w`QeslDIYGHMt2_f5Y#Y#OV_Dc#EE{TA`kHm?uxuyEs;C(1e@_pq z`1xKX>}TNPx38un^bJbiKgmg@^_z4NiA5FQp0A{0YP=`)6m{+AOXLm_^m`6_%!ahr z_;cc9m);ic=~(??dY35NJ(6awwK{yauc?pS1zGdM%d%qk%yf3^J`8meI?+J{JJYch zM+vvecTF+wt@-oi@bKp@1KyqpUOZ;fEox{S1-x>}x^iT@NmF`wtX%S%Z@DmAV+@cE zMyWBgtspMhP^?7Eh{Wk__?PhzBw{ zGvMV!6J2#%nmHeRvRuBYR{OAyxF&}{Y;SEsaO2qM2*x>Llq7rw`zI;!nuT21?x~4J z1I3~i*4O5NU(4gvuaj)L**VlA$FHt->%z9iMo%ZsXUo1qn&NsXkMCKJu)99)A>4IFEv!Iy zb9}pWk%!p5XE8S`?D+vgL!@ocC=b#8>l`**Z>%SOtsTrao5|uSJ(Yg%Ij{65Q|`}@ zxYN&@=-;&bm~joqU9F3L|23ZifQ?w7Yq-oOM*P(+VQ&z66As$!3%$F#n~1|%-yg*a zPY(Hlk0w77fmwN7NwpZ9&exjSN8EvEX+0G*yf!G(rZt(KS3j)9m=&nT*1H1B@p5=! zxO>pCRmOz&F|roHsY0UV&4KjVIhT#LY+Yp~{?zM<>Iq?5I-8a{*o)5YQ0n!7M^c#6 zX*fGv)+8RVfIH>O71tF!@Ps*B&Jv(XI8dBF$h&zU)G$TMZNoK{0GwSA8E9NM{Y-J^ zu-P&Ak*K!1xl!=P!Vny{tKB20EUe3wRlxiK)Zj3YA;FPa_V71MZ_B`qFYT!xLYXCL zuKzuOZvbywzoWMwhND`)I8>9y0fYg%gS@#yOp~##F&9$sx8$#VnMXI|6bW_2nur2i z>GT!`X6ASCsI}3OXEeI0!(69xZM>4PxKzGEtStA@vzMrn+d_O1Hw-EiDk(s<9pJ?edhz7YgP-Qyj^c27n zhURkBsn}s46=hcxfJ)C1ib}dg1+}xvV0JiAXm5=Ud}IZmja~dc_DAhH>l`7rjy}j7 zKbu~YehH4q&yK?IhdQL3{i?>_)JtamLFA`dM)WI?*gUU-SE1J!?NS;Fm6{UdZ-M>v z<3rcFbpt-~@Tna3J^ zJ*RS)LC!o^ZsdVh{7#hJr}7hRVF=Eeyvz6UcwWx&EiF8YM&TsofYt(fAGcI-zm$RLYtS>kDk5&9X#@m3@@yn$NDa+p9sE!arffx?oM$lS~O_!0>!;(aCeusNbwNdrMNo;d(->*zP0i* zS?kK2xkmQhX9i)-b#IlY&Py%p@jz8buS(^|p;HF)*j^t=awjBAtvq_Yw zDo@s&zz+K$Dz$O8POb+e|M(#dCiDrylaOk1wzP42A?%s#w@Si(QhL~BGrdO#*SL!e z)_|p{ox)hM_-$pzR1BsHIBF;tvtB-0!ij*LJ?Z0WQE{7P#?5#|;uw0`zKR-pWI&bo z6^L`OE6E0h{nUyQZvP%s4qklb^4>miv1fZk%!awuCt&7);enOr&p6+)y!D_l#_}Jw z-jqT^gsMs1`8oot9%_17Ia9e5Ru(>M?W-eXJuPM9n468ybQm$3g}ojmGb)U!6m;D`KhZPivv$erAVteQF-On5!zcwoduz)ij zKCKzW7czQ$$N%9ZG^f>NsMTTp)BMe5(;MK7RG}RM;i@$NK~v5#h*PC)T0mM19rd0$ z#0cO2uwVxOJ;HhRFdk}_y}MZJuD-~Ge_|1n6Q!}qFNKOoaHi{rIimNuM?ATZAk^8d z?1}}7j0YrzXC186Jgo+WoaW&vjFy5DflHJ2stHC+XnoVt&Iom6M3}kddRp%aXJ zxz;K>bvqdLr$c#)ZwqVanQs1EPgDuk*w*s|ez!13rk zsYEqoW4qN}Iymp=iT<)M0Njw9#5~y_-gW90?)aAG zA?*dRZPYXgfP?49vo?UZ~{JXa|mDg_KZH=}@0%v;%H*=RjN?n#gLB0aicc?w8gQ(%te+O%;=XgE2d-55nh&ro@agFdH{ zEsx)3vGb#$JmW)7nEBhZONy7(1j}*LvnUeCZ02+MjY;UIdf9r8NZwGVOlI7|=Lg{@ zZd%Dp-e24M%NcJJ58kQP69Wm>XCBU`q3;>MUBPdxNuD#ZU-RhaYnd!$4q4lONEDaE z@7FdpB)DDgbw|b@$$aLkH+$?`O)V6T{qs~+G^#Dm8)MV|U}tn=VkZ1Wv05-X7pNxv zz2)`9HWf<&z2t$k#W18fEups?I6{rcc3_p<7W>~S{h!>mOv^zRbzF~Hl|x&lp7#V`+o$}YodY-8++5RI zpobiGwH{C#89tA0Djtc6ZC$>Dl%V$gx#FPd>W~j17Fmp89ln^cDV3)(E4L%BBi3YN z$|%D2r9Y%;uIb9!5bp^Sqi?-}@b^a@mSN}P<_~yon zwiHuyh_2p~;&@9%Pj-L3yG-ZMKpw61Xtx@nfAkqJ-LHY6EjRatj6aQ~=i}M-Yfoq( zK9_X8&*@*7e7J>(TSQZi@7J#8`T(*PbEn=QRqVo+k?1AYs;5)ITOO55l=qE!jdmm2 z?GZuQfA1JU9`q6hluiqk`r$hhz$BG@-_5T@+iEd0T_kfP69^$t++Z`;XP*ZTXS>j6 z-!Dh+mRFvko3UHb6yGR7lwbeCv-N)ez#LkpRMWw!+bxml`wTUPtaeC3k>bgG$x&o1 z@ZVc&jw4*K%f4xe=lA@jp)@ze=SqR9jlLQp%`j z6HZ6aEMgmc#fhHqFB7N)FWI2QH~--Jz@Nt(Q7?^04MhvQ8@g;uZ2jRE4*nBdG*Ua) zC5FDy+Id%vI_4mkN~EAvR#jPXTnh!G$I$Qo zF-DH;IK@6*m|L0Y8DSC!m@{r&Hhpw!SkeVo`zo>EDZ}eyjrFvcfzE0wjw8|s^{Lu6 zFYOA$@i>C+Hi%Xvd*QHtMc^F6712A3_S!~LxM;V>2sp&A@}xHejY`xD7#2OrXE5@P z3E5NtE`pLjHN>!3OHCtM1zAOKJtUVUeV~FvsKIU1qCxUlbdP^MQ}4>^i`X6##KH4U zL_}MIH5<)673Lav!SZ7&J8D}Du65MW!$%1N=Ux#!!v^80+awkO z9_et6^B#hMGi`6qh(6ng%-Z_C;}2M}QIDy(8eBwou1lRyIEs4B226hEs`aR_zVzCE zO#ay5P{sC@2`hjxIB18txZt*ha4!l_vfr>&|Va}AB9OUL1#$Er3o)ryBn zHqQE;B<=i@eEPm@vA6X(c;h`OjEJK%k5z0FpG-4+Pw00CxmTM?^a){m{cz!ZZY^8# zlYYk1<7q41&T7x)36)|uGi&~IkW#5~#IuvhpL}=CeZM!y1HW*l2Y|K6dXUA9esNS6 z4x#mr8i0+z-fJ`9Y+6Hbi-J}qrqvU#n}gZCZ*^``PNYpov5t)*cFolt#YlL%d;4?$~phare!molGVpwpS*K@9GNc^p8Y&oBUK+){_CbO5wn~-ct|# zWot%s!qUCaO@LP>2>yqVO_U)E?$uw))$(4oB+JT*gu1u(U14*Q>$u?i{A33zANUYL z#Xe7KSp3F#MtJwDh7FKTRuI(FmDD4YDL24p`oO}TpS+lPefosGFF$It<55I}U-OXt zC8=0S-tSh2r1cp1MQ8@Te`nVyIn9#f5vrJ|jnn%ruPh*p-aI11qx>2tfTer<3(c)p zQY8GkJ8u=py>NsakwgK9NYP}zjq zA{(6eV#ufRwrZ9R+!w^Nrp3P1Apa;)-u7NtFSsAQ-whnG(XsqFz(}q05CL46Kw-*T zCvqXy+2%-{{Jy!}{ID;WDy7ea{TNU#8rdl3RG+6)xvVH(Q%_F@2an?#9$b{S)`Fq$ z;u&t>G2`pkj7v!!u4P!D52%vKVZmDZ?VbZ0Qo!7tptFp^uWEgy8fm5;`_t=TTPRCf z?EXa-3hteAB%=G%x|~4Ne#)4?jucefz?6ZTDP#1MjTUz8g=LmcY1?HRlGQ1fZLSsR z?#Dw?GDY~#DOj8qwU50G_x$PajDw`BDc}0<<7!5)C$AaU2(y;pF9eec)1ZKC(0JdI z)p4NMw+C4&Y{hQ*5d1}eki_KnPsUG2%4R&NS4N77#_5(5^ntx8e-N5eK9Cc8*mJo* z*;Dx-!Smxpz0rs}{#aDh+fg9Hvnh+PWhnVD&=sXG%f6=G$uJd{ev@0`cj%A2gTw?e zYuql6)y>WpeW+1Rw&rYqi&1xS%)?&cqGctQydN&BeXwXx`{yMxZ#uR$e&b{Bx6nrR zDEd_CPi*g=lnj^86rjL7qZ=Sp5l*D{!3ac8QNKk_bD@CMp-7u7#3xkSsHTb_go!{w z1=nkiEq+taP>I%GFG3@L()Si>zR3lha4Ev@pSItWi}cYCnPLB<%?MriU}WyClk(@P z%VFI9Bg1u6ao2?y)uS3kaX2?aVu!?h|KBg$AvX1!h%AAyf8v2NTi z98(t3Bohz0n1|dLYJj(>9=3vQYoW&8ok3{hM}d`I$zC#WY53KCPjIw71HDh@RM{T& zjJ&mBQQ{{)kXsqieQs1A(vTNizr>5FhuF2bX2SKnAsGyor9Od|?fovPW%?E$1|9fs zF|&(+{m>VRTEU=Tn;>Xvg^kVIGQW8C06rMgv8TC%K!=-W!f1)ds*&drbm7{Z64JaX zrc;Hbpr!vi!Pb!`TAx|DdkGRtTf5Ekm9fT{#0wS%jp<+~NF?!K-|Qe+7q>J$zWzup z!oiT~cc-i#^y_m^a-bJ>U;uDOIA3@D>Omr_xz9uP+SzA=Ns9Di1ca|@wI@NPE!6Hg z;WQlXnAr?)4Jk!kdFTAmm?-65yZ5=f5iiqQq_i_}U5o@asp$)5xk}W9HwACpB&-BT zBSLQ*6ZQ=LC4D4LW1}0AHF2~1eAO#b(>Y(C6Zn>YZl`8MvOyZAU%2zW`TKy68TZ+O zG3d+?Tws64)Jhj{W=tRDN3}r$e~;8T_{Ypq2S3z0p9m{RmeT2?Omu6h#^J2f((NMK zl9B#t?VBeT<3>GT#Sl+jj?K{8!p6lo`IJ^JGZ+LK%i{_e^nx5D&tP>0!=bjtiAzTb zpu00R<4)7r6Re$gT-Nc)od^XtyC35Vm5Vg>P3+1b4Oxc7`M`fEj%)tOf|yOe{heAq zOE}=*`7iP8GUtVfKK^p4f?o|652_NI$LH>NKd-F}vVY~0%X^r?TWN2~kt;F!!~OSD z-=gem_R%NZ779!50FM^IdMd~yO?J#jG}t} z+*48n8I%J_7w}XO;c?1h#VhPpM?%8CGn8|&Ui>k+jx+sM8SwimZ!T{{gXjRq*PuHA zRq6am26%(+W}-cr%M`-X1{I}j!ETEF36&Naw$E!x4TnF znEFfM{D`F*Um73 z1dl@{nN>Wp9!eL_@p1K>z53s?a|tknrT}%8qcH#fEC9X9%DcZfznn~fKVxY_78TqM zw~v?wiE8c}WK-Q@1^~^IGiAbacM&H)8!VycD{RmkFvx28LhcYczP?}5&0Yai19lsF zb2jMwT8$wQ7%q*t137ECIBhHnV8Uv99axv}B{k)BwvqurwA(Z_gBz3p7jDpWXZ5(>fGRLpFFE$+O?fr z#@w6q#dIA-cQmhqu`F%2t8wGj!{ocJF+r&4>YpGO%f_4CbAW^$P00KxDpsSiNqIQm z`$w3CVM_O9hoM)94Hbbk5=OrnO&IHu+c%e1mr>*>eYvxM9?$Y0Sk<}^sQNbDab$UU&I6M1&e(`29~BS;LRV^w@$-Lo12)?-)a?) z#BaWcY0}OruAs-$H~(B5>NbL36#jeY@l6>K6OBOUNQTtab?K3mvBH^@M08k{eW&q? zYws8+>_UdQUAEpZOpUS5ZslCywdl7gR!y4iT?BhW@99G>P8R21XWln)oWY5#p78aO z(IW5Uz%G>tls(#!Kpr@NNhZp2Sd>i@4tZQZWSg2r=Q=D(J(_I5kxe3St+7ZAB{%ZfN0wZf5WAi>(zh+jH;tm_~?cRO?-i^*fh( zNh@7^g3e}};jR7#p6N@%Xz8L*2{Eti;Dh+J*W2vo>&I4aM+oN3>mU& z{y7xA+kT2CL715P>v=Anwn8Gj;G5K`Z}ewV>9(|X)7{?FnB;1Ztkya8kj1Vplx9#$ zK^UXgIUI?g1I|GDj>cyA+)Ohs#_#k|LH26Z`VAjNEf=E$GJ$sy(heZ6O#%-_#Z1Lj z;07-qW#KN{G|KWLyAEofiPXWq8b#69bFA@IUSmpF^?L)iK85JhCaE0B4f- znd1YVzW0rtQ#fdwnGRq70$a%Q;@0u>5ApZNDNQhd!{&(~m)O{lS-v@HA+JjSP=inw zpQ0m9^BW5VFRxvxk|zy+@Yee^MZ~SPxWW(6r{TBO<&L@d?_|PitqU|q!pWQ5O-Mtm4tPYruud%~JEA zx5HII{%iU@m~(dw+(~6}Y`gE{6kV&nMRBB3-mR5MDB^cjZlh7;#?4U*Egly=W&iC; z#~Z?6-dp^+VT>m8bY~=uD|8zz#4v|q!}-7rE!dI&E*ek(naVqyXy{hcbAH!D49=k$DiR$+!?b{=@ZsNlx;lWzEOqoK)2 z;j>Q57yo^X0l$Q5C56PoAho5t?yC)}a;e&vcL=T@z-FhEDDZ7tm0_$n=k^G|v4DIT z_z{-TQk3nfhc_?JS)B*Wks@B7Csfy~m~X&_orv2(^1l-g=?~8n*yCuU_ZfE`@%rQg z1Vl8(uH|Rya~Qt5cD+dvLX&r`$}?M5n@#TOV64_}d$T7<(Cc72V|O}+F+yPI$DjXo z?&TiqIX7v55Vny(%p)bI&`wm&UKSo6=lnAQ9=7<%5YiesV7}M3;6To{+#eqTTh-8o zb4aEmX-~WLL7)Ra9C@jlB2^P-KmfI3E(Esfd*@3`)R8Q(=D6tjeedoy9^=eBUi*!L z_0ZgOx}N00L^iW>HR9q86Z13eNJHe*SA$j!nst8i(L`D~GO-SzQ9zpZ<_uP2$2aZW z0WFlWQrL_>Kv8le5gjC?D@da9QQ(ta|ORc6~ z{n5L(b#|hA%DLSFxi6m~)JwO|3OpUp6n;sFSzXzU%e0kuHhy#t)}?4wjN;|l;v{?N z-fseLjxrLjH!eI2YpT3N3@08!)!Zk1QK}tRRXVC8tkG9nax@DUJ6JpTZN=ov2)&F8 zxrF2UoyoX89qcJRd&lkHwr+wdDEkO zaOT!m$^?>2p!j=StJn-1Iq!B{C)zryb5wA+$SF0`bMpj8WPMbuT%cphsapEI>lib^ z!Rd#k^YHb&t{r@va9EbHJbr{{DBQ{h@nAI7MmIonoPgU`NB8v*Y|WgJ{7W!tOXV^T zYvDO(Bxor`m!*QV3N4MMJ&=Fbwk)SADZ9BOu&XH@kw`(IqnIaJ?RUH$r7fsIw??WB zuT1OJA711^?WKDSPcgPJ-gnn~K{L60tfF32;R`384p)Ri zV7OKue^)uCaUsRgISMVzAEOJhnecou38)1*VZ!#~Js+FbRh2*kKh%x%X zVJ-#s*w}^njYbEvKp`0kkRruXomDSk=z9h`2x@x@dJMIV4;iZ;_{Uia`?)Y<03Sv4ESXt?+Xucc{HR_HkFE9S$ z@kY|CZvPFYaGSizFCI!l=Cts|i1vC&Wv6dxMz*4bQnoMgvW}`Q=O>rmqe)N58d0iw z4YTW83Mu5*i3&V3MGQYxPVDkK9SFqk$yL?9;H$+Phmvbi6_w%lZjfg8U|`+5m>bOW zzuz+k`4(v=i5x+KiDCDH6a3z6SJ`0p1+MT@-*5)G{ecrgLB@AHu)>fRVT*`G<|$Oe z5IMv_lH)C*X8pKqNRIGX+@4v<`fKgW<1e%I;%Xs2`D!bt9E1CoKa2z^^_sblmxuT) zGpfbQodEUIc_X_k7jn;$FFOR})|&!s9SwrD3%2R$=ixtP!C{QlBRqb27(K(yUq%MP z6qyr?P#!8pX_<5BZG1G9F~djbpOD0Y-5$1kM0g!|w8N2m#-3A<%n7@qX~oVqn{>7k z{!y1UXv4F8)EhkiU?m(JZe<9OrCIY*W`1>l29@{G%GU1pjn7PXwJ0Q;TB zjIx;Mrcx!H@#`$|(pmcP;eNbN5`ZoHl}VL{M@IXT>*qG(>p!$hIWpQwGoN7$&Ncq_A-{52u3z4De$snQR7mwXL zyNRHWs$wLSz`*3QY#EQz(tU2j(lR0MdRHexv+C!c_3`wD zsPga=4cdPDDfiH}-s%MKeCkM#lO(N^pT zwE(ZIyEB>?#%S7lON}hSn)a@=bymGvBRvtNBBm@OMzYO^0>1gF_2iPo=Ox=zb2f~C zSazG~1*zC*EW7#4*DkhHWo|p5<;{HQSia0+^{fDY1I|f}B}fjPc^(7h)S(Tjg%$&q z_7m;(u-c&vj@+B_v(N5N`-#qSTKj^z3rTQD02nYphN^vo@kQXPiWt2*bqERKn^Pd^ z^AI*=N!~zNEYDF+g#UaQmWV?f%)a zLx&NNC)me-jtZP%32&Vi*3PeIHx6F6W#NoT!HvkPOwcgbI5r_C`kU9k)E2kIm_? zm2fz`35Kra-N=p*KyCnSb52f~-7TN>8gWk}PHt_mB9T_j<34O~dYKOi_Y zDkumBep4>Xzls(DO^jMGi?E!%QjaHf+yCMpE4BUps0e_NuD@iQE;dFDM-27taY2J!kHj9v*PPc<1q;5}$8pb9Y7^*wVi*wqZ2$jf5TG`otJ#1Ao;0j-w zRaoCS9Kd-n&m6t#o+gMons;AquFipUl{rBR=YfO{u>Q7*2L?sB7;u}apiZoK8RN?L z+-Ueg{}S#rRJZ8+zL3`EPFI+C1NfN6PoRD|Wr z3=bFL!pa(?%wO@bNzj3sj8Yjiz6X)j=vO-f^^V!F$}yQGAofNPuUBBJO^>Pdb7n5kuX6G!n0f+w?KC8%>!xb?fj1zb%6h7mo$*qBbZvz*aibs-=Nkky=)o^t zFNm?}m#0Jg_|eT>OZ$-Ji&y-;oF1f-DHn!syW)L8Fc5wrXm>g_QQLpya9S>VE4&GI zEoVhh9LOWlLQDvhPEno#q>r>KFCrn{Xr9#1S2PGR-B6 zMVla=E!%FvA>E2#o_IrZUyf9o2Tpq4KM>!%%Iu4rPJc-5C(QL?XXj|$%U|r-x>|Q1 zmY{#1uhj!8gs|CRB@%0-c%JIXjrl`sKHH|YF_Siho9L(0>UnSlhOkaB?Dj988}4*(UUp>73}^GVV=YeoN?0Ag3c}5a4X!$wm_4hKc&QXD zeEWU2Anm-&l#%h!3H%tf#yV0_-JvwZa$+^5C2CBdcxj>QbRAfCBR2Te-pYPI5VqYcL*%F^*vN0RKf-LnOu z2Vruyv?*#!N={41Wk^^Kz|Gk;PU>aj&&$a%mAGT_U`XN`Ln zqbQjv19%Yq>lVfH@!)X@oY+}3*xtIEA7)Tr3#)G5TKtHQf7)$B>*%2lj>&A={+xa= zSg*t!eL$}ot(V?sQjW+LSQjAnt0^^(IMw?$s;446k2o+tttDQig`9}kr0dN%#gvlX z_~wAot%r-#;rmgR^k=z9H>*zscR24>3^Dr6j5lo2i!ydE9V{a5%4dRi`(=BK)+^7= zo6lM9U{JAYoYULVFc|K{4TG8=ua0#ojCiTr==j$w64Sl15My*vyL=6750nEbfO=)k zZ**lSS27%n@d|3Jn+hfgMt}|-m+77TV-SM`&hg_?%05YVPXJiZ%rzMM*GD&>b*j`7 zE7e=w*{$W(Y3&%ip`7H)sa6G^=6DZ{hiG)pQv}FnijovdG zS*e$rd3;h4p6wJi9XorAIhSUtRfODGWY7c;d$6H@MD$Ryk+q$l@0x|J`qqbz?cD%$ zbgr>5^7ubC1_pxR^eyEL^R1#xCtvO47iL&Nwq0!^a+P^oqo zx~k0T)P9Sv#~XLnpwBh7I#`1CluLl=BGrqrJDXbG-L(2+wQb9@_`054bL^K73M}ox zmjWta8g0Mdj10XC1}gYu!C;4mhyWw2G< zbcms7nQ`i+IHQD#G+Y|oG~RuMS;6IdBiIF!G=|AC4Y(SRJsAQ#j-3U-PsVl4h%w#C zv!LoHp4=H-!;{?u|Io#q!;nlaiJhiD`Rdv=d|*X{{HVtwg3QR)^U(f4Mxuj}B}rT3 zUy_5wD36mE*HeAdUGb2LcarK-&3!i6X-0~=IJ^u3X)Iq2mxlMbTG`MuV3SDC_`k%? zOYKpmG}wL0%vGwfWh)W8hba=LV#%hesbi<12-2TVW)lYy{SR_xEMwTq+tguIub|QyYd~aab6o$KKjDHw1iDDQf#T!wzwMashaY<~S-h@o+wd2SPT3{?qYSai-r6SkiO zhsunpy^@ta$IyRvwJ^Z6Sx{Yav)5x5j&sRVuRdfl=XRT|pctF~#Pgg?>`}7BB z$+;J=XFWp?7?`G8c_b-suBRjuRoqxvs$eHX z#@pxaQa7OH8T`05dsIjPBRzv~s>Rk$~+$QJ_EHn=}b>i#SLYqpNYK$Hr6M8`f1HJ;021+WNXdX9JHwe<$QJF zo1;r5b3ggwyjnjpi%Fduo;wH)Qv8)hEa(^t5Ps7kBURs$eCJkXGiKDhG80W7Hg!+~7s)m28|dgxeQie>djHE4x*w*~^f%1& zxphkmIzgQ#!+p2BGioGJ_xH`kVrtpArca`;W^uf1x%WS53ml;9tGGOCKZ=@lrK%9} z^s@auq7r%rG-YxA7BXvq$V9?F(tw}4laHLKxBsA%t)Q$48DAfR@{D;^qW}0q&8cdn z{xcpzd$VlMDw%fa8&I3Hp>LDeB&_rJiGq85z;gK8q1wD1{!%?T)yK?$m772*?f0`Y z1vzByE%~2JZ8i9^L9N0iq9v5`XS?+sYp@%aF@INPj42MQSg{U;u!sI--#$9?HMQ}_ z%67`L7pXqHP})L)Aj&-iJ}sKuN4DfIfN)_Ra6=wp3LJ+E#g^SoX{VD&Wy^_qgN0A zyX$wbh$n;dok}f9N)HeotH(?uZDdp6mg+H|1^%ugRst30@_3_{gRhqRsb>0bB_+Na z!TLky(7rE;j@!ZjD|avvl9`UirOC%?joCrDuVBB#^h-o-*D7xxE&E`DVIYjp$l}Y_ zYrd7SQ_Ov>X&@jG>%vYQYCwLV!F3;k5xkdEU)Q*1^ zDAP9$7%j8(yD9Q7S&^lMf~HmGrM0MrYTK3=QaWxZB>59g)Gn50M@r$!P!vwqvuJ!=My}TM)AaORp-u5;{h5p&+5EJ{_9xoSj3&m6=r)g)v5NuxmyzoVB4HI0i zmp2n@Pe%DA18u1j@SsyyjCU@h=vR``cO_xc)uua{|9D@uy&l~wzU1*K4CI$@FKJmN zM>*K}aSz0Po$>SSIsS&J3reU_Iw;I!$gnt^31dzjZMNCvn9%wi)SWvutv1>GuGTB< zZ}vLvkM_)_gfYyD(#2*;bj(QFE-#X>Jicd6vYj+ixY!KFUx6(0YyZ{$-}+Fs)Xn2c z>JgTyVcw%Xi`W|ucncb5=Z}vrm^!LSZ@;pj_KYX|Bc&|R`^IYfUGUT^J-3ctqz=o}MO&3qP zl^TKd#=0xUKs%OwCK!G>Nftlk4xp(?_3>Pj*BQ37rQ2Ta@-Aif?!cv0r*%5!n`XDZ z$8H-}X3Y|4;P{L+mMux^-h1>K_@l!_!_XYiTcwDR2MbGwQhnDLUtsGUCLhX`FMNeGK*)_8S1Sx+0E36&rYe{uNkdbM4( zolTG6seL3h^=5B-6Ljo+$B%S@{LiVS3ug}C_HHm=&I?kfr9ajk+!!$532`kzg=k&6}EH{2* zQQWt{P;vEHQ&|&8uH7ELr}+-)b)*V4fni`YTiU|#mu5teCpX21iXVT+^R}1EW`SUY zY}M1X6_ab25Cn#Oq79Gh6_RCx(;lU@?R2mS-RM5enU*v;oZ(7%XgaYQ%CR{8C1BRl z*893B|0|`_UypF(GqWb61KH5_!AZHBMpMRf@G{Yt-9SeP@0F?eCX4rGnMAS!Z#vTN zwPVjd5JkUZ4V*=d0=e>x$4XH+jUE<6FdLP;k{vs#*@r0RrMN}?^7g>(*SnM&eeEAB z`s}!FxrW{PJ{;vAwh%{0MaX1H-c%!CwECV(jt8$SJ=atPU)e+!O#AD8GbHv|vmP&a zK&?3vF z>glXDuA;fm!qpU2vUuwuI~GN>__c+h(Ku4cyfG1ARq&G~Z8ogEXr$8`nm5FlD&RA> z^#*I&H`An%#t{6uF`cPEKyaJ><~viY1PXNQ>$29{h!E=%~~z+Cc_kNIQDoBps!&tOPMN& z^YlC%PrEJ3qQp$9>riV!k2QkKUDol$7g+pn9uS)`h|-mqELQv(m4NY%h1c@C1LLUR zk!!);4EZ-1LZ&63(kIKAY)*Y1s2;Y-oBOb36EwZfw+s+Unu7uK9_8{PFJ}6bHym-V z4kCF;ax>k*&3AsPAk1hM)=bK}s7*W#ZJ4=T6Z_6xO|2%@O13;c;vQ`&lE3eBL%rk$%K!461i4`ub=PvU~7D}IdVruxfhKe72wQ!X4559#DTVO&px zKiivHx{XUlSeQDYPsp+7>8a3>hR9Uie(bWaC!}Wpq)#ZUG74|Nu*REw=MBMeB=Pss5bxn zB>_tigE+3&7=-h51h3Q^vB(o_kjRAe(K2;hSg|;YI%Wf}lG+I4YVi5|(>z7V9`pmK zKxI#TvOJ&k0=#AwFny!<^CvrS#Dy`hjw;6G<8_BL^>vkPs1l36m+(gFik4uJ3}$xY zD*%igZ1=?Y%HPIc4#B6Q8}1nN^i3fMPvqOS+JbKBAJ+qAZx`>{J*lsPEc~E0j;Dd^ zd%7gc1=s-dbkstaLQ`A{EhDhsy{F@huZcQm&kOKmh)TGmzk1hkMkWM6pi-wquodi^ zQDwENfxei`WtIdb9n@NpyIw^w2IFK)_u5LU*Z85Odgy&2NlX*t(Oeum{%+j3+vY%f z`gELckuzcMkrF)mQcPfycw??O`7X=D3Dm)Kihug`?4^Ze9ELN@wnJ#k2<5enna9~mM%u)Fiie-?IhDi{mq@h%>Xr!%E6+%4 zOuLI-xJaE?@iUa$Odp;tHIPs9#G^4*jUm%4H`M}uN1Y}9g;ADZA;dPHyXH&q+%#k( z)EOrRFacev?r9&!@+X(@CA4&uo8R|)DT{gV5|}R3J@q`BJH11IwWxRa9A}L-Ba&xl zoPPW0Fb36i;zbJD6<^OoRfCAcsKRj+deT)mF=zEpcnSNc$W!I;_S2%vhkjDwVsnJj zE4)am$^B{ueD^S1e*Ak-4HGnaL@)@1dH~pm*9!dvOSSSCzG^>cPTN+lV@1AzJ;+o} z>(a`!+DFWu5{m69_V!$To%kYxajE?d`%L5Pk@iC2VBXmM&l`t{j6Zvt0wsB=7YG?{ zCqj5`UxW|*D5s%*yw;vr|I*B%f;gHR8R}6pN^L1I(x|;&qN*U>c$IKofg$&E%Hg8t zb0sN){21ti5QHyMH|Mb;Db4#r;iQuBAUZIyRiXLSjt8aavZ0U%;+ChG$3}jTn_*wU>`2_|A$)J6*dcco=S~<%N4)pc5EWYk* z2J&wS#W;!4<=;j|e%k+D{d7Qbv$!`D2M;^v;NW@u3^D7EtE)vQ;|tBknPS-o#B*6HJLCu;-ceT;J8aaTjkO%!gS0ei7c|^XGCas2^cFPCKOihc!F3cs6_i9s{pDw*4 z;&X-MO9d(c|9v}H9D?&5C5ZEvqCE&gL(U>3h0(}wB-*OdOv2jJqsa#Jh}h5OlNM%! zq+sUM46?|ou0i@sXMeyTwlP}MJ2r$m#iGAnB2tmCGx%4@r3M$7)wSN@K37nNIAm=U z`Ky%?N_O6XG`eYqb#+TbmyeyxQO1gn_itFHZ)hjp1Qz5*3y35#!c89KEES6? z;Kj{<8x}65%k20O5Wa8a=y!AUh6IPWTp{w}`hCDH zupNwdY*}N1ZLJDPX~#@J!ay&(G`S7dH^9IkD+C2~vSj14ubPN&URB98kry08z z%IY^ySNs1S8v((_l?6>;2U8>c{lFWln00ASnkd50%SCA6rXXJpsjgtcj{iak?AT8K zDY);rohJw@qdKH_-g{4@P*N6DIx|r+u{$b#3UXz~bTsky=1%L9GW&5Nn|w+xyVn{d z?zmglrW-xRp%r}*+<$JA>)(XUJQhWG16he8oD zER-lwpTi_iJl$6xYTwm-M7=&(4d+tLq3tlPc>FDS{udRs-;`5gAMnqn!NGF>2rydx zY!G_V3Whe+uYR5mfU=KDoP zF-e`7h-7vHg8jyI^h~LgKgq`fiVpv297}d?;75+A$#!W+!Y;)tLgzhJg8!1mBA`nK z=E0{6AN4rG?)i8an~P<`WO?KcE$h=dc6=BZ=(*4uQgH-t^uf|eIv z#^rbp-MJ*a;?623YO4}Pg_XJK)-EOdd_U;2H}RdA*$a-za-ly#7-?ihpx24~7i{HX zi=raC{e3IH$M(B@$g|8$>*q3;!me*I&JsK}0ctmbHVf+LPfnQG7Wj+$NY1fp`4o$C zAD+8cdEBA-is>idB013HE(-29jsLx5S@C@yv@XTg)nC9lUqi+z>N^f(C0&Aa z+y~!M-uM3Vt-J2MuCrW+wfD21J+o)e{PxW3ImMLuhg;Q}zyxtjM^mo__%dBJZZeG_ zwG{Gg>pbum<0orfF@H$_;~X}%$^Q&N&?6n9GSrb-Yi2a5J1}`u%pSXS0EZzO|%nAcrbRIoV@B83;Ri1e+ zUaeqg4i$z}ef)?cS3zPwU-5v)JvCQQY`q0j^> zrynW;ZX;-kU2N*k?&^MSHTSzl?zO_F}RtMDHAhDQu@tiUS#`Y9+ zx^4f}`T*tbDuJ&2yL`4rEf;W*`0UivzUrQ|dZ4B@mD*O?aOdMcned0=1^#!)Y=^qK zlRVHDkRo_Z93We}Gt>O0sL(+0Z1Sph=@`RhnXS@F_k+wpvYjTV>SPMAua!v^1(#m) zb*0ZBl9GZ;G6|p!EfIdE|NNXjgi^MekVW) zJzJW=l#BtR0va^*UzqB9RA#7EmglZeTS#D<>w03cBFI>i|=vSub+SejD|Kn zcg~lYI7aAuku{|uh6H`S`{}p}rvW~fOY7j;A37Hd}0=aDHQ*{*?%8uNL(&#k=ru+jpKk*(_+KD!umzuGdw6B}JyheC` zTl7PEP#v>>S+5$)dGbe`T@!X4#d?CTLUU*VrcXpCtT)jraRPj!cpCv9?631(TD>=@ zO&D5=@Tc>+IUBz<0v$&w+2DnLm-MxU~Zu@h&4<8?t zfVc=sM`$KsLL!?%K^y(UHvLVt$iUBxsSIbJHW07C>c5j7B9((pjtr?_x2?gykb$F?txFuhR&d=5vgL%GsQ zKz%5;2pIkc-9&QiKwVuJ&kB{1*mf$S@byI@#HutEp!k4*_-~w!gbuA67dvdiiM=pB z(|M6i23`NrvM=YsaC4%h0yH61Aubwgm zx*d9fsyi}ifII8t-UvhtL9(^n%D5qh_E|)KxaTI%m8A_o8WQbLHlk)eO2P6X$Se2~&YECsIpQnp?SIjP)|qklu*N{y^d{I3s& zg@BAjM{8TU*>Hoe!QdIzDnK9_zuyvXx9O2K)%-pb z@y*2NYcFO~80wKh?wGtI3h9we{Y>@f?*r=z?ptz+U+>*_zY+!kY83@ijTQuz?Qnwj zT*8&sU{$WWbu2}68KSocB;6Xt3KMw}M*C|DF()gzU3X^U(63Gk{lbkjP`nM%vYo@H z7(5ccF@7D=`ST?I&*)18_dJ_FUUPG{Ab4N+Zv;tB#4|&i{SMJK;p_tOvTgWBV1>zk z(E$p=Gm;>Xlg0QBy@j_ZNHJS5Lr3DlP5{z7?~CjmFf--^OPU*^@0pH&b0eD}%r2i( zdh7_e(i44a@7AVk53CS>aGne~F*vqEY;#|GZBJ)yer&9_IO-kV{P-#yb0(DLm#Y`X zIbZVpgq_x8)``vm-_-AX`YX2{b>6Xwmm?7Xd`Oz3^_Y@fQU=Ou#4YKK4YTu|}6zjrIVuje?DG{yKC1ZRtE7-5&~vb=CL6fmacL$8F=)XlP6*Z*Kh!ijp<6 z4RhUo!Z4?{1+jE@a}4lFu{4}((P~+;L+13>stFl~)wT!X@j1?q`_xmg%ptb1&bN6b| zuAg>{CPlQ<_o0x@&=jF)5$}m1y9JJs`}5=fZ|I6^CAIMP8m+<-C_$rl9Juh{;d8uuv6&7LgFmgf5V8Mlk%0_@C;)`CSc{QC zdr$l1aR0UfKxU)Ny}Ds91q^^W(+I}z6haCEH<%3%QnlcLE;ySAzt0*-b-h>M3Jib( zPenR`e!ytud+X{=g$*e1&oEv<0?@Ck|3@+$Q9OWr_`gB6{K|UZ`=RqaxiJU;xkSeS z`VYzFy(hO18mM?0<0-=Yklc)Wa!rB&a{B~waDIC$5ONLxTGRbslRYz_I@ zn)knJ6*+jtWL0p0ZTkMzWA8;4;1!D*InoaX@slIvYQQM0wNie;oC|2fYnOxtK?Z<> zg2J0b4>|ZaWkN_b+M=%~-;axJ5f}Bpfr>JG6ui6&#YG;=xU$rmP8Wc9{sVBF^8{L> zkewHTEP~Qa=l~kvTy-&iUzD64V96Ygl>`APTaPV!0C;Le0Jht;%7&rM`RqEH{w@nZ zX%;|I2Dy*Wg3@CJD;^;G5ix`(VcjKh{xKY~ink~MXme{H9Yr6EkffC3PJ`3|S#GIp z37N^%Z@tBn=5h^;hsn(Z`@4KVfYN^6GF=@&N);Ik0iY8%nT;;u@61+O))HtOrx2^k92(u0tQh*Rve_;rL){0DJPMp!tytl0%#S7i`FgQVdzZsnAkN1RZJU{m1RvEFp(x2nYruSqV{f;HIsyrNSc3)S*6^dE>m= z*1_ohRPI|4Y5|*0&(dC?s2qK>Sw|U%{ivJW=yGq5k4lv*`zc=C|4LDUB+`PQjAQ(M@xtEjTgp0B!yF;@Dil?XOX-t~ z3pD(@ZrcQlUG7;yr!{!qEB4q`qK9np3BRAXTiqNpP($dyu|XW?JNhdwHM4+dC7*M| zn4NB1`T>FG2J$Wna#~>P7-(kMMTN}3msHj zhA&w?l;e?*dms{0b@3FyhPIeW^@>WlIZ=spl4Ap9%2g`-adrpjA|DLwa|fGh!oP;7 zO{WZ9e0>-ix4%wSEG2eGW{JL6^z9dL9BqFS~MC{G@%#?r}^L%0RcIDkL)+_ z(lx8Td`KagyYahlLV&45DAL%so00Wip=K}}`!~F_hQ>CtFM^9L9?$qFda}_?+dAc& zy2|mcK!oxEYnkp@J3l81Z^t+F z@8*en>&2PpxC5M-?0Uj`AJ_ts93yDEt(3P)y1us}UHI+KnH~eG6P@Zl*8IE48Afm5 zVi46^Dh$fcYEeP5ClYf&x&s-WKbY=<=}@`Rzt($vz!GN5yL~!3VQLJ7l>xdAKaxkH}_@ zqa0x#wmXF1M`q-ybDp*U=kN+{wv6A|43LpU0e}_neMH74Qvl6)-?Dl=7=-y}hv)Uz zMf_+%H)eFKXb(F0-$VSXF0E!qWB@HVYK&0--XRcMzWqCrT{W}-zXRY7S&y80h&Xc= z014K$f5DUuz#vog`?SK}=cy+_kJxvfYSQt+V-4ue5_^;L8 zf`4hi5@CMtvOw0X1ug^pK04_V7%(K>%LO^|4_g5KkO6aI*B$*Cb@@#S18~p*sR73a zG5z7@{k72}6e7fM*g>-F-vjE)0Durj39<2CM;Fp1JP;rbU0CH0#Q_4|gA5P>eIDA) z2jT>5i-7+(gdN_^@?(^gRJbhOl_Ao?ikd;DCWu0cs^^XJ#PSeO$VRbd=8k6$ecF5L zw>4Qd?R(JrW9qoo^`Nn`X)keFR>4@|t7o@il&V9o1)+m6I_P5oX%0NRx+uy4e=$u7 z-4J9Hh|!Fn=opyqVad(&q=bq#_0V_&PklXgTH+X0wfxSszCoxLqWxsE;eLK_@lYop_ zkGmu!06Soi8p!gD{pnpbR=KSG>iSDEUnBhK{y*pb>vV=$AgzZ_5CT8W*B~XoX{jdn zA&9~qm%ImhkuMhKRP zFoRO22-lwJO2?4lS93X zJsMZ@dxv2~{CqdZSq`mI(fE_X(T?;)&?(+<5s>ps$1FML{P-KkBY}Aa+$r8|D@nFd z!$1XZA^+enXS^%=ob4OAnkiWy6u7Ft^c}RR?uFOA(UFm5#-cA9YzaUw1a&M7mGlow zXb<_)WW1+obcLjqd_)_I>PhXC%i=*g@&%rVx5Ezs>F6r~f^ zw-v^}cga6YHU#M*2pq{&f%diDvEgSI6Zc*UfM@ayI>{M+Xs&!qJ?5GI76elr?%})3 zXv5!tvI8cRl78}5D^!(*G~Mlowl?B`EuDVsE#9o9m5b6znO@P;NUUR$|9n%dh=@_a z3H${KZJAVlp}EiGtGgZpB94=I8Ks1<@ues2YAUebn%T|*m+fZhvf*^dp=yRV0zW)L zhP<*!6uyIUCr-I(udcUA9S){MS>VF(Scx-q?&-819x8`g-%`V17+kjfj~9FN0u-7D zWjkig`Tj?9l?lj~kLYX6)nL(oh+#XsOaMj4@RIBX6Z^k}H0v2?7h1T?JnL%kUt8D% z7NO)8r_j7=ao8m}deU>Ib%*q+vO)KIooHNQcJ!>yJv3X|Q~UX_n_{LFT~$VqlyrNU z?>rlYRrTFnVICP1swmfhFZViOXQ;S4tC3I1n%q*_>w$ACX*xfi^zd}|Of>dh<-Q<+ROXL4Bj*jGXlLYZ2gT2v3{%JmXMKT z_FNE4?{Ml@+up*7mYM6WSRUVXY(h{T8k<6+@6P4!3uS{~iYv8h_C4trYe}L+Xn~~M z&th+^T3-kG>#7)i!nNOtsa;zhS61q9^kKNBoVti#TUQSxNz9)K>u9gcm${<37 zy5Zw7a}7LIu&s{rvUwlP+vl7}ejHV^--m&p^TO`q8n- zk1;u>_KusL8k?+`0{LWUKV~Tx~+#zS|N=e-VWSHg(R@Ksh%AAuqTMUkp~A$)uy zRL^iusCz2&Xm`ttwD?}gL=}|6*0vK%I+;-WtztiG74{u?#D1kHe(cm| z%hDem_+sxpt$k$^AYg2(v|k)kQMjKIy)Kbx zNL*^={nj!(N_IdTECq+QQG4Yo89G>kmuVD|(EdF>$C+>Vq?0Vq02iv7AzXY#qzc|Dt<@WPZ6TrvB5 z6yA=5ae10jiG%K4FPcZPo>a2ANBP@LLdeS5sFq~|ITLUzKRibZ`p2vJ2tBms&m&zv zTF~@vl&<5x;c_!F6Cy1X4{BYXd)xOa1x6+UvA4mKIl?RDxwbXa)Dc~wIKCLOroFT% z|AGgg-{#OZ0eLGld3X6De`9{@m3oU?6tUA+H8mGKv$tdO?#(z~DasztPtMA!*dad= zuG?m?9@YUw&S{nQgJUWqKQGbivgta3gIVgpV(Z%sUpAk{w@xu#*?t(Ph50Nk#dy-= z13AZX-BqWoQ8m1{`~m?P!5|JxB34FA)|HRTit|;IpB|S9yhqxIMmXPV=ChHjb{u0U z#pM?^Yb7IA(X|h%tHypN2leLEP8@dq8Ez)4^&p=DQq^g%`m+3yJxNN@fvu1)hASPV znRHsw=bru<(=WAxwz1EDFx{50gD-0@iXDP4*^-KIR?YF{_+1xda7BkSoPF7IqIzE^ z2N`m#8FekvsCl#HfznTLj8Et*+k^8$By%Z2$~dZ!G67fg0A9-|(+FGW_$c@VEAEB7Am~T2}%$!qx*NgHP*$v=5kZ?7X zVmBAMei@^=VLa=;;aJuIdFw1rlZ~&Oj#_GxyxPSg7>d|p!_>Y^Zm31BXi@~>fzz@n z6&3>jWvkM!z3YfWtzem@H7TXs7?$=~C9|Fj$K?A2N_ZE_cHc(4QC%r10drq^8M&~P z&8F`Ga&nW_cH>zF*#)RSF1^r5%T4BY;=_&W`JSj%&VNrHC3ZKVM2TpOd6w365p$;G zy~ZK7-@wWHg%xd{I=K$VWwPpS;()IJ`8A<(f6Ral+NZ5oIR;NxInq~}t&5@g+K$|; zNYj6;A(ZIJlGE25t`F6ea9ysh*He!=2q@dgFhiew4t9*NOO21qFkEJIX4A;*($own zytAbX#qpC53wzmFM$a1@E&VC?NpaqwBc{H047obuGMlxW?H(k3VlN&>Uu%Hu@#>P| z(0R4u;*!=Sihyu_$D8Z)O2r&4%a%s5LPtk}kvdY_QFGP*Ob#AHJ)#Ghz9w@FO zE4`z>GK01u2i=7pqXyw(Y?_oITnw-CR!hyvNEr^lj&^lhV*nAK<+zCQ zR@#024$sS>l39QDhFbAHvt__DimVcPwJE>9!Zt3dJYF zaw||v8Xb`hh?zqc%*k&#C?MQV<+mg^aAvi`0-BP~$i)c)g@;i)GkwiOH_K5=(KtDY z-?RuD-SkZ_=hz3vn9-;))$MU(nDL_F;XyTv8RBj-;^F-%+vRSkCLqtj%nlOov zYFBCEah5e< z4wpu?;e||8`RCKLo%+S;)(Ntl>ov&T+&&K7&94=RsnLs{x%EtJU6I*V*|xq!>lshMqyF3pjdfGi659ZQi%vE@*~*`wG*x(RwB?f1ngG<{OsLCZ2(j z0pW~%K)cAC&DC$ z>I+dwVeZ7U$Q(Ohxlj@zw>;F5qenCms!A9BzMh)Z+0X6vX&|L7=27 z&B)s^bp%47*xP-DHaL6MPrPRBQ@!-worSmkNfuuVF)X$73#<}*HW6BP`ebY)KA+g7 zg)7J%ZWTD%w5Zz3qi86}(JraH=VedAN>Oh2W7ZgCCU&d>vdFmtkBd{zVHEQ$UrwvY z*}`fkk(jj?`Yz^~eRpEW;(>g8;@_R*Z+Hi0i^?}6Qa9VOkb;tQASC>JVq&14TYFW;z0rrh=~G13ApqIg(etLcJ{S3_euH#>Qc@;kIWnBR*KpS0Y(+sI z!`MWTojl}QcJD+s-`X2*no|3SUN(y;ArEHHq;ubtz4)NJ^JOYxh3L95vW+yG-ScXc ze)>L>7z5023NAgDikgmvrf*(AFD?vApWZS%<|xFbd|+NK^e`CJm2Cme97foqU({R+ zgnyh5qY#t|SVAZ*wLRT1Oe~D~xP>;!-|f#Pq)iZWJ=Wxad@8*j2-ScuSHp&k%}13l zPK#q_#2`pMW<6iqhw&ZS1>R{IYTc>(%jPRWknDUuClZ6JP1V+`gj0*HZ`Bi8k}*2= zeIjfIWFM?@osR0qh(EBZ=%EQxkh9jkE}ubN++-)Z)BxA#NykZXM3^Y!_&Y8{K7j;m zx|*T6=LUpy|Iw=)@v^QbW~j1t-Eezx?P?rO@1=c|a}{e}c5!djFLPUS`tf&Q;48A@ z_XiVg6Gq60!?Ib}%_Sr*e-v^?kn52d@sfFMInk+kH`1T>O?%_rW&<8E`CYd2|t_A^X0|P)Us!#H;+diqHP=!>u2t{ zH$G}pl9*;0B2&)RkBcihQjuIaeEH|r+GVxP+Ti5X)Ko^$L*Dyq#Te#eKT1!_ecD9o zp+Fi+$RGRp8;!z*DOq&J+(>_?y8OTdv87{7r@j6U3?i;VD9Mp}LeZ{}9xeW_iQRsP zbFpC~Mrl;aGIv`=xF%_X3pCl8N4ow#WtIqJL`UsggYxzQ>M zGS3t4RjH*q`|LTqsJ*XnTli(e;IZV{efb#MByBAdbCJc*GWArza(+`a~bWU2Gv-mj;;_-Um@Rxa!qp zAe-UWG;x7J@L}w~9>|74`_c=a zD%}BL;BRZ` zk-Wm>D4UY@9&XXEHLfTl?Ng(gYPQ$G>h6J7f0QOOY;&TTXT6P5Y}^^P_1LbH!6!BG$W>`lIWJtgA`U{(d3~&|J@VsV?{ig5#yf zdibmn?pakoY9$ab0C;P(%oW@uQJAy z`oe0$J8oyXSXh6;({xN<>kpMf$O(7XxT5L;?{G$-q^>Btu@$2BLujRayiU)PC@pNs zM^swDYg4dpXG6P0UMXzT@`BiPd&OHPTxqiOa;=45i(WC4y_DtIu#a=7H0$P8b8f39+&(He8%Q9F(s1=*)yBJM=s6 zkA#D26ru*i6U;>WB5FJD7e_CtK26d~1R!IEyX`}^WaSlhiao8n@Y1<`JVpNN5 zSFS0(A-X5#8;Vq@i_afiK_fwufee!r@-+QqN{blb_ zxwy?N7v+05fv>JNT;uiXc2EA-#o1HX7S#UeXT(^W@k7kTs;5>5CZ~(~ZPQMCCn@vm zD^L;vnM;tM;X$^4`9@Pj?l7vd)TZzDoFo73fCv1rm(dXX9XFwA1pgJN38ZRQIuyx< zw9EcAxbvsm`v3DM52Mg582Y=w^xUruDsjAhc0RhjU95Vn<3xNa26 z4k}HCB{hg-``bbXWvY$@i`hbO!9uu4)g8EF8;q#BP*RQWkYZZpObB@>8chFkdb=qE zWE%1d@!H_uN6HMV&}ftv!I^6iNLxymR6ZK4B=Y@dw&OdaC6nR?9@zd@x7a??Fk(6y zq1zfzb5h-gX$FF0l?zhoaxjY}spqawF?p-_)(S{Emz(7*P7rw-VT?})23Y%vSTxLy z`Z^Raxvfelv2+E4s&^ATbZ=LlBnRz>?`|Gy?a)fi z2!Y2mIW97>I7JGS9=Dx)V;$Vdh|7)!ymJJ)02*4B_z4EBij@blcxK`^AalAE+59~9Ht>xRP=3FiihU9 z+ulaR8ZXUa8xf8b;nDSgH4DE;cu|J<`N=sEa&%hcA|=>;Vvyi3Ifo^H%t95M8KOB0 zyI)VkFz8U8l2OO$$xV1o9L`!4&XGElqfq$MWa*J4)9nUN$PA-Uc_ z!0-;d_oQ|x`4qs9=N+Dee(Be^PH zbFL_*%uzEe$s9eMilu>ka9$U$06B686JPB_g5gda$VwsdYAt2@S;8wQ8?-$xuB6q8 zet|hP_?D6U4_qWy!|HA`zV&bLA5HE%aN+lyaqabEUm0FVFY3Yt*F8>LgWyMX#@;E- z6S8d;Lv`GewNV(r*>gN|!8l&`X(OS2vn2&tZK+zDvnhuip_9|yHX)#w| zE#Q$_e&e>jG_0vf(F@^vB3jXaGE_cs3MO4b{SxRI!6PY7Dy5;)-NHlF2W$6R-z;6#H&x`+C6}bh~k|DCzWa>w5t)FUj5I z)fvlR*V@J8LS^rV&wSPD4rAnR<+Ebp!H}?%L>a(Ol`5IP85v!Q8C5GXbehv*$V?63 zY)eITcT!<}@#J=WS{{wFJ9r_ov%yl5oWOz&zSWDyZ3F96>Fykf@xjOF+V#h!8q#6r#;ylt7{?8RIZOKwX?@8(IDF4ZUz$ z1d6d;eAin`=iYZ8a?dZccqQZPmu6=>)9Ij5ki^L)-3CijxiaQ`b1Lo-QlG;#_)T5M z{3sLML$v2Bc$%J}X{rLPYhggN39DOH?^VI`gsfM6h8uNMoQb?_k9fZpB0nU+UYeYM z22p6=GhS^^kO^`huW!_&jgYcD=&ZcEYi0a=Z9E1vqBT>F=!+W8k@%I*Qq|9ZrYuOO zAUx5;1#>PoDL*iVZmzjBd!z7bD?7kLlFP1kLF#KNm%8ec=U8m0RMRt!SPLe9aU=!4 z3PT`ei0TY{zYz~H(OCFiiPOhp1__(TxqKV3yJF4%lv+eM9wP=u=(lDW9}h@-jef2B z1%f#icCHmC6b8wdb#C@$XCzLXs(KiIab-T2QG!(ev)h@py?Lyin_%e;{Me;=FXrGIkh=sLB!UTsY=XDF=7A&OV zXa&ZcJ~NG-X4-aP%Vn9g=~Za}KUH_*s?@3Y3v&&4MN&G)g7Z)Qe+BE2&43u?qi;0> zzCW2Vg9RCn@!ZvgzmAik{kW~2tFQyku>&z-Zfdo+v_>TiFV4AI!sW6^kOcf%&s-%} z-LU)QA_ZFkFkeA~vWJx@G#$mWF@|0uSIw5%a$i)=*kiIv^W3pmA|6}BX9ji6%@!~V z2Qoie2=!Kk6y#RWf5!*Sy0#%#_$j}sD&LBS*laPczF!YDQe^FrkFd!bXY%=m<7ZAZ z&Sd+X=I4-YsjMA^r>asDVT%6NSP z9Yh@q7C%+4mBH!B{SlagX+~gKXYumy><46Yklw!9qt||p+<)eV48cn%q`mNB(o~KV zNOTiYE_2#$CfN}~vOth6@yW{gB-=%J@MQ6=I+_WGoJ5COnEen zCuOthCdKELO@~oT606SvBP34F$~~d(E{}4f;jp3}LKm%KVP|9@2P+4rol5wM&xe!9 z#>1OhBv7WX9>sN=yWU!pOzsU?Gv1Xegr=IrP=9}22FjPry3rH3#wGCeM z{>Y#ub|XA}!gZ;@74HToDtVWHu$Z*adg_!c0_=#d?^vd{*o{~P#T5>oCVbeGW1{^B+}h~j#toz;7(ocM17zVklkSxNeFQd>^T6LNRu3nPYo zhu?-@-WTj8sM}9W;Q#z>5z7yxT?VK2yhjUCdAWG5UT_|bUmNQ97}L^fS;pkX#n;WF z<D#)Nm-$TQ<_>X|$H&%frZ`ElPaL zDfZYL$3t#CPo{zX=JP1C?EKNIZF6fyuJ7{JAE}H)-&&>&p&l}K44MAP3fd>_aZAOA z1=an3SaCyYXd0(khgd>Gd-sMj2c74YmhF1l90Swyifm8k4g|?(N#Iq@wM*X0OEfM< zFT?(ob8|uhM9i*YIqQEKt_SH!PznJ!z4%KQ?SubLCIA5QV?*kOJ1uAVzi`A~dl*n% iEbaFO@&Edhx5DIJhBQWs0*Mg7KUqm7i3%~pp#KG?=w4$0 literal 0 HcmV?d00001 diff --git a/docs/reference/search/aggregations/reducers/images/linear_100window.png b/docs/reference/search/aggregations/reducers/images/linear_100window.png new file mode 100644 index 0000000000000000000000000000000000000000..3a4d51ae95603e37953a99ddfd7942e107ba9bf8 GIT binary patch literal 66459 zcmb5UbyQr-(g%tR!Gc?m-~@sb+y)IA+&#Fvy95dD?(Xg~NCJW2?iO^=!CfBbe&?Qh zPR?8Ftv9Rxn7zBJy1RB&{i>=vR9;pb1&II&3JMBEQbJS_3JSIp3JN*~;nmBVQ1K2M zD5y8o<{~2Uk|H9c@(#8p=2pf~Q0hU6?uc9}Yje-Q>*b%@X}cOGAI2!p`Nl+O=lG`B zKgPzqPBfAr3la?zQxXnNFdCE(!xk=84*ukhRtAAQdp|?Yfe@c8@8jJn(ClJ1=z7sJ z+nEW9sG=Yy1`3-2%3ny|$P(6CSDe~ZcUcPxn!pdvKrnWTm>|#C+#Kntzm6@?jAA>e zH>vtr1M;i|=HnAtf%c=vINZV5C;qzb36-<3A~K*4_1Oc1+t)~a09RL#nF5~-0SdP( z2;|DVi|ew~Gr7dmG|Pi93kN0lkvtXzowIs2M)>PE@+KEmXc7E{&+7~Nj3@x&d`;#K z)x8=FK16-}diC2m0gbS2yVW z*5R1yIxnM8hY8-?nQGS*@S3N?(gm=8_p(B9wbBEiw_ z;bgA$+$4j9So2Vcrg!$5xn?!h^hVVzaD-&(iJ7w7We3^^BL1Oi`4<{fP@u-*v7Jc1 z|NC+1Hb1;TiWw9DtibvbzZz?Z*OK`JJf+~P)ov__hopyEyLlT#;VceP0xz)`YJ^A2 zV$jMyB02i)552unB^WpBW-%+Rn=fbxsG{}x*LgGJMFUD$rFCY;4TB`Sx@iI+gUgyW zb&(37>ID;;YoIff;?M5uuA!&eUa#&8{Z+$PESo86-2n4T(H|Xs%wWjCVb4}m^4c2O z-H-U^z6`t+fBRn9=WFmX`oV$#j~Q8DBa9|tWz>myOMdFap&$%Hbc85!rGJDBbnFxL z_meLAr_>?e>*hX)K>v-FEQ46(NqW$d-V={+N_wOrrf8v7>Ud1C)KFav#P!+4?L8it{}NWxWaCN(h9QvaivdP6V62xg+d*r z)UUBF(ZZ~ac?@&>stR!~I{n*YuhNG5Nu8T_2P+VPBzmkzub*?{{&ebw9Z3^O2uTGA z83`?nCd?@eGt485IP85GJIq_5lpNU_svC?bEC6I2sA6YLXO z6PjtVL8M4ThbQulFY(8yzHM5ifvu)cpa{kq+&y7A_enjN;0 zvJs;Z{*q;%Z{K3yXP=1;n=OD%lMUW1&)mW6c=BR=ctUX;X<~mo&dkvK-0Z+C#2jz$ z4cG`w4~E&3ozN-vl;RYL&d1A}%fHT%D_|SwGrPU z#3Fnm%p*AQ+wpf(!KquRhpDKn+bsM>1g62MK`D_buqjii5N0%%>N>kxvs$;hf*R$z zi+cKchZ>7IkGi5d)mqfLs=CvY8$h!Q6zWK^|iUp?H=K3*vW``2{7RLgI zvr1!{YRE72-rxRW!$Rx}>Ykg!Nj+uH(mfE8j7dF*PwUFmW&;l&MrQ zRGUF^sW33?fX1^jJ(vOy@ePpJsJ9vN>nLZ zNks{5S#as9!eiOH+?{F{9cz>56XG#`D$jS*uBGwQ)g{r8?UjSJ>$6pR6d+2RSnLl^ z86O$65ycU61_cIzNnQd{JfxW55v3m3(zIq z#V!i=>!>$uga)r=UbDUqiEN2jjvS2KiA;c~XcaZ8NviIuBo)RL(VDLF%=1JItv2;W<<{vo_lvT< zjLTJhSwB|$I+uMGgyw!_d~eB5FpSt_rFNm#E7mm4s0TTgo^Jp_-V66d+SuB0+QC=I z5P^>8J?JY~Xn11c0&WQ&w>g~6{dJEO=S(9ZT5^Uq{MPOEk@ka@Y}e6L?wxrpXA*P`o+=wP|1v5e{{$j3EXpJ1OLpP?t*r@04CSR$A*m{b2Me`lc! zp(i2hp5>m9FZaT|B5}e$&`OC1foyUdWXZw8x%VQL-`~WlWmeh6o?!igkY9;yeSC;$ zi0<%AIkp@BRYg-_fczo|^2tCg?NvKnG}G7Rl6+IYP& z7=;yePvT3=^wyWrhpdMz7Z^*&!f3w{*b^E?xlidZc_P^uAM_J!w6=AO-I{>>zDto` zk$0+K%3*R=t~Gm~kHL}W`eFyanssF_Y%h7#VC+@q+rjYx8eVZ%#=B+FA`KH~Tw7I( zS>dam!(#3;?x0=7-S1=3yJKb+D z2f?Ud=(2cz-xh<&tfV(4VkWAUrIb(F_-p-Y2Clk*_?`N*iZxtr;TIi8uT8T`4%>GGL1U((xkO;|mVRgt2Qy~w~Hy+1~Zz(svKF*~i|tI_W? zQJXq!oUPuP)mt3caGLv`sXyCWhNy~9VO>(A$f;z{I@7w&@+o<#t`v?tKev86zh4Pk zncZv@eTF1J>r*7S5>kB!zpVwkm#)tHif1M^1Gnwkbb=+}aveaGi z9HZ>W?9&{XjlS7cpU|3UR@n^OL|fbIJn*nRf4U63?=Aj0{`0c57p)hKdx~#ro3)M= zrT(rSL~`w&aff|oZq2yIy0ZJgM6P%2J@c^04&f5GjC`;@zb_@R5~yMrU}~V(WQ<~f zW2|H-eloh(Xv*uDtlh8S0pYerX^pL*uarC+pB;>ttnZj0gnnA|!SRa?J3E6}zh{T~ zDhtI->Q3bELH&HE^Vy8<2&$VnE)bZzxDIGQ5yPU3Fzl~aJVBp+KT`S-YFc_=wgaXF z^W%r%Q>XN)=c%X9n{yFyNw@f%9R8Bo@*I_;{>QmA>kLY&ujrmGf-J+h=}>vYl<=5c&yieiD%A3=r%tpv*|aPAcD znZ#7-v&g(OxS%l)&y(R&;pDONV6)aSboSKvr8`m3z%kx#?yTmD54^o6b5?u0ve|!t zb{h@P2B(WEg>Z#f%UAGe2c<8>2Za#HHi9O6n2(4M2mQ#Y7hK{Ip;)oOlE_uL`R)`I zF)LJ`T9(o{lU}kjeKKP)k;7^KXm5`-d#Wp2XIP!5Ii)qFG3^U*ETR2Apy%Uthv982 zTw-z>Qnn6p%Lhd^T|TWkc_ckUS}eM`&SOOkcZ!3Z%)!nw+vdIFftAKqJdbB9XLlLz zqx`nnyB&Bmf@6US3wm2x8%DQ;mAFNZhLP%(omvBU%l^Um(_;S2=N!=W0b3qPZcvjcXvc5nsMyzQT*t}mZDwjV-l#k=agd><&h z$;@IKwt{*mwk+<0w{~`}?#@M;<$Y@C`Mz{UhJNSSm_EhC{szz=)KQ)kRk5HWa9vby z6TTA*)_A9BVvLJv`C*CwibYn6k^9)2&%Q(LCd~!3qlaC9y@8>M&VuQT6OK8y?5&k2 z?I1?L@SVC{d{hJYiA<(ev{d%CbU`G} zj=%%QEA7$ba{;DtUFDz1gG>zIPBsNDPxt-7r98DhZz{PejwBB41NwWf%xZ(@$cOam zvNyJ=YqDap)rt3UGsNufjgOZp@MkFSBw1$k))rMxM)?7f}J!eR53B zS?;*2#^>63ge&}$j>VjnW7TW>L92eNIWsX!b~i)4YSiKex}Km3d}ESK!g5bfhvn1E zUeAYnG_1;aIHeGvnXHlw+A#JQ@jdwdr{U?#!xlS!C?dB_6zW4B`dmHlpD#rv|Pzc&y08Z=)Mk8{RYpG^z-h2|>!S zqLvmK1Quyji)v0I7u&B>BUnDI4$so>SL9iJvN||qC3?(=)J|OP<8$xe_R?sdyp0Et z10;n#zIA*P_}C_j+$Y?J;(9KbLuo=4QkW$%H>90Bk{BNEsv@t@OM69QOvhVRQ`(`( zAb&njQ68m2vZPfRTeX*8Sk6!&QjYB83zj)R!{=ijW;tWSWh3VN$==BPJ+(27TK$bi zou*HNMZ;*rX{|~<#bV^bniJQ)*2XdJ-D+cW0%Ql$+5ctG zu=hM|j?C5=;+*|%jY~krcYTe>j!&K%;q5fB7r7|i)^09R9hQS>a)?GCL*TC1Vd$Ii z2;O1m8~p@JHph#B*sy#=mZ)0tTIqhx%@5I@{Ri*9Qq^VUe1efgrlyX|E>@M_aOYs$edF;$dH2XvM2B!CkN@6d>^c$@^OlSM#&~|mcm-kY;83Boc^X1a zJ!O&yO!w8(tXK_GHk1L}iC4Bt646g>cnJ``%c3~KtPl;4LE zK+JP!*EM_~WtJP|uF)$#LF@sF)S~xxVjp(DlfwO^vGo%hM+5pfe-FdxSKMpNu@JQGIfZo^BAP)ORYa3Ox9*_bP6#k!VkjM(kp&OJopWM zSLjESnOHh#AmF3OE8}1bF_j;x?<|WDckFQKrBTb#Eg90emtK2*2zlKPFeOVSZ05R_ z%j)k~@FH&Cx~+YvhC4<`?%c42ZV>fzoG6m6oN>lbTq^+xVEUi(honj6cRIUeD?1 z&Q!LQuFK=bT?TgeIlvxzHpE##*Ke!W>Y5Y$l4b{6_a=*Oyg z+cJXqpF5ww92^ds-c@sUeu3MgO57evoSs>1^uL5B?CTQdNNgHIGrQ-6_vnTW^rIh# zGUe@(armANRy2po;kLF;)Rjp|Yf!nc515B&d8{ zl8twfAT!veU+|6dFvc+KFpny1x}SGgejoR*s*TRg#*@?=G;9m(RBT6px6#Pl`?v2Q zWU0)l7Jend9iL<*y;D5WeuNgKt7bQ)+A!&0fl$4vzo2}liA$~eP{K;c{IPwtdgWef`KuI_HjQ&?6rbWNw)2ll40Fo!sH>~3 zK7V!_rpC3Yx*5^X*UK0>hh2vMuI;&evjXC) zjZ%&wr>q}PJtwJHHAj)g15W3ng)%Q;bMd=FGg1*@e&SKH z$1(kCaw)gwb%lSCLUZb};v&aV?&F1Q@+Vgxh}rYE?&jRRWGo6+TijJ)2m)4w63kR^ zJ-4Hjo|10@BYcHZHC0u!4gG`Wg03%i-TfXEXAG35Kh6mZ3n~0EjLtfRD0HZZ2|E5@ zP+D#@jY$F{?Q4}V0{^|w6kHfK!JOk3U^6FvkGJ_jl6|iYKi2lw=C^=a$oYs#B1m%N zb7@F(5PSlKq;v-1m1&iuzb5sfO`o!Pv5hn^Hb^^}?oolWSph7V_(LfTsjJN5wRZJR z^;!CkhI2dUn?_sOn*`fc+uZ$x1Ey`#Y1t}JbT#BRGT`zPv{)1t3w!^SG|Z|vdqud( z#t%$Jz614iZ{Gi)Yb=v5Z7W$(>?ebiMQXM;usWw+(xPNUf+OxfF{Qz4EouxcM$B_B zxpEWmfaZ6%&&~w{=tPn12`}W&hOdiGxR1q8m{8e}Uy-qItur5=%nqE;L{sP)NN?mt zq4iZ$=gF?eo5>AQg%rImSjbG{kM~mju&H9=(4;xqynGf8e})J*hm^=DKtD-i(^>X1 zorwleUf(mnS+T3^xM?8s{QBhDux!p!X+Mbg0vKVdXq9FC)3)9%;@Itk?IP7}&gi;O zx8LUn-s6|O??0G0#ceuj-(?2JbUbu8<-Id8Gf(V6jmwR;2Vw^w4=f3r2swRwx`=#N z<^ws7II3+ceM^!v4zDhu70hA%@Ryb!$N*5sL{NnI>FJgq)6)t2B(GzBRFU(OFQ&6U-i)MY;L7}{Dh>KoY_7&E$A+r1nq zKtb`j@x1)CHg?h{b+fjzapZC1C;!g~o|pfByk;UN{m&35OMY^78F^9>TL)uOc1AWv zW^y1BDJdzRgOLf3qNv!PuCA_(uB?o<4yH^j+}zwu%paIOd|-GP z!Qkj__ zC^h~^$;Qn5cgnx@{7K2j^v4GNwxR!%)_>lWI`1epyf>4s8LdtH? zhnetR$|_g=W0YiMfVgo_M07+I!8C9H29CTMI^E|v9XKJ^nHA%?#(Omv9UV2H-YSXO z0JuU)L4`8E$-=-@Sb(tb{sW}dc67`5L;RaOVyv?D#FR5K|>`hLwv-1FpiB_gdh$k_XW@RBKjF(X) zkK|g{CA9UB`A4+~d?9NK<(2tkMwEc+_5yjd-z6vk??6bq-~ZMy8^Wr~8^txup7r+w(cF9tmSH4ErvDv&Z;Kw7@4=rHxQXrS}25$9?*?2mfq} z(Z1*0_Y2d;tS}_DPMnDz{@=9`!5Ahi`VDfpq&u6R|d)L{5D*FZNYyg;`zS#@Z|^Kd|~nQlpDxg)IU zal`f&sGGi@>J=(2?&xFmb4f6I^}YqyLwfOIJPrI^tFy>=?H1jdCM<7*LThh&Z>8bz zp!Gtl{QS}j*&%77w|^JQ(v7q8Ls(X7XTVw9l44glZ|wGkrpIF}uleZ&4bR2p8aT*S zZG57rD}UlTswj&}qd}^2w|b~9L+*zQ4#67I>H53hwLTCH+A?x+@$g3U!eKv}Y+#uh!(A9-xNrkS9m1AY2?Y4`>rPPa9O8Pw}pQ z7d*((X2~hVz?d5cHgr1v#J^3lETyK-=ZWjxTKW#Bc`uK>rpt>z9;oLMNKf^Xt;P2I z144y@_+ET+R?#!4QPa|FXqhc@i{0XreT8?^JNY?i>x;}U_p%F?8NQ+R3{@j=9>e5R z)40dF;SN~MM)u5T&H28z%`qH>(1~aA3rqWG%awON zedWNWCF7+IsY|HQ(M*VJq)p1Jzmn$VIGpPbeV4wM)};Muc3!=*a0jVc67yP8O6 zKIrLcR-Em8Y788SM_lN6+!9~a&K&Au!zcKDuX&;ut!QFeEvw+5ZSI|=Cjo+3>Kvh3z1=qo)vXH(0h| z^Rc=_FqN!d+n&%l@_lZe7~Uj1RrOS|0(*nYb>1|#)WKAu*%0cAY51@0k&JLlYI(~- zb|6N)B-5%>{uwbNbD(wM+bA{vOf58gnOaBPcbcap9(HFl)9ojwY4?^5$Infrv9?`ew;*@8Xrr{@;Ly^b^9gl#OVum6#QyRQOWGOy9*iGRb75&g<+sSzq_;SnKP!Jpy=E(@qSP|AjkD#gJS+bQS4Y3`i?1COHx->KyEF?oaV z^1V_89C7C+b(QyT;lzJeP$-xlay!0zC&Az*?2JMpWB=ivYK#6mIRIa}XI$9XW{C+1 zP)smaZ;Il$8yEM7VtTW&(ayfGF7RSLZDVYhfsfG`z?!TG%7huD@+*1j#{cJ)A z1bv(4_R7LBLHZX!ufstjbm|ISm(_Kvc*ap zyN<(8DizKeLW)Z5);O2Lc_9k1h#WVZ#>Lu)lkaE^E=QV+UA|qNGac@Z`xE)_C`4T2 zwPka=J##lZ*12=KPk}x^sp}LOM^f1mfvXDO%Ybq8-*-|*2#xE-X*Dk>okX9zv9S@O z`E<5DWd3+}sjRN9J9>Y$WZT%?X}borm?^_4EGRI8zA3Nn%wy_$n6y1WDEpBNx589x zBA?XwYI`9|=Csv-Csr#5SN*$fxahw$7X2q6FF*hC%)rp_=LRJlCi3@mPTL|ZYFYU& zuTV;qie;}n`lCtat}!t&OBC|O7MEMhr-~9%Qc8l%Opl_7`K5~GvWq+}_a|m6wJzs( zvweZ~B-$TXS(?*0|O^!XlD`*~)xK=l!V9_D;HO(bkrc$MuTqUh|qyKBVh8 z%Z(W1dEGR3dPQ0i0P%Ov9>1FhgeiWriG9zX$j&2X>5C_uE@YYCJ^`D5C(mH3_SNP0 z&nO>>h>eXsj?B~{NG+Gupu`2~I0t8W9_LbsM~#Owb((cC!oSSM>kJ<6F>c3aHJO)* zFB}PgLk8FO6XzF7Cr@&&c~K-^7PIPm4J9!cAIoF|Gmk;nx^3s9yhq<4MKbB%v`<#L zQdXC+#W`0!p+~$a@?R&Ulo&Q_tuUFXxDXQX6ARnob8$ic(?Fo0(GV)sYlV?PA|a@S zFDpp_K3*spSkqY=FR-Nzy@<5Lad-6M2chkF7QZLWyLZ8~7I=7g#Y;Nw+lfvSAMqx9 zpUz1P94&FHG{T1hj`g0-@K(@}XX(9oacB4I>>fh|9(qobaDAv{(nNV(kLg{<(vgKG zZZ!9py>BKsELaYK`|_T=BV^%mb07_(#eby}&J>>; zHHYX^8;i8(vy+}Bx!q1kGkINQkxP%3nyGP37PM30^p5lMT$bUHn?W9zlSk@)+oY%Ygd7;G!0VshErLc4(C}cCi37O?+&W(bY*3uuJ-ut*8PCH zKw67t`%N(&IiIgFL&BDk1nl7PToFyT9Upb9!-|5UA`1as4*MO4y~1S0>*JMBxU)Gu z-%KtLM)rL^;NwZ8r7Ql5vtw3Tm*Ot2O|C0&Npx4w@$F?eQ|?&e`cldLXM!0wZvOLG zBo9j^&$ePm>w8Td#AY1C-m4#6zqwE-e}s44J>^;6R~ps^+KP&KQ-qg?3-LZrx26W! zi+7g?N@8Nc9ArLQvEn6`b5)C5w}r{Nd1>sH7AQ6A`bX9EE@%P|HOjTVQk3Si1dakk zg{f0?wWPy4gHhkOI1i_a(JD0?2Z1L2QEwZ^d5s6L87}Iej-tC&`BdcrdS$5+BkAwX zZj2brWkY8f5|+b)mbCeEH@p%XT4a6O3O8NSs)n;TojX!_-~~?OvY34Sr7bjA>t;iW zd3kva&&S)zda}rPj1A(SJ9pA7OC;mp-H{*Is#0ogFsE0qK;ykNZFK~KwwAAR%mM-YnMupq^ zs6u*k7fyNiyK(z7Q*=WY=z3qyd?3)6Jwqkt;~G{~Jau{j%VBue-&K@!1F>c{Q3;ib zipsoeSwhRs&dZA;l6)5ScNo|Il%oss(C51!EY{tR)OOdI{#|EYoQ!`B#S5Sx663#n z{X0GBI^s(hkTou>^E=l5e^CLN&jNAx1Adp|S1g(w@8#jp$>N)}GLKXh!cQp46c3lTS4zLYVansqdaH1@V<^nSSJG|O)! zT)8~~5U{rJzty;9WZ)SOt!i~ZW!52ep{qb;k zP)#{QZs{B~x!jatL2&Ku*P^Rt0U-*{Ld7}h{waaU0~)1W;W~MSeU~Zuwq1x}$(sB7 z=^_#2vRscFcDm@rYZ5UX`o8I$HRoy0O5jX8_#FYu4#deFa$fRL3f`hTa083CZZHxr z0{NxT^U65_5ypqIhDl&)b&1G$brX5@q$17d1M(1fx7wKrf)%WxU{!B_ot*pq|$B! zGHHxxt#nMnmy%2}N#U4ovw32EJ`Kn0zE=lrCsL#>4s5?2>AEdU>h5b2q+{2+(8{Z0 zUtoM`Bgo-bDQInw>>q_U@`&#B0++-pvl_(!%6`g(a?|(n$@HxaK8~gM7pWp417*tX zss}tCHfO?R>}EtDE@xVqRGz4P%}V8*%T6&hT<4eMCL4J5skwoW|pfiBhE? z`Vp@1n?tLA`3VeQjW#ecSCAAP0OYYuDR-)fRsOMjJGFUmVTVO{P1VWCYRV}iaHd3B zBPP(v9Vtu8U@N3w>T)xQvaL=o5?JGH(}u54bMpcLyBbe}*jh*Ay}gDyUjy1EDNkVkXQJfc zg5@oD4TYOJj6rK-O2HEPyBy^;SuedRadkq5$(8gQN%ge$PG$_Z{S>b# zCcn7ze()1j6woSFw)rqq+%Rqm+&Hb^amH!uo(>#b5WymFD&AGm!Ty^&_8*|IQ-DgibDow^dH$d>a_ zBZ)UHF=xtnr;QP)XAa0m{ZZ`YXG8(EDw99dN!Lwb@QXR73YSb5;Fe`+D<^O-`(+0= zrqkc^x5fjvy*fvHy){U*JOvn3eyi|e$Uzm2VmyDX?Ur~~Lp3L%H%*mWtGkmg!_>f( z{OIu!yd6HWvA^(7!-4{oVZDIO%$~VnC5HIt%>@b%nGwT-{V^xwMfyH^z!1CFKj1{r znG&j}nFr`x!EJz$K{GgIX^vrv&{;8K?5oGvr55{0gly^GTC>24^bb%`#(T-&%7@~A z@KD4-!Iy8u5v=ugcJdNjjYC&p8V-ikLvWKT4&KtzT*5YS_Co)IR+Pp+1wjWd2@VEB zZoNTfX$YAexz*}8L$%zzx0(d`pEY<_M8Om1WbA!^!LJzeS+441;G)IKp_faZnHU3Q zuQ4*CC7?HweA`n`S_xBe=e7_WL8E!P87q_w_>i>=Oc+0r>&kB}=fuLn#TTXDrzqq;YTdk^re(=pR17F}Z1lcRcjLn1 zQjV!C_Kk;s0xjpbS71PU=ygs2Gw?qj{{=ujWTeNce0O7}AHJ|6MrcMsA%pOIZx}`% z_vRFHlPxu7x^&=6F6l@nR;Nsd8p|2+a!xB5

    TUXkfG)(e^GcB+ND@+BAFd18jG= zDaaedBNg!f=X9{4?VAnW(2X6Ex99}@sP3aH=a?95)Y6gLxshyBH2=c6>MYaBqdHh< zo8=kSROFQR4U(*2B8lC2?9PhvNmqn~el<66*KOZ!GaH;cv>G+$SFvxJu-vd@p6L^Rn1j!q-_!lC4?HSXxjdQoQ|`sni(-6| zzK58_yX$*sW?uYM!FQzSFo3%7^9%a@-fle#05N>ge-D{vlKhNPOPZjB&3;WaAD23D zq&FQ+?2M1nW^reI>|(eLjT4V5_GGjc_naNgGsmZL`VNaWKBE)1*BnhfRc4S!5uTkp zmoUC$);+?ugT^WUtyASF-QKEldg3|I%(R;G(B z?AU183@P_AxP@!D>t-pwq=yr2WV}UQ;mje_v73@Fvusq{Z|NF0&g7k%*|bscSQO^b za*9gw>I^|zToI0rmMuO8G3FOP+vDSCsZM3AfWW~fK@IvnJS)$ma6@0?7>nulrweS# zx;&qN!FQR-!Y@FRLs0gkT7!G~*S@sVl8V$BLoQ0gg%R6jdmHbOc8t)(?CQl$i<6Wz z=<5#w=Py&-o(>oR)zl60cmUx*DERpGI08s&spHpABvXH70SHPkOXm?^M~O@e3?nIx z$I|4MMhO_z_1g*tQe9v#`Ic#{52$!5RZ0>{BYB&quj#3R3bsDiVQ(+)&*rFK;i&d0 zelB#`scHFig>{t6r}-p8#@^+SQ#F_4pV;Z~5bXp3T~Ds* zY81#6xZ}?5d@0Qi69l=fu!JpMyv=0Xmr^SfRZ#E`I`h;Oy|m6(C*+tgoR1k^20Pi8aZ=G!YoH?0?`TBBJ-MB!)OBTI$sL9diVhGg%&_N+AVb&`W0 zsu(q-XD1!w@f0hCHo<+RO6G&p_L^TAM#KA<}Lk$`Oi;ndX}Vcd5YJ3=NTm4*Ju4+2QD)#kFBlA5oM9b@cnjyuB5ke3|+V)6&_74d#MUm znT}J0kC$T_t^qO2s3YQ%t)QtvbQVu6T18@fFk+TO%drog^z#;qh*y!)keuy zq}vz?A$P2oZuI5*>3j6}3i3e8S1F4UZ$5I9u8q{iwBiM~;_6SP&XcZNakj^vtbQJP zlv3^7a?V2FrHjpwefH+@mg%{uP$Lq2{p=Q#FkdMhO?M9-t5Um|&7N(kVyjbPxZAGm z2d~~gOHQ#KC`lH3U0@WstQ@xf~H>n zv%u{yPi-gm@p3z^AW)mp5Oc{w$W?Zh$Do|eF-lJdT0k4~X5&VaN7*iHGM+~<1aNBQ ztg&)LSQ&3IrN_jh_w3jn%ZXz-o#)1Bth)j#%l=H#-#9*`LeCy%c0M=;{>2_n(+Emz zvgH+;z2a12VhYkd#tL9k5`^|=Cac+ZD$3gPE8x%_OQB%4Q2=%l`A=l~8tKS;-r`MT++VE`SNE`9X5=!O_)nH{)*rWl7z6Q|*_2 zom-Hjm-z2~;600r6)Y%#*VX4mR5a7txEX9-373ffD_RAzL!Z0GA)>TdHow!2xq%6} z+<@V7&xDbB4`}hq25I?jguwdW6gl;D51HMJPkrR|)~XyZG5F2hu8i6p#H}h}pb|uaAQjh*L@E zZ`MKi88?7;(aV=Jiyt$96(#e%cS60moS1e`)An1`%%2~@pGa@Sz+d|6$fLWRw4PRl zBs<}P_hZLJzw66X@qT<_jvqnm%jD1&KSNntx)An^zu02)9yUi+r-*?0OGH4xA>^3G z6kj*enQ}3YKfBv=b^FV%ZD-7e{> zXYbNgIj48q+_lk4kw*jsNRz5FrF{va z9-?1fkr>bT_BA{OlERB`&gz;K4IVY2=X&*j;l>MukqT9F>}H33y}W5dSOsU>8By7H z%e~*BfBSX=MVjvflgIx)aGj8=35*YT5EC4ZQ?i)7)XF|~+A?#YI4`NFoIcp5#P{@Y z5MRj{$yE4j&EG=&wstD7RQO{Z&T|d-&+*hx+0;0u`l-L~ z+n5h5ybl#r&s;Uv1U>>F*iTfIgF5uuh4Pfjo_aA))FH`im>fZk_j)&PyoGW z){oJo>#i^x*Ar~Bu94O46S&Sm?x?MbVOwp)nkrmc|4PbIkRkuV`=m_6{(G)JLqY$2 zDZdf@k*A#8-d~?4xcM_FwYE&u9I>Qp_iYx~_f8xl&Fog#JSd#8ohuaX_^Z^8uz7*E zS=&*H-&PUeiLqWH8I})hQ2u4qq&^=O(F($X1(b90B+9?-aFTZ~DiNwM!@K|dDLtk! zvr`*feb(t^_}bLj4&~2WqKv|yojbMnDd&0dUS@znkA7TK-jTiMZh~EBrHo_lsI$YO zk7zAW2V_F`D;Wog9x9xP4OTMTwyc%DUSoI<>*0j=CB^m;4?`8|?gA`hP(JeJO{AwR zeZ7s0P|#Qp3WL#-Bw^Ti-NSjlEZMqsHWiO8#mL)(@?U(%f)U7Ctu*ENHGo9W?)?ge zIOwONV@A=9qJ7}fvNNl{OK+(Eddshz7Vu0jN_mWQb4QFezitOYP9VTiOtzpUd=) z8hTh>FxH0suYFK{Mhh-r`{L!bGVAKZwl1%3rCO`MPyaq#dqlEMe!%sO+9mnzooD~- z-nLHeaK;D~e=F7_Xj}fA%KPy6y21g)zQ_6{&0P@-3L{Heat zwZ>52?wQj)9u}roQC%<5xE)^p35@MX$kJucNty^~`fy*xHM(F^Hv;U%!M?IL;8%lAOG=zI$1^0NY~k+C(2}PfH6QT_Fm$Pb<^!NijVseY$mP8 zN5zlP)>Oj9H#kE>G62XRjAHA(A~wlq_n*94(SRUzZ4nN*+>WVUO3FEO+4~<{RmZ<51og0n*SciAj{;#&{mqLTb7%GEM zw<(NL9SXFo+aobso4qf=TuJ+}W6#gZtIp;nhZ`w2llw?0;;+oKCY*c;qIMNDq>1$| zRYmXpnY0Vp4Ayip^3-1HAVC;&|5wK#|JqHS?h7$?M)42jEGMz{$kqKt-9HQOT$tE* zgRs9PY;`ghO)xe%G2EJG9u}0TzO15*cir)FLNUi{V+IEU#9}T+#++IclKc^FQzb*X zBP4ngKQIVHI~0S*%#rePY6(3QRmF~x8g}f}F3!t+McFvftYaRt#%oakwJv2s+=lUA z3>OT<_}nwO!25h{;h2GZfB$OgQry5Rg|knGKcLfjabIiA6yl;nWl(+2I;rZ&IGmxw zpsh7u%CXFmGPW@DK>mvduqNRarY@q6iGIGq68dAvAMe+VxEwh_`gtduMQt^S_NO!3 zV?>e7t_+Z5DZKJh@KsY-_s9P^mlZ2=a`@QGJU{tkzB(El-W(lZ33 zj1Q}xhH*o$L<}mK#9!@@zia5Mc-mpGJ?KuGQW2EFe#V+la#eR7+#}^@ILnmpr0`fI zzNCAHj@~mT)o=b3;kuXl+$lTO`AAjCt*K~(JDaZs{@%Q%>bzR;JaUuuAw0AqLE7=1 zOkuPuc2apXGAX(_K(w5;v%U^^P6Eonr0_6kZZT>-N|rmV#_36uN$4k1H+X)j`8E^x z(|3odYI^x*%833U<9>~|t?#^fr$`TyyUv7mbc|Wi%){mWbQoXRU^ZqQW6vPY^mCld z4V?zU2X1Tej)*p?IvKl?tPb00TME=OIcH^4!G!7RP40C@?pJ5+>yld2l^KG9fVB~l zvAh}agQ*(As@%01ehFlty5K@ltGc-Wh|?PJsB|e&ED9`liE5p9ZE8Ku9yW$E6}k+g zWK9$xXoQI_K+FOkr-IrP1j?>{@al7#PZ!9V%&z~>mNa!u=+PZ>7;bmXbQ#i2F;Ygn z>yuOi4G_N?cS78>V_#_mXhvGie>uUlqYQEU&vyJJC`oPjN9)K>?o-x!9UI zmjbjrEQ0B)B#*tQOTlkRLdQls-l?rYhMl$F4%H{G!Kq!^t?7E7>(7~wrx9U?8a&vI}_WsoivSYO`M5sf783q`~87T_Bs2keXR>?YumIl(S_@v zbjG(eT1i0ja;l}RL9rY=GCcd|Lnn#hapa*kIsjAc33ReeP#kjg&8Ew1BnGWc&x5QT zru#patHn^0pn1Y%JYHVVdM{e%@fj!2Dd?012w>d7HUa|;2c_#?L7_WyP z13jxIma^eWVluc8Uf0qgv)E*_vwaZ&-WDmdS?TihBw>4mG- z`5O9cV05ohmRB`)+svwW_X5j3I+B4u_=FHW2`V6|7;E(*6JFnZHGky zWs+EEq4ZD~uUc}^2(>R=u&LuFtmR{5&x~oSivZYA!@6p04tepTDWSi=xaN9 zE9Q$-mrV*UTl!QuAnDOhe9$ud<9&Jfk=ng?eDq8vgi)1N@1IfZ&l}ax`WFBi?eI#q zZgILJwMCKIXa8Jz3(k%HXjkcA6CK`TH!tfW{F{X*g1t@r%5;gvFFbtbHahMsSs$W0 z!&M?jXBbXb8b=QWy5uBm@v8e2g{M*ti8~w0Ta~}y8sD|yyukKsb>#ay({&Kg zo!`~TRJdBLP)KdV!XD8aUl=+x12$De@Je0_Fn2+urIzP2N5f()1lYSR}W6JGyCmKp5uhs-y6+ zbg^bm*#3n9A!k6X!24VL|4@f2R7P2-te5j3q9%!_@dmGjmSk%gAE`@|3hiEkma-Fn z{pTt%0(Vf`ZkeKDb;QD~^21$2wqx0OgnxB~YQ@V*I+inZwgBcu`=*ULUVh*2)SJgX ze(>MptS$g5gSWn5Rx|eRNr~bcF#I2DGvk(_jdv=@)5j?wL&dK5n@rIQuAS{054G9o)>x z8dF@kVokE#m-e1>lkYhXs{V;_Shht-V{y&NlR15SeNQ9oPOT685?<%0tsqz_D3Vmt z#Fl3Xz4E^2T{fZ1cCJK@=j1}nKl2?F8vj8D2v?(T9 z9Jcw0Q9#ZRlfW_!cK3=9!^BHprR_)N);DTP)TouK=(l23;?l;onG%2V@lhadVA6Q; zcOr}>(e5Sr#`0O&0DnQQ>?_>C#WeWMf37tW6$E;oR&Aznz;nV*8T+1Pok?w$E3 z`MLJg#;$l`t%`E313*q{-Q@|_uX-21dzUzmx_X}Z$27qg@1NN|aEStYJOaMtJ3ufAuZkG;6tj89=-HcK8JPNiNB_LHW&9D0_L!)N9LWOC#4_MLQd3KRMqZT)Agq zz(=rm;%C4#vecI{G6h1zQbn1U>w9rHU0I0sj{V=Ekt^GSf2}3yVp&V%eY)wf0+Yrn zDks;v&~(7fgLvbAr5M|PZRAmwVe%4*UJA-&Wy9u-4RMF3E^rI= z=g;Xum2H7{oB=rzyCXkNHtQ6VpW?{-9~3mc8dz~nf{aYg3j;jvQd+mbOF%{Qs}Qrl;Kmhvy-5?~3_ z|1p)lAq%S(4>OQ$XU?MSy+;N3r@@{4rijKNfH$tXAMAEP>b-L=Tn1 z{ZN!pHh+iVz~_iw^aerd*Y$RQ-M2X*Vah@wy0yNXPRiQ-fhi6DP;K-Hy@6KN zTum5P02n_f(}85?r_Jk5iw}Es<<>z=vu}IH@Q2y3PH*tvLlZJXOR*Wfni@?U@67~A zuaNsuf%`B$hq8Ki58HZS^R#;HhUL04*-Fa0=TcfsB;2rDj4bq$)m(%Z*qfRpwJv#X4>0P|jTP;r>=Xom)1$&gPVwWaQQHolnaO zrYYVnoI_x3@#o_|dLxmlM#7wCb6dKH+J(toSPfy~yg1smG^(@F>2RP(e@ z4;({Jecm^XmM62A$uoDaPz6plrq*8pKpxGh)zx~tj}I%TE6L#emF@@>n4Cg`dYNf? z-$Vp*!ezk!G~uLaf^Br90x0Kz#=F{W!GywN;sZ3V1OR1jt*p&(*)2IyAup=>$kT}L zA6V_dM3ev4&12VrCkHcKY3-5xq1<@}Q`fb&yCi?o3MswR1;nk(0ZC;y-rKz9shHl5 zhFd1J(T~k)S#JRx>s>YV6j*PPY`^;JpSBF5BiYo1GBmHOhij-q%J3uXy`SixT)5BT zI}h}bJhxJ7!$0*zjhi*u(IzMRa()$#(^Z#49#`*G0rR(|DZ_=FF1Wi>xiA+_RQ=eN zZ%(jW1IJy&AQ#2?;>&GVQO>dfYX7##*j{$9+t#84Yl69$rMWcRqV6mG(h#zEez=&s z_xR8_`)&O*%*MAjq<}rRtN?I3m#k2Nb6JnP@UrGvoia;CvmRhvhE8phVM5HkbW{F z87wb3=hn8lwewHs9!)WMrPzPCi2~D88xOhc@M=;*DiDsF;Y#bR2?VX!jFd3;l;VTO z+8H$A9eQ2v^$d;DeTAl#MNg_@+5exL7K4G5*2J}qC{Ik22q=Gj+&O>75dAA0>xYOj(s%(=GHYfnB={eAkjySg zFju*3f_|UFeogDEmKse8j#GZD;`QcQQoVeB2tkZ2N#k5zNU_4aYUX`p*}m_cCjF0! z69QnwBI_+rRnGLv2RBB_&RD`Bjv#eyw|`RzH2a2^C|du>jx;;buoRf~DaKbGWJ@MX z@S5+3cdNXhebP+;&uXb1-12>x8+@LFJAY0;D`Y4}gqn?z^|(2UiA=o(q&sBkOw1Ec zwfoCI4%$#r!=rp!?JM)Io<^^KdjN1;uP%@dzb%lUJg-Y%8@CoazM1xH-KI-u+BSi!rUyJaNwa_!PE^dXxzki`Ym72L_(=Mq`~rMfTWm8K;tg^t#Z>TrFn zK5z0xTuD7@f0PSjOX&*rc}BCO5dided4gI_tVtIRSt(#9{`ck8P)=3+GCt}U>$P;h zO$B=CgB6vh1_9X@2vY$wmT=Xu@85lpBw7k6a}vm>PP>_IiZcf*y6Wz6|h__UiGVK($BXThNTh6 z;0O~tzfa_)q%UpSl@3|itbGHVg+6PE3x`H0>Q8yOgj z26&Jd3bv%Ay+5(st~^z}6$cZSU=NLc!NO%N-vK59cg|g9^WSp0_>)}_#ePU72dhsb zzY&sA83y$?TN?T`&8GgfsS?DNuDp zuQZ*}x;9+{CrvVB=NX54GBf|=1oVJ{x8LIxy#Xf!%raKwP|w~L;30CkLX{|`cFoE5 zbMLG<2~iru)axImdOi+r5HDL#@@WdJfWY8(<_9@l`DYq>+`qrummN@W=k$_JTmj%t z<0YnNTjzdDG2GxeA@4}+&m2rMo(98u|S+~%g)kchZHl%Z1b$x!aW_; znd0F3YyJu0m573XqW}JJG9zEJ&w?`{u^UcK1?t`Cm}ZuY@x*4QI`g1svTYl^-$5!U zK+4YLYqhqsvTq&nf@9+r*-!aSkzPFoQ1CyBCh&r+2j^Qy>NgG#91na$4HN$u8q!s{aCKJfe5xMI&`WWv;4{l+yP~HACOZeFN9MGDjjp%%V50#Vi7v3z1J2Id2+tFKs zVTkhAv@4qSLGGE+ z$)7f!(fSJ8sOrjzLHs*iV7llDJz${uiQlDSDG(x^V0>)3vieQ9b>Md-kF-XFrIRDJ>wtYsm^vC>l@yV z-~TU@4VF!8-A5$ssmIoA&nm9(%cLJi%Z_vkl}P)s@X9$vwG5O-`=~1Sm3&_lA6I(AM#^sUnQ! z&Q*;8$Hs5racL#VAeps6jjhj;+RngBd-693jZ+74lZLGwZxMvZTn#aZJ*JNd>8K5G zyy@;{t)rzD87x6=j`)_{vOZC>6gAwhX$`z#^2e5R#!K4z$-$c+{?-2>wAd)!9V0J+=(h+5m697+k~5Cyehls)|JUObq8;HDo&D_dN0zDzq;cR#TL z(chX!Zzs?%LSNFvwfr&UMl;Qs>;Vfk_}>RUk@h1v75`@dNLwVaY{uIz3o z*^}b#`1`ugsdjX{27BwL46D{@wA%iM`ZVs)zr#icpvXk-^t zRm*HULyAP>Qc#9$QSx=o-VKL8pS0u}PdMiSn;4Y@L~U8lWsOiCG0e~K|3?MpN(;fT z8@`>veKV)ed7ACk1ko_lf1gb=)osxQl?Q0)5sYUPc3K&UlhZ9;B0x=m_mG|z;3 z;OJ4AzCz*c9#`yg^yKC|-i~OlP1DxL*`J5~90~0MlnmHm;E18k-L>D@1C#6Hz_5ER;M_?;-_Flm zC+mb`G(zA0@_gu1`UPEJ#qPQ-fGIA5J12DQ_y0PiUG!LF-KHCad6u?#ET8oWU6^l{ zI7$;wi5`^QqWZ9|Cz{1k_B@Ms0&WYaRe}G18bWx$3*_r`QU~yJB3SyrHAnBtnD1@8 zpd=|pU-}W=olzsfZv852rv1uQXqT1eawO0sm_J5#zn?v~e>+#zWcu0m+Ahttg{_VC z+$^GKVYTBIUv!FR>QA@9OQ(Mh2{b$yv)@g+dd^mIC5V=!we)M_Iiwa7S14{A^EYPL zpHyhbEI=pjbX=wN0&A_5y>!6R2%cP7h#-dp0sOHFMEKAelD^Z&2&KOqY#k(0yz#NAERV|pr0Pf9vpLb_=l5WF4W7=I{HEcA^sCL2o-0Bci zSKrp9m#_&4J>5q7v)TM}6aU3B;4@5I06Y>IUy85NUpSz>@Ink`nov^7uVjqMl}5X9 z#Uj-OTr)qBm*sI|0|cpz{dzCY`zxY7jtSb9`QH@M+XdzFPH|5KV;E?De)$u-1vQ^nDAt5u33ZaXo3e?FwrKLclv1585Iw57 zPrd97sim|d4%g^1yLH6m5m@ARxb<4ss)UBL1eN+*JC){c?Lf%Lj1GcWlh0D@7SE)O z%26(VEllUiW=KFlUOp?Z^PAp5;u4cj-&ONuS{t6ar-O~9aVN|~P_V%erLN=`N9DMY9Fq-)N0iBZ_@I;gBL|CMpM|4;1 zV!PV@X0mwe^pbv6kms$-pd`?hB+TFGM2vMOV>-DsXr;bG=n4oG5?0@UJ_s;D^QM;G>26G)gGP^N1&xR!I(9P0 z7YD1RI}pKaO{K`vC)yi$HJplopMWr$#hLi1YU^<(nyuuBl(t{%;a|xO5u}Xhq~kU# z1gBxHWi>dYT=|4MmFMTFMqIUs-M$8iw@XqVH;HblePPvz>m-%mxZuqa}+eufqtU( z1sGQ#=-hFBSKC9txg`5w(|^LmIf4`c`dJ{Sy7mQti`0aB*Y|vDRmH~84m3fpHo~i~ z(Jx%qBK1VxYOuUt8O7A#ub4iS!>hlM6QZH>aD+oaQ^E&atFrBCF}wxidJTS;$WJ0L z{UQCCxKk+>$unetjpUz%FinV=AqOXghodL`F@u2Ws+&p|fpS8diN`gbiKz^k2J6zD zvwhmasLU=Ws?ItQYY)Y&CQkghVoZL{iy)Sus@I}Zp@sn#hrwrOa6@Ui?O!2H#Nmx# zw_X$cOW`fWfRJ0vFxl&W+R_CoWEgp0XYJQ}pKzXN^^z?vkPLNshTSA&Z~ElFlpc|!KD{^zvQtgN*lz-FE0B`UUD&Xm3B4AS?ej5 zkl^XjvZO9z&!OTtYtqf`?2Q%>aX#p6ZT|(7L?j;;1(8PJEP>e{wTb<*%gh&P#Y(YC-O7YA@mrQ(ax&LZm?F#7r%|;O+QR_&lXMI&aV~4 z;K}5-H73*k-#a1?#Jyu$=scX`^*iAtXvc7|Y|>!SwIA2;>(v#(YmG8EJONE*+mL zCeS-!)B42=Z9B$aQLc`)G_r>J{9M!8@qjlf+a%YPYv_>})mOpRmZ`&$Crk^R0?-Np zID@J@QGE0S+lqE5nkSZTQhk3t`*n#uzeW4)BjHu55c~2zh;)yQR!Zk!{5}80s3Gqd z_WIt62*#w>%5TJ9NSHmce7*+1G41hW21%{CugROL(Mae9&?z|VM!OjnzUGQ({YB&p zF{lamKYOeJnlZBc=6#VE>o8iIa~21>8w-I9PhW~!Q1U*Bi3L{& zZ+dp4x{yi@txmAwT`Y!$5v$3~B9jLmJ48@mfA$%9fD!!X+nOZ*}5bvV3(& z-J=^&EX~#2zd$fPB1t3lA9!a7dJe}hYE8x7S#7g zD6JD7FD>Nr6+9X)A;zC1nhqH^1^bfZ`fgci^{Apfg9EBzxT^7!84;p_!DG3#uj)7CyH)Q);$@|mp#(#m<%mX$H9if(u0xbxwkFF%Z8WVK3?0`k>Ehe zEHaI7aXSf{j zHRgY1m>m)aR4}w*%UxZS{j$EE=FaJ~LW<+(cBT)zlh@AyzLRGAdqZgOCcX;lK*X@n z&M_<-8#qV`FAqgPvQv(!+koRP0^yXV%4Krhv|~${VBSQH#0F4i(Jjrud1d<%9DC&Y*p3h4BYU& zxnI&6UKG-k_HIdS0+wOY*cZGqM_y7t>oJ=Wjhpro!~VxD<%mF;i1+tpYN!0ZkFHx1 zdoUkof+GNY|7>pK=>f7!6TY_U9qAkkWG2pV@Zn^hgd1GPbga4@2a3AvR~0ZkJC{rs z`cfOu3h=x?<<1FAJO@r;z=(hceY5$18L{Q>tW18!&i!)hTW&XpTZD!*il?th7Oow# z*%lcq73{q)Go5^G$sDKihrcw+I@)x~%gbtGSMv~}6zzmx)!58X5dRd7Vn2Qy)!KR) zhPt3G946v?r^hW`kVl4yR%tv-eYZr@8B zp{u$|iODP-^=o&ArhzwtQ!M@eqLPa+7H(>971UrB`GS{nB6NPMEHjX=89nJ*7Oq zqKRU})oshgUv19GrEOH?f_J^*rMCt|{(N|+vS}H8fLP5Fy7x84NwR0X*;;t8qhE|GdcHfqj#XEs1qTPf7vdJ8@$h#_Cyn2v< z7bHEP(l$pw&csmiDblVm15BG+%hLSEqn|APlSH>3azEL*bVA-dlJc=uvwV^E%G32k zRG$sR7n_fzCK}q%;f7F=oY?{`HZ0#3#Z$%K^A&ONmK!sPC4_PRW9748oMk2s!#pBn zQ6^!8JLMa{LToUac-+@F6PqbbU?sv`3U0ClKi!HZ^yEVQUJSl&&lSUg1Y-swK8Xf{ z*G?X5($Ns6G#o2TcYwzIf`Og2PT##)VSoOT4)wg;C;Tj*R0r z2Yi5#I_9SSth5csG$sMOh`10vLy@4vh;8T*4jrk7Ab;#a@Y~|fH~q>DVeGBPDQCRd zv2FqJLYERE$qwF5t2ree<`)$&w_6D>7ExF#WqdSSDE_q9Ah`QMq)x9FS|h-J58fGu zOPZF@pc zWn{&?!M@wLC~mF_ANkDIIq}*ELJc-h=?lrU@9SjvdAIdcnR(Tcxk1_o*)4EtLZ7u0uB2Glcs0SQS-q z1ogNIFu75|5>>r(r3L-+gJl+PqdkHj@x~Qp;qn+Itt!Yg3Q?qtYLI)I;BT2N3xepf zRD(6YA1EQVdw6WqTzzkR;2njgWrgZSbyL=`oR!?nJn)|Z+-oUZ>`_{}zX=&y5EMZA z6Uj$)`DCxZ8xyiSET^=6kB>dwgX$&5XlNvLO>1gw<}YR z;JPuHjCQ&vnZNNEBtdt*f0|#@PB^pyeS`cT^QV*)z=Z$%%6B?_ZIqzphysunItwB5 zxwImOvKLXtQSH&vv-keyL)z0Hjgg3OQtYdr6-jz^yQlfw4t@&kL|uVAYL8gVB=8T} z0b=*3`o&T2ypl0~R6Pi5+>R=#d2&A_XvZ0gm$s~aM~lPg;e5|#0lQ#ePOElw9{Glo zVxHvUj7z~LWOYMROsnGu{6m_(_vbIqtTfmAbHy8ZqX5MV(Qo-CQr||)vM>hz=}RRR zFh0wl3@o1CNQ7B|!o~7wmSapiet+<9&QV&?0?XGRKlSkJ!EDsGH>+k{o!YTpd~1_} z6>3Cj&-~?oNi)6-Cn;o-%m(zp^Aw+~n_bo~b zMr1K+S50J*Kq4rr^>45`pc&?F4_}zgTy=+3UUXBY&&x--^#er`Z@)q;VQ$)qHZj+UgsR;p1YxSW5tyC~0c<%_oBY z@g-24iu2f0KWT~b%YKM7`sJwU=9njsNDV0W#l2fyATR41y!I5(*@s`AtIfb;7Hs|9 zM8<&R@CQL?@&39g9t331j$z~9*mKF{1m6gKr3aS)Aq2B5)-?q)RhFFK+-pPxM|#w} zLifdDM7nf)`%zZCLfTP?zACNis&%<)vkGQ1_Huy%x>H6n_H?6PL&o6hXsipU_jpx! z+N!*__J7t$l%lfQQ2^kH?cJTmL*;UcW#JcXK?fdEH61M^&!6s!GFjnKLrwI(bm#q` za0u6*QpfXSjC+CYw_V{M#n)1w`{mJTSt9@OjZ<2Do9O+~!yWSg?$=%MI}P$zM0hTY zuReDh^GQ5N?Q7@X@%R5$7;tY75O=jGZ|E+W6U!KwG)Tl>7(q|&DOg7N(tt98(8XK{ zH`O!1Yke5Ux6C$Jz+)H>-}Ay^hE#NC@r9sMD)Er#c8 zN$S|0xRB3ny-t+#Xt_=SAdd(g_%{Nd!P3xaim`kGOMwN?bscu8nc`ocGb#X#Gw3f) z<@~;ODTxS6WCF#n=xhAxX~DMgwer0WuHh=nc!?SHCmdFgi{sdz;qLPg#JW;zTa~4k z^$-#bx#39|fjV<6wA|52U_LJ@{P%X|itMc|E3fkdW3SDNyDM!9HJv=FS??n(zZXVP z1YaokUJr)>s+-GcwYI%&zO?Ik1=>>UZgIG~^L*lnqbc)d198GjJ<<14lV6MoFWj z;#26sCiBtrktCb`BTTxgnzX);h9FeD$`8#h_IssNHnVCXJrPU*(|gJ;n-_zd+HG(! zb_gY8dPG#Sh^7@I?Tf{q^y6~WGPclkwY!iVSTk*bU@7ye(uUo#6iGOnqi@7hZ7lPd zg-Y~b$Ve*N^S0{}Lzo-n>qafuoD#ja7prp1lc?(2@}ngGw|Yj95Epui$?OAJl(J(s z9$b*rp3*yQXB6WImBsTx&#p&j^Wjf6l4j)3y$nI^XJvS=u1>9jo9gThiaQt4o(nDM zCq z(+OAqisJd9K$qF2$jMIVF`94k@UB9f7thLYhSZ}-Pi~_MI9-N zG|YF^iG=4CUH-SBm(Yi}8nq`sTSo>xAGiwTFLN+B=plQ#^^=$wP6`2Tb`|>H%S1oN z&PmubbCXE6RB6j)XR8=8W@U7W?K`Z;1LW&}75_eO=6S(r*UC?cy0SxeeSB!Z2>o-O z?BX8cY{Z$xBy&sB)16GME*a3+#>#n`<0PwgBtr3!MEA`lb2|o&A|uaoW~MZ`4yL@q zC;b|!Q>7uCl9O1oBoroG!&{4v=yJ-Fbmv!}UbfGB`re z8(D2NALmo|g(IJ|yF1N`T}3)@oWcWl{D~{LDVx4vX_v!wyo59nCt21|E=>Yf^ehj5 zb@A)s05kl9oYDND9B{Dl)(CXMD#yh8-SN$tVaU-11?KYdLY8#w`!LH)y3^UlT$?nT z*ApktzQZf@pw%;Q@d4|EekN7?=E>F~zeD2hn)eBI&UuJkj02&I3O^vdd?X4yjsGx+ z3`zIB{^LfR_EcuKi(sV^vS-81na`pR>VH0rB#f zraWWLo4kbbt^!zM`kXn*U$J#n&Dgg{&l7wrX;WbmQMR)9-EV0ESzX~o-Q`Hw*>oD* ze2>YvLDf@z#pf_hG^$*~dpy*oURLiu#h`AY2MY6v(dUBjg|Q&yaDRj$q?HrY!wvMZ zCeFKaJHvO2BVOOE^*@TQ&4^y?ZZhjgPgVpS;Ye@}HbknbNL?74G8}B?@$`-< z!7IaVe>d+BLCA@>i}-NlI>-5*4R;p{LgNh%$9)d2^9*iN6Z`u_- zPPj%_?0Kh&SkKKXixbmVT)JpdKDHHNFc_ZfYDgPqbT6c_`KD%Nh$BQ!A@M7F+>C$=g+p6JTz~pO~ zB2gVpu-(kWS*5k@b%3^MWT(gVT~}tjP3Hmtp?Pc@=R%S9`1X zH92vd;^q;(25NM-p_Ez~M}CECOL`^HL~&`&p3pa=GV@%qBNsm?I|H651Mo zLc(q%MJI_G*ffEg18CTDWz+*x@nr=T-+>n0hG6(*J$Ldkt<{wwDw{i|$bL<@-goSp zzyPc4+*UhfPlbsxt8I>@fyp`Q)vI-_N>7WDKwBCPF~dJ(8UUVbR2CZW;&&y19PRXJvY9*4gaJDr0Z^g_xH zChgQ(&hzwm-1&45^><2)XP?wqX~T~`DR%;OG7FbvZgm^-6kyY>M&AsrRHY@4Y+bG! zXN+~44(YXb0EU;3w}%(uhKHMqqtxa55aAl81X=g&gciuxE#=gIdgr_rtMu)oVq{3b z5}~sz4#)qP)Hb&&-fc>Ki@EBIYCi{Pl~zCSTq69882R z`swZnzDIM|FA0~f-p{CN8i>W99%$bt^oFWS2>nLHMQRi4mCELRyVRP1)AHl-LDiJkfhLE6t*>@#S?MjkHkWZvWKzOfLxBPfihkG&<_R%qWwx_9VS|La0gyu9-!U z(JG4rZE-Z_A7rPB+C!$B?>D3h+~4oRfgxIGUir^j)1GQ}SX=)nc{} zkRI(g;sJdI%v3~${jUf|0u> zr59AXk01PN_Vswz zawBPM#z6l`gU>UDaJZD23 zV2Hc@10$FS(olCXK2HCSJ^Cz|jP2?P<;P)qXC{w%rOvSen)z zLZ<(3fm@Jc!+@(yfBbN2JrlVtt3XZ1VjiED*lZ*CoWYSvmj^mroSNeJ#nD1W#txX$ zbqdQ(V;_~z-KK2OJ%v92a-z^x%M6r`CuXgcs(iRs#4`-psHQ4GC1RRzS0ky=N3Z-KgEGG0o3&+-=XeY4nSC#mR$&M29Bv(ML)M)v~WMWM~+>(Hc2ab4i&)5{4%8v>%qQZX3+T zIqE;LCgGY5aEufd+aF0_KD&p7S8LH;NbNK0_<4+uW1XTFQwA|2b*0uFB#i89jW5oJ zsNgUeoUtVl?Rvi?c<Agr<}t( zQ2;wCdo{;0GhsF+W$x0HHwu!z@W6Ex!v@l-SRRIruzjM63Egrs=l1+Dj@hC|O;f!V zsz`!n3f1;R39JCyHB>GDIRbUx=wU4;K8Vh}f2s#@k48EavPGASD@zhM1!F)+jl9j= zM4k{JMy8A~5P5TE61efoYq$TTWHR4i=IHi!dPA(2x}J*_xDoU~hM)gzq0 z51$xy)`c_1g&BHwmpAu)+h>-D2oo$uBs}}<(YH~k!m;0TO0&KEJYXAjDaTWoV-L_v z$Q~AK1!K?Jui54+4wEH^W-Ya~x#u0uy>r9Y9j7qh)hI4v5^O;yG!dQ@XV$M6o4$wE zYS^BzWJ#)-|)JSvQ^mny$k+5Qg_MurVDbRr)+QZ{JL#?2q0eM9Nfu08qiDuk} z4^Y!oBuK1RI#bQ~z;SdjacFwwoKjPKudrEi{$V_X#N96>7NaWt9zcVB`i1Ti!NPB= zFZVm%&S+Dc!twsxN^!n{Y|eqvVFT`;bzjrC4Z1I_8d4GLKGD@I?rYgA66~Aq-%C<9 z>JEFPBYy==%qrLy7q4m{cC9~rZJj7pW!=&%dL}3@VwJi7Z0biNF684?I3(}bu`g19 zOIaM&GqxNibK5*DQ&m9tRfNgLQ}g|170s(WhMkh3q^ZB%2rI!rrCu4`kB;)ahkhkg zLS)8LiZ-f3T2<<0o zsPST}TTo-8u$v|%=}*~XgYA8545t=S7%tQFl%KwwgIIsz4VI`LL91T?(F-+z5=qb0z8!p>4a_FKLX_}d5^66<;`oYyUVPt;zJ zVJyavymKwFC@+`8%YRRG^!B8eR{L14mZ`H6r90c# z(6xf`LqtJI10?!#CcrJ#bMj8b68_LYB zskN4hbL};7$;rdd7kp))#NLx0I{yxp&aHVNGVwx1%+l>}C=yf`X`awoVfk*=&||V4 z6VC;ehC{iL5BZTDL)!Yw-UWr#xoEA-oS(dTLEuBiKyFaTgUw*QRm0&1Va<-dgQp3%imr+i}^WN_9}XatU0D?l8j2~v`BG+zcJ)3kkdG^e3jCK z=@95D+@DZJ7I-{oC>5q+GAKZOs|{FDQ=9S}HvLkVhjNSOhwt_gkULZs)lH&g2jxu4 zf=g=9yBHB$k%Lob9j^LC;1F&4p)3l>wn?1oG?KCgq;(I%`#NM5L{m4cxo{ZkgXf1m9W|3>`;VQ#7d zwn>A$zL-S@P*zZ&e(?DOJ^xIlO4|vp_5Ia+y=2^kxjqeAuP6^Y1!g#XuAq4OBEs{T zGkwi!V(|BEm2~TM5M+pA58VTZy;AYUt#+U?cc=Ys#;Myq&A_;K(E-0JNT>q7gsJ-J zPDU`kzBYC|_RF6*)&5SSI5JcA30E6V02yl${3Vg4vl$)tcfp#MeOkEw5W6xvG-rJuLFuB18${um~v%~;ppt@hw9F=>%;eq64@Ac=zdVWU&Z=szE`NXvI=^i|gU8joG2oB1L zG^eZMXq7HgX4{G84olJk`dvAjpkjdr{r=pf%k0IIf-q3b!slr^QE%8y@cM>_O!>{} zP)xa@U6+_xn%Do95%Y{z7H0TV<*@ zm_Ge6rTW`U+~|JuSUf3N71D6GRIQA0{t3?>{Uk@96n&1=siT34$7ToJu8xztFRh+? z%OvwZ4FYMA=s~Hqd5M&S`gUB$>|b)Hrai~tzrz|H=9&-M-;7V+PsszHW(gPB^KA`` z+EvOK8#}vjf!qbWjZO-u`1QTHTVs^}R>|h&?aWce9iJI1vBO$qe@FO0fD@<|IreFx zA3Uq6Ojsx{VFWr$EEq?SV;jO@`h=m&k|u3i-NKuOpfBR4l<6+D`- zT_4TLvH?lM+7C_)r2t)v4;43v&f#unHRbr%!0%_?#gHbUGTcpPGvM_i@fSiS^@g%?`hB zUj_;!>qw7PRncX7Y@(oT6On-#L*?lM8dvYk5c>fO%o#u5Iy_w8vRd#sT77;wtv!P#c6?<6TK z;OR0~u)xPSXJTGVvbGPJnLc%KCMQHX*eWq2Uao?lIVxz6Pbg^`uaQ45d)gp>M@vy= zZZl0wS{m}p+9`{3kTCc}$=7LsNr{Pmg`7MjI^##`>ysj8sg*1e2Ia|x21gMejgKvV z+NMh(dR>6iL4@e47#ga9~fG8^xusG}%vGk_}e!XOf)Bn2%hl z*+Veas7*1LUID{O9u1%h4Nh4~AUO|K#%fdM`8}<4J70QX(+!IpLa_Y2&eSG+Usy|wLxxPT5dM(9)73#%~- zwdiNC*cqXX+m^Zz`pUwz_jtyEDiQ{>B~LGY$z{OLxSefa&CLq z6izlDie#>3?rkpbYO zb0$>;tcT6J%jDeV$B)Br|Gl(yF|WH7D9<0e%A%)fKD6paN3V##TQ2(483Q+Iz?Kv|l_~p6C}KvT@-#k(czYO0083!bmuQs$MhPcCn)>awThiEw zK(!!EY_CL2yZ#*ERuilL^?2wm^CV-tMsw^4NZfv=5^DvVnm_efzkL+VR0PaP-#9gq zOX~BrNM0Xhdadvv$sB-zQ$5lW?~$zE573m!iTnf#m?#uPFY+)y9!~L%$qr!bGfXZ` z)4rw2>YTSFHmjB4T|?#gjgnM8%(Af@{%kr0$m!aO9qpH})2-oObbeVU5<2h2X6s02 zl@yQaDquaB?URuoy%Tvxwzig>j9~F7NP-zQhzb5cw>Rg~UvBVTEy)+af*)P>fgyTlR(RGlmF~Zndxa zr;_82^Yk2yh;n>sd93NM8uvCT8#r&h(za<(E zuY`*~`JPq0gGd*{@>#ixT)jQD`V&P^CSf>$dS6X(-Q+FiGTk2=4WLcbUX0)4^~q7K>@(o_J}(6RUv?um1g2b4b@-hP2D%<_)>LGKNtD zd%h}FMJ!~`P2S%aL&nj)(7B2W`Lvq8d+`{gU}YR0wGUfl`r_Fq>b(@DFqW#Nw>-9QFNI_QAHGw(EG5MYUo%vwgR(rzsc{`IYo0BJiW<(*t`l^_6mxCFSXcZNys5o z*&08B%(I7HpI0+;GM?4(M^z70PqkW!EEs|C<@x!3b~ zE;K)ttHD|XWUZvH_h>dcbJ44<^pxgQ!}x0Ue`a5ng@Wv7DQ}RHSp;2m zUQnlw4$-I?CyP(c-OWKC?h(GVX{jH-wCsOMJn15JSiN-nGx?oidT4FJA2VToLGn6! zl|;T{D!48n2{UE2gNK)iGfeGFuSpklUb3WX77hLJ3ST=M2}efOuf$y0FlF70uib&P zcBu_zS#uJFz!8L)R*!Y06Y!0F%0;*?_;rqCgFHUEt>Ch0A*DjjWaQ56NfXoir2BMw zMr{>&$yP5ZT2!#e`MLalEZYa#u8vEA2Ksd1T?g1*Omqh(m^hCqS170JFHQan907`# z^GS@-Jpc=bOd61o+Dy5rv#|K*hO33z8+74p@ zNA6EkWo1@^st5kB_TLKkh&E!M6J@^w?``iF_G6Mr#k9yzUBPaP_hwvyNU265!GQIrF+5XlhY0R z8@NVDW5jz*?A+LK2zJHn*>$Ge6^@)}@=)7U^9e#$qwjWO;G=wD_#Xkf8fn@ z2uRb@E(iW&m}2@+`<_>Ir_X1Il@*HL)6bT;n_f3({{S5g+F(*93wPp+GOfm|u{*R{ zUm;4>ez^KT);C7oks>hk8|P4y&(xv{p$%0Q)2EdAHgSX7)~v(Bm2`%ki8pSwL#Nr9 z)F4)bNjC1!0(Sue9L;+L&p}Di9`|EIs=g94|1B`=PPtZZD%ryfzHxcUhBkrs(?s<5!6n=4HLL`@>V=JdPYV3bwL~S1bt#H4M zzeXmCVr!~IGgr;&U(?USqFz{!2P3L68CZHWiu0$+4sFH1^~b z4B50>ff}PvS-AP-*xjao70MfMcm~18txJ$#|KRY<6~Y^uVE^3@U{OJF>&t|NaE=J; z;2_lmn>4x}Snu~b7`ucG4UaYleg<>WUrT)kkC$tT?s2S;|M?SmZi-n?r9;SFAODgm z?$56!tOKIeLpS;r-JkZxLrvdHgD##B#-~bfm@oI^7Z^02J@QAVk6^goy7GC$KadDk zra>o}J={xyn~|vzidjA)ep=ZU%gTN6J(Mn!Uv=_OC$a1j8Qn9D4f_n--5`E%^5$)rk`(n}4t_cJIj< zsNN=+u39ig0`x_(&ho-b>pHH~UNvLCA9O79Zy@Gi9(Stc?Bex?uA-uKqO)1*^13lf z4zog7>OPau#c=KJ^%~taYnIK^oe{2as%j=$L&b%{3ql8ovaGTekg4)$L&OIp zyg%Db7Z!hr<$~^&*VC=;1cDa4p8UayxEx{*L3%Gs)OCmNE4 zv2S*7guLe^tITtvHP1Ul(j_!0KY>$m=>~$=Rx2oiFxoQ`z&D9WqDvIaE_X9OdOlCl zo(}atQcvVz9GjgC&Vl5kXfS;vR$8bZtJ4eUr#IQ zfF9*Wg9{QH&?~tfXSb_m`Ttq~^VT<~x92G7?I9H(J$~Q8^5od+$_ zx@jNhvwDx^azn;V&aNmi5%E)wj#^VUUe4IDW>a6KJz#Xw!pM|S>P5CGyBGXxY|8PAqVPC#uauP`&tBf{njA5PVsY3qy+0*&idoK? zh+HV=c8=JKgEAWJVIDVPgA%C`cXA6FwqFpkS2RcqDEo2kF`;KdXH63kdx6K|>MEe? zSr|la`kc~!hu%HL9gu)#-8 zTBC0%#Bq{ex8ya081oAu^WV-MGjznGy?!k!oL6$??>GS(XAMW+X%LXAqv-A(e!eq< z3k-GAGN@MX%mI4U8#H?tS25sFv!9u^ws~})XLW5OU~)t~9dF$o*F}%DJ$fQ#xx{p| zEBRSe3#@tP`Mu*sK<`GQzaFlr30wWVrT^h7(i9Ocsg#_K%g@P)afgE#au?PszLAHL zozNLuMUqjUfbYN2$W&$sUn_S%oL^4tBV{YP??st;+HR%>pvy|xwr@nz04C`5&d25& zo?uCSqdAB&TvL~D8a9TiCyP@8O-sFE8-Ql}>E6(TF$;b5&tVD2yZORJue+A!_~IeD zb{F5Qxn1K9P@iE;uc@7M=25SmGzET;GF+0)cjK6#1it#2AD$hxkF@QQVQPG?Z`-<^ zh6I)Tjh_9}E;hV5u#HWcozo*^C>nm=^eP6&21aI0$DQ6Vl9E{cD- z_UF&`OA?@%BNarS(7WE_W7hagZnGm0Ogn1nI+phDCkmSS+>3$y?{E3#Yf@N5RZO`GG*!CH>EQXgmeNBhL$nqJE_~|TZmLl*}5Qp@oqtq;f zO`Ra;(6uhe< z+Y*yN&F2wE7l(|4DRWvbWDoxSxhEJYX6A6TTHTLD$KQVL3 zkQ_t_BD`?_R$u$(DYzz$m6j-C>v$B`A`(wbKI`YqHL;rwSbK>x6RO} zu0li;oryj}mFDS&?84 z@lJx3j)u^-L}j^7ITy)dYRravd1l+a4|n)W$!GYwYI(?K-aOk1zb}H4u5b?6uiwxu zGi?pB8*PutNvDcmLq#0VMgfYntPv3K;wiRoG5HP8wDUf%0AY+)bmcJDJRauoCoYO6 zW*r&ZpI3GiSsqvp*xIpE`07 zGnC$8!uJl3JpXZRlnz3#C23L4{i|I>WRFvo>8lz(%A!h|*xJ-@OUm#MAK~h&OSJTY zht1>-cfg<8xzR4L+{mVBKEwG-3#?(V5?lPM%vd-(J^9bYNEZm#-v@}wOR$*%Zk~vot8>{xJ3HJA{1)0kBr^ySB zy8om)<(9*%INE)0o8+qvu4%J=r28@?XHz5kh-aDbN$3O3^I?xEau5!r?XfL84xD)} z0;QCaiml4K!k1~a27Q*FEf90enPy5ND7)4)g(U%*^(i6goFuXh_C#<8}1v#=~R>y?Q9twTwW0novB#uZcn2o z4b30*vPNls_l8fczEwp>M; zI#F+s$SRKHz)Ohz#I3Rtjf+-1W_H>D-VT;cCDS+F<_aSJPSHS zKR>o{hM(2>yx9-IYhBKXHn$5~$%1`UHy^*`2fu|3a zBWa#;JX?AG(2V-j&g05KQEKy4qYa)1Ymc04{!_hz$v78(c;99}u6C)|)34#~%w3F0 zP#s7h_49*Q&Z?HxNVspms{evUQXed?2d>j9r>DR7eU; zIMopZ>AHDW^YFE6?&dk%Bw>V6vY;5xmttiun~@G$a!2zAZ0d=knX0Nqa=!Mf zYJ{FUFOeU#T88YR{??$eq$DHD$L|G`?w>+qXLS-Z*c!XC<;RmD5TGPfm@GOt&YvJ+qy&KOw9nKS_7C zZc9SSuOH)rgkLlHG>yM#YFe97*0!lO=B@-vmWIbOmnzP|MZO;EjkqVwh`ohFQ(MRt zycx^gUQC{fcF4V+65GOylYfLN(D?GU9kcl5_;Y(AGXfrQX9%aG zV`y7ATGR94ZAzl~blD?oVJXAw!!}~AH9x`v{%LjO67ZRjADD~8Yj0UW-RNE-{hfh8%b-cP3I%DI1YL^EXaB+Z zVYGFE_WC`o#OziXEagt)KfJV!21gnJiQxv5XCDV}dO?xNm=*pU26c-=fW_Rz-}=)O z6m1VQ=bWvnYi(WZL{)8g-)+(~3xhiWa>cD{6440pA_#A2T~BHz;reldIA5O8wj5s` z7ZGqMMu?*gMHT&<5R&d`x zqDK9xYGvQ+zM&!{Ithvs_!tRVOJ;^{2#NbyjZ49opj0{d?I{de$~<@3xW#D^j;!^7 z(yQB9-ZL>sHhUioB;1A8mnE2`Do`{N&=ywK^Hga4lY#Wh-z>W0e$J{!-Hu^otFg5g zAoDUvu*T_!>F8cPJoTi#vBRgDXP06V_537#TtCesy`V0}Yua)1xUJ;;n@m<7BR?Jr z+Ts8=($*z&{mp?ty=kxri9S7#^Sq2nsN$qz5Mt}Ko+{o_$Gonb;=S5=w#Yn}+c+6K zC{>ttn z4mIZiW3LKVL!~0k2nEkbNv7RJ-qj;(cE1j@%&3GDjI4cW3(g+8M`ByhZ^8`ykal}Q zASt6P>Ma?NbM}RtUi{lfHk0AjLb{0$7AaQzGMHL#0Wd$77((e{+%-r3RoMHVssuFL zmP1f-o_BNtE}noa%D=B5^AxJ{riFv#rPeIs!*-30bPkUX=^Nl=&#AugFPV^aVgvC` zl81!p=DGRsTE*udh1W|Jg%@^+e-H8QPGJxC$Tc1E#8Apsb`w|FowkKWATaI}{x12P z{>-@~5Vk|vnxWwPXEg{-X8eTyMg+ZX0YjOY>dD(0{AZ#IUgq5(F}_0{(DLi8Jzbvv zGbnRouo4TJIl;t8(^`|VNQdqMbbw&I$|UoG)=lpR^9ze2;hO>1kq`H*+l-bK}i zdNS1yvzG)UOK6FG+S4bSgj{-?- z1^0&oCoxIhOpfJQcFh{&3#=C6k>N)&wLj~JwtvjOe8QO|jU0Zi%b(c8+nAGk+s`YB zapC%=#R1K_{P*E{81XcI-mi@vD9MgqYKG5OkOne1BNg@Z@vHOmjE9zqLeNCqW3JW1 za&w+NZKP7$>Y{+C$M6FnEG_mQ%(6)rJ;-3vr5HwS$%lM%xS>V0moW2qE=W?H86EPYnL9)XFwh2q z0w3px0CS@n4&H3rLY-#`Is2^icm=;G42noQ+^bYVNMTEJ^Npfu!cp8xok=K+B#jh*EzBjH#q%ZvI%#E+TrJrWaO2#H(nVxaV+F5@$aOo{2FsJsIPWRR&eI8*A(OXzGXc5>j1M##qpvOIfaTdT z-=FqH&y=^fBUbzv<2|0rebw5xGIjHHc!Qe#r`d9h+BSGTy}7gXhYc5E_KwkZPUqjG z`;gWx$%&#K2e;a@EqiE8#>Rqrr+8vXza1*aU%Zq4h8U#hGOB<8&79r|j<98gBlrI$ zJ>(#F_h_l#@S2x^Yy#fjtJxt`qpOr;r$_-qkXgE4>sZ`}ODOC!6*iE*ZYXJf?%AFr z{eU>6QINOt6>6Ae56LP6{~A##q|>tt_Pr#E#Bz0_y{fa$(`Yz3z;q^S%r=AAlJ-ZII05E;0>g zPSJ4)pltaBJhbNq$xl*FSf?4EH`MnZmZxDFeC-NXHHF_^uE zN4`{U%Zm~Uq%vk<7aoCpxQ6XeV^7Y=Sk8cr+PNR4tIcoeRgq`kFs)K&$1l%@wkMG7 z+Bhztpp|^R)i3gJ9&aW)cm(%DqzhRK=z-;At{x)?fkf<6jek7N) zUJHG&TZKPu| zeN+pceC_e`!)C?~+{hjA5_)arozoZb2mHa56))P8J3~5ph>)s)8*(AK2k&*!3Sr`r z+rC;X*PR@G4c>X>1rag8iN(}5}&fT)DHt=H}tI}y9w2^}$=85&< zouG&JANzIi5J5!&RmmdsG8(*AZKw0ivD7GJ1Mu$I$SFYdU5rydw(o5{h$+HfZ{iSS(XL96NFK^{~)PD zAn$0uTqk&RseNM16!`tefFNz|i&4Wa$rn0*7Ez|`=1kem`d=an$}T|Hw^w0Qlh6A+ zQdTff$Py-i>R(h<7^Fc8)n=11$%*fav&(}1I=c&|&#C##X?K5p_bTpfX(sQDf}qO~ zrfrn44?Wx#EGL9)pM$WV&J65LQ9MKs=D@{`%6-j+Q7tjAzY8Qqj9c#J*PAj4$~_a| zTI}Bbsm4yHLL?|}_&Oj^0ki#G5%M0Erx|iAxognmi`V=5-WlVM!8D7Q);H1M57e$N zcI7!kxGy4hB01gy#BjA0b3s9C=%h%cZv+$uxCb-5J{|ZqwQc;ioSm;-9KMjBEb9{v z6Za>yAA*vRyOSR1RauUNsiu~Q_nZ%M!iF-z#rOts6 zS*FcohwyE=Ne3ryU&>G_N8z*%-WACY z%G(Df_tz9d4uXPwGQ-}j3am-U%O~`vHYdWFOHeNE&Rs1013J~tH^Lfnr$7IxRk0P% zmYK=>{EXbwPRB0VlUPo&9}9|NZ#=q*mm1*f?kdT5C1#V(3hOic`IauV&#*RtQ|6I> zI6lP3-{eIu6;v7dF|$NmKo-=8t}$NpkTslBMx`z_R_~H-It3VM+uSK!G3OfaN^11r zMS(Ry1GAKa`UMA}?d8Fb zu({CXcu;qXEWyII|5N2kgoNzDmCW#bKCYHYKgQUGvf6UaF8QPskFPo@nZ`=i%SR0& zgBrxIeB_8wh%yCU-jXbpr1*m?;_B0dt-7SK;nUo4bl22F= zNt5;A-;U!|g7E1g{EHyXdkxDtNCbhF0Y2n*>sN`rrh-nV(8XuWK+{SE8NI5gBhoL= zVRiVPlKW;HZO1jh*+lbF^9;=5n3C81j|(PcXXszAD24d{6Tt3`g?t6V+I@U5ay&Y$ zU~|x8IoQ?iP}Z84vbbdF3YiGUD7KbwuSR10E9QmNqpx2jzRMObl+KpRfo zXd>D!77hEBvH&1O=wM=iZMt#q?Bgx8)#7dhi!p~DeKe;U7J)g69Z(@jgh=WsV0J)F`2118(+#8BeI1B8S_*yS`l0aY>OmLRFVTD^}2u!c8k4okNugAzfW;f z283==Ba&KAc?M45lmhqW?k&vxR1g}aYgY$jzat|#L1L;m>ECI*O)q*Payeac<|Ya2 zVw@9s=M+e%Foefwm1nThcVPW&$26rvNr?;4p)0F7Hf*QS6w318WfE`x%D#L6Qf6I9 z-{h^pRd_D>*`kyLPFCK6DKOYy_=G{9l#jb=}r7OV5c}heznY9BXu_K5!x2 ziJR$k375~r6Rkxm^>RZ#Jr`PG^muH^*=jG=!I0mGxp>Gb^6NggIhd!0O06icHQ(?J zui)~2pKo2jb)wA#zU?HFHTs%SP38?sCUxc^&CnuQQa}l=C^ZOK!`;Nz#8=|4rgO;; z?3=^4ei##z=5I#6I%J#Lkf|c+iD{H|UcuNG)aYSK_^F=~|FWz?0OTsDB}x&mCJmKt zFBah4O=2JMvuv1c-o_RPk{pE7D=ea_agL3)sP~LD8QP>LMs+P<_BOPAxHG-tJgZPm z-;!iJ7%`D0*Z~%Rl4uZC#&Sj_6*)7zWj8sVUj#5Onw7U!5X%mo~ZCN>qgTe$92O1r@t+}@HMx{W?<)mLU8j(=75nEi6(Ra4XRLf`!c zbUvtRBnfD#yUyh2HWdtdjtFkXevvr&_re5ELP(QWx` zo{n`ocvWvaBdFcpyZ0+?9JP|pe~jCI&L*W6OS+|X8$7>cf2O-9D za?W+J{ZsoKbO12H+o%c-9QGW(X92~fsc0H75IfFwk7h>64HM7)9@^69_o&b1q>YH| z)>RjYY2r1Xayw6EEUf29_YnO3!=)QPX-^_pmK#Jc`KqGt;rV=$B-h9LJ_p?^aj*e! zphvPaKqV6;rYGsJ`Jh0=CjB{BZU&-Vs9Q8*GFBpvBt|-nyjuAkdOE;+=PaJsRX5M0 z;V``R;TQ%o&)mWr(k+WrQMUc$&#y=pRr*-AkiKTPXP%>IPrt86@SfH@YpzMw-AEfT zY9h@*1(_b1u3y6bPq2>KCkd{7Wj$TeGIV)*5cYYp2nZ@b0zXA@y&7={IA4{L_;)Tz+@W&#Ttfzad*}=E}k|`;fPa#4(hC{|(W}%?<8NMLzRnw!r?BUE> zVh~14JXs6X+G0b|?dvF4N<$+iA~t{8)q@5V#u89+S@Y)3VL58eF9T;K@<^j@`|ds$ zx?wAaPmte=)7#{9)f9SzLaWdEn;>_Ls_x}86S=EW?AVovaCDJ*pl|o*su_NYbI5Mf z!J1-2><;d~LlP@=H8^cUqZjS68(^^jm5EG|C0<>)9JvlZ-be;`0kn+IBep-8V6j+^7rJ9xC(zSZBamep>3d@CO^~BCr377!Gt>h z)!1U&PqUJiM@MFb0r!sHg)}Chkse#NoDO*btt>DGM8EF_v>@wqK^yw)`Oj8Kpb~%U4jQ&ZVrLnp!bSv(gYWuUjk)Zk&-KBU~Y+?}>(a&n3aA zGhg&~FZ8UpDx+<2J}-Yk{VQ+R!xUi4p+#z12F-zTS`6K!0MwN#e||4e43i5AU9b0@3XB_+x8W= zoXpV^_PeK2|<8Yjv2a1r%%axDqCiIZvO0k{SUJnXM!xt19VSqmvX7?AN(uQ z#Nex~?7x1XfH||*T6C@@#PED26+#TxHuW=9Ehg2w+4iK#4CX@mY)Xu;nXGSR zR49*D%?*o1n6*IBp)aJLAFCLJ!-Ywu_SZksAnFey{Ds%>{fH#RXUiuc8JHFwRE2ib z2@>p(l*aEqJlLQ7nf~c+3E&`C2B%FW%Zkd8GKwOdZgaAhW2|EP|VS4iGr5h=1oC7?FWbh;DTbka*R2n z-$U!vJxR6uuhc6BRf4{jLw>ldZD)refzC(EWpNPD^w0*=?NA>DC6%qh>*$As;toDV z#NaQ$7Q%O=E(V+=Rd>u{y6_zCY9D)On%J4d0KKM9Ais)7M7GxqQd_Ria3ZPRe5Qp0 z_@{SM`2Qqojfnqc4$U~}`>-P!V8hto_lKU6o1^Ym+QD_%`}#sb3O+qa8a6y&S`9s3 zE-x+eKD*(*?hlle<13}FcB&vk-sz@#Ciiu>J~g7R1Roz&%yAH0532Ytf)B50tcWqn z)v~#iUFrs>ec!`8-TGc|$anz67kj+-II>Cg-Ull1iapwf%N_>ZJrc_3_(HP)C;|Y8 zw?25@eY))7L?VX4N%<@wEs!$pD%n)=I{)buw?tY_ZwQKpb6VDoXuJx?W+ehKu=H&ZtCo8?8rLqU@nShg{bso%pK)Ph$^WAG)c5Orq0v)8xnSHYf1LS4tbF80y`hhp zpSxr4##jtjes4-(TsNA1)q=KSszGDnIpt0{Ein8c$LU%C0RqsToMi_N4de)sW4!q! z)F~7)d6Gs9z(CwzRGJl{PRcge$0I~T$ZcD}IV6^W+?FdeH&oB41GSaE z==#7nT=||R`rGLi4U9GAX1NY}pI#yi5FcI-YgyEr#QTDm|N#8K@&(q4mM=%2t?d_SGY?ZSx zI{)CuD6#LaPWE8d?}{hnd8w!b>yR>Z<92fm!stz&ND^#2=w;#e?@XOG#qyIU2Y~qY zP&}4!XTW{J<*J4VV2Oucb0QjcnNm}%qCTH6hdUMyISkK6xqERaFkw5!1P$)g_Cma>Zcin<*)(<(K zL>!9!tc!#N&Uc@1qY9%#o9g1$Z`1rr$t<)F{xeWgN{9HOb+EYHg&6Oxk`!cvGlnag zhxvo;xuje-Q&2>%2#1ig-j*YW{{|&gXC{jQULP~=edp^4C^Aid6Jk@LX$h>*;I^Wq zfS&7vCtJ6sHjwD|~`lp~M?6 zoNY=>AKN3`n(2)c=tpS3A~XU?D1lSHA8R48 z$`y2DsS}2S-ml^`%zwi5-&@= zW_Y0q(t{lDHMW-sRD{OiV0F>dK8N?iedp0_zy!i%dp&Psf?utzXdUbFoG^4xHq^pXFI@ee*n$b&8r4xRrAr^`f~CJxwRTpYKsFtg$W zR9|*`QPBM>>w~&o59^)}ZL7^Wa|Su^&>UJ5#Y_|-t?sO2(ZMYv61n_!;u1P0dA3oy zzx;#zy8)JQ+YSA#DPqOcnQp7dY<4(C4t~9@fVAsam>O% z&6N^R^6uKlH3ET)4?MD*wWFr)wCxAo(}g^5??MoI8sum})IMQY9_)4zB9e|o>}*t5 zIw9pdsSIAA(Vnl6gnm&BRjKlAJ`3IJ&o>s+CGzeP+-r~LDV2Y0F+PcH^C*AwAa-n_4RG4tj+8g_BH)skjMt!eZRJ?g9*GE*|Ls^*?3YW6giGDzz6?A z*pZ~k9a8BRInv=|x|cncd$n8a(LRIE7?)4nuQsldU_i`x950GpqWeV)RxMFB{4y0Q zuXbzCDi%CJ`X;k??W-R1&(Z68tGJ=fXYjR~H`+>Hb+0fyZh2hFuvgjf=24n|k#q!`OZ_e-ZR>0oDF8 zj&~42P1p2Q?Q&)hm*>mJwEuTfKSx?E9Zk|WJG~6DhxN3TaSwHVJ&Z4bth9^^G~tCW zZ53OVSEat8d|LEEBXlaxh8SG>@-{M#-LIm++Wil^9UeA+89M8E-twTKK7rKut%pI# z>74)0k}wNByKBIMP>17C5J_=V`tVp>KYT{DRjWB7(`2H`wT? zo1ycw&9TH%^j7^Rcd1`@X>Kw=h^{wq3AZ?aaFVd4zNo6BcCjEo19qv9uaOTU=SCDO zLd+F2A!n90<$tkxfjnr|rA}hf-f806zn`FPpZ9IX5GhN*R?fGo{H0_m6TbvXH8)dV z)PKuaBP+q~c|3XDTMcx`N}dB-t?x-T!{_dA0wP#{l;;x(9as-{5E*jlW=sepA?(gh zqLp^7G|yujPBL_R@pxY_oXatp{5%B4)oK{H(veL(CGRdC!{{({7w8Enphc{FZ|uR# zyvz0sbl29n<+&#D6)EQCKqH9L&aH`=3#!il2ZO zLVHSsqK(s*eF>ls`{I1*8KLZ7`9wx$nP;&EuqH!jZ>J&XS0O5F$WijzEuI- zm=nSJX8Lk&j|vkkvII3Cop&mzA&ncyPMi29Vwq)NHWo$*RDuB{QnUX#2e9mU+na3W zT|dtm!?3PHkHty{i+;Fc2eV;lzbIOl>d+i)U3jaWcDr^;Eu!cFGDPt~CFN#cgvQcH zbi9N6zfJyUY?jOI6Nc%*Z;WO{<$2t|at;~9nT*B*g=&@#wj#@4ED67Gs*I(rL6;!c zn4om;*Am1hK+!|^j}`zmN4GQi2$Yn~p5ra?KgEQ|P(Ip`lKYaFFTNN}UP}SGnuIqD z?5^dR)oX`b1-_M_Ws-NNoJSo);B1NFNP#9sx9Ug8WC@vi{8cNndKX%qXsGHuqBE8y2W6VljOSm~hKN4;u6RqYVz$-DtI|V!4 zN8cwZ&F50F{#*vw`kP#WbzWy|NrddF(`CYdhH&WU?LYUZQ*nS(v~CUYmI5wx>a+ZOmcP{VrtzG0MGUm>5N35?#kx)Ylz_wt zUUBnsE&?O5_VIO4$*2G4#6(C70QMBe2gW;Ta~PQ9y_s@}HeKq-tHsH!VVn8>b!uvS&cWZt*K`DW58$<@^67+b~FgP z=U-8CRqxiz1N>Vttq#w0XNsr@<-GFezB;%)Ew6= zqYeg1MRYnu0rWtu8FEaB*;9w<`hA4H86fNYchvf7jhNvb% zo4q{9{NTVPPN)K-`)f}={j!4-4)?yRi7%v;NYT%x zOtI>rw3!nUI<`4XfihOYqv4|N446nUCX*e5=c3SH%>lN{wl@1Z7J7~*oyYs&^Z%hD zQ$$fsz6`0&lE0g)RAI}=BpBG20~0xrWe5=^9G@ft4Mj^@?lyjFITi-#8u=uCWt<32 zY!&u%S_lOY2oRpa2W3`#2Fb+&7X*KZtZF9>x4!Rnmdcjfd;kz)*)-ICCZWuv92?#V>d(&1w10RFGNw+xDN*}_JHLx5lb z0t63k!5t<+gF~?3p26Ln1b26LcZb0OB*@?p9D=(IZr_mXz0Y^fuDZW()xA{L)KF7z z+j`cs*4w?h*Quti+Oc3topna_{%-BPtl`7m``cT}maYDQ1NxR)ar57uCVA?#3Jyqr z-x=?KqWf?N94XN{`?a1XW%0)#_h{YJex>=uwh~xH^qL81Q0UQ8tiryVxcsu)svC^W zhX`{C@3zjt;d}K$m)_~6$718FyC9dFYeZ5M#SSeled8{*Tu49(DcEbHxL|tq^_sE- z9>upfsL^gCN-xpVL_=9nZ}ew%OxS`STQ@Uf9G+p+9=s4+%;10-C*eJg=GBd~?;Mik z=%}`94I!uC_^ozy(qxqI%gz1Vql8?Jn?!V97%YV|i1o&?wic)Vd&g1|Pw^lHY$3U?hN5cUN6Le>?ldh1f z*Mn!yzg>c(FgL4ykO@Pj0Uktz`qQ1G_wY{OO}!P~Hm1c>woBhEcu1EH38c`TKD(Te?64V#Qf!dG=y&3nX7>F< zC-Pj?GFT8_KAGv7S2fSLV{xg{y4Ko+BUD^>8aNc)Q2wqQGz&ksBuUVB#^e2NR(84C zq1BH41=Kx;)-6LGh2>YN-9rXLC$pMGJ>}UcRY)3qb1bLcN4WGwYSRZn6ML{4>O_Hlv*UQG3_6XC*5DhawK!SQJqhoK!6XuE53jwJxToG_N?tA^XZ@q9cvrnmn& zqWL0d-R3n80&Meay(|OGV41)`c8Y`M@)F5H#XiUJ?<9c+@?-}Hy4uz8$Y5+2DJ8Y< zrEi8(HTLD-8Mq}4TswKzwX&f_VjO*a(8gh>g*=CvftT>gIJk^#c5mQ3bEXtx61}l9 z=h=<%!z=S*5$8I*c$U|QRXTLg2f3BMko&WXyAzLH5Ds%|QZ5E&sXB3>+%i72Duq2d z`6vQMOFwNzj1E0>gthd_cZr-uXeXvIz3{7dLOpiEikx_}`J9yC~3+Da7YSa4sJ2L;%{u0?7{J75X zqTNG~KDCKsvPPJb^SNq{_ALWeotf0fpK%0pi&Af(1|eBA%lqEkwPoABF@fb*u6 z-PH{V!8P}Jw946tq{iG2I^#Tnw4$JU3Y*&I2IQ5NCi+ZctsE@X)t}y0!n1Umb#ARR$@|Yd9nor=vg@{JA+bAjmDN?9AU*U z=yX7jV|)e7p@Im1(vM2Hx1?_oq;}-!R?fcH zvnv1qoM`d!rUKP&D#dwpJn~o!;m!^hrOXR$0`<3s{nmoSk#4Age_5Tul2QNUl!18l}H*Gv=El{#L^HQOoiFww%L_GmN+ssh-Y5l>(QV1p9bBy&52yVhNRA;!AfoO7HNM@rn8H z;Ilkx!S-`QUZFZ-5}2tKJe`J9nY~A;tlaSFgpvIFHlz}Ud0jyW)=rLx0DhuhILhsB zB;ow-AHzXRC0lyPOqlCV%fL%fzIPXk=|hUi*^4SH%vp}_o}sObOWE8{D^tM7c_ zi>9f_*z__y2a#J`!3yPKig6g|z5Ir$FtK6d?#y>|Vfbk4K{?FRS6S;mya`t0%xaviTLd8+R7U=_zf(;fHSK3j zFQm-4{(Z0%gE+V2a6wn)d;LA4UZ2Gm-$*EE6e+V3*bEv{i*jHsiynIGl%Y=RDO}?+ zd#Vl(h;CBfcD|#B@~2s#(?O-EyK1c!=h?KIi##I?r*?wkc8=5=%9u{$zCg{7#MNRw zlL>II?LSG33q|~S(X^2uD@RyXb}h0&lQb5NB%T{hx+~y_s&Bc!YS-N}xoE#adm?Hq ztXHb7&8)32VVDTMve1xz;>K|Y-2<^}R~3Ykt>zp)s&TsHxMuu7vg*)7*^DXJ#=U^4 zH}@7RWyjG@a!5(H-C4Rp*`X@hXDvV;Wz#KLJvFw<=PA@R$7A>oM2*6Lh=RR?WYNb_eTvG z!VzahW5mDp#Rtc?4&MXkUP1qI%rkm8ai?>v1g1F3>ibS(uqAZ-1mXZsDlzB@5S8s8 zM^HPQkOId8!K$vz)^I>EGSU@|O2WxG!DXrf&PBR6I`k3B1G zfL>E^5u6O~_GEiuFpJ(pDSX)lv7rXFIl{N-rT0H>6=9djkR*4W-Z3=cZxj%gkW6Zr z8%Hohc~7@~o9*yZ2AP_HTe(+m34&bjtCz)2%)fYbZe0_-pbMGiEk~-*RFP6iA67A* z#*|0v3&Hm=e9iQ6D1nA$Sz*fPlQ$+8`6YE^q*Z?(x&zA+G+rD)rVl?^s5-GP*_khz zAXm>s{?5}#BwP$ZT98@!-en)3fs&wu)i@zb{Pn2h_-YZJVqIq>hlpOOOIj!pqMzP4 z%CzIU42GF88k2-eJ}*WJw+6Zu{mz4Z@&xQ7iF6Qdtq;@h$fVLnY}=P6nSKCeA)ezW zQ92^O;agF%8lF4UbIsXk5dv$oRG7_FnqJG6CfDWxV{%{`Mb#haU^SZ!2#*G$8S}PX zS(rNjH9mR1Lowt)t+wlI;iu`8RE0HVx_V*yld5Dvtd-yRfXmHxDq+A!xonKsO@!v= zJZmYa;kC*f5DnVs5`4ET<;y|a;!2|$(G(~C88FE^HQ7E+@K>Ox*sZjhl<7&aWx=P# ztp<$ANG6!u_aV&cUw0MZ%06_~i?WdU6Ea-<25kYhq7T@hRzkaL+UCpx-QokBo`2h`zP2^{k)1=+w-) z$MtnlijFtu$zY{LW4uF2?SzIbO5Rlu`-#1h*l|tpF(<6%k8ZB&dBs-po89}cpmAvH zaYXi;t_<|`ty<2&4rWL!Xc04t9+`U|y5J{?4to*kNN`e0;wyvjOLV9^FH(P1!IKJy zHDDLAQ%PQj+PI4(MS^nxO0$1O#f#D;8>&(nVEhYI4&lJMwUt&p=(zS1ell3kV4rU4 zoLPOzxKbzLJn~80q3ZiwsDsFhxGEIrY^Ec(c>qx?L0T3ZMOLIYxj)`b^aoWMRppq> zhA-YAAenPKzrEx9da?b}iCq1Qj##!dM>IVAWTM(bAXnli*J^jlDi!QB0yLN>b4ve| z(%0zSub$g)e(`lcbfof_ZrG!`F%h6eWW==d-EYU0)l_8;!_C z#=lpMOEj`;YNS@lW8V=a4daA9&_`ozW}sdSghk2Nx^w1(X0~N`3g`>j{gf)Wy&(bUwV#f?efEw z>2+g-^@IahkaVI=LSrvDbP@077i2XEh^gqBAVi3bk;&oe^vpciNrwk}bjxMBnbIWaVdo1ruN5SH7CwM$`#pv|J!NbRL#?9T#nafM z;+{-+{#n|KFT+WN3xb$_R!Yzt9BHAiG@>||-jmQfOk^N$7cP%}_@~Oiw%&eN338wy z(^yJQCzUUx(`Wu&hw4)ui`S4`WTkjjVKo^yy&)zs^n;GZLn|aNlEqHPiD}W_ z`H9&66l5uA0F{Ek&)-RJ$uO;9+j((^TKtY&Tkj8iBW0>e>px$Ne^2XERsx?8Jw+-1 zRXP!T37L0u=$}OLB$YG(GZOTE3JcH0nWowudt6kI((@5guA^V5VSjRn{#N%@OenX! zpql*SA4tCP6)J;Cs5wldI;OTD+kW+TW|hphyhp-Y+4QiG*LNzQkF2J^cb1^6g1?Mw3gr#Eby zfMsWvbJI-*Lp22yPspK`hVAid6|KdvLqbo|b92|^O%Q}joABY4;N(fkWPV8W(-cA@ zb<;bm7M!SSYB|uzd3IgK$o>*s9w&^hDTma@#fw834b3#+fm@Eu>)TvMC1o;ssBY&`$q0hWY`A7_d548L&xs_Ct-SF@^+u5WarHmb~j zfI?;gt2lF|I_f&-or754><)kaeQR@sGhg0WH)pVcTt#bEb}+(d0(}{j+A`T8^I6<0 zHidL3hcAQge`BgfT1TeQgr0Vyp*(@)cBH%EV!Tkp{Qcv+eAy<4JcQ}kqB!&JH z!bbL6*ewetn4HzTr*Q>j4BDNHx`cNm5s=uDZ7}LDC#Rg8kCw_wPySRJ zr4KL81(f&icl^Q8Lmj#EXOK@~lL3g01rZh5>Fo%)S_D&RA$g;VHnge2ta2vaIPerx+jXa)eXT|evp#N%ve2!Jd(56Yv(EKkiQs)_e@ zddg{vbDX4i9X8mQbRhYxu0mB7=<&LylEW1hS^tjB;X<4ckNpeOf1v&QGx_~fJ9r!8 zg1LMjhb_;xg&&UT+gogeQEQwh+L_EogGbcHLu4iBLIu4f!uQ4N4k+Dx_wJ0>OwIW3 zbh!L0$g10!Sp?;k|B0O$NYVTA_Wn1&{b9MFuK-nqM9CLuTS=k#(zBnr17Sa}Gv(H9 zF)PX~^1a*rI)M{?^$>!>@n^f^H^iW`|EK&!`Nr53@1PXwzqFE1Jyl9HK@NXJ;{Ff! z{4;Ii8GuRI^ofjD)X0Bp@IQl%Qi3u`!Q@W2-oKw)c#5k?c(waSQ~zlf`X%orH0YMj zvE{A$e;E7Eh@QbTLMv>c3b^_Aw9<(|$xtet84d4m)%cqx0i8_H3ax^nf$INvGf&?E zQc%AyRjCm5%b(YXzngh#*hc!9U!|tOpLMnWYVZFa*54@k|EjdiS_H}BcQXZC+2R92 ztN$Uczu@JQ2w3Y2Z_hl*c7s=vJ6>#ecmB!Vhe2hbj8aOwbq3`mS@p+lp0_Y*sht)v z#u#A5nzZ1RwNBUPV47%mTPy4j!fhbii0-wxAPpYw;H(ItQW5AOZ(ZaMQ@o2j|H3z0 zVj)rN|Eh%V z5M`7~7u8uo_`e>4+NS3)Ko9l}8N-Z!bt3zz6WhW4b^q$bM`R=U%^14aUV)xB3SMD& zZQO|_9#;gOH3#McrnAwfd!Iidq!-rPKT25-J%%M%{}kcW)RC|xGIA#p?9k_Guj5Na ze7Wx0n2dq{d}pJ0aC?`iRS7k4Xv}wxX};A@C$;&jkj^7-xmHI8SH?gth?wxEVR)H- zo5z#K6K`0(Ev4T*Xm{N?N7b>m?Ysf(7K&%2EziwizrCQzz$x^4z9nwyZXQW9gk zqE;xMvxSt}$>?Rpz0U)OISSLK=424q^c{CynJ#a3j4b4F>8EJ=ty7*xyykFWceC|a zm>_zwcVUTE{`)Mak;AK`2OdijCavZS5FNHPG4I00P3C+mb5P!zqlQxArlSa$fAmovA$~MXZK+^5yyfFe zp9&rS<73348&sHk%yzih+zXxwFb3U4n`{G_0`TP+Fs8#G4;R5+!m!5mp zYPwlB3wapltTVf{9oQR>PPxgT_`)~oI`DOGp|^c+`+UdV^r3Li^ma^RGCwQ1`GEkS z*a22~AZ1y)qQX6Sw4E`Teyl#SCY|Bsp5c9Nlkr}At8M5$!Ws0S(k_RMAS6HT#HYPQ zAM)IEZm45oVZ$6ak-ND=bHAD?Q9Khq zqx{e@dokD#=tr8f5^mcb%!d?qFlLF}-PPP$XzxyUxn&@8>D-<`SJJGI0c(2!ssWSt z9@C5(kB&?&<`6*+vdcG2DJRU9?e87*PS9?SOP{wK5M7RDCG^%o7B7KF?8USHk?(?JRjGzh{sZ6 zJAdpn-p1H>q>guuTzsOWpmTCu`*1`4>4`9Box<5b@B|fTC*Om&AwuWg2FX~PJE?DJ zK3J-ODK`9~gSNX)Ba&|0 z0i~vAfk$Hl((mRRNyG*$1wpo4!%KEw`OrEd#m`Hm=M&l_yyx9{$wr_8V7u0~FC0q< zHC9&Ah_Bvvet0bYaJ|?v%GBPk2X1yi-o4y@q?v5WJ5px;VUs=& zRx9wgX3RRkCEi|Tr7Ze*ZRO@XM<`L|3;x)rwNs9tixu{` z4qumC-3bobZukh<-QIEDY{NZX2#3 zTN%4!)1m5(2~tk2*k|BOi?&1l2@}LpC;VXi^UFom(ljTFIr%shss0fYMWX3VRipUZ zg*y6*{es8z*38`v_y|k(x?$|{P)^PVAQbsg!4 ztrb+-&YIjMA2uAKy-9uNWW#GQ-QxP4@f2Mr=64>LuP5)P-HRozarw0D=vLak+&_N* z)iYFSSLEdL>Z|XgcZI?&N&C(wXtCHk(xlFN>yw%5j0nfu8RxsM2XDWK!N%mcTk12p zkiCVO&pV7PQd8~OEZQ?UOz1~bU(GTEOz;l(6~3ew$~1$u7wUuMR5%LzC7jmwmY=!h zxfAoX8Y88Tw7iVnTbMrRbx=v%w3k zI&MiKtjShV08@o3GqNi_4ir&hj=;&UN8|Y^ye1m0cojLP46c(UstA@FOuW2m?gjHn z8fOT*Hms$`ZVrV=q}y_JhRpX{8ma5)RBG1isCnCa}udZmql82#ZSjJOwsT`Vz$;>F~-Ovhb1=UAy=FK^l zxfnAMmA0kjW~@ge=i)yK5}aMLm@cV_qTf^nv*prPs<;}O4WnXig^u7%ZOdE=M9Jf- zJ>S9TST;>HC1CRQ6iRQ8=B%bV+m=PLtWtF{yQI+NtdW`?_rBAQFIH776%=FxwUhkv zwC3Kmz2WtqZ_s-&JnxiLp=Bz)cnx+-Y2>Zn6MWk%{4(qT=00!e=8jRWYi*i*kB`cuFgFwKt=l(&qy+&MQ^zVky+nnNq-WkctWxvb&RhqrE$sK^YG;Ed z*+h^>n$9K^UA4@)+`EZ^GDf;URqx=o`8fB8!{k~ zP+&=?Ap>@g3qoSA18dxj&oBO(HONLobjB8BF(rK z#H)};5#CA9E970de2z;D>s!VsQXQ^XRaHnH=?i`_@05d97o8teqbI%%Y9w@-HT( z5Qy|nwE||KpNazkVwlS?;^AGd3_T+H+GIE?T$7AQX7`7Fn1IuGYa@lZTDSIR(+Mtz zl0T#wvBk2ons|r~zlop~M=;<}#Br4*%R=@eqlbZS$tJyRZ8Y5upf;d5vL@bGns3}y zQ_0*darZvIRjJ3G@xYp28t7F?M^u%@#1pO-W%O;|pnp#@loq^W&&H~~1UvG1P-Bv^ zx#=w>T>oRtM7r17eG(O4K%bd}x|Tpe1Ag|^_$S+8x!yWCZ^T3cIs*xtQS!Hdz_~k% zHm@eM>0^hxyTezL`w4-822zeF?_2kzC~FI`bN5;fC*{^tVvj?`Y>Q^6(kphqlk8*8 zeD*dqx4ffRD9t-1Q!AteHIS8Eo$%$|>U3+SJ_a~n>U`^4VYk}xu6pzCjrs}>`2kxq zZsA~wO{ zM|ugXay+HM7R3R2GgGaE}c`WF@(JX?I=Y~?w*+%V&a z(XpLxYg!R5t^LjG*X-GguCOeR7mNw4%22oRhLf|`+_wHl1Q;Ue+w{Q>A%`8V~f!>$+zzj`)x>#&Q zmJ!0r2tC@{ecI>m(QeF3N?q9?uQB*-r zKELJeJ27x%IDntUw0h)BE^E4Qt}a6*NJi-I5?+&C6gQ%l1C-86$4$KuC2%l6Ib8Hk zyFZ<@&vP$rXVddgq{~ zTvn6dmo=?OTQDPzVAbB#$p_PZyc4rAy)?~OJ{GHfo-JZQd=|^3uQ8#H$E@OOrx}7* z0^~120f)9RYYQu3_G1BuTdB3-=@RlsR7-rim$uA5r?;S3`esyEq-fu!aV(@WT;Z#~ zE9heDT2fNAB=x8Wh+F?2LmmEt^v(%YNSRt^!xmDiR(m`lL9KWClEw8i+}!yZKaKKj zq}(3qLBfOiY|`0Lxv6@i4PHY2P>iQWgW;LPuLNS6+c-#V$EC1y-y4{IBLdzi06xry zzy5N>8Ze}lsKdp94?@juRTsoiGejmA+_WpQw`1p1cA-HuTN2YOGmSnvmth{TQ+YXCm4?8n1v1{bg8;#`+ z1=Cx!2j-plbTWQ3mX&r9{FBfPlqR>syb_ybtb#VNh>9!=@%lpS?^=&&c>d;kR;IXc zUopZy_Vw`J;-p#0inruHLlV=MN+Md$JuUg6p!4#+dL6|SsacuvI6u&NwQA#qK!+Q4 z_qy9BDd(C5rxCjzP0DQkQvx|ID*cKISP7#|3L6tl3Z-8Qqcz*)p^z0RJd(tOAm$3Os+fwxV*+pZZJw4K*m3w^x0YeqloNDbwKQRE92089Poz)XR{ zK+UTIqOFpeN5Ask6PMNAjkiJPdxEie&aXtUu!zC7a-iB}%QX8s94q}a`-6@eplG{t z#%GdcY3%#x^$9^RU3BPBMyAA}okUl>h4}eqwwdVC8M0Rr*9+>83@ICej77Tlg{==b ziRC>7s-sdoz&_hjHDMD3DB_3AEA2mtnjmD8M=9oHA{=qg{bJH~Wm-3MW#=>oMvf z&06fjMx)in)DC4~(#&c^0zqm6;s&PX19N~ym>~^uE0gMU;!DamI&nG}`YA}wI{~jm z!3i(|=14kV?AE+Cw59W_Uplp+gs+b8k(7*c3+`Q-f3Du*@wy|Z2Ada~Z__941yb(c zz1L5V`x(-9V4YQKyux{+b7wbfHza(Eck!r?dLfOby)Ph;@(bi%d3tPzC}I?+U=KgY zqx;$tO)Xw$%X4eYyWn%Pm%9J8zxC=0QRQluXaj-LNu>0zv>z=I6RIq|JHn(1N1-)* zS_8O(s3V~&I;zIYb#2=3#eW9pD0nIwM*Ote9uyM1 z+h#1DXYt;Nl5K9o%b8yOUScOv>XVdFZp#YCC!{5bSJLm(8_^r4J@&crIDYP4s$*bq(epxKA<%09!bL{R#4Rma6Aei`QMw#9Mbb zM_;N%U6*)oY=1(QMsTTEBJ3N5z>dO^H^N}vc@!i<|pW1Ms8hkpqsVMBKKP!71X>RH1*Ml{`V;jgN43rxXPtJiX9j*wA zEFnF2(yMIipnWkV)!_wPeCK;Pjt)!M`n6h7W%|?!OMx+*t6J}RanlrM z#E!{ewl1W1w$^yIUXHq2&fK$AdsXWw+~2-*XuqKw=^d!CQU4}per8MXlV-kl-0--Y zm+nm}sIV>COHkqbV!``H-n{HGGb0bgFp_baHj}Y2z+6z10TN!DNwTI1by57z;*M~! zmb_xLlxe7YuVh;3bW+wl+caPcPPfw)STtOOK6qc!cn6v*b{~(1ua*QG5b_LnIh$g{pNct?SU-43>~hxk0)k$gnU-EX0N6KgOHd}$g+ji> z6NX(Ths5at3NIzHGVx9j?|o+GD~kI9`y+ZxI1M&ADBF+(`m+Z+ndP0tplOBnMbwZt zWqwxyfb9u`3PyjLjPV6Z)H9SYao8=4_RxbI2z~bEDCx;=K9xGvjaA@Oh)FH(bdA8> zW)Esm5)aAdfRvVLBRQ=TJsTyF?e>|O1F$|wQYYwhB2DPH*}Ji9jH-?+HW^iP55GRc zvD{0;2$(8ex=#M1%)V=r*!4UyKoC|bX=?DtO5$kcU+a10-H-QPGzo(3c-6SGKOG?g zghO1M6}n%sFv_$e-lX+sJ|u-te|Py>r@Z!F1c~FFIO7nq=tH%Alhs6*umFlVH}iXo zS*-enu4r;#d8J5gT)(h@SUY~D@-&qePaV#Y$5cd-r1hklcH$`zE4>9%zSt3VH#B0U zvLB>0xR#Ice%F)XSi!ESiq~)WNX@{*4ST=$v!Yez*USeO0;9t$hPW+Iq+q5>F1nj& zAAI%)%CdIiHuI75imjCT%N(;yuC%htt~QAoL&Y$BjS!(uhCp-7q_$VWIzq+LT=IY6 zT8{%WtMm*A6r1*al4k%fjCDGRHhZ}fDg4!*zEw$Jb31{f2`Zlj)7#lOMHN+FK1GuF zQ7z9G-uU@56$l(TWV%cb$fT2V3bCy?N4V47Mq`uJ>q8o|%0aN!#I*54Lru&BEyr|{1$l=TL-k&+!44Th*z@*yY0eWl@po{ZrpzsBUUOyP>lr0cxqV$olv z$9)^`?c*#rCHY#h<;!`Kl6yIWpW2yezA;4?qP3y2 znp*}Gnop$@NDkRlmHF{B@)8V&PNHnS3x4}?ldw~7M)S}ANJnG{dY-lg}Xukb<^&yXID%z@`VD(CsR5NJ@AnCQ$pvNC z&aC9i%Ky3^QsC^~=$7(uEs+t#NNd6*nyKtNRo@s7`Pe@=H4-mOsEMp<;ez;h;n!M! zsiYrHNaHf_ybUshc;`{vJ&nbPd{CS5akHQ(nr{Fkidv$0S5G_D@Rl(NZ(9aT`FqNs zI!y?xjdPq^AYO+2XA-O4=-Ixq8`D=@h!SK_Tk0Y=n|0(9`%p*DrZV+RAMG3ggjYT} z?Sq(#9OJF>>9*=q_<=%a?v z!QkNbSgA#^1=YJx3j|USbD49YH3yD)v_Ho^2W^3vzBr^VE?GP~EoTW;bbeKNA-r}L$@Zwuk-4Opl9M*KvG zokslCo;dS~Hpfw*c4LR>`)TYGq>w4~W`eQjB}Qra(xj_+WSi16XY0Lw$uZ&+GL27H z^mp5GO{yucln%-wqno}zuNP(1=0jK1cZ&18p|cw#Qn24=tN>>eNgeN2l@9FHk6@4$ z)}S~EQecya0Wf3gy^~KxpAOYv)P?M>4=+|9$=0G?iAHJ>li{sh5F{Rj6&p{$X}G7 z8xE!XvFV&Lj3+#xX~Mqft>!W(TC&mzab3`38$(DXJ1t*5IbT zhH~8c1ETqqgvvrnGbBrQ-1J@NQH)P384(Z)76O08ZM( z6)!#f_I*x@gySPiRM9E$<-`KE>!qrA(MBRr6Fac_Y*svoE4B`M`O81N^|qBeG|P%x zp8Rfsa@eA!uUbmR#YmIna!N?F7`igPo-mJ6tnw5zB z>gH5``6{@+w4lgUd5ff5H&V5vqL+^0P_*gNh*8ewyH#o8^679=Bs>d0M*-^3a~)=8 z`ojrqzLXa3fST(YzJ=ua2!{(2zU;kvT_C2}ptQo=8#ZI6!x-r6g}uM3zw-m@S9OI9 zmQC@hcT?6OlU z`?2vEysGqBy9`A5ZK&fcb#^f?a&Bt>A+gBBs!gaBJHvB_XYXGV30+3eW~#?= zcGtkwfB5igE7CT@Ah?$w;1#ny|JC}}YH`NFxj${QI*YLqSN*^@So-#5ME;3Ch9(RX zw(cq-D!_pC&s+tN!&HxQY2)N`4?XLVEr=;Epq~8Er-l_k7Kyj|t};FD-n_SkF*W?W zMgYT2KZ*P==0#(Kp$EYYCXWBx=?zGF#@$Hx3sLXlFIM`)O}{_kqk2otyrJ;Ndr8}l*INSrW8 zI80PgFgU?zSS}1jutX_X${nHf?Cj0^?d*#8%qP?PbicA=emSe-ZrL--nGS$co*xqf zK%oKn3+NkJf?4Z`k(uhOY63v8{2&bYV|Vbda*fT+VO|GonFGy;cZ2$ps@~Mk-ZX*S z+(K(0e$=0i_dXrsg>HHRvX|C`e(D4CJU(&w8maw6)8VHl#w3ISpmhaxxYF;Vxvca| zuW&Zbb3)BS0AxjoVmm;x*Doiqf1O6&=fDdr0|U6d0ubkf!OxuU2;BkQ>%qWd_>Ujg ze_TPK**_uht-nRfHg)lSAvx=EGo@aEs5-*NLgEj5*UkQ_kaG+N*eD>|5Aw?bc+XF* z+?3vzlX@RRbtLx{v?;OFr(n#b*8!Uyy`zm+`A?ZS9<09A(M7Qbwpl|0AbY;&q;$I8 z#NfoS!L8^ghDX)C$a84FEN!FoEWq!AA6HVI=kddDrx@Pygy5Ujx2XL#A?Rv5Z=&Ew zu`nJ@wW{;E%+tZBzOwXsS;4tl>5EOsiOuk>a&tT1xUhHQWeT`-arBBinQOi^O2a@` zKUW~=UVfynUQac>S1}75!(Vx&C+T+C1MxW%V{Bagg+Lb+sJ?t^CzR(;F$vP@haN~g z2L}$ux4FWj$`sAA*k(FR&Dla&C=OEiWI>cz6CW9<-{2=QS+ z_h4KR)Xlm{)JpR{009bKxGwK5cW$!qry_Dmt(kHCFg}+~8efOOO?9i9P&u#4H64O$ zpff;WsQa#KHtNU7i-S7j$c8Y2@xcQCnKzlzu@a*St&rW0V#sS<(ck~p0 z`#zbsJK!qf(Gnl08DU@ps0MaL)VWx5Uh32_KPXLfgfMJ{e}pth>?>LCc^CC->WJ@M zQ~w#?;Ju~{jcCPrdeDmQE2nS6>xsxd-`-EK2j+*S&)+|81aWPOohvqjRD!hIA#1_9 z_8>H!Z-}R?Mg{V9sM|VTi9O%HPQ_NnWG3po*>=SHFcH{Ru|ZvN<1{u9I(-$&{n@$B ztKY8i^V{)|CCUzMOS-CxhA4<%#g+H|{k?vNw~f#!&)ZM&pAQw(umFK4e06#UcqJ2J zeeYlRsfe{~OGB%}sE4 zg1S>M&`Odv$SCJPd_;wEwmq@*6) zR{|X2Knw!bP)MY}K0^e(NLAcc3pw zc}j6H5|h?Xs#c?BQo$$-JeDRa$G?TTMQH?R23Zf>>XTK6v*Sd;kwqyEs&9%n(`zA} zf}Vb;gkFeF|MAkNxaEFc>*n3g#0!NVJ<+2($hP%#F>}uXqX8oTqYMKJgAhg@<`jk$ z<`IS$MiIsWiYt(kEmKW;|0xRD0Xi|zA3mYyHxJ*E@JDkc(!1jaIEAx2Haj|kBSp9u2^Hq186 z{ZwG;PU>+gJku@%j}ew>aB5IWWC~cyOzIgu0z*};U5#0dTWx-|Qtfpeb)7@CMXg6| zVXaCHd~IdzMeRY&L!EK;TyXSw)Pk|VScrC&keH-j*V6foeXC5ul?j1>Fusd9!w?+FZ#AmOov>BW(%;T zwZ%T;#wW`n)Mej5?X>A^(EibO?9zW$y38^xyQ#W&w>7b4`zs55^AkNXJEa^UJ&`zJ zF*T6bO{ZGOPF-B>rvNZtE^;!0zN|XduHG)_9Q#vlXz)+MSfW@E`SRl0;x03PvnmV; zDS=TjYT0rz9MVeiQz{*T^(O2(O=H)|)7-UQBqbybq)$kwNM|Ieq|&6@q?ROI;?Uw8 zlE&iB;xBP3G4L_8u|70HbcWQ(bc%FW+A31>+N`oJ{88jZbgojBDyQk~&Fw|cqz}n3 zA7M;j$WS#LfrVDkUBydWR#1lZJ~DvWoST>XoBPn2XDcDNBP(*5zMH zKj-XKxoBIPM4#hL@Q`|bo^>sWpRFp6KHFV8YQ4K$w}<0}ixZ6<@RatEMi^5VGpCWK z;j6&b(5)|M7+St-NOo3oHgHOF*4GAGQ(Cj(@!;X=2=_32Y`gBbp}O9OL-`nm!HjM2 zQTij($B@Y8h}Fp9$i2wK2*ZdKsbVQBDcZDlHdH3k^!fCh(TFj|vGo+#G|1EiH2^Ge zxKy~jr2dFd5<+>kN|K7Z3Vyk9d9;S>BK;yxeTz+AMI*of9Ak?f9I;t zlEA{R@4d}=35F5dOk^%(xH zwYnDGl}885MooOLiaHb7u=NS{3Gx|vMSER%W&^_kEd{;suk?2oxE6R7u=TL;96%_+8|Gz}WhG1w7R-4Pvh2l(Rn4fhi#IvT$j}F(D z)`zf%Fo!pmih zAvcX9=iT)lW);)gLD)g^xWUAS4BX+#pX6L(uC$MfClbUxxK0 z9uno`emAhP7rqEVdWwrG8&(~WT#}rTV5@}C#-^*IwWLy|r0W`Y=ji}d{-nd;*}E%x zCcT#4kcgD1TAETeZR4-`tBH5rrGv+5a7SiDd$%p|4*YC)!?Za|15Q~?d1B#yHe!Ku z?sg7yp<%XcZo`-0VN(N4S7=?RFytUIaG-BstPoh(zZbLDBDNm=Sp&YY-NxAp*R0Os z$cD|__fqZ6-ZDf*YzFy;3{F-#Yu=gCb)H+%OJ%KK()q3B$JNtX*xDSA1W!W;Wyg)T z=Ofk=%Oifrr1ynex7p@W_JiI=%-ZCLe)6x2WIQ@k9fA2TW`G&vZn(r`~hV%PeQ?d^eHL)>ls@_*Q(CG(YL;sWoV$Xdq}SXo_Bqp41z2 z+ox*|t2sN+TB0;3))3c<-;6Jh#!NQ%OrX9=E&HJQ#fDv8f^I&s077K|^aSoW{vKp+ zkJ@@>R40ILytqK#oaIgMdN@&JstCiuI)!t@S&Ffe=WnJZM`n9K5+Dy|7$#Xtzgn(Z z`l2~I4!dNt&-w8$=^f8;Su#BuHw|Da6k;Os6%t=po1%fbgz&P&XPHJp8##~y^1OOo z4&?%kn>D=L)=M4LNR+V^4O^%4)-aFLM^iWpxWNcQWDrF#CcZ1DaL**V3ZG^AmEk4z zMM%!?F6B-hd(So-?IV}34FTPW{05Hkb_PsXe&<5Fa>f7~2T)@KJ6YY*fS(r#@h@M}$K87DFO?#rEe5c<9V;`eZUB#u?OK zI@71WFDJ4(9iAKyFxobCkXn{yfPJ_wP#%pF& zVAkQ*td)b&HKatQTIf7g_~cG}w3jj5S!&yKaQbttp#|OJ&C1zb+WRE0b^dV=5&`R! zuiS##mePjSEnzKg*`t1}YHhE^0Mc@BIR2uDCnGwm!G*_TeNuZaeg%8EXh!zf_kzf_ z+fq&+iiw$9>y~jVaSzHRAM6`l5Zg>1rlEx)vaM`%S){Lr>ixP0Xl8L*eY#fa@MHCp z&`kZ!_0H%w^^&EbXydG}dpmn*e5&5|ZxVMmukE|fA+};&bzZ*D#NLEvvGqGaeN#IY zPr*BTd$*5QLQQf$)zsVposr*qIk#pnkWhYr>kn%yO$#eqP+_?)tF;O~iUzBH)-W+f zL$YLC;kjjyk)Y)`_2#y3SG`Yj>Dbdn$w%4xq=LwR;svV5zc6oW>^O^aSsY=c*&hjq6_g zN#SAq;pTAxG68ZAq5yI*BnF`+hDFF-aN);?URzNTkyhOK@VF>6_8-m(G{F=}w-~pJ z3ad}s85d2+waCP>$FhvPCFxdaf)YHLE;qB=Glr&}`E;4u<7(V@#E!g2j<;Il$ya=I zlR8R6u)}mTz)og)c2D=i;gwv~es5CQO4cM+tt09uuZ${#x5(%8s!|Nw)D0O?nX1I6 zxH&u)_lB376v#_BNc>DQYHJHK8!xAhT`Nl|O(W0M?dzzoWc{*q&YA9L>&91Fx!7ww z(~d=K6%$n(he7Lp>)CTLD|YuIeX3+)20ES{Q<%p18Q5i>o(`)Q8GWA5PYB2r@eqn3 zyk;_r(g>p{6L?R+ry;}feaFq@m@2|6!i}FB#6H3_!nBXHnA9`W8!shyS4&}6!~1jV z&ZUvDCpYMj)-ll6NW0L6zntpXTOpAn6Mu|vRc?8c=aZ{IWh4YC#R^+msAE~AkuIw` zja~14OpRcWS|6RKJ}l3*lCnBFX2N;-9;ubM+RyFY&f%rrHvJF}P6Yl%(Bnt@4?dAr zVc34bemK{wFWDp}q#*^F;tL~M$zzG(@vh2p@_m%I1 z&l~J~(!QG;ICk7}WKg)XcwR)pR6F}QFtx}IrpY1dfi!{pqQ~Db!Xvmwo$vJ%ESVjz zf5wL8K{G_v5Ym69us$w9Wl7K@KmgR>0P1gm#ZtiRkc1y&F?h_|x2`+LK$1*1 zl6|8O-25m%iBk(H>_i#&dkG+h$Zh>ZClPr4oI~-Df|z5U=}|Z%cLGi(!C?e1a_Z&r zisQwT7NRwR9`u>mv1kKArrhc&IT5uZe1dWYQwT8CsnC8{7;IP5uzwuKa7DXAng~JY zUXb4;#~~jkSAjMOqzW{aa2#f&WKP6Zrjn0I5quW3mR$2Q;>4`?yG1-9$w1ad=-_=3 z`d}PvA*wu}^4YQwde07xS`xkt(UK;eW96eKW5~xra8tr$>?Za*+04QAB`>_Torjv| zDu`2^w7|7B$5685B>PZog|;A4u}ef#>jc-uC#7jwZht?zt-gDBgIb_UYaH{ z%=t8o^PC)7dvYkf;yeF^nQfATcH#qhQeO7XT+ zDr}vf0C~hb48KO(55V=ClBQ*|+*QH(^dNiPf<=y3>{Jr0a0Hag+ib zhJLbqyBuN5ry8`?ZE?*Ge$TY@edFC=W!qlkb7@QQVmk}`BlKfcy=`ebc-}hS0*;P{ zO&_b+Is+gMNE3I*5@+X@8~kq|vHQCOSrZ#45X_#~AU(Q40{y5b0VdrL;9u2ZKqUBa zfgp-{U7Tu(bOx}~Bn zVYcW$$OMUnMJI43>Xvz_=B*P~*fcDt5=+IG@8+Ep8)lc~l2uh%>FK#mlF#3VeF-^e z=LiYPEZ}n}GG6-LG}bs$+jTs-b3dl8uls%E3Tzd!SIcwteyxMMmgmgV*;Dqh{DI+V z1Y+@nD7f}V8~6ut6X-dFaHuXc28R^ZVQS8opCcGKiz$Z4gB{3b$XxM1;%X?!W+sc` zChYQ?O!`bYO^&{+wq++3sbnjVdq8MkHN=Ovyrd7M(OCzo(&+?D$m z$~UF1DlD_Es@xaXs`qKZ=P^4zCCa<}pnTQ(?KLS5nE{B{T8_Fk8Ktg*M zQi)P2Iy9*dVfKRAi+QY`wqDZ7^netY#RSfPggKH@pSn&jR%2J^RF|pmXt=P4xNWqf zwT-n~xyvz#{nNBnGA&CPKvhk2FAXd^M~H=Eu(0=EOGB!RvsZwaZeXM{^8Hyyg+Vbu z)le!|(ptQxFi3b-8mZA%&*YqXLkagi5*YC$MVAJtxvW029I?o;;>v-=*|E64dwIqG zl}Z@K9{XDEa`dk7oa0pNoDQBD_5&fy&L;io`TWmw@@QgR1Iex2D1`nhvRs+Xcr)2y z(vZTB`AZpTJn>#CjN8g44viY)O{cFcnSM*vfk~6zb+I%e6=m=3VEA+scEB8|{TMT49VXd;Q@GVaM zetdfaB5w}nhqIMNn;;=1 zOdvt@KtMbY$%n2CEk@C%Km$KMs?6OyEo~dAEHij0YfHL8MU*=R%_UNcA%Y>N#7&?T z2lC9u#BiSR+@&&cr;bXQe4g2N<;$X%ZMk_0f|p+0Otq%{saO7^$7iC5dk3oeEf^a0EFy^_zY-J|K$2EgaDiX92VN^D*(#x zpN}vP2vm>!TWAgcKbif(n-D;#CccUMyL^6vLuh`es7J8~h=22g@(1VWCi`8AAHNYI zIp-^K2ip~T?Pr~oDwY~b7K?=dkDKODh4Oh=Hc&64NUkp%(w@bx9$W4jWmY3_Pc4Mb$ zjeyp5N1;X79{LdYZaR=w@ZTBnBUP)^miSd90&59fXRKY}3TWZBZemF5Gx4(OOGnS` zAB<@qc0n!kW+cmvYE-TyefMN8Y0$1pbfa>hZ(&89{-wkoGDOwYP};VayV;Wbzz^D< zR2AuVR2|jS(UfoJgx1fNu7`35+$UZ!!cOjk!ua?Wx7o=}_zOoy_ zH?6B;+h=E@{UM#XgKsa;yR0plnK*n23XV-vJ6$}oT*$Nq40NkUiNQ^3=B0co_eeC0 zUMfN*3wxZcS2=ugMOdUxW>as#5`;U&r;(MZmMY$&8`6sewG&1UpfAGFlM#oQCa-to z>s61eQudx!$z%U@$RK_pFXm9+-MqACJ9K7FE z8;A`eNr<|yl`>0u_nc@^%U9j%(-=d3G*)wcKWhsFCskNik|Y$dKGo% za>|}5l9?J0(w(kkht1Goh<3@~l@9TJh?(Ej_?dRYLc`@143A$W$|Bu4h%V~rV>GlP z9=-n5jOHhqrd`f58`9+S<*nn0a9IsStY55gs)VV{{2}=YxbQjS+6&epY33@Lj%5DX z#>1I}hsPUYw}7Q6Gr56uB-NRDH?|G9l4tJ(9Pyomy;7Z=dhE)Rzl+MHC0(kQsMtRn zsmqWZ7*aO-n!(>g`umXx6$YJ`n5#Fx{C59R?>S+iao#VV+f35>s~-V45CKlv{%F|x za)EHL%Ee^9Eo}(An}azB|1Mq6dW2zIzE+T=B?3En)QF!~c*JS}E^T{sq?}#hoh4Pk zV{dQN0_62LYq2E%<^VkE`dHL-YdqDqfs@@ON4jSFBT;s;{)tPWXMW)zY7kA~dB`w9 z_Zrr_nj~tgJQCO#GC*-+3;oi(Z!XdW z%r=GEnQ=wv-Pyv3%4S)`Qpe}Z5znjb6G%;JKHDYMt1!|yfLjo)>l606Q9q~QsCig; zKvE^6En!em-$3cy+@9L$XtzP(87ZTRu@IN0ojXXa0BZB=)LFxiR$lP=Np@B8Zk1O9 zy8BD3v=@kcz$?Pk8P{B1GgCV7#T2iIOvgQ7T@LgWgw8935PSt61papiNl*)=M#G_0 z94JKVK8$cT*40(~VzymzQu1XEwj@kuMXC__?xiruNCBjQy-{VrIKXK|aU^*%>Yn}Z zC8W*VXyr<&S|7TC16h{T`AL2s3;preu>F42r!{eM#uQt zG>t`82AGnDjL7Y4sY0x9i+$xj;5<}mPTG4xs(R}3*84m~R(QaPqfy3O)jTo>dey^q9OM z7LNza*VK@(kULv}i#L;O*q9PYEKhLxg1^$i+`8U_l4je7%2veJ#i(Lw#Q{JUvoonX zz<#sEX^_`g-C9nbv6S5?Im6e!BpuWA38l8jWIlZOHcRy&cxur5Qyszi7k;A2!6Eu* zWKDC;Tk0YNKKebcq0pj(*hY$kix9Sh5618rb~~CLC#i4K`o@fS}z&gdEuK^7siPpt^B5CGbd+K>me@j<8F2@)^tfvtG^p|x zW?g+NPmpvZlPQRHL3=c|2ZH~;Oap%aY!5b@O@4u7vVy(6y&BC!QI=2cyCgY%eRU4o z*XIWfw~Z!Sd=~TRKt@K!div*nblu7QEML4B=RhUoQNgo)#bmk02Bh7kOvQ@_1I}2@ zY&1;@0$9Zw7y^e^kHKjCg}YLf z3JK@K*{RP%H%ALa2?;1Pa$J_K*Fbqr$GwkpdBr?Vx<_V`-0wY6qv>m9S% z0$_112iS$a^HsXC8(vqDFT*rVtAgg{=GV6?wlh^-uM!*Hceoea@7rclwH+N01JeVE z<|T!VRHpD9$tzsypsM@=T7&BO=84Hn+e^&D&d9ZND|#*$;ddQ}GJrxrKwSTTrJEdz zCp)3)0_KJTGd)jph{d8NCFwehx@aL?SM4DiEarxd+c23-7RQnc#&Si%fi;78dB!72 zQ>wb&q{%l$Og&yl^A!cB(pkJ2ryX}Xtw)t@$6wF)#?te|q1NAMI_fyS*>v6^rQJQo zDfl66MNLgN@#QHOrB4;tAo@x6^g#c9EZqe3`omB)0C?Y5{a zGg_iiIA2bd8}b%(eKT&4mxQ39p?4~^6%^uZ*vuvg4%gb)I_|f^`#vvGo7K4_2d#I# zoY=PFqt&V-x>A>)7z;c_`#$Gw)6hW4erJ#Mc{*ugyrc9WBuUew!YxEFu`Ut~ILlL+ zbD7ktB}Z+kNw^;t4ng<&5>hebG?!vY(d*KY3oGLJug!!2RVizjWmHce4tlYD5F73Z zfIN79xYE$GUaBRn(Hr?ZMh)(Wf4XdsPYrZASypZ`)K3D_%iwaAfo-??FNz33ex!V$X-&4J}=>j-O_4uFwRP_v1H2t?CD{W!$3UtgaM^w?Rp{ zkPRuXWV36#T9lHRfwAI;Snc}FZ8)cc;_(8s7R!Z7h5fzwcU7dz+JF50{4W3X7vhWb40qI=Vkk%>J4@Y(J7v;LU7^>Y1}g?L}#O=FNo zg#rmD;3Of9xFcUVp^FM56H~(mUAll{%jGatiR1ow-;aj`3dK;x&lfFSlH!4z`Dm(B zYfU>P4dg7gzf?RdZ%q!JUf^b#S^{~M1?@7%U(AixvVaHXRA$S2NlvnD7ProANKR|e zgSP|h|4Q=6KX?0AWxDTSS_f+>DC8=Bc0QO6ny=KBJPt_D%#@K#qMkAth~~b8at2Nc zq>FStUydlXI2c!8qH&j_y3C*c3c*jJyp{bXKDKOh8j{E3IQ$tcY$<<9o_;@gt@$<*vqk zg?wIc6bX&_?E!@~+blHWBkg6qRay5kk<8pg8wm8G0xH6vxZr zM<8?ob3^&d$wrqi_~?2Qg&@>zB({eCZ;_6F7E#Zdx`;tST6)f>UA~T<3>G?4wmSaX zCerUN!;jw#YPMXRi5VLoWHvZ9HuywHFHg@DEtZ-cQ{7Q(i!Fp z8G61>04{b9)_46ofgd!ArX=uyCu=5tsS7a?Whq+?71_bG0&fS~!|4?PBEayO2X036v= z(!5>h;Fd4ZaH%-AMY=eQD5$GE2rDul2x(R=DXI+A)URO{2%yUj@U6s$=ttdLP-SH+RyRH+k*M9$I z_A4sBRofsy=X#&)G&^Zr?BmjVHYya12NvMx3uVRU8p>(o_T`QJ)~%2{`h%5s`aLql z|5!4C2Q)Rui)Kr>=gBCoHKTT;0pXbEp1MXSRaE8uHZk+wz+=7pihJ$nT0D+q-pBo; z`l036Qx7+s$kUCusB*7#G<-A3g^l^F!z8s5i>5N*@~Jb0<)L9caKWJUA~WP5mQP1I z5&oIqmX24=r5RDz9}#B@((ji*b}o1)M@JAVTIM_-HDdY*`qI(O%@AZ4i|zo!{}?QP z!Y8*oCzS^(%JCvx_mGjaLXrJs})c%I`j#ArO>3U8APO6RUV98Pg&Slr^t^oqX4SDw0jVi`6Q8B6r@ zGB9;Xy%6*U*_+Jq0mZ-vrxe%IeKq$)a7@G;(f$ zga2P7oEy@87H2ux{(aQ}U5Soi&K=hh^>FD*5URXTTO-yuN4`OYhDzG^S@zoG0%AiK zzQ(vfkg{6sr$XPhN?yV;Mg3KxnE5RVYq|<4Zxj-8#ehA=3#_yq2G2(+2yidL80Yo# zmd(Z)S*tqNnp>-+dC-v{vb*tQp??(VgfabJOK66*<$VhfzepBq6lNCiYuD~f2h*_r zQqIWaaSAP$*tKk^EHWkYmR5d@%tNr)&Z*$LRuGTDXFzB`3aZXaPmUy10CzUxm<7b;j#!~%pYU%Us$Lg|F;kaMGP7+LsxizZ`8zurP-7`^E5w9Q!HixZJl`yQ^BVr_3)HD z35$QMDriSqi-NJdKKuEmSYwLe@mJ*CLH_hB8j8*&XV#lF7w*`|jK;4+7Hq`|%9i@0 zbT)H+eb9c?+CW^LcTeiZgE0C5yq!#MY{B}pcuB*z)A5=N7wYI?8~ggf+L^B4%h-p* zZOuCxff#CO(6_(qoBU3n%;FMcG*?zn6cAsT4e7Ph*M|;rX%ZxQv!02KH5#WA#LkK! zvtDCrk>TJfSxR^!%`}62GH)@~p1QltJybZ2`w}NJ*-U1jcj>+_6=LCPFn_~KcaNdP zT~C5h?UX@Se^Py%IPRD&wZ}oV_R31ibmg@ANvly_8ffLj2j6bFKx?ty-h%HbtV)3R z3ck40a@5##P`cB~%wUMou+FHST1zqo8Tm9_;fX#8te?Nu*c0!F(QNGeQs;0x*{qRq zz@d!(kHh^4g`*6jn4GaJBdJ^u-0pXut@3bez`F;gN*ker1l2a{YaeF{VkW9=S}oWFyiR4&Mm^;r6(%kWKFe83T(P) z-rw4P52yUpNPdVo;j*}vQCOfjz%WS@2cD<2({k?`c%Q^LIaXnx!F*=zGr1m!E`3iS zoPW}OL=+G8XEFFjfS3qAzF6Pg&R*9&@_DMFY&LqB#Zdkqp+-*rJ$zD0Af5;i2!p%q z;LZ~xRL$4Veq1EM5~!9S{0-zULU-dVM}a~TAgY7k?q|&+j~83rgNe-_Gid$;T0_CT zXQ(;9qT~g#e}KcIU+JLAg z0!06t_iLRip1KO3Ogw@QECvB$EI1+t6%ju6XE;#d@T(}ZkI70C0QkRW4u$1^vmdef z{Svd@pCM)t{4zYueCRa5iJL9!vB^xhp-knr*_v7@5D^Df7Sl3<6BP1F7Q-@3GNQFX zU*NB8jTO*S8OM91XDq~Gs-vc)$RW(~61q~cC8VTgoVS+8$AmMV$mlBiOrdNT9Ml%{ z17{f*cNNT^p46^*vp#dlj_%pNMW9!hIPSXBE6+QH7GVbT&(c4XA?Q|IzoM2WD^-Lk zmZqX1$bBus-b_2bZwmo{3AF5S$=NThlQaA_lLK8-POBU(O%Gkq4io%GYIozRkcS7l zYQ_E>Gd3xvpxwMXac#Y7q&()|yM+Q`KV*PGHE~MRY!WnB)n8V|Iy3CNq9wa`{{<_* zzAyc>vr`I}YIj<7g7?b``lBhJ7mY~P{ik!R7wG!|HBMav%U0_aG+Cd|q z?>(P~;SejOlIa|lOP;)MOGqnDrVK)tQ=pCK3E?5mMC~1+$6L-m@Oh1HGV9PbTQ$ow zgf;8^<}y;TP4o(Is*P)UYyVXSTa#CC+lKLx=W5%?!@!1;=Vc={;cDCSx(gv`ro;eN z*>)kEma|}<)r$M2#5*oiNaWT3b2HKPBpSG&@TQY}CSHpN=rfn#H55JdpAp@Uh+v5l zKzt2eXuG0#O2TTpAym%b@~*jwLfzz7adYBpvy!bjs@_te)y|36siq+TQA<& zOj2hhP-~DZeqZjj7UjJw`?V;hsHx$sWbqBy9iYUEv#wh~Q|1{K<} z(fV8!=a7M$FvP;|-cbt_$#3gDS|-G8YDaB8ejC6ae6MVM@Mi*p=nVd$2+JI6 zVTXWSg`Io#NrQ#P7c25mVXd^Xi%+A$O~Z!`a$d)6FUirAd@%Rf1({M_Vc}J2U|F^a!~0MOQJ=#}N3AkIXM;88otK6TlgWmEz_* zWu*;PudM+%4a9xLk(yp7g~(r3poELMBvS}b;NJj{*_#f^5=<`#V|;LN7Kew}umSkE z%bD{->Ckz!f1VkBwNLDZ;gSl9)_qOyDM!BbFiU-}KFbuw_t8b0lLVf+CpU)pCmjVs zMzk_U>&|+t#PTe`Gb?t97E_%{zS;Vt_0;^&*0 zvoUe+7Yzs0JVA)J1~ugpJ$(HsuZ2qO*>?A<>J-?|AfWz4-96+A3w%XwO)j3pMf%ib z_*HVAx*v>@Pb*i}3-^z1vwfLc=96hBKm6(C2|hx#yPRWtY<4rI%~m6g zoX*0_4x&_bb97D<*NnLw9U$EQuU`qJ3TkQ1%hp@Q0=GcHnXw@GlngxDjHhehlT*nq?6I&hM@6|G zdEVGL+Ll?mLe%$c?aQ0n>d)k`#8SE1kjtu5ow+It{J{pms4{h8nAyni()D>-f5OtV z1Xp7gf#ry1aTz|i-YLEyy&^w|KNMm0p;S`l!EVWzS0;m)!)mG5SB|(mzPHWRG(<~( z)4gL>n=+_4Ng3-uV5he7p!1)o!k_+g^?DLgNkbU?SkPGR7jqwVE-TtCF00Pq?P>7f z5DaaO?-`>2v5?YzZfDP=B)O z`%xPnagj?fS1E}Z@0|n;t``5y8|^Od(vVHFc`dXIX0Nmx=-0pUolrB-y)%++3+MSt z*>-{p+$dl_gxw`)MS=pn-NQTDdimyy14Xh>B>AW%B4mTPrwWoH_THQ6#wsE}tOQ@g zSCfjMEx~KgR3;*Z~60|KY3Rj_- zE*1>CBHe!Sp*2jAh~FmjoTFE_%`ViJp<)>+)JB+912$2=qd4Q#O&F*^Qh&JzFk3}= zf~D_EE7}rh%lLH-F52ii(N~SBj*JpGvrg!FG z2$+%)@Sro+5B^UDD%8cZ{mMt?1m(YO@_*u+*FIFZO$Q%|^KN?{=~~!MvsEgDY+n%?B(*?VKnU3h)TSt) z?vnvsPK!-Rvyy}DD~BWGTV;-|&0d5`kk<5YTW&V3%6oF71*5UqOC%c8Tr?cUj9`>A zSm=M11%52boG)u2w+g!m8KAgKYza3|&T38sY4T4sijdwV6QOyy`w0{N=E?OFy z0YKj`gOnzZ_xy-2Ksz!QHkS$S6Or;7&4{Q@6{ZtTFk?c)`EPjUS3->OQ`fwd73ue& zY=v6C#&#Q!!W3lcD z3(>Pjha?xC8}+QM9O=S;XF|g8AVV*jOoU6j`TYjcn$<1ks9bDDGrr>US_a{q@O}jq zy9v(tV)G_c9F5n8Ixdb^i#^D-iGon@m>wTLf@UHwX4#q1P&WZp}=Q zyQ)rih}?ufDZ^+>-z&*4MlVVc4j)+b!XNz0gnxbEXB}Kot-#Ji{#d78BWJmOZCjxr zLt8@uB3i`UG@&v&(tkPO0|p5~{N8WVGn-<7x{vk~He)5rziV*4;JF~N$P*4sDjp|7 zNoKpnYM2=+Xn@TkToOpa-MxJ5_5Ra2fFOo4gCe~d)x&%&MFmj)1iq)!K;vr}a}(Vt za2joQm*f{Lcu>jrCZXO#t}Eg>EXW0NNtT+ubajzDwsR+E6jX(jlNNMcB^sGP&En{w zdMvKdlDvxwL^)i)JE!0iw(+9}U$a3%5DOebK zMgV_5v>TC%YwJ}n>At6#I!AIwpoFW|sJ zHe^D$p|3R;zISwnqgj>g8p#;Mn*1I^4zeO4)94NiWxGrh%|l^%KfMP+WL9QSkT*m` zYpSP7Ue9M!tb(tkPHVR=CE6Qc5W$T~&t_Dd4uv%M+U}smDLbLxqKbOTNyw9y7W(bS zUFvLZIb0MM$-|p=vOLP7GtODyKr~Gg4kbN&b&p#J@f)62L;2SM$AKxQg zqTb<(sN13}zfa-igv_8rY_67!g z20I2po~QIBpimIY2eJ~}*uo`3&w1a@P|gCx7}u&ym* zZQw7Xp#%+=wV6655PO&Uh*^_chOn!LbgG=Da!07(!1~5^aR* zVST?8uktD6{c7QxoiH82CbIuf5uCoO_+vkFsSO>LVo}|x>C}+*?W@b81GuD;=~E;C zFI11Rkb^rx_^Z?VWYoD-XA-h8E2l|n=&GS+sR+Q^euN(VpEHmb)~gxAzc0lKKJUX` zh@S6Ksycs<7lN(Rmj+)0ogDO8?-TL{>KvGZT-c;*+L0x>U7hW#w%)H1{3lHV$%0Bz zMEvG%*QK;wz&7z2$8nGdN+QrC>%&!TEO--A7M=|)SHkI6^>5J8Dc8-|x0kafkBwAg zmt10z^`Kfqqe5&y{_I2{8iT9-bh;gX>sTh=|MA+BW&joX?F@*rEz*PR`V2-M z8$0H{3);|40og_Rkm~wlQn;x~VS@KNcwOB0{#)!qFos{B{Fj`{Msv8v7=1sv@p_g` zeV_|uJhlV*j|mYxLQ!)Vx8I2tCBoiot{Z>43O+;utvzS&zVq`3zV~m7f|%9|i{99RGTpBnH5}{nMS52ToaU~j0&{Uxa=vt>+7neytM%SX2b-`Di;ljH>q!cy#x$-46e@wL)KkhVSJTP zOI8EikKGDS6QkePJaFphsg^A+P+eF*y)Psl&(wJ*?fOKjuDlD1Jj;09n)NEC@&3OV z^dDR+`8pE$stsFel5*hCm^25U*1t>Yjn4GDuQL;?o=WOCBY`EVW-w^2^=*x%laACS zZ$Q{C=T^FcgA#$;J^+O@yqPmTY&h|}R1OGeT(1*xTBju^-p+8H(G~Epa88EmcNFjC zj9s6Gq&r1!UG+N=HFYathY%oUfcKq=Nd+4vzF6m6qf>V2G0@%aB9vV3A1glFi~LMN z$A%rm$(QuOk**dYruk9VVmrSc=11LAKYE87y=I<7N-!U~jE<}W6}4jr|DP-qF%rD* zW>DP)!Uz9~D`le&6*{J1W23PcY<9Q$|ZvnW6jb|D)?2 zALER+c;O~Z8Z~Kb+i9%EYHYJ{8r!y;#+lgG#I|j-nb`K7o}Tl*_sjhc_MX{$?X`Y- zu3ERiw@E$SZ!_3n0dzJRlQ-}YMsZLau~@!!MXFa@&~&}h$h-VsB=-0?CSLR z$61=~3$+z7%yv>suYd)3CJ$d+J^`b|BNS+;+*m)>P9i;%#-s5YLvHgVs2tSUAKqji z-iIQs&Bqs zV+7ey4Ym`YbugWDt;wbR$~loLE^xXV+C=Ue&ei}YiC*9AZe#9iTEi?rBUP)j&|EG3 z?kh;QZB$#XBum1o=3NKRhj(g436eLTJ)X*p5;nQwf2QsyDaDs30qX|u-Y@#%AZrIQ zas%B%kkgS}qmnvaIVwgf<$FV2)%kM$z|j0J>Jh^vD?NfvuDSu$)42%!@)RV%@#D2d z&}m3G`fAtV^}{#^zzZqrR;eGsA5Uw76^b8|JfXt5H4 zzlV%p1sG7LZ2jqU8h9`MnS0K7lD z6>6F*;a>)54g^{MZK;qEp?skt^T1kw@j>H7d12V3sp*IP7Hgtff`=2&E}~H`BMcQP2;^ zQ0$s)>g06GL0dW&ci_2EAD*8bP~PMEr#ODl(pT>3|uP9l)pI+ zf!C&NO@|KjKd?9}Sr#PB&;|_2vw)ko4U;#_{B{I^}fw)AyjCE@g(> z$&uWXRlAf<`QVI+M*t@d{>MlURsoCANsY`5&QrxjR!ASL0EPtNMI;LmIPH!Zn%{-= zYehjT{!Y5>TqAFfR8RK3D7Ag;ML%?ZPQOiVDmoWV2Z%VEFEOm&Mxo7h_CM01tKI53 zSptl$adk%$R)Eeqn6N<%5U(?aIm+#QI&jf1s0jorm%WH{0b@*?R|Ckv6HRVU^FIrX zH?eX>`3nw~8G3u{Opf|N+8i}I?%x%VUw?DX#}r^ezWdm9fF@(ASW&u2{sV{!)}d;6 zIYjEE(P7aHI>$O1p^l&b0<(F{X~nLrmVp%S)f&1rALt=CP0&{CKb!5pob8jI8=E;B zKBfu0_y=cs!6fkS@$kP&_?gIcCLb|8&5(V$Ex6vlrs;4%RjrMldIB-CZMdwxL;wzI zMz3h7I#Yg^s@2Sw2nP~1Z}MkFLAXwJ!Ys+#e}GgJ5=708%qlArd##vGbb4IYB@f6g z^T23lJdEoTSk(0e5&Rsc7`x>TOMJ|18AZ6F^>v>Ki#M#g!U87Wxry?jm9sf1v0rZ7aB0V^oOg zwOkRV%!o9cCJl_So?6Bvr$BAx2scT7cue9f*F7E!s1Fsh-6^}jdwYpzCgF-4O)_8R z>-$rOd*8k3-ZW|cHnHn;Db#|EOrinO(}e4?krqZ(ML77p783&?Mn;81)zo~^(UlEw z)e5Xxr9F)M_L|pTacb^xLvrPui7Fb&-h~H#KUsj`iC8`U0r3O1^Jjbjp~u>015n66 z8M6Nc#a1L-MV@4xMq)b{0kftY$Dz!e#Qv#$qeCG3)c=A;ekriDM_aV?H4bD+s%Rg$ld)6TbV^Y>NU9e|zwj{1cn|Y?i z+Yp=JxLmUT81b%_KSproNo!w0U$qRL!|@8`k3l;NlT^#}!;Y1r3qD$VQ8GE%_B=9-1zy6<<94Ih~^ zCK&n8-RMdumf3V7G!Tcl`Ty~8xsiT3%yv@J^ukK+;5y zrUT*chrE>>m5=Jt+?+xKZA@LogOO9#cJp1&`AEz}a%}tU%;r<_Ma)|xpYPzi~J^Bty$ja4&D#!ujz!d>l!s~ zUxe-G`JKzT$)N^LAEjco<5YzGQ<`9PU1o~~KkpREOD^6N!&hP)2fJR6Q($&*r7TAN z=lF$DK)!J5cLu6f(<~p{I=#|IMZmEm^pyL*nC<$@0XBc2j*ay+^ijqw5zFC~MG8=N zsxanyFHk)cp+m@t_4j90+FW@_AqJs!{+$Ck6AoaZbkM%Sl-aEnIIq)QxRQSl5g2!E z%cws=f9GGXr>VQ4XvU6uonn_`~&HneDTXk@gvqxmN1cf<8QwEr*S?%)xsrH zQL60-oM310C^#axbk^cAS>%dJ6P>sleESbz;NOLYjA%J|H135f6EtI8@oq;ll^CD7 zG!ywMP2eqqw>*{uoLH?z<+89uTH71;Tv-cl(iD3RsZ0?Du%DSCbr6nTU)J-jG=t4# zOMFCY(Ejjf52T*$eAr3459M}M9nRH9U5aT<4vwh6$a$(NSUeAmn4sVlFS6&fBO_?NPh@c)Pm z`x!!#u8S#KAqlPdan5K!7`Jt6kYyIRbB%>Fz^OYPi%VWiZwt)C!q1Ea@Q%Tmuy+N! z`tl)apswKe35@4%57Yr|Cmnp;q&&bUC>yIHO2>;CRZ%rOE;wnU$;@>>l|^d z|4NV4Z|s!Kac2hYqy6p|d(1`-$a{e>a?hbKkf+OjaqhFCQDMaeD!_G0p~+B9P6ZG= zOUd?L_G=tb$_0oEy6i3+3PKu&g*#1A`uqQbRv_gBm9}&fXXv+#Eet-VscJ`DBXqVf(Yow`*qEY+d5F`Ko_xK7V9l$}2Apfd^#J?d; z;Ps%tnb0wP$pJ}rGJP$2!X^WZ^1F9YI# z4T{4Z-YlG&@ufCu7{brJMFUU7{F>!-^sr$xb>wAV`tz4Qdu#z5Z>Pwfu-|Am%U6UK z)qYxK#T0>5?mm(5m|LTLUd-af&&7eVSrUO3cbeF4U@u&PdLxkQNP)7!Jov|^6JtXe zv#N8r1~L}{nQ^HKjz(eNN8r16j(^IP{V9(%&|?LdKPy+?86$qXyVZ20cJ3X$F<-EH z0+SaRJG97!Q!GUGALAXpo?`!k8a1Nw#0n7~p2H6^q^#lB8|K;#GH+6w{SLq$qsRd^ zBXj+qt2qUPJ{vC1vKW$sdGZ(XL$G04KqwkCV7KvdZn?j+%RYKSooe28GG@byy8203$;z)B1W zl*B4pzei7q-$VFA+WRCCLh9wH+)AB2m7)^G3Ug`a+10eoX;dXA=vZEo_fPC4$*ndl&#J17n9!8e+h zUVpTQ;2tJSW=q67NGE;+47fDt4XP&{rJoNqeAX*l`e-zBLflfa^da);-Xy5{#IBV3 zv#|-X?ijt;HBNu8eb{HnhViwCe2J??;r?`W`lTlQ@D6;rw4r)#O^;z7!`hjitNU^T zEvfL$I%C_d=*RB*7XaSUWvaN;80Mh6L=Sh+)^|6l6eA6^t&CwX;!_aP*(R*p*k0kw zWe17nUK3;p@%xKgPnVua2v}4iOTCfxJ$ay!$x$Gk35s;axOj22Q{>Mdgt!OD(^1X< zQ|^gt0nh#E3eUtzPWb;^$`7y)-v!6q`vamkB~fF0^zj~du6B3Poq=Msv~0<|mijWN zq7I&VH_#(SxMO7tiZ%t1u%HBbPEXGlkWg9XUg z!ymIxX_SgUDb(9z8>r)>s}0f}*cs{^GU9Fy(XQmwAtbMb$67^RZrVLC+*?5V-!6qT z$WN5ruC(Uf<$Da%9DQgl|(mcC^T1mYFj9!Hp!5;@wA1G=#xzy`Qw#WQ-?(-7113g(}ULD008Bu|a&;6Ora({$*Rsp|i(3 zIx8ehBueKyxb1&t9O7Kq>OEZB@-*38v8nPQ;gj>4biSLGnXjuxo^H!DbOcL=5uLV| z&vLn1zOEje>ziGp6JF(vcr&06N&GIsrauHfq-}k5gG~`WfQwpt!n};-6eA^Y2FqcvyEG+{~GK98eqYyOqVry`ys}LD|2iFdO>^S_7yk;a6a$NlZ69T@s zQv1X5WI6(%q#m@X3V3AG7uljO^!v{|5GN$a!Ed6>iEqk~WNC%k{+E-anBGw{_IV+k zuRoBsMdleLVVwyLkhAmT~}j)yozvo-{Pgtj?UzDJKvPj)WM$r;zV0;Ljy%tN1uH9t|Yz!M4wlQ+=^#d98rWC zUX~LoesBbXTIqF;*#drwEaA_$SV_O@qWEjqt-c-O@C65e*QuZ|nDRrq3m`BBOtAQp zZ1-ejz6e>?)^)d~zBgqmnW^u#ww#&6Gu&1x9A-3GjUxQkuO5dSRgbqx<>c=T1h4PH4Jkzo7A{PaRla*x zV5;_bsP)(OjoK{V+5fTt2B#{a&sOK@-Z#z=kEcdpY-uQ9Loec*2zV$qF5vicf(GsA zNTaTjLnRTP4~Q+k4c^CMueh6C>pG0U-gH~1zcOC3%XN2A-dk#Iidp#xNWcwD9Pu0z z{g++piS)DD87Z`B8msh-_+dD9eHkiHT|QcFl5{TbFc~kes9>jJ?ua7W^5~iV$R8ur z^_NaMJJ`~bLHdIvOuQ~QBgbXgzH?&Zow}N!&V{_>SopyTHAksKA#suL{@-*W zEHB7m4?dqatSqKeoU#^H+(^N+Zx=8e9_()f=|5qYEUlV-#_vaXUD$$?o>(L}=wPF;AJ<{WTmg+D2i9K#~7xf7#}g5O-qNg&SsS{Z)Acece)}WpIr@9&k|@ z&3iciD6rz|A@e+@!)`o?ilQYG_K_XMm2#|d0A0Xd(%N|A)n(6?k?Fp&ftaZB$;CyN zRAmH<*38-NA3;Ri25XF!wTM=Vsm|R-S!l{yS@@%c{#?sf_96{8ME?Sd>hm#`{PUg+ zJ}OKvGeW?aqE`yvDUkGbX1Dh|Rn?fUW(^ZOmI!#4>!SVobw9Z}X7z1ZF+otg76IpH z0>(TfV4o@AVhJMo_lsS=#RTumRshksYTnaBj;`ZSe72e?EGk=`k74QSRo*Sr{Gj24 zNoNjHoo31vvmx*)B7gBWlM1NgzcwV~Oej(FvZ2ja!HTG0Ro|zNqu(e%(6$DS^q=KN zd*WA}R3((nEtt|Yr{_k%BblH(cg=~89?%u4LTomd3zM0pZ~njlcu1$qP$ zza07L-DN%IeL7f0OR6l$pppZWg@Be!D^)ifONahEh>Z(X5RRvvx^p&UzFB%JL>dFfwB0ohAMx;|4SgPF)wLGeP0R$<~WV_h3q#{3==)d1In3MG9uuK0-E zoDvu}#P?quO_T^?xo!T?yT{2r@8}MGXiT)p#S~}HO3>Zxig!-+>rm!A;IQ!8>Rj3E z6>L7{0+Y~7I2bySnvhq6H!#|{Zf3*{`veQgV#tnaR4I#kn)RArICITlsz`-Bc?ZuW zsI99}>z38s2AFu;tqB!xDb^tYnwMA@3w4DyZmD1uQPF}nCEEUpx7GV^CCim8TPW$W z5|>s2X@q=G<6q{BQZ94C77((c+0foEKwg=bPT&y zj2X-=!RI7uGbn|UI3N%f)if*dZzGk&{w_h**dYcQ>Oe)O!@qjAP&UV?j$p4l8N@rd z63vnh!<}0Ys#w-?+dkl%XYlU?0CqkE4c0@ zSNmXy(X}g?Mk=ZOhYQACl5>qq^BHFp$@M(JG)A?Z2W$K~)o}7-v>W>e+JAbChiJj_ z=On4mQCBSXVcKcD%I5CWMtC|oU$|ETfZ5s;m6F_R3U|DTt7~EY zmH>+By#j`hC-Uwu)%&2Lm#Nm~M1Q9E&K!KUNW*GAt1&E8n=tiS&(a&C1~k7!n`GlN zy2HWBBCmneaHbrc@D3fv*P6jxwp~Jcps08ao`?xRNoNK`^-S>%L4wQ<4nHrnwh1K)LAkm_Yw|J7r-c`8GS6 zW$Ggoa+nF^gp7vf(vR`1+uy#kVAUFP04`1UUYxEYqEO1XVWJ;Cl2A<_`qSB6H(G$^ zt`%owCi5m0@IbPRb2ba>$)q0pB&!imf&Rg0h=eCa&!ZYQj_zSBfziwzCI%2|G%wE+E9$&W zl_0zJvQ_QqOR`5ZAG?9RIeH-~=>=pxl8bt>WydCh8UK7;yM<`;ussUvdqmS?e+T$? zx!1hmdXIP$Ina>9&tzZiv8#Uk))&m;f9`VR&yeN*e*`1aZRBMffD?F^WLoMaL{T(B zd)PaKe@4i)l&EW57Uxatc6-KZIJrDO!T`~`F_M&G>g*=K6@K@gxDYOkQl8Co!r+2v z>t6fwY@-G;h!j-)L@o-he~M|%P_yVOZF46ApBGH0^JiFfS&jT3)k=(zvio~%LCUk{ zPqofkb_+iJ4c})EjGYN815E;X!S5oV$3`I{;qL-; zOpY=>`PG)P{c-WeRv$a2HdydeU$yVQ97F@bVLwk<-tZWH$B-~f(D^fxkUynx3UupG zcW{{Z=}`KF$SU@{T_+l!R6PHd8Q=QWCm{KfxHjL<_iqDR zD8HJ>zU5j~OX;B0P{VIn1#qI;`m{#ka9g8=5vv9#mgQYXQS2YGo-K4~z^31j@?^K0OCjb`zld$l;%Yw&$t6%#_A;hoGrLRiA#P0ZkVPCpsUN}dML*8VPKd#)jn zWlzmH^T8<`GextkP2tw$jXCzv?c@adNMjJ-$eiV{w@T-o>L@jeMG&sV;2dyCvtpd; z`gNcW`yVm3hX&r8jxrQ)MUKjTX4+$tfYihM0jncKv=Xz3(3Ncqd-dI~OJKa1%O#6X&4zSqcr@%-rtVb{LpHx8 zGMg}D1$@#p7{>VRtN5Hz!MLMfd9XeOzC1RwU%8QOVG=tcnyI7q0;R2*>AX%Tw24ix z)*ggZ7{|MYo?8~?iGp`pL<*TBZCMPSUhcE&7_qt#hs^jIVXXx#*5Rt|-okK*z@kF| z_g!E4hOT}S%s-}m4=TZc>O93u5o6a*AlDqm?%e{$@Hy`P!!< z(}b3@!(mI>g^;^1YKVQ#Qg!Ef{{kGM1@pgUdE_k&4r^F%?Gl1DY@KC#3O}!@%jw-ju`7lFPT`E zr%?8_Sh~*b)aO{;;-m_h%)|xH>?;Do`67h&l}@ZnH=`q6sl%f^Zs}Aqpythha#1++ z%--qwy?dzcSX%$wV-NWV;UU3K0C)0QB35NO%yE3M%L3FjR{0g%Qg<5Kz&{xywd_{# z>*f&xPlo)Vwxa#Ox!uBtl+Vcfjj%2 z0s;=2x((${@zfDCnv}A+ggJf61old@9>J9K_xSPLcfByhhd_baALeY9Xe?4t1Cf_)Je@ma6uFu!*^Jf%}U5Iv%7RK zKhQt_tBG5P5SL;m?<$Nt0eiAUaVngS1Hu&$rkqqhuGjbI8+|Sq-;`x?sR(p(lZR32 z`s)X0=f3Y<)tFfSI#}CV_+k$&M?}MDg0GN(@qya^Z!E`GooP!b&}?1`z4b+05 z?GE-kuJN zA7~hFG1vndCSq$*((iEC$M=*IAM}_^xnk$p0G4l6277zM@Z}Kx0{evV^>TbTV0B=Yw_XfP>tRkc^SVJjT zovRFn-u*RZA$gU&8YU^8X2uc$__l}Fiq4*KclAmT-v>;_X_Q09b11V`#EJ0P>kdKi zsMURV(Ush#ZsgX5DT~qnWfQxQf6Y0kk1(!5@%mOa!2pEHScLme?VM(J5Iu(>ndc~} z0fQU}Jv*(di*-K?Dvw8N@)>o&dQPVCz?l$bsoRvhRKm#h&J-M5@{7rvAKrm1p&Iy5 z|JWr>G-Uk@yP?P9VX|-&hU$xLU6Y>*+RykFgbuwoj}F&4Pf@|T5=6wzp7G|-WqdUF^_RtRTZ)*1 zpT#S{NEd;=Qvl6sWvix4)$c1+uzn`0yK<0TiRlKMk3$thKtanee;+*?&~`=u?@PiF zp?3#>JD_|$v$n|iq8Nqt-)zx`4&lyu`CKAYFx5N>>$JeHR#z>^I}6I}d+Rj(bh^X@ z(~d7t+}QEWZUC~#B3=}^BM$@(v>){G;!Z|9@_sDxwy>#FTrkmj*(a(Evvj;FoCN&d7^ofVLd~>^nXL?jF}QJWO&3kl)eV-BIjM z5?Gp9Uj9jYL9!hd*pqag`&UC@As{Bqy9DSjf=jC}MyJzevph0%G*#SLIL|_%wHU2> z${6a`*t`NbtCxREn>1p-XTay~<$q0{Myzyl7`&I>83Z`L<)3FjRqs(_Af_SMq9zP} z^_vYFt{jE!-fytY@5Mx5KUng|P1HE_x|L%q7jdQy7x>ub*@MOEM#fwD9*{mt#_kY4vWka7TwkZ*4#NP)+x z@wDY4F4V{uBZkr{|PIZ7By%c(Vh$*_v|hBcAFu^S{$SwvjA=?%umLko#+6p z&k8n~WgW?OE#g#^ZIvy6DkWlv?GFkuLbdtPjd;^D6Z2HhMvf@&R_eDpy2)-NhCe{1 znM{l9A2-I-@t!^6yOQOm2FxH$g0|2k5-7XVzVkiZ7KYfX)%L084OmAVGl)b5+A`>CWKmUNSd{mPPBo zc;{{S?fI~jMK_hz)$N+K=v+y+dsyIQWAC0Rnc)7ji&F1n0YqwaiT-`V6R3nh9zIwi_|62#tpIV6wAmp*gF z44ko)4Ng)LmfceD-^L?-+!f{{Qr`u3j&qhpr8n}5-@2+YfGUB$D7Nqyqh2AW6ow($ z?0Ie7IYD~M7fUvyDAbcWkmmNSV0RE~iF=y+8JIR@tUu9uUiq@}@0N*jK)juGX>G8G zL06^u+#A%!v};-t?hDuf`3zK_w6hXR&W-{&%o^ZkpS({>bK{R=oh+-W3L76i3udr}tgX3w7P?(KjDh zTIgAJV4C$9&in*}P_VgNOEQ_CSKXd-!>bEC_}1apqrVLsCAudAS$GPXJLr3={seb? zt55Bi2)F_Zj_XX9Hu=9#fc{tLh>^`9@#&2I(s*CXo$EWUE`qZzed$dU2hx@a-kW-d z#L9N8zBOfAJqem!XJprn>43`4DK{{|3_oQ<nqWdUyQgw~{=gv?ZAw z6o?fx!?!ABao7e0j22L(z%+xmmU0>9_>KI%Wn~dKba&uAFHZs)76l?4l^#+ttZ>^! z14)O(0%kzJ5>hHVfwas0#>tQV7S$e;So-R&k|(}kHQcv4A8jYHap6!5+`aEPe=S!+ zgNu)k8WFFXetq1=c_EYe`UCfG`9?;E%z4P{KJOt!Gqs?j!Sut5#n(8~au4+GV4!7w zKH~ArO5cUzi2_@k1 zX?e4wXfhc;JL>q8LhNs{x~u2gx`D+v+4V>AMJFM)?sro4!LltaJtoz(7{Ie z8?*4ozrciFBa|14pUG!TQ;8)*>zkCCsfyHE4SiYc15aYf_V5ZBAa^xp5iGn@$q;3Y zm;Q~b&3({>6Z@L3AS)ehYrY0Hs;p65({d05Z@#JKywMG zGmt@Lt+tEo!89lU`4<&!of{9gc+J0G(;7#QE_B}e5E8BX3r@kAXCY&J4fhr$ua}vk z+BO!%+^NHfXV??GL%ZLJ%|nv|Nm~ufCtZIkWBGAQdCg$GLtjDzGR5=h4OGq(IoAG} z(-6O)8r2q``}qI+KmtSz^gB@elsTwBvX=@#8KkaC0W@!9Fql`a{^0e|BtEpl6 z8;fh#i5+j+hQig+DIhHpoti=o;da(pfj|4`F-WkUt^HS^lfy3eKOpxVs&)delBQPZ z@Es-Lr8r30*4uA=Jh1*Zn)v6hkR~OF?s6pwZ&l$5k8^Ce!`%%DNhy&6I|?NH$rD3}1nQRm8}mkl@gh**sm<-XT@2z8qbo+dsASE@%f}T*Czn zv5uflDPmZ6h^?rMs`NA7IQST-LOz=Gu~~gb8V7BXq<;7W?f(JQ0D59atzX8IJ%I~DEu&XnVFUnz!o zbHCc6`X0E=jyHpUZ8L@PMvl`dvIzM)z_vYI7u1M(K%zgtF8>vO%lUn>Vk{Dt;vf>i z!3v(lbl^w8CpX#yLMTXigJBvG1bS}cj{=5|WJ7X{ov@IvJv6=?ahf7Bzz4CP8?qb- z?Z2tMvZx;{m-oY@ae68U5^KS6fu8goQRC#b?T7v z1@IkwcgyRRulz1sGhK{FJ>;s%QsciJoKgo*Qd)a z4g$CiCK_Wjs8rpBnr-1-d-fPpy*eT4tQ;3T<1lOABzy_}+BbCxXki6_EWQ;euYHsd zlFB`0n@MFlbuuPkZS|b<>gvtd26SN=7nDCTT6OdG1Fc_kT02rM_bID4U=}yEB1@+p zZvQwJ5yV}~!KKi@Dr?wYGB7z6D^x@7M+Hc|Zc!AL?qY=6C*&1|enwLEIfLU#N)}nB zl23f%c6K2mV1zq0X6AuaN$4^LP;%)gkww?PcJF(6zFpOnQ0INOW@;VAw+rYb|K8mA zR=`&Bg9x6A$Szeon|~60AG*}?1_6?U#4h)s=lrH{t+xPFJ&@GXr&FW?o$e=yjxu!^ zX{AZ-q?Jdl0ch*|Sb})x63>M!R5P$z*Oj+QM(f1&va;eG!20QP&JU{JB{rvCxKltB ziun>r)!m9aZcHV}dHIH{oexl?F^a<1`Fv?qtr(>lCy7!Hw8ErQkMvEFI4bh7p?Cr8jabXJlcw4_8 z*oS^f~NhB=>uIi9xUtHo(4m`Qt9;gpU!;ReWj+eG&e z7pv5EoC}BNcE$33m;kg)_-^QB%T`qUf4P|AbhhPmK~QQL!!vnbDsTu3r%V~_U~5F@ zr><51NHI>#P>9V7*I*-uu|smqD(N-AwtKa43_i3@4C+8qcwx8i>LXu7b;W$^)e>5> z|7NM`Q-Oo$dSpJ8&*8M z_tS+<62iMb5FJ*dBY3!^T2ZH0Z+~99nhvddGUq|;IP`eZjSj3kXctpi5n;%}?5Ou^ z(A1u~KnbJ%8CZQmyb&^1WET!aexN_10KhaD|*Zb^^W-S zHXbp{pY`JRd#)B@Hjir>fL12n7~C)Tq6B#C+5D$J;}hYB%2(@^`1jJON>@tobXBV0 z5D%0hQRVRSSR2kNF5yaL85#cTVc`*mfY^k)z$#wQ-c)L?yuQUMR#QEG-bl^T5=JKtllLQta0Z zr-IkA#7RV7&96n$4od7tjPvqr>1qvsvLgOQ+|)=5LanC34bOE*@gWLV))0M7=C!h6 zgn>`@4V_K|tMZ`2{a$=bRVud|QS^1%CGXk6q+{nTP7J}J3=ZN?w2~B>Lt7lh>Q`oY zTaIuF>ZHyY3sTN%fz0gp=W3XN-<9!Ny3)SZYJy3*Iy^2fbu&GbilYwlV~I)OiOhfZ$&OimYrdkO1%4iV}` z_<9!<1Pzq4sz#LsQ4cdl;0)5T8|ZX@KLkkn51ZAe45tDEluq;?o_SUMBXe~1K)SN# z{<=GttB7~a+aK`K8ELd~%{(BBgp_1;t&M(Iz@W>olX*Y!Z-no+#5YD6wHp zB*gKVauo=0Cz|i`*UG$v)t(dTY=6f6U6sl?p{Kt2W!;+yz4ON#U^n(^oM|Lk8{^NX zI~dvbTSalS@&Livj(*33y+;0=dx7^fQ{OiV+0-Y-@in&|pyL12A#-<^)MivSiz~r* z-jrF{p3nKZZG$mZ_%*Ttq?vFbYizL=rA3}m1WjaH-53`L4j_#O5;ez89*}nx(wrh-ncCEU5dFaPkSN0OSA2ZVX(bh^@ldfnt4x!)KPj9#@+5Nj=ov(Bo(NV~ zdL~U#wN4ztk*k%+S*S+QN_1qMr^?$YA<}+J7t8XoPN62)to`jWs99|9!9uXwk%Vcj z*5l-bI=WJ4yU)xwvBAzel*`tcn7GjR9j@D%=e{Ve(X}ur?{BnedUki0;I_2E?ecov z013RgJJ5SS>+V6w)5?v-=u9Cuj#ThhDtruCs^7zF1g6G|ZxKE_$H-%gCRfa#`hk%(N2#c2 z#0D*#dRK@7Fe4AvKOp{C>SyvZ{^nMv&5OmJd(I-!IpMRTXIKCM5C>BY(w*m~HZS95 z>P2ttpGh=2h+1+fZ7_5BTvR9Hk6n}x2y3oSzHOr-Igg}>oa=R}PnO+g^Bxweg}h#G z6cEj{9?MTBWpK{{jo2HnO?@{lL+f%%vb@FQZQ0Yqj-3d4+IPSQtF~jUub9HfsuxZJR@V$U3tV`)Y{VJ- zF{EH8czl10&$9;*CN?U7Rm!;0V29L58R5mV)kxnoQgam19A2wG1eF|HuCjDm$yg3I zLw8h896&1IHBFs}OYYj`tl$u4ZXjm`!<9Zzr6nqkK$}NzfE!kn!kW(9Aqlqc{=0caKaugHj z1vj7)=YABeLjm=B#0%NdcVCmt>h<~5De>v8541Hq&zs@vR1zmlmnhr~P*DuDq*1=& z(?2{9ec@Bo2^*V~C`WP!juZJx7{FT>;^_^7%eIGg;;ZL^kCdD$6PmVU46w9IIDD?K z-V;LONZocinb5tMDMKKu%F8SMdvNh|!R@$_dE04|O|s=-4Wn48oP7z;Uzd+sAhl5! z!Oc0uoicvkSO0xrir#)+7)u<=G28*isRz zrv^F9Dn<59lTUr&b7uv8Go|V0Ji8&#)~I6~?5knnxirxX=;H)pIlS%3z)#I@4#eZ; zS(0=}gavTCcV(`-KU!VfdZy^w9hmUcfed`UULP5@H`%EG42I5JT&n#z#RmQ~PS6ma zn%r?nnj9ifBHdrd7A@r4KPBim2t#YCn$k&?wA$Pi)jfKlu%CGHAzGQ5hp&%U)uQ^X zCqtF$G>@ULadxk#kY&5>sd$hv{>y0D+kez`n|TNzFJ#IIx&u#fA$M|O^lv^;F3thB z^fzbK3}#Q$f={2=e#&!cgNo;n@P&@?l;UkyoQrwQEn-JVnWa1LD#|#ov~BH=E4DRkQ2Od z)KEV8KW6P$@W?vLJG(l&s@}RxoG;GSx7{+z->S6l^X-2#`$Ithi>V)lP_|3zEuWr? z_)U@)(Cl|q6b1I)mYZG2u~OxS`OrZ^2m4_ZHLgVsb1SmTw3S00Ln@qY+SuDCr%2Nq zf79cL>liwJ7+wY8NUz)8w+qEqlj@u(jh}V?n!LL1Oo2YP^D11PrLI9zpGcEu4~?C9 zZ%up*{x-yLf{I9i%{Xl%b)g??!&(fruY5 zbwbdQYm<65G#zEis1S*R)}ClK@-eO=qrTpbAIEVXrV&P|yF6W2tt{c##3V{@Da8A3}W z<5Si|T{h*j_(d2zMFr{{Y&BN$goU@{VU{sglEm7C?{$~MuiI~`Rr#@Wazd+}W0nxX z%>~;OmPR@r=GHdM5=D#08=kSADB4G>p`y~k2|ZIECOg3W2q97dHUd3Hx_nJU)q8K} zhM`9wwg-k-wgNJ>*qf+!&>jY3Q6S^D{aWhFWCNG>PGG{qxsIsdPV@R&Mt`EN8=phX z`|uOFw=150t!2i(*-GV<8&xrz=!b0+5gyn4t0Ggp9mTd+y$o|e1RiEAK1%WdcfIYb zD^nAn1HOF^sBu`zVc;HVna1rcRO3u#Cy6X$I=TzR+yKsIEc^iG=2YyJUlsJ!-8`z@ zL^tGruZBl01w~F6b&|;YEgw~~vPoFrYnHB7O~B}vu0fky@qb8euhLiG zbo#_&mF zY3wq*JMP;R(Nx1b(dp8GZ8rNzkcQqOKKpK@`}tOF4n9N@p2Hw)pT^YU4sLDw+LcSb zR&rXkx&18?io=9o{=Jc64`l=li-$Nc3{^r9Uwf!#k65@9?(VZ1HOQ0t@kcW_+(>kn z1mTcMbgjivE`mN7`jNPi$kh@_UHx(>BUabR$(ibcgwuSy9gA;kjaH3y7s+0sBrWU9 z6-AP6J{?izHdN3$*%qi7Y&Jw={SFkxjCF7uTVWR?#(~^fp|3d$AHfQMW}be4-%HPz zaL~Fp8dMuN<6w$c+R&bL92R?+(z(P2WthO8Nx_gmXliP4Daed(=mO& zHMmXug@B$Y#wpT)wXanbDl`HIAz374Lz*pRXS0EIni%cZ_aWALF_M41w3R!;N%E2$ zeDE%}7<@fq4h{E;kLM97H^$))o9yWt;A$P_ajn!U;~)bo7Q$X-CDKI=oW2vC9vT!c z{vm|?{S);2nxTN&;!7N5I7M=~yX3QuF*G^f3}Qv=AVD)V39o!G-U0*#Y`&yiKeY|j zZu+my%s;>wW%|P(@wme-MeKUAl2z1akZv%fY_O1#c88Nb@*x@ANc74)NMFq|H;i!T zs{?=Wh9NhkX0W}bxSZCh9l>XO@*~kjO{KBU5C|mnFoh;q28I+0FlX_9F8R0;MfwXC zV*jHBR1t4Lij$;0<46M?8~WDFA(NIp@z|e~N?|lJHxDjZsM1k}e$<66(Xw6(wX8dn z1LCAosb3()4J(UK2>q+Nk+>s&mw5_8zL`m&7SaAms>MQ^s;4 zjPyxSH?cek@Gai9`?^Y4>U|(prPt|-S1@euFC)Jt)KG`TG5eE|ZX@QQn++?Pu+_3V zSc3A7tf%$j%H!sM_qr#4eK_fxF2VF>-*KDzjB8zqP{*XzOo|-*flvzM-$(cCMobbM zpsBeB{%0xgeQKY?oCkI%bR8f1Ee0VLS@No+Y={lYyT3DaGxe7)a+#`RBda>rMStyMN;u+N9B{-AU|s*e!lKtaabmfLT7GE$B;3+PNn-M`_?zw%xGl}F* z+pO4HNvX~L!G#WcSJ=>4-fibE;bT9Pfu;AegY}Wj=Hv@2O{vrwDe{j{#?z_a2fbvb zZ!u?tOKs6eeK)PW%Xr@(4-EyU*TIbYl72X}xX2J(bDVDCFOK2pW+s$fV*`SEI&lx& zrY-DX@m;^b5Z=9~{TfS;N2i#1y@rrd8|THj(0E@+>$bg(fR-7p zvmzicWo7WOl(0h>_FWF4O=I~jdN`q2+;MQ|T!>MEG^Oogx5Rwut{ED}qC(5VM8nuN$r3J9ef(BL;+#xAeoKo^i*)h;F#b%b>&kJ! z;|YpDYUC1w@yqd5DrVkzme8TIfug+A)b{)UlbRx)YLGhm@g0vvAfR?zC||uz(M>bA zUPDA7W+VuQ#`OqU-VbiY*qybZ7W{cdyBE6~!4?Fp0YG5ME}jH^3Z2n*qv@ae4SZUT zzoNPFW5f^Y-yq+#)eu)*8s@>al?e9*KSbPc2T`tV zfPH+(D|HO;QkE;Lkdljxtb0)9C2;NH(jTM&teKp|pBS^++=@zn=MZmcnggV@^U6c0 zh}A@#9nU32p~aiIA1@^Iu-tfbmhGT6y~QLfCt&T&GIazu`CZ^Qnezo)7AGWIU7le& zIR5v&SXY8Pl!Yy{T*OGv91fvQIk2AV2X2SrH#LjiJwgT9zMpL9%^8uk)Nntq|AM;! zF^pZAgs;Q^)#CS^){t0vS3>jUv}_EBc#fXHVZCAvF%L-$|5DeND)=27h?1`;_T8M1 zT;c&l(M2+{Re7PH^a83>B)POnew}YtA{V%1E5WJZEz1X0(8`YN8*qU$3o*Q{Q3|n- z+RjF*+t~YD`<0AGD`AfCvYh=L#s}QYB8Xyl@&%?08Y)TGoJs)iAp4wcd}tS-{R>gu zEn?U8rS9oF#PV=p#Z$K;XVbQV#K<76o8E6(F%OSGuXJ~p^dJ=ouPs3Q$+_&3wl{HY zNzi1#US(7C=Z9Kw?+Uz-bUu+}9nv$+#SQ>XW4d;C2qs%uVV=Pn!yw}>;_!Cp;o)5^ zBILnpo(4VOg4q5c$_83K4_4Tm6rAIicA1!nprAVpN|8i@t>nbbQ=7fy6qy=!p?1N# zp3xCbjix}CRju={hU-DLrL6XjlFonFn&ZLCaUR!BXF`&%fljhB%*wnhe@1cd$7Qvz z2Lr3CW&&+({WyLHHg}5qeZ~2#mNL1tHSl;{Y8azIsxcHVa#rL+{qg@{=t~?15`$hS z6Davc#>K$E=;1JS`?vcrFT>V%`R{wlqXelwR{L(s1er#^wUv!?_`~zxABd&Cy)D`z zAiAuzyq{ytEq8bPIuna_c5|VwKN-)8G{p5uOUeTZ@d`>T66(B<_c$2TbQqq0WA&;N zHv2-}S^)<-yQd|yHDku$uLwT2jFC_q^#RAB>TPenV($EM9$Y@{awst0!=#w$j=7M0B&RgL zzfy4Y6J6^M6cjY{CI7j?MJ1I@beedeeE};Q1Nmx0P|V(5?J@FmHErn<7nJRp$*Fk~ zC!H!*pBnxs0yK)>?|O`1ZP_hn#=Eq1LzM0u7c{|fpczKxpX5dfxvCsYs^zRd@=XGP zR2x#U>UR3NI&A>y8em$<9bI&9zQ_r>i)B)oX}byZxs}8zWei!tY0(x9(U0p>8d&H0i0Pysgcb;evc9@F@QpUdo8cILvvRbsJ4x{d9^F z1ru}qZ#H@v8%^YYFRFL2AR8|M`?#4|}as651NR8`awaxt)WnEbmOj-s;E`n#_T=|~MqPgPr zYI?#ml}b&y6{wHtKS~s-xm11f=k#xN{yLk4N2!N&*0-U3Jhv7(oU2#Y6kTEaO-sF> z;pnH-pC(q{_pd_C4@N z?4+hB&uFH?p92`RC}{}2boT>{kZ;}e`j&Q@@ZP&HIbNJKSyM~~9O{)sS^g9T>AO~0 z>@8V0`{Bq4v^`Pzv>Y>LTW&;gyPl0$TiB96rjhIjfTEmxsc7+3%@#qA^VxFcUAVn= z$hsZ`A3BCthnDGT8EFJ?;ZLGXLLAdH8L0hCB|kW&^NAX*aJ?+4EBTh>(3=auxZ1GO zF2;EX*0Wb7t;!p-ddGI7>=0GITmh2mn1d{iABG9kX17pJ9C@dN9rE*U-nG5<4}u0H zHv?7V51Y2lW;XV)2pXco(=NDTn0I)f(l-m8c8-2$e9tmT;m%=}WO8#tqbyl$WDN$C zpnG$)^ixw#w@ruH2;>jGQWB#BB_}lqx@+0TdXcnH7doOXXU<(;9pKFq=Ejdv5kCaw5%ft? z{UOjq<9z613GsAk8my?~y>45twovKCaki?8zJqiX#BM<_Y#)S!^X zj_+xge>GcuaQ~#Se`Hn*)Hqr_b^R#QI)^3%QTh`<_oG%AR=qZ}eE{m7se`e62%#i- z`xRwT-Di`k9pyS`#jxV-D4&P-&0UEw5>T{8aSlbOW5Z+(?)$c@2iVodcCC+pLb7@t zFpz=nDtNF=v@})8l{)(PttQ=}S^7N&n-vF)*1;#y)j~1JXQB(ZvQ#Wt_Gi-M07eGnYzyo`o?Z?0izY^mlFTB*LHfvC zLo$Y;GVmb(!aD7b=E<%?sevp-M=$|KK=O|Ml_q23ka}Q{g@TnTI10%R4)T#g_D+-( zr<7i{J;P*rWU;v+JcwdifWd|ymmlEYs~*12j9e}iF+{%DE|1n~WV)qIuDko@p}>IxI)*;r z4P=MP8Tw5N5$vPV+E^+S7!VObR0I3_xvw?*4tAwn%lj#*G)!qamAhMh`EU#I)&MVt z+^*MiO-E9a%pUF9{o=N274ub8aj>e}^W=|3@+aYSx_~lT#8*|rLXCq#@)L5+I<`cv zl|S1S7W^@NUTx#S#hm0E_J;)1tmbxp(=M&tXntMp#J-X1YlYz4JD(y7bXr>B`d*Wx zW`ewJ%0nJ4<2S%3d-=TS8>;ZX-Sw?dEj4|!*_y-jn5)@__9qwmW>F|z4swzU{agkrw4y#B!cM=kf z2vNO#YdOlt=#}5hT=4km^!S%FY)a=;?%~5&U*o_BHMQ8E9~6j*uJkOqzt!|Rh5tyc zhh=Cy1XVJKX&(&8S8;urf=^|*+)+HD^;X|X6vGq{=qHIdS{>ul>X|Co$W!e;@V=jjEd!aTcC7POG z5r4l)5-=xkqW;j)hj>#hme)>d4Pjt(gHOse2@0X=_c+B7Dwtc2SYG+@P7PePlp@jz>wv6~fq z38rc(pHMQfewQm6+v##-|Fn67*Lj_qwm8~CfB1IQp#P_5aZE53$eR@>fCtE|(UKP< zH|PbYWHCSoUIm#m_5&pRJeoXLTCIrIeVoK2{|;~hcF(fsmD}l^^~-Y3Wfe1=1a=Uk zc3&QmIb0y7l8}B?JWqZvfvynnsi}>p6rtIyj8-uU0I16BOt&XG0F!E z^)|S2&m9Yh^M&2W*k`Y+fcF}1@wn^C-~u`-UhIAUAf3x#YK+-huN$RN!QPz&41j9@ z!_b6K{jLuYB6;+qdfB4&2eLjn3W^MWa)p@2&CoS%I)}_-l}T}1>E#<1xq6bi`!6g+ z@GG#5g7Y5aqu4z-EeEm)o55wDXJdA(5ixD54)#i})_K2)dX zZRD4`k!GEbdQ*GB<$IKWdeZ#>0Y+s+WJT5?N@Ci?C8w_XUq&dxKi(p@!Gq0@YZ6df zJaMO~a0R65ez7S@o9M(W&eT}7k0PT;bNvE~ZCXK}VUXhO3HjplWP@A57Z>Apvlbq5 zMU-_rbWEML%Z33zVQJ&WrurwF-N`3PeKH5yFM$ASlItUY4U5bcMTCG0xt`c5dSuUM z!sB8rKdvUR9WC5Taz9{S81*E43^_>lb6a{AC+eO2zVFO|RBmiQkf$seGEgw+qPs{P zK3`1+bEb~cSK`{mMqx5~&nrVd*H&!V#}`~DFT$RPgJhYSt-KP@c(|y$s(50dYe(LX5!nyKdW>{3TPqoZYm2kDRn_+35qFhKTzNe0pn7 z4HOB2`x=#1LizvxMLtQg$JKnAE@@yyHE&kg#5Y>ei)O5ZAc#IzC~y!mG{JLK7l#30$Uw zJ*v(^VY-E@efbH%iE6@Wk9JH_Ae(+Qc0_2nnd-If$06sTh01;aU+;leUK#FB<;M>W zbFdO*@Y_=LDonE!BqOD#PBTpdEGSmRKp9t?f&y~#jVI`}Mx*uu#P-tjUuuDLRnWr@ zo3x_fWF|k<^lB4g!Uu3DXu`>Pq}W{t%09>0$0OdFLAE2%^t=ZQt&&tVvtDeQ*P$6I zekW-t{Ut?z?=T>69>E@eHPpj1YL|Q62vIA60*~(5iJ62tjm5m;!*Ev7^c#VKT8(4D z6E#F37oV+^nZDE#N_3;=sLBgsXi4KjUz;n`nb@#yui*EbMY6P@fo~zW8U$7#A~YwW zDwnAUY?pT|8^*^JdG*e~9?mX}yXt&-lZ38^8i;&->D&bi&mP`! z^Ku&=fZeDIi{+NydUGiJx^C`{_^Ka1Ce5SxzXc9Hr^pe94*C<};PrFp1D%50unwVz zN3rft1s(^lh67)2|9??#uBu1dhNZuh5%Ee<9zC5eekXsVJ7HG!cY0BKTW?gAs5HL2 z>kCAQN(Eeulj@ecpMHgsio+eoC2cYOxqGtKO%5KY8X2nBOzR{54)>f?@V><)~MQPe2!%avVa5tc>d6D0GEIt*W9oicT2fL~8__uBY zoxCXviO@$ffF=S>xZ0-hTg!IYcLj$&xdggvEK}^K_fQ`aIC)y?-hDeG#FGt`*##;Oa!pj8WT$~X3>P)5?&>`VM}&AzlWx# zhQcTrL+UgS+z6C{!MIx5lh#VziC8?5OUZ(SkX^~XMzgCoz z_}ASUrbB14TiWtDOevl(=^c-IjLu4}VBiXXR*!b`_Xi2t!1Z8zW5J z_UYFP4RT}*?1rA*%ZyXHX3bCP|WQ~12zL|m-G%ptJw}m%InPY-I{+8CqL)NCj^w@d0pDZhRz`}@xz*XMf#H;Brip9#saYT5 zWq$}95mKj99&&J_UA#|iL(&H+6Y3B$G9e$$2wa|q5luhKDF@LiDUo|yGLo|k)kidD ze_11s@v5xiM-c5SB2$iR$B)bi%tXS=BW3(P)2Qdlfw?_jBPHBxXZ>%^?vfykW-hTZ z8=N^=wZL8GSVUh2z=%g-Xfn(%T_3P?9#K(4FB5@8J3v9K_9I>5o)QL5mJ>y_%XIas zLBvz*o^o}nIae-n+HhgX;e2!E%g#BsTa=8y<|zF5M=A+waSqk+2v7 zZrR4Cc}&B1$}!04h&o&z_`r3b{oSyE+ET8~k@>hYzesc#omTx@~*Uv%PaF0z$4i$Gb~@ ztC=Xpag2c;*D8!5fUkuo5RJA2YZn56!FKreZ7&LzovBAV7h9qO_F~_Bp0>@&quW<& zj_fRi60Bm@ep%kk+o{RhybO0+iJsfOREmQ#&Dvn%8TDHN?19WYvB zI6FH1`7>pwICWbT-3_n!EsZ5t5%sK)srS_*Rpp|w#%YWUCY7}4hZ?uRp&YwI0v4Ou zo=3~dqbURyYIZjvhGWD@s$;|^T*b=^SH+K9Ua^5IrVNB%jA@N~HOS`uBqT1 zs@Qj%BryUx>m`Vm>FL%7cAR>bm{ArTfiR6Eu!IcM6oT5Vo!t!2H$qQ8+|hSa<5t&X!`3>`c`TUksqr9>@`;0l67(Dg|2rmVaVV^ z@ns&sKaId?;2+vgP^L2a*>rrXlUP5G+$r`*ewQO>Se)BOhB>0AGL>J};Kp-giBwT) zd)mfx=!hWod$6ubx(?AkT)j9^nDpQ@#kI%#i`YjFKSY7AC zjW~-kKxmUT36Rwlo}Rq9arF7eTq3&>YPLXQJ*qspML!DI#PEo${na-#$!sg21ThK+ zZo8b}t`ETEdXVn_y@3Stj8*5K)Tb3e*IP8NSLk6BlF4Z^02ApTy5H2_Nh#E{)Pw() zSTAw9vZ`?MGEAAzPkA2WP_;wUp6_xD1IlcHGKl%<*hWl#W|ohxX9RD0h`A>lo%FA( zW;%d`5OmDD`gO1Gs-UR^sxYYMF+2SMsqcW0`Ze|k2A&)o9pd2Rph}ETuaHJd2Mj5z zVStlVprSuh*N89;hH0eYUaK%qo_{LlfW1c=-S_teugR}oolMQ^Z&}SBqv#l`gqOWn z8^54@VQZIe(C85>{sD)w|FqFWC;g~A>Y6oG0*$ceqUrljj4Vs7MVSa(F-Yzc`cK`x zh){ayzQU1t9yyLAEmc^r|E;{8u+~JC6>u)Of!t#(1x1imp5Y$Q;|Y5tACB!>E~Hzi z?zAPvkv`|(&xMzXGg@iAGMuHRuXt2W1seQEuo;#!2jT!gYkMmn%(`TVC1czhA0Dt5 zUKsQ`(KdWKgwqs!caMv~dR))9)fnu^i_RY@jLk9>;O4=rZhp*ps6(qwFy$29)Oiy$ z@~F%~`;`tOS(RcRLB}!fTGuw;N}-XRp0n(}Ba8g-?TIC5+4YW2Bc8VJCx^!vI?~IT zucrTfJ7pRTpQNw*^*54RM>KV$Li$Hy$={Kqy$@pM6*LZ30#30WVQK_1EMQ4gRj&m5xyB@o%W#1c1?7d9 z9Top&96ooeDbTmw88Uf=sh8aB%>3Fna)fHY*Fr@@7^unZ$!Zbev~EM+ZBz1$mN|LxF-rOCtlP%r1_?}cg7F#O?rp|3vAtVkgt($Z=EHnse ztXZOw>biAdFl(0~ZX~Xo2<^`uN5JN}RY)s4rjC%&JMMt5)%qJi)*>ORYNqcv9!e`t zPKb;}G`}wPTGKjMw$=;;+&DE{xX^VGpY>3dGkJARAh0K@+m1bxw3<3m7*=fCPw8JMlrp*+(%|+vqh+(wklt)_BZG%VxScZ2D{Y0`VlQtG%X3uuO%CnN zTPzojoPbq93rAc@k*%gr&L;^g+)1T}RI{09alGYyTKmL;0IFMIRdctiGC{I*VDBt304>}ByZ5Pk6C=S*Gjc{l4V7c0@?2G@i<_I5{Ylx{)cBrs=;CNH&wg7* zx3*kJE zvrBG2px&dP&-`5?YZ#OH(9rFP$4IK@jG{5LQ3M&$zQgzgUa__}a<&M2vhHeZ7QrmI39%#f~<5(;;CjP!^uA+>R%w}_5 z>LFF~m`bX(pAzcwYNfRTp8?UDU90#76s!}8_4c`X)h$h1b!qNFn~`XiVIxjEiJE$FjpckL7P|c4?R*+nxub!*I+Y@HTrgfbVrA zDOisXbcb4zw#Tp3wP5;cy1d0c`<%sCdyyiQk6g&IeVfBPoxhYhu`cjw@VuwoeU#OL zfdi=#-wR%R%hnS$^zfxu)8`3R>KGDY1_c?FtO#q@Fs{SbB9eH<{GN;$FFo02k?{Jr zsSNHdx`RRkb2O>*?p^V_90nDnw#q}*vF}2QzKI;e33_#OkpFF!a&*pq^mU82*W63F zd|VcDi@Rykdmj;FB6p+l53uAEXzDXCtfAyvA;DTp&O6XGSC!gox;Pt>V!VrlERU?B z_&r18+Vupje%c}8hY%sygv2Oq&zGH-n8ja*>{8-2^iX^+uwgh-!{2&B-PYyv3m(@y z{B*lF)?-nc{2Cc)NwS4)|1cyqt79jwk1Ad=M_}~*m=^Y@eAoa2&-CHhlPpzF6HPFfLvUE)B(a2n>K< zyq@RWlu7YP0VaGNsk+a2GM}gHDO+*W2Yt4S-9H{{R|f0C6n#qiMZaxBSAp?Tx8Ge*BZR(`JY&li@}W%guD^%Q2mFLv7vNRvMr5wq~^;rX}TTWqMG&2E(7S zmPVCZV=}gu?fg=xGj9CTni?sXnGaHi3p?`J3bcjO73EAKiopFo@lEF+kf?9Y&No54 z7~i&pV$AWmgj9|2(G}i8Fr|nR$O<*2UUJWC#HBzai%B$N!!phhg}>hy^C!xF*mbz^ zY?R(!NqD5g{s03cIKb_ZKPLPFfcps!N%2%3-~7ud1IO&8TP8lD0u_g*H`qGv;>%F> zDd>L~rZk($9Hv!2$pf1zVC(iv_-eR^YZq@Wyvm*)pEWRV?jX@ObX@_3d#)MgN->j< zWr~-f;X#bG9$t^b#MZTT?+urXTC?}XK1%#YH20G4Omqh!S0%1TL{E7rEk|3mYs*^H4>ADYFni6{-^vrL*#L)KlAqWDOOZmA z1fkippTD?j#(=cy@(>PGt`b7dBy1|3XRnl$h|crDKY83ow)7G1+1Vf-6D0>`x01@3 zlk7@kYrZr}wJLyw7N$mB>@B4!#cb7jU6t_iKPAB8Yl{<`Qu$PiI2j$WLE0pkzHq(F zXjBB8HU!PNLkSb-JNo_fCBESI@&l;lGMs}^$!Qt#@+>;Or^>hOJ1vv!&5IsWX&l^RWKC*de1EmPl~@*yp76ldkv$BSL!;a2F9{1{a_kD_A>BI&E06Q&GNPk@zryPW_p^u zgwEBPK%Uk*{`5394{aU(usyUahwC+djJE+Hj7n*D@lOrGYZp z)2X(?*cK_j{P!DYi)w5VUC-28Hr%{GLKLbcgsZUV(EAaRfZHmr8L$hdVCppHzwC$v ziW{68M~#h)$YCzQ^3665bXt=a@o{$D0>j?Ni7vuAd6RVW0%8$@+w3XHCxoFD!4)LI z#ML3uVzTy3{{E75R;AnA01o0k`lw5xOkX2@Nn8LGN#uNKp+rJ0Vn`uTsEG|t?7@z7WsIgEL6 z57OJ`nmO_=8m+~9jm>iH$?BFdT@@QYY*=ZafMdFs8rBP2lka_Qr#>}KL)kO9LBBh< z>p;K%&OeswRj9|g3x&1U+(y+oAj?Mz`%CTR3>wiSGYP%Qse_2~kgh$jPePM6Bs8;$ zeuukcrNo;k3uJq-ib}*%f$sii+H7$*t7Ple&NKN@SH^$RnSM0moRek(^aOeKzxY}o zdXm(z)bS`U!h`jjE3YepJAwvO$!TT!HrQwwc5x7?hDuM7{&3r*M9?Y>aRXYeG@ohM z<>tx?v`8PJO2iHX3G6!Ty<<;(*o6}rA>&#>*tm1At~gm^;~%=u?Sr>&++?5$iV6W= zU=?iCpuhfmp$Pv&D~JDH=p%>>9mTNX?!+YY!#lY?HO&BM-SBEE4iuaiKtFtC64Tk)=}> zH=P%BFJfVpRPI5zr^E-nQxO}$vA2~&0r7}xV10n+hz<9=C8tIn%V`K8NpUz!#%??_ zb@Tstw#nVS+;8<0Szl038v+udSLN6wyy^ZevyyRl-?9S%&^-b;H-in8{kn137Qg_x z8XoK}zf6bI08VD`I)zgdP^hn;@w{RWhj=+Nv66-KW0vKcaeS7yn2vx3K0I>AR{dJj zRQ#0EA-PZ(R7$1`NpGThRUT;iVgxMw-mvU?j3rT&g@P*04#(~2D97ghV)(sBdULVG zal)4g&gfgJKW+WCLPDIBcBQX1KD~x%9~OC!nhDlk@j7U#U-L%as55R**->E!)f z`av0<=x!G7H#L;~2jXouGDNY7c6Jr3K9$|94}MM8UtP_dAjL02)>JS>Ci_V4^ z7E0gbWflx0FCv$wo;fdVi%(#rrUpg}X#HsgOD?EVS1+Xfz-S!Z5Z&21Y1PXn;9NbEp>KlC2_4h8FKAzCLSOVf9*>Vp4a4Qq^7;03&iv)2Gkygw zYjnRQp`Ik*1+*dU<<>9WOJ&ZmNN5P!ao`FV^?;k{HAo}6+>lsk4N5RL9K@fO{~6E|99;p} zS!1Croue@IaKil7``1E3WD)M$?pJyKT7y8q1}IStm^4wB1W8ODieb^3r= zFcg$#85S26FbOaWUdG?XRb?l1BiqvGBdB8w83`0=;*K-A%t(qFfuHD>f$txy%`Djb zWeNTpTHgNM>~`SzRwoiQY2;F6>OoAXhV@8Bx)9PG!WCaTJ`KJNNUxE{1#P$aF0=o$ zh!{?ade+Y!$;p91E}-CHniHde>#jy&>sYaTNn=TGAYkr7Q%;Nc9J=?FJOPXOInRKX z4wEA;SO*^64s@Xn+N4I^Njli?a}8IIR9}-jy3=^F3*yAK2hDG!xTL7X!?Bc0!ZOW7 zk~l8VGH}Wsq*@VRHDKD47Z6BK!OQj-q(1TbcMkKg-;!bDpzOHYz&_1w8{iTo{}MTy zIQXY0ZVG?W-t2NlyVv#^-y)nW3H*2)2n#FI2*yc3Z`}#j8qK9?2wTRf`e4)JmkS5? zeV^KQ@+Iwto6b#|ObjF)*TrEDMt)-Kbq*E&hqFAhqaP%>K-H&QjL>;!E|Q{9->qzS zvf72#^76$6lXp0q2~mj{#f6KlebOgzL(+deTpU2xZrJ*Lb1%<^8)g{HFl& zG}uUJ*|5en{{XCr0mib~0&|^9uB6Ex1>MD1JStaB0Nzk1JO91~0!c(ig7RTcbbb4c z3=iGhpYykJOwFqejLUEmeI!M4&M8EpJLWysq)|#&jaHL=wCU3RFq0ghKlOC}qAr|k zvAKpjh_jYhVUo={YGm~JZ!sAZtfvj-FmfEU-@ck_2H8MaDXZX4Enf=csFRK@>oHkO zgHSIcf-%>#6_$u>V{~YWQyVs5m2;}@?B{b5i%Q5t5i~pU?6-$iHZdp5$b2h!ZBgd?A598&`8_6z2@;6x%U*ig+$zG*HS zD}f6V4H#-aIW|PDo?z_x)xLz|)uCg;lY1cgQ_)zDaxfG!-n^zl*eNbvm=u?KpckNM zt`IaO>lY56&$h5E>b)~AB7PYB=6G*xS-GfwcUM@rwZ6t0T+;o~dwpB7UWr86?CazF zRLGSz`5|Cs2Wa-=nv3YZtX>nfdt571oDYEaCld3x5np2mgKL8(H zq)Wl>KI6r5I^yrgW{jh8shu^%4tJWGx@ATo)k8ck*4<*va^|%V<`d&~?9!S; zpQ*z4pYR`O`tC1j$2<)7Q<=P5PHE$aRgKz8<*%PR6kuAK&zL|Pu+#uEUjU{-ineX% z{PocwR>$?B(;<&Q$&wWz6O1GMqt3f<13)M5(G0lUSwXvKdLJK{Y^rTUb;j)qXxg2N z4SaYM`LapaUvd(M@K4UHs`I9`=W~BPaSENsvKJU^V~G`MqbP5;K->AqR`b83@o_t$ z+XXW~3Plah6lgbYXj->wd(vTM^F}9Omi=7i*toQG#4WBE2oc}E+MLN9PZ%Ux-1^_;WUYUy2C8`*b;NvoO6BP`7%_vv@_ILASM;^>)&v#*to-^=ccvkkIti3G>c0f$zNB@4A%;MJoCWB@x5T#5!a5GK_9&?UFs zx#P-(_rifeL!^^ghoV~;Xy*OMJl^QMwSsK%3ms^0WO5kzeqHipmUe(k>G#Mp+TSH@ zI32he*^`isS9C+(GDg#e49D!4GjtNuTluFU|FCRlZ$E8Txh<=b(rH8I`!}Ge@+1!# z%wp_s7uWUS<3t+{7__e&b%Z}jTqe{@75F%#%+!L4MXK$>@q*`C%f61LUlYe;bS+Qr zmp*Z)%Ngj~23_iy$wFE*n%B{H`qKhONoMiJlqg2Lis}C6^L8vhQV)SHzxc}`X<$|h zkwm~8bRex#%%OMMwmbxuk7wyJpIFgHAezYWF^nm*tmZVREVX4`nWSxP%cqTv+XWm3+Di$5({zeU=wS<7Aergk11-a@u)B0jX_1?=7dD)boTvhH%`GC~Zwd7x@r%~0O3#huDi}$Q^#8ap`4DNF7 z%fG=b)%;?g1hmHdYk=RJu<;35$8qt%RB(cG2(Fsh=()9xCQt{ji}C-KW=z!WdTa?%(%|qw&l(X-6!S9a!JbOA}T?(2URk85*le zL?ef>G7b0%t;WsJ{?F4{0-4GloNNG$OfdGL$;m88vZ}}UHXa_MpCuL+HdEKJ1O}Hu z+sJ4f_b>=k+#p*7YYYq%vB;o;3i5M9&&xZum8BM=2ee(sAr#dFg1{u0e2^|{2Yd@& zG8HoA#6q?LT_(=B#&e`u696i`sdSfTrkw7`KbuMc3+nT}ix05CD?L}au0|ZTR zL$FQ(hg}Kw3)wj<%?QhiSI^|zwQH$Tk4GN&g$TvFBRmWIs3QyKWshGZkDiS)YYOPo6Gbphc+;^O zqOHlX;VpD^MzaT${Xa9F1PNXQHjeh(RwvWDOD573R5G#^IP^5cW1Tu*=<;G9F)IFI zD=ui1vC_)yEZT_?QT`WY`aCt~_q44CJtw_9zioiw&rJtGE}y48pdQl5VLnJn7loqj zXh~^fa`%AyOGQaI#~a@O$C$M**9 zTJV-9VI@N0EeOm{Gf$Af)qfH zi8~HMkz5)F|M~2Du0b|E2jq@12!1^+A=*(LG zR84L#>R!oHmXGGDTSM2IJr(P^EteNjfjLNWG~uE(BGuz6)~@D2XEA5M`%YTh!vqtR zBz1_}Z6}rbo81XyQ6XHdPPvq3q%KWQKL_O3&u6n+GhdrqZtN;H)~=S5KeWo*4D^$dIp&rI90QA*Q7qa3(4j6Z zaO;Zy+-~@s7C(Hj4Ybe~r`<6m$s-M1A8$CEa_8VEy$PZV@TKPppom(O9lZr>>sO>M6WZHIMfxaQ+v z$0R>9VAC1JBd}$To&P1;H^@(9h@19mUVC`Vq2DD>m+`;!4Mm0$g-*F;H-g|KEP9F( z6P&UU{mDiA0d@=);$PGN0ADISd&SG`2Zgn# zzRDO3jE_-49sv(fs6_|A+Y-_>zoYUiYs?X|%_Al#hw8-IXh!ABMI8idcTt<8l#0M< zW=<#QHx%KA$ypcqI}>Ku6Xb7YK?sSBjx}09YjL5uR$YO+IdUK(<9boxBCAi%JPk=x zvk>ey!t0xw<^%44Wg?dZ5vGmnQZj;#IsB3PW~+@AxLF)hP6x+j`?(%(~YF zwE?!-lJU?h8aKsr2Q=CfF{U-%bgf%GTrY+Be<>d5N9@W za1C^zi_1ew{RypQEjjpOV8s(d}J;DSPaleC1Z!{8^ zd{}whuA^ZDpB@9u{o*a}QeufTed-!Evgt)b1E-`dO@Td7w9hFRMP{&2ph#2XtcQ2F=?Ac3a5oj_RjGC$-J3Oy_40tw zSA0cH@ZQthHvkFl(_lIYtJjs0t|Bs<`%Mh9V)}@aYidU1w79cfi8N zh&N1l5^nWv53{`Yvz0^bs?PH*!Lh-1)4mDyO=SQYP2cr??P3;1h{_@-`qn?oxv20z zR=@a5+X{g*_4^Qfj~5R`N*$NH%e*gb3cyp@ZA~rXJOp7DU)9hy2$84`S`YFs*MAsM zJ9eF$Md2blskxzp07C+W03s2xwSzNyn8EbRQ9g|fRMPw{4p-8u{*Tso3eaG;1I+OW|V zDi?bXNI)xE1b1>WWQ*6}9-Z885$`ZFWu>bUYv6g0fqIMvLg*X*xi~E^3^D|NF^BlA zWdD3|s$9+q*=|HBcfqE6It+6eYjh+shX-tXd5Z5hljn^I0!Uye0>D0AH`0$se?Hq` z+Pv)b)`y}}uB+xb(+zg0r~BQw+h+pxh8{r^1Ak3Fqoq{!`ETY}_*?d`nm(XcB|N~2 z@;_<)915)yB}xj}@go9{$7jykb=Q0Nb|Ag^I?r}VR1wW6&ZreyH)kj~4t-V5hhlc& z2`ow(_VCGR!*}%T+xYoRJs-`n)GS6$n59uCHy?nV!?~Vge0u;c8~qv%(#(5U03UfYGawtnPi%78M;YtMPScj5Q)f+_Sp1j8UfHBgP$9vu&^ zQtE;Cd?RXFa6`)XzO~FKVogUdzs8_P+Kh!WW-z$DK;k`^==*71{wX}LU&$@*8DPX8 z$mHTfzoxBA`mQcQc$*=crTKoUypY~hJZT;HkX>VTpDX;{?xuAQ8vA;_CXMQ>dFq>M z|Js`MD8Ffoem02F386K`yk2_5OuNi3$xqgAMMP$RR2lmgs0;hEoisuBVMp#}DY{1D zj`os{^2<%UgzrDzsV(vMBw1ghSHhcN4&Mf-E?#Mr+@n^T8{Mg|dbl5ai95orNjo3> z=>)d=sAc48tvy9#33nh?`~r5_7cX{pnbP*#Ct{34Z&XL|5ThBZwG5simHzEv-*F6m zp_ODicAUUWZ3AtG=guM#{7ZBE96VzD&AyJ012=mMzNAZwC45EIn6#tNd}jNEj0uNK zZXlYed*lIOYC5vq^yeIdZa={x9}(dH$3K%;?SL;10eRM5cj1pU_ii@|-ug7-lR-Ak z>uNo#%~nKMpz`~UK&E%wtNE#1NIEvE?{`HM-Rtn|z~HM6-a%jtEs5OeNRA-{{S7mq zW6bgMiHBS{K%}H!1&Gpfk*F-mIq#T_%IS?KnmmRY5!Xkt()?Cze*ea%Y8WEvVUTiw z7v)0&K*`ce*;498AW*f);H98eK#@*DZ9ogf-F8-8aa~nl+`V5vG%saJVcxL|$6<)< z``^FkSYF@5f-u0%qNFf|qiYjifZU>kJM!GDwkOde`t6Kj0-jP-*CjBFZJn& zmVYdR-G+yVm!ues(SeL0UQQ>{Keiu}GSOi>XB`tYN&^OYnb}Cw< zyiG&H`5O^k^+LJv<4L{WWJuk?4VV1RvD>$$$6r%7l&C@!!-x%F@I;0uoj#mfPoRMmY}%fOH;tn zINc;|B@_=|mL6o57CZJ4meDVz(sE_b!}Haw>Rm*XIO^!+P3cBS7FAfMxG)Kcqp$y5 zp|sHlmimZ~ZzHMS7%o$3v#_#!m*Tl%tdsurfOAGaNCPrhyHjNifI z$KtyW=!CmJ2&Gu+IR}g?i|q?_&T#RG5|$uQXpv%4HzSu&SDIF%j?!gF7?DcejEN4` z@I4SZ9@v32gEhXDFgX%;LqC9jT`nY)RG`zB`~9Vnyl{cSV}Fipyil^fyK*CXtr|z7s4>mn|H_*bp&s!%DW3T*?6M{y}~^X z%A5U3+!2NKl#z@IbtZh@)I>e(k=j1)BhuG7jCw#`@$5IcDgq-2_bDJ>HE@l)W?)%1 zTs=WMQc}6RH78U07GY;5&e%avLm_*){`1e4qkgnSP5whF$bgx~&9r8MKYp%I}a(;^pVq5yH6JtQP-`^ljn3Xu49f6KqCHA2s>aM z(ssD7c17#Z=DPvrSRNf3n>sBJvkS{Kzr7rj`UCL)qs^wPw@Tl;Q^&mtW7 zqcL0K^RDgY6JzbppxdHdbF#`mN++ayjjS1pjI)kRzt2d_Xw+TI(o0y%9NVoot?~J@ z$F1Ybc9uJ-L{M6sb2YU&$b8qDOXI73xA+VkusboHk`3E$jv8@s(vyxr5N9CGBlanN z+&mQZIxEqHs(Ouvt(&!Tgn1_jUC7wbZveW78c>bM0V@1d){kppHZfsSOpZKoq}~{% z^qXFSxp|v|v5;44?&q<3PEREaqp5IqI*x;f*8R=IkEakHlsaBfLQzO!k7k?L=%j&F zcGj>YW<{4Uc-yHai)G@mrgibnFRD1JCuY^n5FQ>P@NbQ z-So$Pp=&4Y%wx;z=;>%nrj^HAD=)$?D>P5q(K~{OQ#F*whi#6>_H!WKN?N}~;`zw9 zxPG0gRv`2P(nrDtE~vU2l4egxQ-%<#6AyQO!Chb`4EEJVeVG)a)3?$PGo!E3iyM8= z;ogzUGT6C9nChT}kb}JQOLjdWH?C>!@*uuo+YA&%a;CP(4*F=iLb!z=yNIsR=(~>j z00BCQ&Z+Ltc-#(UmCwFHj>N69sT$abh-vvVPKd6U#i>OSNubBA-n9c=OFBOo?-XzJ z?Li8CloZ+**qE8G07=m7ZGW4P)#$o;o-1xxOt9$FyCSH2;GvUDbic&@NU0=S zY+ET@@buTsqJGfDR}9HI-m-760uY*QE`WXGZLQRXMJ+<6v5vi1-eGFSA{>Bd?ypKU zC{x(qdq*F=O>kAO(|3 zB~J1cRam^E?luVXeZFR!GE93SKWuO2>mZufiD)v=_=AYSlWy!3gd9F1`=uK?U~amd zn#8)(_geHkn7e_Z|6um@wXpCr@+u-bQpMiGLbE*^T{W5u@qak#yNb<0B zDqJ~C>w8q@cwU@Yt&-d9 z#zbfYh_OpTIm?8tElDUEl+nu>lATB2eW4RbC9ih;FySF+2@;g11{T!nzZ$XiWp~I- zUxVle-LHZkbZusjyi$i~@Vyrz@|cgu-g`pMnni;o!Q!%&ARWO1s52TEXaB5}!~on{ z@hQ%^2!3la)UGxCB}tuGADHPlG{<2Yjo8(JsN{rQ(}R8wO5S1M)-hTei$=cmpSjAWkk2d0+d;n=4DWU4?S2xe zw|WgAJ9VC#{QT`q(kB$R*reF02r9JDbH=BO?pg6Ys zFz`ffMz(c4%4HVzbKl}L4t?*qqPl2%{AIZqM4*b#Mw*>KV6mk#rHaw{J|Yvm9a6YI-J77SLH>B$As9Xyz9 zp7nt&#S?|=g`myovW#R0OIpjP`tNCLlk$7bdU6t1oi)Y3R}Z|hv0l^% zw%K7~gx?xbP&T>vQu_Ki9}fGxn1%)l{m4qlwZ?}xXA_cp;O0yyhScr3$kdC`RHu1$o3#Ix>}1na!>*rSzFCjqTPsSh`C)ht#VyJrgOyM<#)*Mal zq!xB|Svre*@uy_16E~$T6ju695w(Ise9s>V2z4`hm*%8yJDN)w2kP7bVEO_;D)C!a zxM@ET)-v=2Q>4i?{B&OvvRM3FKky}0KW6Z#jJi3mt1_AbB9GAi1`1;e-Zojf0UBkp zk3sk}%ZKiwo_<|gAOnp+YdBQ}--GT7yiYr#hh{J6?zR=7h9{DgD~peww~a_BY{%^t zC}tw-ndi4j+%=DO^P}=inV70eGk+bZGhg#62KWyzMCM!jdujAI8@B{QOq=WXc%Z3I z;?m$piK1s*-V&Sri4|yIQNJS7oQjE8zGM%bfG`tc5=m>2QAa)zPH<4~X@6F)1}lTe zgr(#K7#Im4jJhN0bpkKywH%9XFWhvh?`E?(C4*A)n)M?iqi-;*#|UfF+cI>S#$HapUXN%5#M37l2O-BBLQXWymIA@~f-o4P|BQ^aFn78F zambFdr@P-?e6V~up%`fXAc3rbX)0B#6v#4ufydFbtr05x{Cxg^KwVuER+}=3$A@Bs zkkOwEN+|jMX>BQ7{q}mXXp?sCG)gh>)xV$6HHzV4)!`3fPdP@+S>E1OVz}E~IEnV} z(rV#aL>&Af6kSFtJoe{qN6l9BmNSk1#hdi?%d?TPn%j79+GTFzVS z50C-;I@)?d&fki*0ByAVfyN(a%94U1j?5^R2%-IkE*X}nWI+)1hut>YhR&mMB-)~A zF?oHcl_CWANC@x! z^tpdlY%p-Kvkk0Ga5Mjoz1lx7uZ}1PG7J%@!3KE`jHW%It^@vx_W7!f)4vVQ>0EvPIl1nq5$#2z_ zu+BkJ#^y&&l0o_y8q$t-qQf=(QV`p7h^LMBulfw;rq@mAXY<&aN|Q<6J1}>=<7#x$ zUMcW?_@i*GYGUt0n!RS;*e~4_dI*kd@lH!+VJTXj@N}2Jk;IMxRHfc8DTWcAF_emb zHhYXD3g=SQaYo@~T8wMf^v2?20%wj1*lAjy zZ9uzzEV7=m+`Bjd9A{oXBhm3zOd~!kS0~IQ411KIX((e91!e*S8K?n5BDuLq21NZ* ziL1GYc9t#1!_U60U|{_3Bt-;Om^j~=jWMUFThZ9JFdVu>2)Ci9SbtVM+UZmB+u*+i+l$o^nM z_7{RDz(@n>$iQFpP~n2qsIKa+Bs{C(>g8+n`iZ4Dnjl+y+P+M(ym{>LR8201x7q6b z5JdZ0X?0=r{MmO=qAO=lnOrRcu{Fn4N z6~ujvKjK3lRaU-+5=NwsrSI>U^N>Si<)eBB3N)a)=30yAkE1~gdVC!2@d&r%*z-&S zv>tFGhkJR_YPqmfOy}bD8*EZfZMaf=wfsUQ{Hmi>*Z;L5V@|e2k~<}BO5+JudKwNF zeRAeowWm8J%t+Nanm}dHs1#X z@bC)fU(;lkI?M7KCtKHd@2C2w7x*pQXiG+rkRl0gv(yZaJ8ct8ZY)>dw{}PdFURHV z0j7(m=3rN>mn?k^Z8|5njkMvqmR0d`(500=$hZ-P09?Zh@~E(Isj8YF;r+!K3Dr=c zw%(OH6Y=KuVv#7!Njbj17sFq(o2!)1Y-jlpl2 z63Cx+hY&(nv`t{8J}t2q_67KJQHxDzHP`I3I%b z2VpqOiy2_c>-X4wVi#aa&iB7A|8nCuxz{ij6PF{~`32Jk^PCoL1OZ!DjoLnfT`ux1 zTvH@e_#b>JEZ82L?grR1}l<^12HYcjN2Sqj13=F=9E|tcEU^JYlX>r67RgLl$ zIrs+3#EqOVwA8b#koqsMWcqy83e3vy(Sr;WT@vOW=}bNV6%6^^myv5b@ns#N00u@` zBK%mn3*r&c-4YJc!BGNc3y;9tY*{}8L6UqrE#0FFge`NtMON@O%j2VK+8``r< zX!7Ya(2X&0`!-M6%|!d)%C|#m8c8aikGSjDIbGtl=i;J0%KK?(7T_3YI`4EJhF(r; zaJEP9aw{+N6sH{D=EDbA)HdW}5M+UZFKP&E*#ngEjv{fnP;(jAN6{{QNPJ9d%5KAE z5ozlpip?Rh+MB|nFH$iZUv_#q=!fYf;=bTws%uU)6noTsgz44{_$r&QS$W`L2{pEE zI&y}n>TluwtJ+G&Mu=@!t^TTGt8IC2qN1W2qS2~{L+v7c=RlRU2U{4^#uDeR=iPqy z=6R_plOM;cogX6A9zKxCX|mKTx5weXxt)Ar0UeKxO^Y6qJ1lT&4Gp@9`;Sc;bBr+< z)`2ulX409Z8lge!<##(&BPTA~swVdzjpc6qHj{WO{(#A0v2~iZTCHV*@Ss+C8h$4p z%@KPoP<}|8Gd-oJk5JKD8ow5Q1)#OJ6+i`N)*$$zC8>v>MZXRO2L(8{SZY z3mO=o=FIpGXwv^;hb^ESZHQOO$pVKb=#MbUy2Fbd{m=g)&I2+#5 zbn`dd^Ho#i2dC7y$$)^#m^=SA8Qmopbhp`$Ogml;sw41-dLCMs1os}Fr(dWce2Le^ zW>qF~Dhy~OP*7qq2dnrBahczfCXntNJsnm{^U`K$ILvOs(OTpLm%~T2;*8O$7C8@U zJXbdjsqq^(%N$tJ9*cC@C>9-t1QbiOIZHaO#b$CEqH_jA(q=ftX;ROn@x6*@_GC4x zH~u|?Vt8|=Jo&N&?Y}=lG6c}(aIWE&;<~gW8*)}p$O4)obd;9;qLWX5X1Sg=6k-2| zxF*RqYDTc}MEtp3+nH%5j8&T^IHWwXqNgw7F<%%*28(k~yp{RbtDy2vTET&iN_;=O zhEjJ9c$#Tz7PmICBU@ZORL$Uq^~gwr;rl%X=uLZQ()R1X)a17_03%^wqOBXXrVah# zwnnG!!kr3E*{o9(PC%Qxdb9O)SPt@2%pc5Wl}`IJY=W~WM?e5V+Auk!adI4K9pn!8 z%?b+)Kn&@0S=Qtx>1yEQS3ohGd*o)KGgQ(wYU=wGcT=l^QY72gE=zZ^7g_&f!OKjqZNxuKED5Ph9V7w*A<< zcIzyGflz?FLfC^9rf9G}jMg|;8soZ4IvA3w*U*XX7H+e2#GbNMFDWwru z+q9yPG%&@79W@lyAP=@A^@#oXEoHPoX8hhsh4iXJTW+XIr-7rl z5csTj*AU|ra)baQ5g|=e$WSu*X;>A&DVn9xg*J`&j&UJIBRzBPc@f=;(un#>wbFX= zm$T0u6uM+`RBw2N{zd*F9PNI{6+%_m8v~Q^nm+ys+J??N=E_4Ri~t93trp|93y@IF z#4T)zVY;=L49GzN;Sual)$QS2t8_qRIrH8hzA~5wcLLi<9(W=z*jb;W3ab(hn z%n)uEo2DkIT8Nx^KReituj4l*y;ouwcGr_f$gTo!( z7CQ9_&o3dY}f} zWZPji4VD!{uQHpHAR(cobj---5R3^C{(O_!QgeREhl8X28e6J=yl>xIVGs?m zM_a5$t~SS~^EwZ7N-^NpghPm|0g!TQr{G%SOBJCi6W}2P)w@ ztzSnVTAc$(r;HA78&wZFIniuyZ~nqc?`b4J?eOL=_y~c%^DP%a`UG?90W|VT{mf3X zr$RCzYJBz>Y2G34E2otIju;qtU)Ac0gSnJJYN|vA5j&f{r4*Wp()?IGPR2Ki1gZpG zc+rAwvvDzWoDG{GGde~jq(q}kc;#K|`#&t|E;~DhY-KhJuzgxVhR;4goOUe{>yT&s zMJa`nNNvB#DhOyKuJf>rHZ3_^5r`>1U_JiYPC^NK{sZD<4FEkUCH>S@{-gQv=MU=1gFb-Ax1KyTdHGA#~$CLT$YI{it>V%0~I0ffCM@ zT)p9d;r?LajNMAI<}C^HAd?cqQs@NJQctc8qXXH87An^?5UaadnN*Sp?Y@yz%Z-o6 zWvKz&_u5ls?UqQqRp5`+=&*wpy_+&F6 z{l~hBvth;?e#)^>KmbP(j)8y8s$?1KrY+0eM4rptsF3J0B)1)+M&HNZ8taDlTQBBR znHtVLOPwY9SOKKi&FM81$PHaM<$!xDUU+{DU7=@SxCU+`|K^L)0Qbk-qVbAcDF;8=*w~esr!2GR2bgDX|l)k5= z;LO`xL4<^0{uRxThY1wSA%>+rX@wYsv?*ACFlnlE2Gp1-b*){wO6 z6r21-oD?820hy-SB zF0FQdW+Nz@`5DxzFcWD62t2vaB zktUI_OuqhjVL+yyDX=FI<+`iIlivRt%a_|2V^aWRnodkGZ&ZtUsrnZc0EsivteC-u zH{jGIweyd#m%qCaFl37OAMkh}6g>0%1B-A(Obp$@2ftUt+AUu#;H>+RMSbi2n>+?+ zZyg-y5wAb0_Lm3kK*2DO;)llhPCfV+`Aw;v{oez+QAdr{EoXOl#R|F#1VX zl5vZ7;)z3gCrihwG{2DxZ95oJ9e1z^aWYF*zL7+qV}{NgR0o~mT@4$$ zA*$Nw7OTB>tzi!NRjYG2J0vy{K8_pKJmT{w5Cl57$3fSoB9LtniVfPz zi_{YN>t)J`!9>A^8M0$%!sLUMx`;@alRIVK78IoxlgVyw`nGAEj@^Qax|?mH+Lf}e zMYv{4RZI1XYU`-V!2PwDH)><)XI{gfd*N(agX?YHc1rv@pa7rR}_6y z0BEXeJ5NffODf=4RgcEd4t=rucO*{(!UHFIoxhN9bFw15@Jkk21 z?}4jq6+9R?N=NYVrzaec(}ZvfmjfHK;1Y>^$|##apA37t6IcuLz!t*)qa%R_*e=OqiT~bnOeKuDJhfR2j+#Ie&?V#R=2*NBwLw&B5qXfzzg~Y2QrbkVereoR|t+@ z{AkSjMcd*Qy~#fS=aK;>x)it`M}JpE@@)fFw7nB&{<)fpqyaZj!;5rOCOA3!E$>IP ziyI<?l~62j?dRPDmfrRzLZV%LAAnqZ#@^Im zE)+2%94!UVwv`N8ELHG7^3g6$rB?o+DB-iwnwlx-6v!d)L8z}DP+e;ZuYv1)I99&i zA63;cZh~lJmK5o9Tt2tQnVbFViYbI34OYr=83&`O@Be&MR(+Td%+T^i;Ebg?_RV1L?{0R&clx1@8TqKsqf*s z#gYZjb;suZWI1zY`Y`bd zLTK5`kvixVt@?AQ91V+a`&Or=$3LAZw(7@yQ#~Tpe%ATQ*X*y@vrypn^eRAx#*~@|F7@L6?4C~k^I;-Ik>J1*K!AF3i6CZ8|IK2|h~CB=>lF#{ zh*sxVSvKuU+r%AL7@hZ>pIx2UUj_IB^-uOIFxQh&9@n1^1%hnvK14qr?j0y`i2&tK zz8^1`OAbUNQLwo#9i7Ujas#HDwjuG1f#i1ll-cz|2er=bFdu1L(KSwFen)ifqp1^nME|80B5&dB{gT~ckuXzqlRGY&;qQ-L z93W}!H{fJbxXX>Ssl!71;mf$1rw7rV;}_0qLsAejHwsU?4PouPf9*law@L3 zC|Y&r1ixrK2Y#wL?^@#=y8H5GnUXeb`DoC=KcaM%{yO}hbjvXep0f3pWXpOU6**kQj^Oogr+5g$N^Ux4QpY+j6RdiD49^~b8q=Qhm8Jkf=O^|0pP)GYy~?Oc?JujzXQP}_ST1Bt%(z3 zPvr!Js?=+7dAj*ADVx8=JSvZ;ICYy@?J7M;J;q`nlEvy^{2dmlx63+Oq915(Y*g$v z1IMQs(bPuroZZfLf3b26Cvj~NiL3idiVX>O7Kh7^Gg`-Rz`OCthNDI&oKpUcztJJc z(4s1MQs)Q3uw#_`Ic1eo56t2m40hhRX``r7zx(eI6rW)N8LdE$UOx0DI``zR(M0ZXA zj%b|J>p6Su{p0!K_41_o!^q`3c(p$tOf5nP+V*t^SR7ELWxuQ zvcKJx;2^+$g#cc#Ea$blyalIbTe8Ac*L}vB54L3cCz33AZ;7G4^B-T11`$BjZAcs+ zfgBYse!%R6@Q(5!^|z!#DyD_@KfgeMAe||Pv|Er{H)n`y#d7uCU1L-3KQ8*ki>A6W zTuX5F8PPzM)};T5p_QNev--pS{x{p=0RO2gFnI1*pBJ(8KaJ$=^;QJ!KTTj{wOi69 z(AHVvW~Bd`l*FfiK(#>Wj{B>=?+gG{+$0Nvl2Ph)`Cm0I2y|%-aG0ViU2Q!wIev=1 zeA>X&-u9m}^J6=;ylAlS9jB*fhUd1W@Q0PA?Xn%*XZ&kTPvTvmRD;)6OoeD%J2fCe zJo-#Q=wAO%pvEK$CS}Rh>|(qmTB(Ne&;y^NY8@7!0CWoyXcRQCI!$daiprP{2)QlV#pU z;{F|KG#DDzl=_;{;jdl?Anp7nuNsN2)374+Z+I}0})$7`7mVl_k-Xp{-8v;u! zY#?DIJn#qFA>1L_fjzx1wU0y6^xM!Ek7j1G)6q2My;Zlh=Dpnxr&SL-(>8j$Z0;IU!-)DY><8mdH3)u|hvQ04k@MQwM(t91+~!3Rfv=~+FB23`VBG5<&=0NynDA0ht` zK+4b$Mnr&JN+aG8SOuKt-JO_g+Gg3Zvw%Ccvas0Rfv|SjrL=vw$p5d5une#b66Xnj zKlaep6_Pfj;i8@WzJuD%H`yP=OS*r;Kv~c=d@_~!uYkG!yx>WSWM2H&jc0Iha155y z-(Qk#c2oX-!Z*am4u>c0YXN%;#_qd}YfE9L)r-VB8a$?f+1)A_Fr{M!H^i2-RB;XGVW?f*Vs zUxxS)nAPg5$@RZItp5M+Idu1!ZYK@i|53(&n%8&_j7-lu&Hp~ts|#hFx%*w!n5NOcvi)Z)-l_Qup8HpL zqG$ep-Y`JqIX}!KE&gu%FJT2Kk$LZ<#hR+H z#aPy54$-}DpJO?^PqB4`jS)E|R+1HZn@{c@DOvIHQ08y5EQ%R%2-^H$Ph+D*!?OI+ z78P4sdbTuL55Qsy_4~rFoinT8`uZsRmg-D~!nEffZ`kLXUK6()Kj>`~Fpg@^H0aH; zA61K^MHU)xbK7Z``HiZW;`Qxmg9uN|p&ftlB4Xe{vJZJ~q@A4)vBTJ}i$F)P5=p?_ z*w8EdhI*Pa!$4$b#Ku2aOW*yeJZHGY>L-K0vY@Kjt=QtiDSmztvLeA9AG2)99~Jpg zIuZ}57%5iH=mI_<6^^ATlBhmd!4h$={0}tx-*AdP>T<5Y4ya0eb!Ol(DZ(Aq3YmI zj-h7fMdT-&pMFjAKlS3DTa5wbjEjD)+p6i_b+vwM9QQpfxpuNwkYBlRvf0IJXk2@B zIVA@lxG5|=k$KLnA;!UBI}thDm};`G0P0i3jjBI*xXkz_zL+e%N{h-G5%(k|z?GHq zSz)N1wS;erhcq^_m#rmfiZ5|Lta@HWEC=0fkMPcu@6kiKtV)D2<;XiH=^veN$_M07 zcg3&gQ1A6E=|1h^6k>Q8yf8;zd|$&mUTVV9gkFDmiJs4}>x(LiWMr0nz zVo68P1M^tGlYUW2ijV)^w%NDm(n`Fb^4r40UzbP$)hVbLo2~| zKMw1&{8Ty|5^Ya3%U6A{Dr0GTtfUhZvY+2|3?5{PJr`x&kzJu9qto{~NuCbQw<3_I zw@S?U(mkv+Hjdd1+>Sbm(7GeblX`7k2H4DN&2RpxExRp1<)e3b$CTmxeZrMZ&9`yQ zR|hE@G1g*eD3!$l`4?3Ex|-}ep+IguXmH&6oYTvq<5qropS+w_VqWD%GcWso%&&Xu zbUo%i;B0mrHYqcTya=hjV+z6J4LRK_)d%hl4#<}9F-9ldIG@UM%6MAdFEO7I8V(`T zL@I7YTtU3oe-_>P4+de^@oR<;Zn>@e3N(tuyI z(90L}D?Y|M6=agYoG(0T$E{4xV^wluoSn3P)OF$ArZ6>gtZOegtA8>n-TDg-}6Xw^V&eiZNcn~X-yxR8Nx|P{DGk@wojWtYA zKGx>gl2oeK5mFGL#|Oa}e|ggQ994(CVl6AU(6QYBPmE62({yf_%E=~CDZ!S(Wfjzb z4&BKR+=^+kFgCX2IKgjPht_XD@u`d zp6kJ81&Hr;O{yhA!BLmks-79|QYRGrW0j=W3ml&GcmNxHvHvL6N$E~P`k}*c$o=Ht zK9yj?WBoZh(ibdZBzTDkWCpV6_SK4={K$ru;-5=&16Tc(=We4J zcao2rQ#jcrp&Lb^aGFQiYp0ndFExv%O45 z3^DAZ)v#xwb*$Y;;A!VYa+F-WZtf+%rrp5ocSs!x))?>Z;E5U{PHJ4(9Y$91@#9kK z$*xX*7w>y#KBxAQ?Is65425oZ`SvSmmR3s{(IIK>_w7W8y*yEa-|SehL9@irgfd)Z zI5SLj>~u)f$>b26rFY}ZYKrGBSVW_f>&@(B>#g-Lc#{;yb$C;>6%~+{Dosu_Xs@7D zkE!akkqb-5I9V7-^>6GtkB4OoKEJTl=L?LUdHKf=kTx0)8XLfhHk({y=i;chQ*CwIOsPl<&KV*F>qtRWh zeb-9A=HHDNF2XsynsjztwA5h8pM!NZdURJXw(MKS8EP+i5(v4&W?u@?y7RGq%{Qtk zM!sQ(==OeZgWipMo4YB_xav7|(c~!FO0;xIaT`C>58vuxq-Nse=QGD(iY&ylb6(Z8 z6Z@%UHHYLWI!7D6U;8@cNdppiEd8(CkCnQ!QvwHjs3^vdl+p@?{3;kn6YI!i&pPuy zXM2IrLqqQUvGNY(R&Pl4u?l4LeeU%)U$J{ zdHR(n%VCHc%bxwchkVI~zOb>CV{vr?bfQh=boib0uF-9qMGXJu;rJX)W$423)*Oo? z{^;)Z3ZC<5OC;ct5*Zd{!=sZo)YimlJA3oMz5 zf1}N$*0XsF>0SnIFufN_4WD|Au-y90aLXdGwT2&RqmJsIBX7pNon|JXc}`u@iTJHw zM23?rp9irFyL~-eZ`Kucq-DA<=ws{bUMH14OSa`r;viw-hu`hee9`ZuXtWzJv_A6r zsJ5W=?PMT<+Ph6x(bs~Xk1r19Iqv=4RFp!ZO~Siyranv7aMz`8Qk6f3v28}!$8Eb6 zZCGA-xN%Liml~-#?`YXZPA2q}<_-rJ+;tX)k44$ z5xC51pY3)(rl308fBNQ$wc>x>hKx&P-R%x@&|p&CIX(#WnLwSEuE<98a7PPLRwJ-P zSSM~ywz*;{cGJ~Xmx@Z9w)@D?SAtbFVN$7%gE*E*=etXQbR>SB6UKrKf3j<7)LqH$ zi04R2eKyCuE&p-&jyu_6!N{#~M%9ZR=gIvU{B!1N1@gepL&NLDy`2OIMc+&0;TVG6 zlVFZx%>m!BmK@>A=`5*(kx^yoqu;#A0~lv+{20tGxYOEln=2r?9@RD2tf)##Hx`#; z@`dr|I^VH|Y`_U=K2R3z+BAl)yo?h*pT++!wR!&Xc$*a~J|4sH3dO0+UviPma3KcG zx9a`@?H3{s=q?Bupl` ze3c`cTA$oJ%q6xjL>FJbN~9qLzi0lnUe;;b9e?Z@g-5N9l4U)-e9PEDJ4@m_=14&x6BYd(of7{Pg`+xLsICPj%j?X@Ij zh$=*aeYypC^^zp=@=!A4KnsG6RD1+)#W}D~WhcDCD-NbdA64Y^B1BQXA<@2m(gwL% z^Sj928uII!l~U~hRZ(keQJKYrqh1%RG9OuUzC0&-2DZuAf^3@Y9Ui@~95Aq&xnkOL zw&3Sd7;O3^Sj=@l+uL+2@T+4Hoi6**oLt`a*M3jXmdL@kwJ5RtP`k+2SE~b1IQd)f zv-ueVp%1eP$@qRCWfe_7&yHT7yB%=J(&wKU%Jbtc`+Iec6EI@YrG83>HF~G6iiXv) zNm}Tq;{1E)Ojv#zXkRn)GwfqwyQA$o`|mfv&(E#S0Mp{l^rh>odcHF)H6AHxzd~kL z|Fq<58`r&YvA=-Cm|U2-d!_bE&bz7egYQM{`6HA=@%l|k9RU>aUKr19p<{m~Xm*`@ zhqxR!=H=25+ZS{!5rw}+yk{*?YpQY7Ka)LG+Ad_>&`C{y^VC6vML#Oj^ECh2qiRH~ z{6)}q-QcLQJEj%d@<<3LoyEOwt0PD$lhlVNcYZ*)E%3T z^QZ8JEWq$Ex&O79UYd6>pNT2Q%YgdhK7RL-ycm%}^4>s$o!Xi;{|XU8w>_^rbctm? z<3c?m%nnt^IEOxDf5?(`j*D95{>zp)&+V=A-Ovc9Z(C7_->`wm-I9Ex&-k;KT=O$W zBO`8^Zzn2}SkW_!K*hzOA#Z*13Nv)42hqIq#!!Df_~YD(pzI}adBjaJ;kx3WOK8K; zZ@I^sI_aJJ?c_L4^H(c-(d)w7lcd`OUV%kf3O+A1 zsseJJ<#&t$&zZ9Pdq|Af&^NJnY$flj`erxgV^z)Zc}sgMwm?-m)C;eqvnJx3S!?wE zIZ+Wdc1m0EVPKG7KN#|p`_+PG$J379^XKK%+IC(Vgj*v+i zy}EzNs6?R|iY)J#SC3fN9N&!I?l!ly6sX7yy}$kbT$kcE{e*FD8KgJHIPKW_C}_=t zz|(&C{==oR@k@D*--ZAhZqWUu--t?%1Ga}az**m?Dlx6HIao^7?86P0XPC`vewM6q zCmE1K-bHw~*UWjM_Tv}vHf!0sn&$^UU7t}MxdQIpj(e(N8tvd~RttB#8?-4;|l};vx5k+lKp>ru7T>8WCIm`s~F^lV=nDjLu{aWykV?n&_PTdee2L zLx-o4pHQiu4!o{@bA?hr_v!xv-AG4D{g z2-FjOYH}VAo^>8CzVAI}7U?C8-=6fZjok|mTp^p_h~E@GnWim1>W{ELpVgK!U_?8n z=xrGuD1U6+)z{;>S#sNTTk=>5Lmw}lJqVtPcP z?itDK$dP5}d;Ma;vn%J(8gx&aUgaHI7hVTGsL=?w=xF~jX4bP5V@K2q?^VyU#b@C9 z;qfwvs`2@D%Bdn?)t5_ZrOa&Lnr7s7DycDV_zJ{(&CH>~iTVr~NiPRW?AkX-$$ z%9E8B+xp6kRW)7NX}8n9mA&Mx4^tT=?I&Nm_SVo{9q;qTPZtCP#d$4mPCfqs>0nR~ zh5lAKqiD>ce{~~AZqNLTc#B@fFv^Qx69lFnjp5BR6LvktbN&-swdn5j+yV0%36q@- z!uFGQY+h#Z4m>7Myh)^jM4TnR&MkR9Z{cKqJ%5Ie4Sb z{xd{YYjnqB9RXzRaHk!;43Eu4soOA^)J*3$W6r44Y(v@8=eMz43FEQ73-vH2)Xn#j zx`D5re(<}mmQX!#AmgJv2o(#2Vi(>Z;`8YUs4y{bzQ-^8*nuP)`uY)8s&d5mA z3F3@FqD2i!2*Kz_h+b!mlISrZ>Zs9)lEWdR6YUPpUFWXz`M#gt_sjb{&sux`_g?$A zpS3eZr$T}c`;0J`@Ns;l*UEFJ1QMF942HMenuaY>q-95d2}0=WYR)fOyIOA=AZf%Z zS!PpOEDZKM`gR3YBSU?Jh1FeI5L?{{Kng3+VH-Bbl})ZAENGSp;tck5(6eJQIgek^ zH)Yl58Asw%zSSLgE_zB@uietEa%;@jMeAeSg&f0cGb!~q)mujN8A(td7Y$SE(bTLn z8@rW4Md{wC0wFOcCF4yEY6T16Xh|(-VrC^Ls4ACjbirzTV5t|8r}H+8QwS4UEgJM5 zGWgE6ZHJADYcNnZc;5P(n!QC0HByIQ@8<}`J_P~RyfnSsIJb)@BK?`D;R<|mbdA`E zixJEwSx4^ICCa(lY(s@pteb=h7>TK}#vGK{kwLXVL(Lv6sUDcWX&>pjTK%s!ZS z5u6UYj-FYrL{_|_oeq)B12;@m1?#!hPIx}Ye_+j;RQv(J-|{YR#U1UzVV=fKRD_5u zw=u7AS46TCOr%^&n3_zgHPWdaCfsRzQ;Adb%puiK{F-}Amb|;SV#mTnQB${g>&BR z1P!*h%0B#JkTJO{~6GRBtf}ccBZ1j*L&5BM`3_K! zaYrC(>^kO+G5NjdLZ+gWlk&kUUiFnUw#)K2`UuQR=UKiVUKo{Ee`{7h*dihTbI++t z!+~qET?@m0KCVpgvjH5Cbd3b33jELIq>B8NDDzOcEg#~MUvt^-H7?UeJ8m%cu2gti zmER28nXoq|Q&d|C?p7gwqJ+PK0{hHptQg+vMlKc!ry(B`wv@6*;qFk%{2@^L4RgeT z{W&paW{)jxAkBe`k*^=6tj+Z@D@c1TO*JNa&kU^Of05Ak8j>?jLdeop5PE(##m<5_ zR0PQfFTG|mj(LW7Yn||m-IZc#&^PW*zy->%;hu!P5f5o8kgoAiQ5?qvbA8$>KF;21 zFgle+A~z;stP*uOIb$MWG2q;%he*!*EZ8#I1}tmfQU~JU)yd#})(sdn(oWOiuZi3u zjXQyLHvDg_afR_^3RDFou7#(_{9pdq7SDp|@VWBm)$x|$otw)YlB{nbq|Nr@lW{gmKdx!RO=gm683JUKinMgJi*vrPK zr)=cYcB-Up3aL$hPshV~6RQl{yjUc}K~34YWj2t(K)AxRAHyO_<7fl(YZm&aSvvX#i*nzA@{$*-ki z;5}4Ws`!`HA)`mZj zMl3)&x0%|agdFtu!78;ErntrAC!TZHk&OW!Trtp=E++-^j)p6#kSi?@qK)Jpv<$82 z7{4D!NWk?B_VscAyu1t6>4OU^(LA%7%g7!oyZFULeu6eB{t>n6Kux`u8;B3fl$)yo;)WXxo-#KWfe|3%z9YOIQDj# z*7d?@$YMra^l~w>&o7TvajUQ7>z|4I1k?=_kzScCu6HeF=-sow**SPxpGGWOzuT1m zUFfNK(1!)q=W85cgqTOpz@%pw)R<51s&&_CCSX36ZnXd(4dzE!`371zo z+zduxw>|uat0!D7l@E}|JR*McvgUJ$ujPQt?B+?6;)f#z3Tr-3h!QpWXDQ(m)u8RK zKf5jD-$k5R@qP7`8M*+5pg-Ei^o~A{uZmnK3{cG9$19a)IZIC|PUM*dJE3+{?6@Z| zus!V$%5ZU1g}aIT+Gex8pG*6lYQlkxj@qN8juu;J$_<%VxTz?{#5O_>*w0CI%>BE& z=IOGBMuCD&hh?NB|u4N$ZBP z%WC9V5c~(@`%)bE$xRC?^u2%o?ZQ0@j&7-3NdI@MKxQHZJMaAemYF_2xBq%(r`M%Y Qn2Pe;g&As<+_n$?4`rN@vj6}9 literal 0 HcmV?d00001 diff --git a/docs/reference/search/aggregations/reducers/images/movavg_100window.png b/docs/reference/search/aggregations/reducers/images/movavg_100window.png new file mode 100644 index 0000000000000000000000000000000000000000..45094ec26817aea638085b466e6e2b0339c2bf7b GIT binary patch literal 65152 zcmZU31yr0%(l#30U4jz`?(XjH8r&J&U4wgYcL?rIa1HM6gS*?$e*f;>ySa1TIcKJ= zx~jYDd8*zHQ;-u!fW?Ic0Rcgfln_w@0Ris;0RfGM{`66zbJm&w0)kCpAuOyQDJ)E+ z-~ceSur>h!(Fjg*hv87&Sa|2VUme~{JJ2*`njpL8nGmL0;F)0+jf;g(GL|3?76}(q z779r;9#IHK7b;Z=k#R>VJ3oK-em}qFJNL=*K0B!Fm|xEBxL@|ncBTix|5*?l3xZAy z;xA}mYz1zkCr)9ex2g>Sit7hyC=jK$ipMN z2I@zHa3011SzMGzex_aEB|y0jA5!ujv)8 z#(6I2c}Ng>QIfb0(46(F3B0YdsE1rc!DV0|k5?e_oCw6Z^F6UUNcVaO@B|Sae!b@! z8q@w6iGTe)M!urN&oxGIZ z*VB3*BTZEI70fB|)aMYKrMCgw-EYSmZ;B{cxgKo3)iK3!hk)#1LC}5Q3vzmWZ&C=- zxR6$?Q={YRUbH#PEh|9uH%o~7kf)WDmwCd7yD7%^d|||<^=%rzO-TCM&f93jFHyYWFKYx>YXk8K1@RgFRURJaTNjRQf-FIoF3w&FCkyTOMp;;x>X!;6 z{VRBe>h)B!2Q~Acal(~12C{CKeNdlsai+%AEhPHjAkF17JK=o)ualsyepo@Ia|jUN z{F^Jh>dc{DD;85wWCEYoyU`?Ge!SG!E!x5eWw8bv!^d$p+Bba+r{6e`Iv=X%44owHn^%qjp z#fC)6YIG2PhbF-BTKwhVZ7QxZHY-W*9ng{B!%PIIVu!xw!EbCJb_x*A8|qx=GicWu z3OgCLLf@rpNmp0X5(D+Cxb{AHcrfVjwiO=ZeIJq-daR&<0}*^C)MRi#R52wr@ZKU! zMXqIEQUPf;Iwg>cUX06aBES|9utgN>{?rW0k1RMq28ZsCI|<>@jjaKqy$OLxRCfjr zhB6Epr}GAhK?l#O&+WyW|1qS&{*nop3MmHSC? zr$oL)6uUc-fq4u{x_hJ=(HfMySC0WFI#_uNzZtqR=s2#y2Q;SF@d6e%!c^cok39B) zMPwy+Z7OuK2B-{-05UnTwj#WOy(03)yaf{*axiXVt45*>pzK72^^qAkFchXdr??o4 z%NqQs)}Ucl!!8dxk)`-acn5ul-Uy-{Y%_3YKv5mRfgg=P5v@F^xhc`ipo4k_cJ`?f zW+5iM=e19H+x?=}&AXkM51KG$;+Os)`}XtY%mXW|7OWtwDl8ls+F z*Kk&_&w?pAa@FJyDA8yRFiAl!J$OAxQc@$jiBySdiJgh|i7kmul-UZM`8v6I#i;qy zxemFo#azP9QDvz|7;{J@ag-vnWL#*y-_5@7^{H)RUsAAwH;^@;G(g=l9`PJm9{C*6 zv!JsCuxPPBndezJn4eAGOpZ+{O~OtcO~#uWSzMbRn}=Fp9byBGfiys{L%Av4Voxb{ z;h21^yoLPx9QgwJB0ypGH$R*Q94G8m98_#6>}8xH?3&1)NU=ztNQ+2zoHm?;RAB0E z>Padh^ByCwF|JuiYH&(a3V6y)>Nx`vV^ytPjd_h*Z9%n4?M)p`okO){tw(K9ty&FY zZDs9c?P1Mhok{gvbyRKsQvYJbBIy!+O;g>3J%dAueY0bM!|E3HmWX}0y_O@v7R~<2 zw$ASMKGP=bR%`d%2C$1qxAqS*T)j~x<#XvnLOC^^j-yydm>ym(x;FLCzaF%$D zSBpiArHk{S6{a_$L8Diuzt&Zgnb&2LcM*uDET(srsZ={lZ*Oidej$HMhJ}YUg{8pI zLT5uyLLUxK3a^s_ld6<@l5ZA)-rTy?d%xL60w$8ZRCf)XVS+0+6wX#2JV7A6S;T_;{A8!(G7r4h)=N3=o{wS!V5b%K3EyprGKTrv*3;3o1o3F)nB23&q95|@j?Sg zr352}p1WO9g-TiliIX;q|1)6^Lgpk_HQboc9|nDdG=_yFdhcOvE> zZGT=5x!LZn=Seg>BG7%=UG*{J@$2I#Bmv}WG7G&brUOP1My1|e$IDgtucRZA+`J4! zYkQH)P}Jx6=<*TuQRyY=87cNkNL@VoIyx(Abt?L<-|oB}V5%s3jGn!FV&}4J=?zJ! zN$O=O<nAeZYdDtRkP=vsa)rIl)co}3MZZ4TY9da*TUE4c%^t7I;c8sy*;0BpIM&> zJ0`s^-MY;;mvSDzZN#okjv6FyT_zLIo9RKeKe3@Q*4}ZQA?(W@QJ$Lr#y-%P(w=Hk z-3i}8+Boby_5fVJ-3C4P6%S7i-}pb?b z9lX$!=%0Ddy)3hybMW6rz1Unoml9g@SJDpA*VAaxMbkpkRnQi{89!?_=Cx1P99465 zV75eSPpl!YmAso=9gmxC?wdk~$t?R|_{D`^U4d;rvx0n=17RR?$M^T3cz@FUW=?$y z(oGN_#Fx9g2~m$AhDIG}G+3u}fjs+ly!0i^tn}D?A4mq|#R_vF$nFm*{=F(PIl_QgRw>wk|<4lM;&_k9MsB ztiBNy8udcwnG%XS>G6K%NM{+K>F{i5t)T_Wp) z&Mk2*e%YgbylQQ~#t_PCa3tZfm^U*fyTOImV|`M0E@1_4xp+qY#P^Z}&~2q)0L{$8 zqjSf!owN__QUD%CAIv_Jk7HzMga(k0DUb5?P=8qW0Lv;#t54TS9eJvL7M`iUz1bZL z(=1&Yjxos&*x%j9wwuOpB;mQscTVYqSbIiG^r>(K0o`M73gC;k{#& zlcM81^X9Q{SAR%z>DbptFF@Z$QA1`#b;gK5omlnO&XaZ!!=>$|XcPaf$tOcBTO(2` z_t-MxITy^U;T+w;gln1xs&%;BA;f zl?b&DSqL>48jI8t%PQg;Yj(0l0ldt*dC-qc@;YR3b zft@Uh9G>n+BP)68{odsAm25xQbdG7By)vr|-=kjAtIDtesT*=)a#cys@pA;M?hUWE zDNt7kP=s0LG&Yvzwq8yhd)8Jm+Qy!%J2%k*6#eq_&ROo5>n7Jad3bBQ(~iaL6%$n( zN5SiU>p63=D|Qc~ed-kAhI*bIQ#dAsnRw-%o(`**nSGuw&q!z$36RR6eCBe>vPfg- z69mt|=V7DY2acP`u~o#^#2Z5!q&_0FB6Lr5I5aaf8?UAJ*Gu8oBL{Qq&SgRhPCa(JEXol9orS60T@h#8n>V7nai& z2$#b-`2uB+k#Kkz#u%?yFj)xLhglmKdQ%(HC^WD&YqfmpE$e^RU)HGBkuFCqZ8&ip znVoYjKSDpB}5uN2kQ?EC~B%be|cMb-Q81-GJEf51t&YiQrY;f?)`fhIE z+wmw+K!2Vk@FEeR-aW{LtwnP%OAgfxq76C_I|;*%h~yq~elSS1VsX3~iVM$&VT`UJ zsgWMk+F^?E96bK=oxCOE5LaA%Zk3ND?g|e>Cs~z}$u6e#-g7$Tdc(N9*_C})(-~REW`}XUN%E76b zurA(O9`Ccq#C=pp>}L+%2b0BplQrm#;}gZwrrC26iW$>9zI0!Gt(s-6_sos0Mp!iz zKN+%#dj6j$8(7B>^15YO*V1+Jo>lFz~5+r^s#ApRS z8ZdL<(glW2xz$r~A?rr^1m_N>5aDQ2WA<1Y?o`uq!2iZ}#k@zI2u12%P~4=%ryQYF zgE0-F4lTb>6n+rGj5ryuiQ`^AYp{LEi=b`yvF4=;@(i{J4iY;3 z`}YV`(S!a^{e&l-WGK{!B!wj7l)UuUI(Mx);q5U2HF*W73DZSaW=V{5J`KNlPmgRo zxmj9S#p_M$lb!qtFgXt{LflAUKX5`!nUC4s(J_K(l&1XC@mKA<%=V51;m946VzvVd ztdZAgs1qatXIf(Cw$~fvi~38ht+ma%qv9Q3;&0?byv^^id@aN>eC0* zAV9yxH7DdF(lYQ(aD$U?drQcpDOEcA~wh*S3l&~@;>cfJQ6pNyD2Rk3#lLLQPQ z?TshR&Mi0i-$LQ_cL}j2HBKOzKeI!5bb|)@(M*Dvc0)o0XvBg_348{EDt`kv3@IQo z8iJGZ3(pas^hC!ck7N>!Q^ZoND5+wgum&RxF5i@7;a{xx<`J;rQxQ9^Mr0T9vr0G9PE2m87)v-U29I*V;^8rg zTuJ)nUh4Vlq!qRe3+kjY2|xGpPfLt)%JV3ys;s|#bDN}`e+ZWhJ#6O;4bCd$cPKVl z%4iyI9Ifp-ncRIC*EG=27`+Bxh3eJuTzy#U;Hl+3_jLA@fBN~z_&f@^_(=>x7v2`} zQNk2v4k-e<3zN|yg>8g}>vd=pJ9ja~2yL(f%^Zz8p(noPE5*!Yar}f`L6d2p8JFpC zhI(7hk7BhPB}xxS-J52zyu_X5UWX>wBE0;h(*g3xrfPUq|5-!b)j=;v`A&JnUFe8M=5<}-$Xj7pH5Zel})xZFIr1_ zzUZ~jhal{+AfEmh7hsG;P^)0No1`M3VZx@!I3vMnxiOTciF8!(s^Pf)hu=s!P;5ik zCoO^IPP`tU^MfV(;f+LV25a)0JDN#&2nZtya};tZiE^NQf&`^>N1#-wRARpW=tG*l zWbtAducxb*b}~C82WB%vFrwm&rqrjdGl?7|O@9ON}?p5w_4&n`& zwMwUDtAbEhlRU@*%P)}P5Ew1({nye^E932zAg3Fc=#71c>Zq~54p29gDU`OBtSJo= zpO-~xwbe5_r`}Q_WJCcYpJnLNptP4YN0%cPIagdcak)Ac5B9FE1p=r=VD0g46t2eZ zi!M0N#4qR(S>Qeqv+iy(oL$ThT~Nl5>KjUL=S3s+S5f52Z6=t@kC2BJ!51uLrtv0t zsWI)SnmROU{cc*lih#O;fn0!1qT{ETrnK!W`w(ZU$){l8nct+;Rd&`mlzDxBac@+% zV5NK%OmM>&2~e`mvKa={xkaA2U9jAwx-A&r7wQfA3}C$m9`+80v53{B|VZ0tT>DS&|RxN&{_v@vlqAab*@wsquk<0bjO z7F-{{|5Z#+LiB%4oUC|BG-MTsgaHmFM67fybPObXutY>eJPyXDTuLHh|L*?rkC(*U z$;pn3p5E2fmClu!4&Y!$&&bKiNzcGU&%{Lg(Sp{|-PXy#jn>wY^#2d?pK(M?9E}_- z?3^qBwnYCL*T4|q?8Hk#@-IPueg6MEP24R0E6LXJ-)em*NdK=IdPX`1`oG5h=*si2 zQZ5AxHxp|O5epj=TgML>e4K1dJpb4JA2t7#__v;#|Lw`l$?^A|f2;X-PagVzDeyOi z{=c>Uuk^!Qe6T$9f0>>S_Nxlk7Z4Bu5J?e16*th6OeilE)w{t7GGbyfvfL&Gp*V%Y z{1QsG>VQgw>Z;1TX3@2-lh)SbBmFg1*Vh%~Hj;ilK?s%{N>zbFC82W|N$A7jm-80D z?+YgPgfX(216O`RrmEE^Z(_i2-flr84(VHdXs z?nEG9e?AIPAP`%Y9Cl#;s6jr2Ac7WuLK}GcW7v7cZkYcq{%9pofaXVpe<+WA3=WD6^1n}s7)XE}D?SJ4{}z9=`tAf~ zAJ-}H6X{RAN#X?hrwVU=|I{BM@O5y$dmxiKXU-6V=MPpoxl3Sw@iLC#6x-;yL$vvN zB7!B?8sEwSySjL*i3_pyq#%s<=M12k{L4s>&3|n_vYIg-pry%yA6A|7$atrz%(b6~ zQZrKAO+Pzg&M|yO%al@h?3@O>DD^LRCTFcO7Nb}Zb?u|ANM*+FftGF4nWELhe_?A`&5c% zQrY#yiq%x7`jd6!%d@+(vb1z$qI^?ikKb4#pIE7KE3WnP&GhvoT4+=Tie@ysIs(sH ztJyz~KheM140u>_1Ld9ub%jf>x^+Ar0E9}4#iNIDzB+=XqUjb#+1Uj)_Qf;1#b*~% zGuovr^XHux@Z&3XI)N4|$4ae_>l@Rh<7L3^m-lIYv-84E(&x#rmh73dGL3L~zJ#;a zbMGg+*$h0^&k|kU4}4tQq;U`HM+rchi9TrK3 za|_kuYO5-Ywfi_bT_EPShFHIa?qNN%XF|m{)()?C+MClYY_5wv@S9onQJ`I0o%!}> zGz$e)#0rJu$82)W`GlHg*r;kqbRDoicDKJB`8Pn~&+$Y@qxKE+2!!pSJ4i6S(>c%HMme9>t9=wU?9?Tzvs%E*I7sFn*Ftw`0nLl9S$UNNIn zr$M6nuckXMjwqM@zSz@5?!!z9x&jEQ0!ZKXAM1+=vH;(@m^XF5pj9}pOOO<`Pp)g7A>}}p9Q!J_sD*zya8S3YtdY$Fu>Q%Tp!Cw%F#L+LXfBxL z?A~>H@}Y4yH*gx$Q{!U|k&)v@tOvsI*htj&I>su zq3#yHzaqAt4oFHt%T3xCj(%T`=}e~MZlLW)|5GuV&xlZn4tZ@x;+@Y_zr*_dHBMSkQyR@i-RyGDSS&lf=WM`BeDPFe>$upC*9vwx5Say) z$scF2gZ~o)41~J(k$p}X-EU58)Kg4tQa$d4z`){>h!?@YdvJ&|2m ze+w8%Tyj_qW>@NJrYbY{=F-)>W%4GNk+AZe0LBTX_CBH5EEhe?5US76v@&sqaZy(| z*y^zHI-atQJU5RacP8C&C_+Y`@tBhI`##Z3)fOKj(d$VGkNW0ucJ&>-0oqHTrb3Gj zTk)6EoeHnHlBsDgJUp(Z4lG_YOY3hk>Fa;EUGR%5HO;*j9bgV9i>5ms-ZvpBrh|ll zWW~nWnrG`i)o}%jEr!EZ{$o#mM>CI~PupHPS=34V(d1pse2669A-!SgNr{%F%` zBZ61K`fB=wPpi;~=T6b#c3;|BSW$+PX`gREU?a^JA!lJ+T>s0A`>b$kkIr<;i?pbg z`gLo`vwr_apo6`!8C!1Wk#DgAec^b8HEX<{$Bn7?yWM;;i`g(}ZNKqkABBDnN*IMB z9^R)3kCFs_m5;DFD^1F}#%84<$h6C9%>Cj){P1o4YvSd~!Xf(z3)k6X)6KNed(i|t zL2=@Z^kE!KmUvX-=i$+&CK*{yPRi(iLUVxF2OaR&A81U<%n>rF8T(ZqzoQEQ{1oYy z2gFuQ{Gzb4FZly^35^2&8dy+3Rwe#b-A42Bb8?1G{&`9*69z_GyWX)s(aCMT0QjF6 zc|i#33)AzKZvSu#KLfn*G)-ar6aD-Iz@Y_@Z9#ZgApdZ7pvchtAJm}tJk?;}PksUd zhCl=&*bI6xiT)?dd&ze1P{W&LSVz8*~Z6R~EI8F@cp14Py8mj+o z;?F=Ho0uXo3HyA%C5U`SuYZI1=Lyh5`5qOOTVEvmm5&V*tVVs8rwTr^?ck>Z+Mlxl z6Y>k*O582*i^PZu6d=WqlK*V==?_KyDZhXSa)cA|{rHEVDTxo7!@}qM6X(x?g<=Kl z?HMsJFdn^4FHr8d4dMU6;@&7?las}>z3*_G&sI}^a~|NoBLVxNN0aG`ZB`nmlGODi z!>FhltsdW=&n<3`=IUL~^}Nf4LlIf*HU+qzPwI5bpLTgzSbF-?frV!LIoZ--Cn_S@ zoeWXy8)|0>ka=kTYq$^Y^$Cmyx=On#8k1H_Bq=G0xM8qy(r^;PF>#u z)Mp}#ceYfSVq|D&66_L7-%Bvt_pL>L9>pn&0j@J!el!o?la$JZr+g${YJ`?b8P0Y$ zSu|hyAF^1$YN*ywlOG=3YL~28e+>3q4n*O7WoJ*;YP6Q@@Vs}K=k~ZM=}b;c6sgc| zn%^HjU2e!vOhl(u(Qe@OdSJjnM_0JsAGi4UdbBIc^CmST(Jd`4)qCFCT5fdm4zJ!I z%*3!-%}=s#c$w{FUwwKQEVm;t@{#_b1rwNwiOJ2Zviwwy(O?EO|6Zap zuyMsU|6>`Zm?wC4+UcO!*PPI9PnKqE+ESU#xMWic$6p?=4|78a^7#Aq_iEqj zQGc`BEUTQZN8&I8qq)y=v~64Zzrq6+tBHFcwmLSZwee7aY22S?GSq9!1j8dZg(B8p zBGqtC);epvht0}e^Wyb7`~By$fna*D z{X&nHK4v4J>(ymJTMOG~D;UfC`Ra+(Q$|LBIC1f8FsW5bf_vV%;{W=5e_k9Pk4jtb za=xCa(QI;UiW$@rv%G9iNOOXS!z9vV{h*U-?e5z49QrZT zkw;|)V+e~$#iJEGJh)!3M(LmQ3=DGbVAh)e*?b3lmR3zZuO7PcKHn!tg{HI3%`~^0r;*}>d?u9eC#R^#g7q$V= zKV7#711Mt1?V?ePMZ#`b8C9m7#bl1)&BwB%JnuG|PX*N0*47VSzTR)hzdqg0e3^cK zobWBN-x<&nrg78Yww^odd_GH{`Yj)(GOlHE7Md^M$VGt}xzxU-%5f0V8-kFJL9NCy zcrsU>j2TcYmsuA44YS1iGiAgeLK=}wlyZr18+sP_QaP7GxHnp%v!OwggnU`83b~>! zS3u=?Ly{>a^C3b8w^o zq%zXFkGq#k{0|>)Zm#E|-eShdTduB8=;GYxp8IIACST=#srXbvud~CXj5zk`=K_&) zAFhGx@-~qyOmD!SLCp~IH_*EJdKD{cPHapp%xKQ#YgKjK!tq$M@w7tkKuzt{B3V4$ z!LrOw#X%&tCezdE$h}G~tC}T3_v87B<%~4hBF+vpxcxUum@ z_hOBrwelB_A^7}-`obp-IjF-d1oIz9%N|05dh~0wlCpAxKA?|wvOZ_?5jy>gzwn

    @w2*xvl&J6h0UMBCHEM>_568 z(|j<3Kv9C7KeYjYh6n};FoA^R{`3zX@s2{753A+sg|w%RIkQ=$OUc?8_WlX3H;WiL zbW6B5QSgrxNq4wFVMjQ@+ASw<{571SYH)sdl+R2(sqCo_LPNN8jissg=|aIJkNSFM+=9k^p>N@@>A5Bwn69^Qzl>d*!>&ZpJr5`2cv=5$bKc}buey_gx;?_ zxtY3LbGjU02TF+gV|io6SF~3i?bSQXTR(X;RaAx(I}_q=#d_eUz(YTTcE3&QSY{*h zx^KM*qjdb&1On!04?Je9yyy^E zPaemNYjmBj&`-S^fV{JL(od&UEIoq4G5&(xt`LnG90IQkeEIJ!cUlUHGDd72k}qL@ z&l@4PK-69}EwnTz!@3|_MO*!FNV{I#sW%pf&vbl#WqB-fsW)(hS=!Oe))6^>-^0)X zoXa<@vIl>OI+F|i(Ip9Ye_J#Pz?QWsJ4s}@@oF{Jz#GZBLmq!`h_7rtN^NKM-w#gLz6_XLfj%$rutywD!5bRe?+2UM32UTIfUw&N7!~UK1ex z-D#18c58->o68v}X2MzGWn1bFt_R6+pBiiY{2k~Ke`=L)Ir_sT?egLrcsr8N*^Tcq zDz~R%S{GLlFeqD;sP?t~bP4J(Kl!uP!6J5eOp|sDR$uufG!zP7$R|*Jlp#7sFNWqW zoR;*=wKpdoZL7e&9@4h1cww*qZZr_+4{J@8t{v%2nHQo1CtOA{Kzh_F(JGpH#^Wm+@-6;O6SJ%`FUE!kgko>xBBi) zG0qBbLVa%bCV_B&LsKFV=tvvJSVJQ;tITm;8!?UbCiXM2-koyu-iV3mwEQHMR`!T2 z57#QxAiTJWvB{?*7fb133~^wndhrZnAtW6&f2lX!QfDwC7M9hi-OT>yQj-Ld<;(Ho z8)tX?{=~Y(&svZHcILcDW1k?7#Jly%R@P>Dlzls$-AWOoZ+E8}o?b%Oif1Bp>81x4 z&WD9IX=K6vhB`#G&`wh3g6SS>M{3w_97Z&{)7vVeqAQe1mEOB9M+Pe*^!#pEuWwUJ ziD&?Ghf^4D3$ODpH`i83Q=eVmqfCvPaw1cxmT!;56U$#hJ#VCnz_0ZSLQPMLXR1zR z_eS+|H#ROQo)*O0O3enb%QV~$e?V#1htQ{T>Lc%9kKT0Cbk)6V_K-dwdj1{)7rJUlGc}wS)%~GOXqaG;(J)v zsl*@PKp0gl$iG|u3L0NcPzCG2CRday{AOyzkgLi<9XHhl)`ZRCe9DkIJh7-r5iNY@ zyT*PVOE*R0W_m$Y@=Vy3ShSHNIUilI;hd-;T7KYmp2IR(b@-HC)nfbVs zw5nlEhPtR?Tp)dvJ}X9l{>!D9J9*gW7YW35{~gSD=DpqjMom*vAMsW_pZia4L+GFU z_7YWv!d}d;oKmSm(u{EZ4hY%*c*}tp_wrL|Mo>k{vHv)Y8Xa@Tmu1%zx++_<8rsjHE`_9DT&LP2KN78DNE2t(miBS zSR45xS6$H$bqil9d!}Ra#hM_hTVpPE6XI`62m+A-@eTJ>#2@@$QMadb%%P+i3)@Rz zF97n7m>nYPW7KZu!S6(y%wW$W@Pf@o0Vp7&3DKhFn8eEoY$f3TgW7Q-9~LF!mHkP6 z1nrgFrCimLjP=${!V-b}*?F@1+h6P<8--zz`cktP2n_)y@X-Z?$Oszw0?R*R1a-|> z%6c3)`G$-3`d?Sn-Q_YtyfPAM@sR@p1&8Q~|3LrXJ)_W5f9>{S+UiT}D! zqHhrM84O@9BWB&71_TJuC@V?|er7%&nk#;dI{P745s&M+%7#lNgd81G9LXw1=KosQ zGSF(cP3qvSf&L$}3INgEs_d}F5nnCD=HRKz%FI_oHzqTZ@WMu@BO6v`#l@hOov%%1 zzmk6~b8FV7k;z|+`{j%bZ4U-Da&-$|b=$+Ag6H&|okZsRE9-LT0}~_fe+=M{Oh$C3 z$Z5Fx-pYJF|1CjlBrp|IOs4oZmE3&8Z?Q~H*Gut!91uNsb90y%_h}a?|w45N3 z4zx8LS57e(LurM&vYb2v#MTKa>2|$(l;YoV2I!#n6NcEMG3P|h7Qq8m{f^XZlUH-; zYx4U*##RlBh=mOg_b4tGs=SsMexY<`59LQ!Y05UL5H6(F=-+bMqUT<1vM@7Yv)5_- z(;Ww{Q#QU?ulgRWR?JD|b-Red7Tbh?d_gF)N$&~#JUqSDK6G$-eQwq-7Rg@-vWIW*aatYiTQI-Y@vDUA^nL^hpK4i^Vy>UyTp+R!9P&! z|BUHhLP4ZSlSd!|4U$shuAW)9SZ#t;v%gq|&z~mTMfVgpJ+L9%l(`b+QjsM)Q&$E& z+IY{RJJpu#>U<}uC?VszInBR`M-9-9X^;N0=EF*Uswr5t_Kv&fawh2VZ2IJ)n|rq2 zc&`1lB|`3eXkC2&eg#|6bYHA}mM)Ya=2|Q~x=|49nWk{RuUhZXqwGrC)IN|Ll*t;6FutCd1UON{#dbIngPYEnyw-`R)1<6bQ;8n6_Z z+cG_5-k38tijj1=9z6;NX^YV?$m@YSx+?Rh9v*Jnl(-DLUQY-56U9<1Qcq3?$gQp3 zoE7O^V)tF%Z>V{*aKN6TsM4;Z^TsZjycxOS96H{a(zvoqX*uHC_8a^)m*DuIa`Vr$ ze3;V3x?FyZ6BBtF&k7!HSCk_s_Mg_Shvt4bR-TJO_uP_)x)327gW+ccNXLkr%j5_0 z*uW$G$4MaXLo{OIq2zPQNqzwR$Xi2VV+MmxV~>}`7XRyyBX>82RyO&XpQ@FK6*eBu zv-j4jdj-3OwW{cxM{Id+cdQ@zZ_{Gi{7<+PN5vhEMVM(w7+6yv@w=W=hjqYMdF2;LqKw88;u(93RqaNF(sI2W901F!3_m;L2vQX~H#_NYPv8tKd2c z;_lJDMDk@drBY3BWG{1u#OODv!R>fiay!IEmoj>(5?6`YJ~>Oj zY34~uwV%Xezc(t;)r+~~`urY`1+XxH9Hbkzi4W1^*Gu39BWo!v93OQ~;&>%*3DvUn0T%t-8wmWqdS`1?QEF?B71{<5 z{0q7gu83?9vgPi5)Y;!0!`QPqq93&;v6 z?QSV=C$3uxuNLw^@f}b0+-!Z%?`O@=02b0VNRvL5sY-SJX~Ei)aru0>X)4Muwh&R~ zMmH#~N4hus3wMQ?aBndL&F@|HwKZ{%WVAJ5in;#$HsV)Tr?koyHhp-DuHEV9i#36* zE~mH`_KZZ(5b+>TB@(*VK?Ku$H`%wi%Z*oCpBkzra;$&z8ega1d=TOrfnw>S8OY_o zh(ZvEfAXELu? zc9%V@Y0u-PnKmk+o7Ozkr=Rc`@#A&F! ziT=*_Wj^ta`1e8#>NTuoIZ>nhdCzLJ1q5rS_0H0COKtk%!d^rWj_=4psbH|sK`8zh zkRgXtL$h?ZS*+r2s|8*`oIUyU@4(h{WUD|kj58M7@}YQ5cRULfEr9Es{pbJiC!!#j zpY{dPBPxpdE!ANxUlACr!p23t!p0ZubTYe}GxZNkR_Nn-i8hAIVhFa)Z0HKGvQEYM zD;5Pj5z!|%6?@HRyPdLSart`qgu|i*h!`L+F;8Z}S;>6uKB(ZDxIt1){_%P0M(wK+ zFNAo@KGS>3*3M7)3-G@t@DnDbD?!GBQ=m=oGN3P+<0sa^;P7#xvXo%=UfWCN6B@q` zG+AT*FSBgT8We9VC$81=mF>-N zIN}Zcv8Hd7yOH+6kr#{usELqkAp-UY+ciZ)jb}^&uGE@#zC&AhFCtHr+q1G-)F%uv zh??>1YT&mAP~!9RW>n^XQB&jqh%q61`q1-> zC$vx@yP{m?!jIVIaQOT2;*agXT;>+a6c~stO1B{m!(HT*tsz1$JwXu^V${%>N`x1VoTyiL~&fuSpDf5p*E{FY=uA8618k=#H4NSz#fC1a2^y z-Cjb-i120~+9P}+G9AN-N@s1R2XNW3&cwOT&V|n68OGuJsA$ozWsK)5{>96CLj86> z*%BlJU8W6NifETs=fkt`YA^Zeo|O$pxe&t(EA|tU2*p=TtvDMWjP&GYu7SVM>XSZ1 z;d;QsgRP@1YFnC{a(^lCSg7`I=$8>^Y0FAczK%VPi1b79PRDM`*taMsNiI$ikJw-I zA{_iHe#?~kb=(f~6K6SDYIqnZ`)VpisqsEH`rU65G$dnvrMMFWHju-Ogx*uT#R zv|OJ+H&A1T#`g8SfQ|HW@W zz_!E$wsU2aehD?83P2Kc_}ue!L^3P2awL=b%On$SY_*4gzkfu8C;FYh&k$Z>0xx3A z{p>oVCl)N?nG?~ZqSTI`UBN2aAf<9>*dA-BY7=OReg~NVb zhNJRks)!q#yE>wM+$d;z@z~l_iZXYqllkC(5Jz*s-TP+ZVTvx&;d>hSMD(+iomSTm z?K}tJ8U%^VA^Il7p5q|QF`-6O5-n91b;KdG%%}>r_KE0u-C?v;Pmuu+Tpw}EKy_K5 z)xYhC90j3k&QWu_x@3mpVN3lZq5B$+l^N&iF&+opL<1)(XZsi%BW{ zB=LViO}vrA@M$so*Ys7F0KET*{{rDdxmaq(wKvJsYIQ6Fp#Yn3u+JrX37IwR;D;IT z$OqT8;nEgAP?R%k*5%Q|9(w?$n} z?L%IYgq}~l-sJS+FQH-WXDMS{j5Xk zJf}K4T~U0mJTEK@aS($^7zM^mJJhb@X|S$750uG~={u&0mXE_@dg*?oEZQ(@x)aVr zD(~&jv_DqJdlaq1-){jz+5qdnUDXFchKsS8J?7*ujQCCethitC)CSs> zP&NlaVD=r2$4iMFql6RNRf!lV{VR=Wwu#J5h1vKOI~A~Z?$N@k_{!>_moL0t=7Mxg+%7G%7az21R3 z2iAy6n>0gz3QYHgx&Drj4==iE7Xk!@IFjwo$dZr=o{(nM^K!%oV{)bI$q})wy!X>% zN%T|S-szP!DXwh@VIdZk$M!)p0^-{|A$rqY=9)T3Hx4VM`t%OW3M!6PQ$*Xen;W~8ZJKQuvg_I& zMVFmFZZzD9)MQnY5bbv+C`Bei%CFzX`&pyhGTP~RPqZE>;HRdk{AKCtjqQEL^a-KRI6al zD+Vma7Mr1?s+F5o#92;ZNTA3iuRm=+lyh;0&-4n(2FxCJUel@Kh9j5{TF|LeVrSOl z7v86hQY73MQ^oW$`S(pp3nejdM93B& z`;Drzsh01W@K+BNDVNa)8)`XCkiVS35KNa%qd3*xePJ)RD<&AEnGB!T@(Qi^LbA_e zxNf8>UMabuhHLt2BJjv?U0IhJTHjCcpRY5HpZl0;ha=#Ox7cXHb*W+F+xhnbXWxq}QrlHm6M8ul3b z)1jVp>}_F0UPC78_E z=^Gg9YT9;_bf?)#+OR=mn~iO|v2D9C8mF<{*xs>i+ji2}I=fHb@BD%_Gi%n&eP3Gp z&lLl8=Eo!A#p|(F7B{=(S47?!h-&+a!n}Fp_#A*c%CL`O! zyF;$A%j=gh52vG+4)?F)S_5m<+#ZkGcHplGTeg$lpTW}kez#YCAe^q??da_2l(!rV z)czXf)LB{AHP50A?APJ=)$IC5XhCb%e;dIj2iIt(v}T~+{Osc8sB$WfNwo-1fuGe(wakH5C6G#uu#PYv_LzoB>U0!|ASbt&7qy{kNe%o2&PQ0k)*|}8D zs!DE}?~d2#F|bhEQf0nBKCvR3by*fEHl%wgNeL*qC*T(%4@{yU=@ptzGU zqXKJXq%NMub%~2rpPb5Q$k?r?8x53e@PkR63w2j*ZEtE; zHjQNzACKibLhE5jY-o~T2Bre!fA=)OC~TQSo(xV}>Xr*Br&;1MiOD^woImA|>u1Mc zlQ_-l?Qa{EXAeIDwrhob`WSvsJrwD_D*(-Rs` zhfhrVco+BTnb8Rx9iK(AnPYTWuIMnNEG4Ok4jhWFV8zb=iJpl3IuRp55``RP-DUH6 zKTSBUrcH{0({*U`(g;i6;5~}Mj*vHZXhb=PX(xsvK*{U0YY`UK4pYh%4 z)||`{!>S7vo`t!Vs6wp3>WhW|l+=5_%fQ9iXmB5G@i?IcGeCfJ3^)#};HK#V5tr$x zyj)!y$J~e#MN4gdx;(OHQ=u0F2&eEZ>?9QdEAK&k1KNrsC)=hZ*ALw>zL{gCePvF(D<9mn0PEy8PNOP(a zO5<91+uGE6y@g(W8fO?lAFhI;u^(RADXj(Zwa?XDyQ)O1uToUt6%nPpRSlEa0rtkrVh6 zwZPBSUGh+sv&AeE9H?Ra8O`OOjRnr18uURiU^wFGEkqpJnPS3Sv)M8?e#E{t@t#i1 zs^;dhUCymA1-uXeS11DMzAl7svyRUl#p%4cN1<5J=SZ#e%a2b;9Szh452DW`RN&0U z-g&-Ow!Y#bI*m3 zW^4}93oc2Zb!hp}4!%HDSbk!h@&@DfGK)7sZ-t7q#dTU6?!ww*ma7<$Nn_S4Oxc0E~nxX|`)V}NBtw+r-vg5T; z4gDcGnt4?kfE1GKQiuy~&vE+3tJZFYl`?s1$c@5 zh7GAmM&=ourthN-WDOM+pA(;6zHcTmbbQf*HZDem&3bw6;>Z|EZ4EST;qk;GuCR)@^IJPs9s{+frX`5MKF1NH$`z3gdII#Y_Ci6}7kh!D-6YziPCj z8pYp|)*d$g^j>|I^TR5@&lfd1WMupP*aoU~2P;HdRU%{e2>;Jm8+lBroPyT@rC*&I zYU?a zdeM;mx%Oi)K^a!O4>xzBpvQFl;HQVWiLoBvBZ|~`{hep!@pd4c^bqYFAzVE^Sz?X_ z55AHpZ^TsF+M=aW5sbFk&6>Mqx75dAgcr6fRGjtfX}O%bwQoyD=gf;8OL7d?C_5kIZ{b!*^10^q=iZ$pFna9{41CEjgX^T z+he~C%*<6UF=3#~b9nSLttiEtDYM(<0~I{!{7V57-wr7EH?_1#R6=;GAG%DYh@w+| zx@jmyEK9W_cZnoFK?OX(KIhQiD8zx8!>IXuyl8uP$?}$swP1pV5$QkK1ZBP{%o6 z_Nq~co?4QMd_AAFqgA^lCgOwJ*Ov9Dct=*$r0vv7_5ZtS2)+SF?TunE4TjOThK5PT zP&!b_O?}GPmeij;RG+U)SWKBLYWI{k$gDH&J&L61{|@asXvEJfL*4Q$q9szzA%!+W zt2r`x=5gTHjjAg1y=<*6w(mZ#Y=-)Z+SCukf0|nkGeE&s+0@msfjF9`tk4^|da<##c=E|< zV@6-E83$OAYb~Y(TjoRW9IesIJ8?DnmJydV!TpZa7__QDXCdH$)ZzZk zxLFQC4;ik{-2W69yCrc<;xm@!<+GGNg6c?pN$wJ%aaP#`-K@Kmko4tyPC4QYHv`^( zqw;-+4Jo(<6=)x`Bm)c5n0!3GJtT<8SzY$M6;$vR9wE#eo}!4=LxtDx>*-{+7LI9$ zj40>X9*3!e)UR#!a?gb2L^i_eaHJ{o9pA1)sif`mVO#)bs*cFUb{!p2**890?a7Ji`gf^k;xCNS~fW zrQHP=*h4Hb22sL2y2;mb&D6SFp1R<(%@vKdB77u)kJUNDErPcH^9||cHQs_jv|MoF z!Oo2AR0?=+>yyLb`1-yB#fKriVFN9Y`PTHtCgPIVCuDTC!PF~oUg?$7rlJOKv%94; z^CRQsNx&q>za`NH4w8}Ex`%bDW%+S?`PAI0X~h`f)CMJ$FK9^>rHWj)W^Qy0IyVb& zVDQ=Mriok|j82eKgoj@U5CX*>KJ;Jt%XOwW@IRi1z$#vtDm#9xpdUzOu_HwS7^!B` z!jOmuB}>5=c>aR?ccWoK#p0d6#M423jSi%uE@fs+svp+eK&mzKp$j{x8zybc;~OX~ zKbLb% zv}$$BGSSzWcjv3dxjZS3=HJ44|05>;-d<4{R^es8`b9g-+VWXm15VO*7GHdYC(%DQ zKI(WyEopJ76@TeqZx)3?ifft}WSJ&=T&B@@LNxh=ks_4m-7j+7T}6ikM1wLwr-eNe zGS=Zn^yUctIITESqBp@eLx@wsVb8Frc=`AMGyJP4V}Wx%c^m1eLBo^d@^SMJ>0crK z8(<`)%KGC2XQO#lBy91Mm9DysfSRZ77-f>Q%ufe1)&-NTWyz7V%FxTjBvT(jBdOmV z^@Dl(!|vg)S+Dx&w3bLo3>*W6=U|cI9j??30Ez`zwGpyjJ!>-6X1R)(03TaIuC$L@ zevQ;&3adp%f3J7uE_UMB+5Gp&Cjh^a{~pg+Fo*f4ENW4*xSb=iy#I2Jg>hm3?V*kG zTvGQ|SCdcn+CIU4=Q5WM^^#Bd#Tb3e3&tZi-~g!@x?n2i$*8&_nt;HY_;w5tvq(f- zGKW86A=;kAKWCTEMqD?0UIp}F-wC!xVv9Kqp-#e(_g~4XFZ0`rxQE$2G<@Q0c!%q@ znJnMQdh)6FM#D1)dp5;_Mxm@JORou`dV{XeizJvju46REEuw3kVv9rA!jAx_SPuOJ z4i;*g%|Us@kB|}HH4f=>UdwZ~)7p{g*`N=Oq@(4bRvG9+jRc{|%xByWXa6oa!JocX zhmsZGuOUo3_s2{^U5C{WSs%t**J@I-PPiQCMmI~YR-L)Q6>a@wrjX6LH!8K3lRL(q zbT7*t-z$}IJoiR<{g+_{HXWnZs3Ss>A+USvDvx2Jil9@2>rQpc2S-}qMHI@sUJ+lu z3t?6vi$lx+yOdsJ8=J&rM&%^a{!pxRr19gwy*~`V!EXKxE-p&O2s%J3KP?c(neHzLW!o{2kRLm&CT>g9VfBst|GRTaM ztpQ6MMa@CVeiwSesli+8o)!`?az!h5gB}Q0Sh7yzyLcM|;^j{;2xa64M?}Vj&SuOl za4b;6YaYcR5EeG@mqI7#iocFsgUeg<;p{yHQMi%6Vp+cna)ROjt@nKxOXG_DsppXh z*GT$(wpPk#OnNf=ck@B4tkI>sv?$3D64tu?QH-=OgvlaMCx9eD<_O_3NY`0t-oL(! z94zO3n1Emc8bZ;8{2F;vs`MRm(vg27V$i>Q|6pAq9n)A<8CmI4^2&FzEQZuU3`<2+ zH`di%cQ}M*)#ZK(nD1D1#Hv9#@Q{WrucVN1 z^kUmozwAs@eDMn3a78(L0#jU0s?R~Alh^e%l-39qM{bt0pID&4O3ONO|3^SXXMl_? z())FRI6ycOr}6js8j^pT--2)Rb{cUz=jTHzqtsyylA;i4X&?Q~qWI&z3GV}~MI+ic zA649fSVaIhOJ^1XQ;MkAjMuQYMUy`! z{fIkLmnIzNRj?UI>ltOt6kG6F&&?cTaBaM{AG*Bdt={rq$6d$sz3n41TDEDGHarpk zSFw8n2lorDPJ_Bf$M&ZAH9Lnx@JoL$C)1Ak>+@n^9+- z`4W>D5>|u#m8Gg`QUg}pJ!B;H{(BeKc&-?ycG-!PBwT6#NaXo!Qj?5!;Mi;?1GI76 zV8T^?NyXj&Xdxue2>-#x|H}e^s)?b-Zt2R3Vf1c5)tFJxKM+QC;-f3C>csvX z;;^;I>OVUx@GBHY0;xOQ5F^#TP)Gvh*k>jaw}mGQcMR5%S#7TvOEm$vWqQ*?2C;C~ zhByw%al>mKA6!Q(g!n{T6(~8SF1R}b+~w!yOhuuBA$m&$r(3-{u}s$;V4nW?5f|0iBDBxd z;W8HeyX}>-jMM!cZn0G`G9Wm3*WhL+HHyQRZf#v z_7v|*)*-Y89l~91jTnC2tXZID?5Y z%tCjXZ=Qi(9Q9jGH`c;z-TPnVC8gtzQpU`EcANSfdQ-zqGOulgOPltHS+MOaJdB)Z z)P9D|)b2c6B~cWyiN49!nzLv6Kiz%6N64klv#e-zEL6Bn8WQxN!mpgYG#cyKDvG!X z1yC|neZ6Jf8FA} ztXk`FE$b>YD6tvYT@oAQ^t1kdz}}`R)ILP))$7Qb?~6BDt=XfEiEvkb~!T|2J+;g=5h{K~ywGTcLeQrR!_k>y*MgmB;|%ZD~{Oml4T+Ma!r z_+18O@l{Y5h@*TTBJLS@4DSdr@JXvbswqUDX}zcfn}hP09oFq|F2XgQ5lc5H@3?rI zW~>1-4&~=ZO=iFQzIV;I6v6yY(&tCUP!U)Y{oPm;m$xm}(w8PW!cSei_k!bWAycglzH3JCW^ZX3<@ zdNwtLwB|`#Cykj=u6dpDYB;MfFwMIWrn+j(20!2~lq~iG1V`IDE0?fkpM9yx|OsM@i;ofC=CXZ0srmiHS zV4Y_xp6inj!_8gRXyyM2dBDAa8mgPy9XTgjR`P*K66PhzH{2yeD8?vxY$2_{)f6!) zchdQ*=9Op{?Klo}7*pp>h$N!cZiC?j^zjsM|M`L^%mX=f7#cJI2Yg4)TtAca3J=t$ zGr%g9yCB{KEcX>2GqNS_KYE@T54axlKC{}}K8<&vqUc4;kV$-zw`CLetgcPpo}}6> zS+o7}?--dR*|ey^PObekT1Mw&^xe)BkM<~mWG%L^)_pn~4KJxY9v$I?M{Z?aWg>0t z&+=1O+vHzk7bp|P1hpHmxT>61%ehaX(TJ4kAS@>{8p`2NTRB3NK%Ok6-Ey15xDT8` zG1kaYe|DFxyCr{<-0o1))L#9U_X5NL$6s|550jPlenlS1=^P_-5}l^}fV)2>OE|;K zgW%pLGEJLeGw1$8C~YR=z7T}NUn-kYG=Y6As=P5~b6{kL6#Mto*fs3RgOcEv*5q*` zl(F!sP=pWRSNHRjtcxnRuSWgvWP#Lkor zA4_i%->9n_k+v$drB}c*l9N8QJb}Oe9~GPKi}Gg|7j=vJ*38WS?{z+gW$xG~u0tU! zN@lFcWo;%dN$K?S8GRX62zG+m7SCBvU8a?bDYMPKJN$vZG=#q$CLgzS+ZI4LZm=aX z6Ma2HE5R+ia86+&3P9GtvzD?SCX>g3J2fYXndnP$`&C0az3;TCp7%z!t_LLLnXs3y zsmbNDirR&%Tk|4-@t>~{tli+QyVqQck-gnXqwA^1V2m=cBzxrT4}a{9QIP=b1<_0v zgYG%lRD_(Z}-$))xD}qkJBuwaGmzyi~W$8&_e$}h95%wo{CGl z3E_=~ZZl#e3uxDe826?}##f^CjgLQ4S&L}-;N1Ed+Z}Mx5yDPRp&EmuYY7&cF^iU8> zVNHo`%uN99wmz1&Zcz3jg1;RFFWM-j9)P4$Ya@>p5*ziyV-!RhRY#h41>WYWbyg!w zjJMT2cf)-;*A$gxBY9uCOR1RPg-*0V1?j$K<|wZ7zs&(9<{QXGdBvtVo_ia&u>wZd zPQC;UoV`fqvJ>Y^i4%gKdL-aE5&R_@nDPm*1$2v6`v~;)70@$CLk`-gx`IiKeU?v-8eC2Cah3X+?P3OrmDoKF?SfHv! z+MB$8KNoGePfXzF;dIU> zU9x({%**GQ6JT&@b1Z9679&fKhrw(;d( zL=g`SjIvwgxocLw@8s>HVXv*6*5%YOnks#k zvc1PVpGm5Jj$z8J^3%b9z>K06C%+$C%{ zypsDTwv*_UBh#%3RJL;`shp4H8)$x2Fy=ku0mZF*ygB+S_|aeHR_dWfJ?umqNUe8KdKv_tlwXMuAm>>n5)+%l*lUU3b0_HC=kDG<~Dg zT%)U461FDCg(yb!v5e9Jmfht+P#`*V*>tlo5K)TJU%M$4PHt>sp16I*{MXxVCyt_W zGpVdkFWLC1Ax@--cQ9#$B6|g-g%Z6wGhv>g+QHBGbtVCYbx-=bhEfUNJufEo% zafQ(xhxdX`@-?0{!)D2bw|&GlF4GaTyu&iVEna(?IpjV2$)))V0g9J|VCts`#g+cT zA(b%ju(h2E+eU9TOB4?LS%eS%cDa3-GyP_ zpYcD4av65hL-g?FBPfLBB|ykjG+BZIscIH@-5Y89p2<70EcP`iGJ8uo*zED%(#%W1 zm}2y+BLo0$c?S$(0m7k*D9NT2Gbi(Rm!1ZFKZDeocHDu zJ;<*IYm|g((pV$$b#aO@HO05IH2zV?dH}^ru|{TA1>+vPEXGx3D%#59n{_Kk&kO4X zZEIW#aC_5FXJSq^TkT(mwi+ZyF#%e@(ZRK{%Jhx0h7sI!Q7p*9&5_5S%AvdTBqoX% zOEGr@)tN=)or&B+tR2O)Zs<2{T*qz@;E96i90CK-`&Sr8fK-H?)Su|?(eA&9Iiov#0*Ul1xoT9dG3bY75R=qjET^oF?ie$4SRPE$vQS_&J0 z1nRtdTWFnGNc(T~Y4>p`{&z|yQH&KTFn*7+`l7_N7@17+cMR&EJe`&KnbeHnVzms{ z)1|DB&Q@4Dr9D;xo&NlZl7&4Y4x<`MUHL#FJ#Y>CbB@Nr>(lB}n>>Q2WV10a~( zi3rVgdn(E{@@Y%mz@nYR&f39rO`G{4;9gFA#?m+&0vbRR>YK-~#bQmzDQ|5)aju&; zR)D|4Q@QM%4o*UVW!G{zrVfC1{)5ck2$psk8u`6)}ab<|2I}^etPIdn2 z&T$hkj^NwWXS9_@0jweqma{){wY3ipex`F)<(N(bYjAn~-NC4v5eD_i+YttGVErrw z^_7op32gUQ!~Us+rqI~}Wy%N++6)v8w!%EMmN-FWT;9!HJ{vGw;w zo|Yi#A7ISDe!GUFjRIx)s!#I1&4$zl6?OJ)-)E-J&A;8?GE&x4 zgBol_a$8(~1r7MKJo)-2Q#hy+JzUxQp+xuDCi~nYQ$!a32Bbl}>*NHGLCUd(AyZH@ zXJyfwD$5|ric1uZe#zm1UoUVw2`@hDYnVJ+Lpf&2%!r(RNzt7Q6UsM|GM6)?Yl#m6 zRhWdzD1m;ObYzKWMkOeEYG5>bc4H1pQ{+tOB+D8b+T>d{vz06?ngO8x0Lv%_=Nl@_ z5Ngd_f7$Uk0L$bre;<9X_F}El@RS&@#JdXaAV<| zN=i0ag-~9q-6Qb@*)BQdDoo^JZzl^bqT|GKG!PmoIR}g8g6YR!JDdvqb}DLXik=bS z=9wrXlre16;ALp&y@L<8_8m6aqcq#m)hp9NxIX|Oe&K-4R`tfi&t_6_p-7$EGI-& zA}Qk%bW7LYbA`CnFi=;caG;28+N)*u%13PkT~AtvME@9NmDOm9VP&m{iGplp))GV; z2#uSj>3k{!Zfx&LK%SEFl`4^FlkRT~HrPC>XF*58<}u_hW+Jqy%q}`s*#SniL2jBh zyydH|@rlLN7Sx4Bb)S?|&4G(`3UF^cmq&*99SHUjbp$k!wMIV`O@^RCIHD7^{%H}w zrG1$^62%C%#g_8IcccP;D60A;3YEw2?YHU3`bkN~wdB)}uIoysaHL=zX=Xa+@sFv# zSw8L0_Bm*@OA4ZUWUMZvK#W8r(g;k+LXb?(jR39rOoLcar@{apw%F=~{}Ge9vz5=& zYk+>hs0^7e`%M^xPw4RKP0%o`N{OTVowUV)vWL<~KY0=c%QY8w&pvlrW|gcgLsUlE zz;Q0pjPdUpvs;qBxHH7eqtTXWFz{8>@xCv6cRbAD(8i8K3Xq*wvzi7gc==oFhV3&K zJN=i!N7l!4a=N4e;U7JUwpUvvUTKPtS$=@&s7>3`ousM86!ZWNJ#_e1OPm-tQiG@U zX+toUvwIIU0)Vsyio=mOZt}REAybUtrVKXQPy&Kn?tx8#y(KO`<+>H8Wds_pe1mibJ*y+E0P0cp#I5Lk4{*y^9MRtSh zz&vxK%r}`zjU1z9l4YhBmxD7qj>C0sW%G1JbHVpN%rgelr%>^eb*)lZ|&QXs<6py)_K3dDI}R0>iI zzmc77vHD=W{LExsbS&b9WAitiN{Tg|z$52Mink6LSL+V`?A;!A?-Q0pwZ*TW*X?;f z%ltAuVT~J;O~5w3-0^C#{4*H>zN=&DC1@Vc4nZB=e~T+EoR?+7 z#dDOx&%?}YuTpcu!b&Y^kBmE(twxP|M-yvm%lMONs#=CK8Dz+nG}iB~nl_NHMbcGu zAJd?n;@lc$x8$sk%L8`BL_Ze{2|X!!nC!c=S4i*&th7s5Q`Cm)PxYkgu}1B8`)mZ~ z8zE5&ETUS=jUn5SVghQpLqc>NH>MP+8XwDMGN!v*=7)rn(t)5*O@%(LqgLnlmU*kla%Ow( zy>^rm^V0(D<5zjm$In+PKob=ry=!}~VWZ=nS-sl+)Ug&H9+z)P*9vIn9`h3`H;ymQ z1M0o+;wfdizdbWZ{p;PdlGW18qn}nPJ&Tnjl(BFx-}mTiiKDQwchZShv90B9*X_|i zLE$tQ4(_uRXI(vK=6qk(UN#_LE%|v#kYgsT5+Ds`MP+k`HW10{zizqQqEJw$PSZ69 zB6qQsL2qfR%M!A;ODGy2dir05l^jmq>rU11Hq4(9r%(PGMg%9Oz`IPvGiDqe_V0n1 z`b9rzl1#_Y@=c|NeQK@}gBn=@nWkMqo-BlcZEz)C(b&{ywt*-CnuwS(sLM3@V!E25 zmDY!{4Yv}Jn*+Z2Zi0Gyml;ZG>e~qfFJM@x`DDmiZUBBBb+8B(1)v6==Jp&5sN+6; zKG>VDEQ%Gk!a4^hKRyru_*@bZ`~Js%leTnIlc^F@gvn<~6m_nabRMVlpw;(%4n-hM zgIO!>3oP&GHVnNgAM zDL4EB;g{`q$c9deah-=IK?I{4h?b}UeS3nSK^BjzceDeOMt9pqa^0PG$Xq*sl)#UJ z)HN)T{XVvtIiAm&hil;`Gb@%?W;^wQoaH`ef|SqiI^QZVWS*_)c$$aosBCyPiJC#y zpRC_(C|}jq1L#g?!fkjwsM~5Th~1qC5iz9NM577`FO@;M8On+1$p48(mnE6vQ*MHE z7C6RAZg($;kAXCb{&p@y)f(@1DcMyY)xTT)k~}eP4>UvqazMRwr`73W%jo0CK`uL24c44Qe!&{BiXKXe@_Y`NKG+A3xQ&bnkKoBr6v4>#IYk9b68V4Qq z*ZC85U{&xHSB%m)#KT&fy-Z&EhLm5CeUt-hpS^yMAV=Hr_Fm{MbCEaOEyB3A4hm!4 z4nBwr7pB|OaZTkuEsw?I7yRwtuC(E-{B*1K{<}emdp5hI!7puS?65+elsfpX?;Yk1 z3Qq5BxkY(-N{=sO?5KpsI!q~VTSc=blcYD3zu}Y%Ycw*d?>9)-q zpyc+grXef2!K#nX-!Qy~KJFl<;NkG`|3(1EnH*QRNtt)iYlcd)Tc=Y|P0l>bJWcUU z;3*Z}Ettu_9(3l_KS|{a&y{;)m}U$9rqea8|FrozHYoA$lATQ=mwN=Y+(i=Tt5W0J zI8sr&&b2ksEq|SQvgqJ3ULOqXC>u^%vP^hqC?jko#BOai_%l9nRdIJP;-A%`hCzkJ4g}8h{Z>-0NRGlp3Ib`CLX0Pg_d;rNyLrt7bSR4cx(n zaUuC`fALhl5WN;Lf=m`wLeQrKvdJ={pp~cG!kt;HWTKQh%sDEM(MUG6Jmp52 z3rRpbV>)wa&-P8CFNjwB)w9yu&o)-?Fgigg^@&q^d6gJKsbnjWPBWw9=*4iNprb&=O{ubD!|(==+70kuFj@F`B@3 zJ_YoWjAoeK-Z4{^Oa*+CDI16_2Y%S9rI{V-`4g?$alg69^J~f9$-g~)ISi`l!vS9m zvfRHQ0lz@PqL|F{hNC}vkRowibEgIDbd2muzYPe@y?>^*{sBK;en^4xnlL68Op?W{ z`^85(^1)rzqc~zho`m3StwInup)9>gIxa>r-ci9Zv|2u|s;aqq&j-mE^~MYigo}0x zb>zRpp6^JLl)vi(i&WkN`lzmdI?xuY#KS9{D{^|oL*{63r9Z^`<`dwQ#?B!g&de8n z$Yw$^143RR?(kE*IM`!s^7s3ygQ7c48DOiB?50ND=3Kldu;(F}k3 z@&QB8msT6{?X32EPLU#|ViSyoLYI?mGX-_I+NEnkS<!~B1 z#&-0#`(3_q%NYJjPuh9z^A$ZS6U>a9ekGrUBZ9nx-FFbVmNwWBLl1Z)%@^9#9ECTT zLYKFrBmh`EK$PNw!Me}c2l4OpV2P5i2UJBjqb*tW1S9;wgssBkCOmv7Rs&I(Y6^8& z!ug&zakE7P_oyU$K>$gP$sUNr3nOExqV`$9p4<#T;yew`KCn3;i0QgdfS&LB2~CKQ z=LDAe$!-mvk*uxim9o&$m}Ljh`3;rBOs+kn&w!zO&McOe4f_}Zo2TAAn0M>c!hm(s z;RRPekv!9i@p;|*(lOhIm*!6R>KO}vySvt}H{)-go_6|RZ~l;r9o9$~7Ac0vV9tc{ zLhbWOo^8)biR3G9DoM(vJHeNJgf^L)v)d$r0IZ7mu5;QuO^#;y8>?VKtYQ+ufi;({ zz=2>c+wYVy2va4amg%}GU;Xk!>z@22dv&e156iN6Nyfci{KLAv8;1KLP&mQ!iv`@( zrltsgL*rJb}?(K&M%>~FTAjE(5gL@{=OR3mqm<56jA_3?D>RnfCb#5_b z@w5gqL|6tXOt#}=%T*C%^cvd4eK8feHN3y%Y(B(TxXKD0ILNRpobu@9kYzVxg{T zY?O-VrQ$xTfqXfy>e8Q+a?*WgU??n=BY>0Hn;&S%f(`sJ_oCoO5DlSFwAE|_Qs)W% zYmQ>kBW=|hd?Wwxh=Wu`=;vP3v@sI8cZ}UBZ?huyunO({7}UJRa6(IL#W%l$XR4oH~3iB-o^f zQW1b>H{B&OuJ5HxC?k5BLl2$Sy%z465+ayH{(T}<@0K1TXl{GNctG~u>sgQs2}lO{ zm^SWaazaIe!xuZNZtfNzWg6ZU?@5yJ=kZt!w`!l`cIf@7MEuMLcRs2A{zOI9weg%B z>JKa1XwT6>IcryOiPFH^rufzy9ZIiMe@cxd9qpkliNGDtQo4%}<=-aM~Wr(DZUBtgsf z_~LEYlo3@zgj(t`!fSIIB>x@achLI|qLLV|LwqF2oP|oMiv`W+`MYVYtY5wLW%bxE zU?HvF%0QCi!03_%`8!CsM4t>fyw*Tn6ZGSkcpsje0^qp}y!9Z)3Y>`pBDG$)(tj;Q`nQcsw z7g@MDl521}M|{_T*NSl|P9+!(w_IqW*Pd&qq^GNg~v4bK=*~*1!6$rba$lhCa%R>xE#k*9KCC~n9ki+3JZ^#Ej4GrIoQQ2<&jQj z4!C;_*KeX&a;jNEHK0 zc(!k8xgg>9wx$3Vs z3LA#I(DQG)e2*T?-$hXO&!-&4RHk~Ci2dz>H^@X#IHMNxi74)6549f?H4*QIU7yqT z9ceF3A0ANz?LuU~e{r#-1)jerzj^rvZhkplBuz@4%gnAsRGA#2Ej1g7#+OI_(xdQ$ zYR6Xm^uuVNmwyI);dc$U2X=c4(pmx{6QY=nLW z`tfzY4Wf@CyhdxXF}=5%4Nv(vizdXE;+JlUP@&H6NW9H*OT(pCvc_VS-xZQX`$b=1{E$%e@b&0UQBSpmSNe(*ybYI+JW-j>x;2sPq<;Isf@rCB z;i+Kk?td+gKvN<*yrey(Ve5C&emMnzS)(eX;UjUPY;EL?@3MwFl1Pcgm*;7YUe_DB zBlAHXC;IPJmLhPMbsqNw7oVS`ufMJIlda!g(1Ds8%KUb!#WnTo+cssObio4v7#f9;7a9LvNCrXZTMR6gQLTH=)L#dP(IK=hSC{=A>i_79cCd50sZCdTGZ&{u(1>-s<$^6u3=O6{P$xOc2 zWJx@%lO!Y>F|Yca&&){vRh!}=s$lr? z<;{Qx6X74RD6dNyl|ifNytOIvBQl?0Pl-LR40bQ}v5&3N{?5A=Vzd}}F^OS5Tut%i z@$gA43$7pkC%~yn2<;$yVEn*&SKFt606xcHYttIP$vg;6ED)c!zf5NN;qX3W_5-U} z>Ym6q9hw~xrua_SA90*Rp7EY(ZXjVN*7NHd9rWRhlgm5ZKia!h>Z2FVyKk72e}(46$YAyKelYMxzM3< zAH?>G{uaiH!Pz%IbMD+PoH6L99;wE&;strYDdz4P4IxjV2ENrIdV47u?w!OlEzLNM ztLck!$>FQLMlB;ji!qacMHqJT`IXN$Mc6*KUa7hMyS+alXvmYtp4cIPmgN5BGBJjm zES#{7C=lH&8nPThlEB37c2^(XDq&vtTPmo)7H9=kTnRYR%Q+|Ii8+@@GLPd0L^B!G zg{{7C^MdxiwDFz5kowuUp=KNrz)X+o_#j!O?go`^;cIUO_LnT${HBBZIM0C}9n>cu z9_fa>58BiD!qK3_T18>gJLB-NJ9sW3_n|H4-{xgn*bcj>+gQ91Jyi|~HGpbQ!0S!U z4d%=@_NrI}94;x<@2T=Fk!SJ6+MrL^J0Y%6r_UqCMrltWlkPu{vTC!cxGx4}MwHNO zQPUwR$@rQ*amUshe}<^WHuW|lRx!d(y!-ry8j;~^kI|vlm6JP=xmbl@+4k?e7URv? zaB3$ua8&p1XBhA)y{6bBUloSQPVx5qi6xh@u!G{)owdmlD|iIjCA|9ypbx7qzIh!! zx4ZD4%gYHuftgNSWnc_9Aj6bl zr6nqKD73BE*>t>dYd;259Nva_^S%4Wl$078_N4OLD!upegqNf44~eo>a?xSEivOl( zQ?4HIhoD!fRDFA_Ma(La(;U2w|M+9V=;aShw7w#^%sz2V%Tqk4$SC%p3)qCf%q@-r zsKpVQ)AwR5mcTEO3(%0Ti?|RAn}fC!idKwR)YJHCkN`|A<({|9>=H zgI}fJ*Nsz6y47Ucnrz$FgsI84?V4<3vh8lhWZO-)UGMGt`@H|bIp;ag*?X_O_FDZ7 z+yIOUe3LV@jGxyKI7#>guax@vIrRF-j^>0t&+*(+ZNLeP9xW)f<@2MYN>t4ky2$VQ z&xER2$-iD-yYCvhm~@y*%yuWzEf!ECwrm!NGG17^QeYQ{^?nxZyz@!e9Biu0pnc2% zyPkb2eN5Y^Frn{P4>k>$#&XtUpghr{z??e5T?UN|fL3+=#?mJQ1QMgpkN%OXVXf!^ zDsKJB0Cdj6-b1nS#u!3{)IUaX4$s)SrowEE5pw_SMd?MhtMN@x8(le6D>knxDO&VJbIfRs2P(}K{@EoRY zy&}buT=fSV*JWx(>W`VKj}4J7rL4|X{oO#Y6nxS15A{~L&^?5!n9ztj3LkN|iH3w= zJ$JpW@gEl-&iIv6wH$oPTqFyKCZZbE5GSNbI*78zT#Z;^jz zn%5p-WOPe!bXF0Od#HnC4S{uV4fo*c;B4aMX4TL87CLhKX7S+^P@~NT7BC{TR)yGc z+-_0c5MG_{(~Iz@Xjhaiv7Po{wq%?cP?iZ*6ef4Gwx;+ucpY*XQ#6S9FD*QlhXn!( z)g*I|sAr2RIj@Qn)BjTV9jcoc@>r6dwP#=U2ErK5;IdaPWRA%kT~%hPrwKV$Wz)xB zDmHEcVzrj&`P*mp#yb@IB7=ThEYt;ibkTzAR-pDv_iUui;!a~lepxnm?e$H>!nabo zS_&{Ey-bpWC15U5{HP*(L}z3Wf|Y-)mK_FfnS<5mdq%HZw@+kXl`a$?DGn_Ov8XCi z>8X69njEhBcHOfL7Dw5Q3{XL;UVv=nsjuEdCaW^(MYidC7%SXt?9BE_;1RE0iTtaq z6v?YGe!5yHU)mODAD4)I|J5yl32F)Ixt2VK`w>h5S5Gu%lb2}Qwg{B;m0a|YSo2-M z6e3<})JUc>s{EFBdIkm$_I1gjjYLLlnCU@gF$lay-bX@fV$f;&CE-=fbp1Q}6_wT+ zb;{9YQ)>)~wK{U+-Mu#P$}f`E;J~HoJjl;~^Dek?BjS z0cD&Dt-j~k{_J(M_co(}#l<`11TBd~8(D7$jyXcPxnbO)CR^E;T2W}U5&y?HUD(ohatrubc8IWq)L62As|eBn#~ zbqX-&RAz@qub=y)K|=(~A^Pn`M{~FE&0+q|F0L)wA<^VY4PpMDX6l}S2;I`XI>gXz zG{=;7hCU;Vc|A>L@l9Nx<>7ilPCL5%cyEN7z=qZAbV$JKuwL# zd2lfE_j2CRnO~$7-_cpt3K<#PhY53bqS?WQt)W{QfT2HN5EDS}{_bT5cLh=OZ2>}o zzebin9?HDlwx$!iIKZ@)=ymL`_3i4~G=sM*=05pLdF)T>s+7VNLP z#0p}a*Y4T3@GrV%p zJpphHFE`joXQ?kJ2DT-zW?fQuc9ZFqf6qdX`y~BX6)1+Q2K$9Y?1NES6p!&#DF!bz zPC3S^t#NbCoZ#0tZK%A9WYadeYULQE(}NMrX{+hXBxCy?6ufN+t4|LW4IW_}1>%7e zvEQWEViv94Qz@ryboT-irL;@C0(@Mv~G*x=>6LM zPVbrOll09YQr@~Sxm519DXs{M%xYoMfBjyU|Ib6~Gvy@Cc_KvJBUu$h)pJDi86rlJ zn$z9Tke7^(Z2#&H=A6gRn`Ww~FM2dEYJPW9J$t^^G^hFkrJ>^N%#GM;%Bfo}FoX#^ zRzwW0=S;5Gsa9T~>M3T4`AD&=a4Toy+HhPU0;O`(hg5w(LF*RwygcKB#UF(@^s6yg zuIS(3@#Ip)bAQtzY4^K$^O$at_tov8RUA2?8gi;+DZu*xp7bjr_?1jYtD44(P-nP< zr8D1w^&}y+dR#Hb(o3C3U+*cQYP7+0ZtVy8M=kPmtA)sk>3npvH%ECjMk-fHa*`k# z!iNEWG~I8HjE|&MsPdY>w+;TCrzl0s4q`ghWHo=CjK|5NG%8fQjJ-W^NoAli#&x`H zAJg`Q`8y+5R-v|qj-r7FbgYcZ@GR#Mls8r=w`kOg$w_dm zFr6NrYn4piE)T}Nz!GB6xl!UTDs@WFzgU^PXPlq!I(>ontXD%9|0Yq-xNOV4DdaVKEtZQ;yt2W( z(U0#hM^pCCfW3L=Fy5ljeLo2068!##PG(MBAmfkLH`G`#QIkc$mD0ByvGSi?YC!o6 z>YKoq+$qM zJ9?iF(|B-t28jfJUcpe{ngre#Kmju9&o-+$#q>O=?KE8J2J%r*i)!{4I+59A!D?Sq z*pTp!MWGbE(vdGG0gNTX)9W=K$B#l){EWyZ`K#UBDhTyju;;o6Q3eL(wL6g}$){I6 zOVjxPe&5Z{pB)m8<1~|Gg67*HV)T2?&qLr%l0Fc;$EKpL(uSB%{b$6VO@Wop%4N z4=vr3~$Gp(n$<{2%)oQ6m8>Eq@L zUm3}&0_!fuKiX=pt+tK(juRRSL1-|s>VBAMf#h!7LzftreWkE7d4t` zG}IX{p7PrJd$Y0rlXnVjiA=(RM?IZz+DZSYCx)M?4ZDNc3D?h` z+&~$;-bWS;xfgyb0k^JrvmX;_1?9 z*kzk~uzOKEfS_^y^Zg#J!es(v8MQ9L_QXT78Q7f2#lskHCl0F@ zwAC?d>B)zOan|~15qZ%hAj=hNQhE(;a`O~zUU}wTzdW?4d&xFoP9Jcoiw*sbr``M$ zv&y|_;dE!-XmM0L5d`%hX$5^(i+pN!Z1$B?uwMDuMR*fr0D*r~jE<)G$sGT;k8QC? zymG$)-_R^)nIy49#WqiG{_o);7XsNtq3PHy+#aGMVnXb-;*rmmWfRIRLv7kT#R^%A zabIDEV|jk{y-Y4bla?;{%}r6TJFR;%xw;s8fF>B%(jUdC4;z_3&@T&grPhjra1VB9 z43f2Q$Ap>S3rRS69#BRmRqAXDlSVcZf>|g0I~I}yQ+Ji^zy=Jy!J!^RvZ*jdpHv|; zr|A`Wg?+f*AZa(67h^DqI}#Y~iM;k-g0nW3m`-85IWe)Z5dk+NZ0U1s^_KQMx$Ax9 z*Na7TE3&D*_==DUN}+zT=Nt&ItLzVe(jSVJGYh{6auP8w?>$GN>WZYQmUqm4c+R>d z10R#G1s9Uddo6ehUUf+(R!iG}%y{T6{jv*&gBfvfe@)6*D>i@H;^<`#jchKNVk=+M zN&Et%#(P~iv>_-bB3d>9pIAdj;`anF04kt&{g(V}MuBI;K;PVl+qA`|S9^Pprj-vf zx;G30!?Ml)Cn4hhBqPSzL@zo+^~ON`ket78N6{O>Wx?&%~eGFj8* z`Ph;;ulb7oc00;r=&6dvHplZ<_vuK16koLe`Hc1yjqTYk{$A{1O5+>4hBe+Ez|@^1 z3FY_<=!W-b3pN^M0({6Ch*qmlsUF+9^T-RXGDv^D>%|}~3zwaEVxOdP;=J}Q@O&R0 zd4?Cha%0v!+wfP-_+7asb=3m`NZmv1P7P2&jQV0{T&aW$ZW(8J&uSWI2Z={mm;~Rs zdvYb7J)sDgk)tne0|}sVf*`x!bUZUNd;IKcB47-eXH|`XpP?@gWdRTMjj)>6*H($y z0fO=w6a3~O&5x%`=1PqfX3SyX%Zpbx5r;uukP1_68Us9PHmfI)KyS>BuW<0HanoSI z6tVUZ;3ymPp-#O6!ij76W}J#$SS>YuJwyxjuNyC)k~#4wma&#@y_8azS>^XK_j%`> z#c0|b!bS*mF6C%Pq4kl>@6q;McztZ;4&ApqljtoP7>90)7~s2J*WaSAwW8;*{|)IJ z)fy&2>d+5LZVw!Gg$)1FkM`F9gpq+$DlKA3kLOJu>Jy$8u24j`Tb5-^U*>qFM5wak zCq7aTAfzMz+l z(fVfWAi0;jAHP@-)up?%8lJ;6`4`jIMbFJPZl-@jfodZtLZ51o#=Gu|Tf+?4Dzt&9 z9|o(N92i7$(X@so?a_}jwKlt}_`c1(GDjME-t)!(*8)hfgHY`GCPk1b`e;mMl0fDf zwpRISQCWlP*P1#@(RzaXvXcU4Dez@7)3!itCv2+g2|Q>EkZogc3s?*V5Pa1Q^#^0* zt_w!Qq;*tS9u!7eG``UY|27WnnN}4hhPrt@jjZh8#1#=Ekz0IwQc5okfU>AJCkMp% z+*`N#{;b7F>`aTMtv7SjX}aIZH}X7m?fGzhj398Vxy1R%Xry8=PIi;K^flwvVD|Kq0f>NfY)+Z77E zskE@5EqSg;tAn)X^%2y!;7`@yc%tal{t?Z3d@iJIQ}m{)Ctd!%QCGinnaes}P1h3c4>e0fL52|4;%uF==}>?CP2qh8+;pxb zX*IZKJ1{0M5$t=PZ@l!XJe!RB<_1IRd(T-16o#y-Y^rP9D_HN?3uFkH@;~KoM;AaS z*TrzdcUwjIK|q9yg4zV?KebD}hwkU3-W{$%v{%_O8Gf-7;2@-Ghp*5Y-w(uk@`z36 zzf42*hQvj)Cf0SW1eBGg{@kiKU^lgw{LiqQQ2ok%yI^>DUV(4_6hU)+9iXmX%@2BR z2ZcHX%jhHwYo(2r8d2DLAB?3aVJ=CN0CiQQsoisL;F2VXXwU*VRH3qAaOE_>mnwd ze%FLFTvY$vX*8HTSpiCkYXg>(8+_G7qth3^Q{5{p-ip6hicQVwHRz`gx-A$}P6(8G zHgmqj2xMYwNJ0J0R_dp7wYg?k zKfFB^6`rd>{UIWBgHhaiGlEb}&mQ9i~qnYunu(;7z@`IbbGt(>L{qBI|xFWSN=;OMa5*2SR zWLIw)Fq#RhZ7F~cDDm#|me#^jJVVC^b4>ZVuwS+wN3-j$+^*h=3w0kH1m-J1LEGg3 z7s94QpE%a$O;u4NER35qi1S(5ve{CtJFjX<%gHI-lolS-HqI3oQ#hZmL~oJcSr#uf zUor5vs#RatDI_J;(1h>kifw(%A-?*1eRf!a9|PqiI+CJRV7CV^iw8IvGTMg6i=o7E z{K^Av+hA33w%aAjkdGd-|4rIIlhCo?NPzldyA`(y4WBFTJ^jRnr_v+FW3S=QMOnvy zN~DMOSHXB<;R=KwQK1)Cna2^M$(EF8IbyC%DoD|olR4Q*YwqHM{@RIvyg=L zpkzRTfrjxEDJdmYu9(G_WAyuQa>2@TZm%=J4pcX4l(F8bUH26r0j3C7M`P1HYc{Q+AU%rK7Iy(rXixSW9W9&_4s zX4fjj9xhZ5I5u!N&D5L)in-8;r}r})d-4i$>DIhlxXvUWDY%Njl9DP8Yi_52>`f(c zo+QDsMPqMkn|zE$$CcSX(ki@U?fBEdgAJYT!FvCiSn9!dgHtGx=;kM3J05#ma|%0B z1;T-r-&8G zmRY>ltVQFQZA1B%(AnyZ#`Krf8Qj+Nxb|*RLk|k%wW{at`$zI_3azxvlvvwg8v4G| zzeaYX(VMeTX<}i1X3I`seXvo)$w;HUxSKa_J&Pk}l*#)$`UmXEW_&+G)EO#N5cs=fk8!+1fuP}bord8n$J4|^j>mhD6z zC!FFh#RQ)Vzrv&|j=1CG#iG&E-LLWeD@hSbKmv4PfGsQjWLPJ=0ooJi<$|-7D$- zJ6A=d|IqXXPikuwieM}qhSHH=3`i+%r3Sq*5 z!Bw2CL+*m;nk;)IIhY}4juG(`BWG&*o1~JSapf9k`U&A#N?PFg08=MSo|^bE;A>{t zyF4Mc6xsqKyIrHyB6y39no!H2(`cUmNJIL?+o_(hd{4iP=Ekr1upvi41Xt~sN81qu z?X~DftW%HM$8kuzE<|X%TkOw$+r>w~tg|pi0oOc}-|u>@*zEi0Jj^!5Zg z>=o`3uH&_gNN{V_$}D5*)6%WKu~bWLehhB2GLr+szCv7Nzy63ZDudxjF2S-&L=c7; z7*P32ZaX#dww%%Cp;Rb_uN0}$P+c0qo8_!7K>4b$;CeXwMrockgEy&$^XvIJ7sYQB zBWRSVTwTJf6l%Xo?kH4ZacVbsGk>-HU`A)}_D@Fl^M=SAhluoY_VbP>oWSD4K@v_Po;vRttu#K~XWFm!-38{!>mJ|{sV50%1x z{8a0#(xw_KHkyEBvAwk9|B($_bM)Ldx-c8T_<(gTz9!g;Cx<(eK!87o22n%lw}cF# z2V#8%QFDc!zr}T_g&HiRJ=_5Y3L}Ukd({#%XxTIs-fMw1q~+aoF9*tZYl(9Ym37z(h=of!M3vT^ zuL82r@K^=vG5!d^i{lDb?r4WrUBG(p!KMjIsunvxGa+6oixJAQ0&mF-+U>E_r{Jz? z7)ePqUYeej4lHe;G{u>D&m<4)2NiSWv%2OwbdX9h-cr%mU?=5Xt>3B?s9qq(8FXZ0x;%K#QR>9pH zpVHwe9BqmX;cF$j^1Ue;=P&te@vf(N@ab}Ah?j#f=&)y&;zuQzr5<0J%7pd)m8aa1 zer;u5#M3$UDz-fCuMN_CR1{OE7Pq}6dG>s!2eVO=L;!ivNR%BXjGs|(ieJ4}Lze_5 zOFcbJGf|5EIh)t-yu!;Av)g7PmdaPIQRg;9KJ7-~PWxUa(ctKJl~9yHIB>n;#2*(s z@u-w4@T08#IK7#|rDlue&r6ZlgDCBA10r$I7V=sDEU~9Z6ySQ?eHg#4Z4AP8mj)Z4 zOBCcf(t31VaEAV@sWD|RE~dqos~G2b>J*8fyj!OJu&;n0viiwpDFer+{+gx>W`cHD zm>@Atm9&cuk_#kq`c%V-wpP8jGR8QY6x<`0z5XtJT)4R(eP7Je5bgl6B5PTSavOB> zZ31MKN};XfGw}nFu{zv?Q{4tJGgRGYsOs3z(+S1xAPDp+yoYe}Ee)8zDGp=*gzP@5_K24mR%8n_RJA3ZGK_}n#ZYXXH6D_-a z0r{)`LZNpfjow=Nfmz>@Tg4azkSSC2mM_LJNBQuo43J-xB{4E?Su~SW@Ix;uPDLy4 zQzJ44q;h>(8~qsf-=e9N!J9Nm$4XNJ?E4aYu5jETUUQj!wk^(@3ndb=#AJq$L(|m% zDIki+=0b4_3t&bcM?y<{0HF+N=!*L46>#C<*KpxuZeCB=pS71?bI*LHe-aRz7HGN~ zV9QNXA^dfn=lWn)m{jK6zm-3>ExQ)Te%_E^b+=>0w@&j;>rO*bGXlm@*B?LC%%Cp4Q-pLfRDjA)cKm(Ek@tm;uyPseqz#Od z1FZnxT6K11F##hgAvf*&ni|9&Y0l(t3bPATn>y$Pk&f#!Ku=}x77Evqd|Zc>>9ci* z0D|8mGpA|hB}+K7*N$eAE<9$wefMYG{({|XAaS~?<^K8XGzs;SCn%QGq&Nz5M08dQ zbPgf$o#TQRPHgKhos!-PRw#VSs+L!eKZSe+FYPr;2!27J9?HMM2CjMkaCz9jx?^Ri z7qCxnl|;Wk+RyjyAY}`~5@3)B+3#K1Y_~GmuW04`$Gf;OG z@+H`YVmmqIwB`PmZ%F5!(pB{MeIZXaOmq`bPe_rjG2d405&M zZ_XK{`H)ClBA+U6{YqkdgG`V!EutWb5$beVL6x}z;@Y~uA$(@Z*+OK>lw!o(2ymZj zLu;s#F6|sWuGb>5@`weTvg^%$w+C2;*Pq;~TXDQG8g;f6<{+N3(9eo$#OqqD9WX{Q z<@tVVMB{EoN1`Eqm)5sM0+K#6A+Ni({-3X#U2Ywojsz zV(iJ<;Sjw`P+0ADhd|QnRUy$8{pB1B5~O7E3KCjB-N%#J5U=Vrv7aP-8kPe&=XYrLCjfFptv)DI1P34lMJU>^Z=dSGvHK3TsO@4l8 z%P2pKIN7XUYz$VPx^@0$&Wl;9a}#8)8r~v~db&~) zAQhj6a#1!pWvbRT?Df~~X7kNR+o>caS;@^H1qkINiV5Kf``P;TN!W z=LQU!rpcqo_0Q8IkfULNrolYQKuyYU3VmeQj{s@G02!&8(p(n;K~tTHi`h!&2FoOw z=h6gy#h>-jh$N{uhJQiw6NBV4r3tr0m&l)&($G(E7-~DqcJj~oUY7Z(Yvkcrk?(Y< zt(W9e7oJ!pl(Yp44c1f)cQI>zQR&~?@gg==k1952Wv;PNhQ+C)$Ybzq5?WXp8t8j! zL;v_UCr-NsP|1)#ad7{Zkb@@CvVfA0&IL5*&*Bg=+hy~I9s4AX?9-@#JSVUgDA6QP ziX#QV!HK7;-C?}xjaU>HV)IzZx?2NVUu)J8pBqbpdyYVf^YKa<$F@EmO6(^1=I(*m zbcd)2pAvolE9>2tHs?v+HhubGFM}7KYeiS86SgbF==?k*s!uf{z#Wt@3GCvu7`Gcu znuA149zWMH0JF~QY=gGoTh|>qdE_(V*mwS%Rbd%*_PlKsait=Q(Lx6Dq?R0X+ljnN zYVX5oslbvnB_68}1;-jXwf^G6Vtm`&Cr{us#8uz&Pu!4I$SWGA7esS3{;JZkN6kS0 z263C(fwKuv3#uNbKiy5nH=V`DZEXS_?IuTXRFyQvPAtkk>ZYss(Tch?kZUUUG47hy zm!VTz$f}BFwNFU!Red0a4R1)c-$?ZMwaw^FC^!#g|C_>VmM7>fU*VR+=p46p2hVKm z+5P^K#<9!yKj|=d2x63=gv4QPi;+I*SU#!I1Fw>?KeR^{=k#+64gj@=*E=~?4L6we zl9`wJ<>La1oZR_ph9Wt1UGwXPOZXmM1U>A1650VsD;$3_EoOH39xq5=P(SbcGHsZ4 zy)`1*hp@0;pkb{56F$S@dp8T^@7&Tr&yatbhu@%fvi$xby?1KAvZfGHabQQI*B;KP zKg-?ljI|3t>tWD2%$~BV5%cNgyYS1xr5Bpgnk^PyGB~yDj91BP&su&x9l$2abd+kA zv_V}6#@5&!aQV~=3S;}+611?g9KP|sQ&BH1xvIw@JWVNmZZ-wS&nk_#@vkdhg7i`BZaptyqVsRiqD11peTONh z$+veC!bP*R;kv_5LGL8EYSU-;-T&UGc_g**e!IHZ#X@w#C?a{lExt zD^8CAH^B}C=t8EHxFg-Wp(In6bdHk;y3CM2g0M(1drO>OULcXPxi$d-YKj#qF>l|5 z?W{7}!lISe`10wQ6l?M-%9cto8QiaCfGtPcM5u)+l~$RKwHDx_^wD+6=c_!~HG_N_xRXFapsi5u1VyAz@B-V0g>|M1(llpMR+wzF=bD z3e~FqIz$QlT6(^SOhlz_l?iOYCV!a;5dk24VVbUTKbFbAjkY%aagud%lG!VbwQqU3 zew@B(vAP*{)AYwrum*0}F$DIo=bZ5EKi#iJRE`cN&#$#l0K?z(s5lz_b*b?8SXD*EYeMaNX^E%#x(Aw6GZO#_ z2?c(u%j0Be0+?1yh%P6>t#8xlJ7rW6hRp#cvln|bXK@^RDyt+1vB|ph<&N?{Cxk}1 zR-Cy>lXmAp|1uS#oLgF$6`YsR(6iNR@i)JBrjd7ME8Bk7 zvhKgE=`guBPo;v|uV7RGSGTswhqY%wrwV^AT_%`u2#$=s+U0CKp47KL)raJYE^PtV z2Xq=w14uZ5r^zz7ln8;;PPf^0CHQH)nlPx3F@$sOSJjbUx4u;_lGGn@#N2T)P7L2P zQFTJ;~=QT&)b%<&fwH&Wmc>umvv<3(OuBw zurlCvej~SseYJY>82QNIKp^ICg1?8BFJBK^AS_|fMGo~c3U zxKA<1=)XGu1HT0Ezt7sm609}>{T#1?WYe-;n;XoP5&-1Ds{F9-IzAZcqj2Au--Rb( zR={phGpAPPi|8~JbD}8OV7S1&t6EJQ;*K=*+agK&FB) z{8L3Ecr6i;ylz^JyVkGWhqeXNJM}b$q2J?nje0~U(6=9#CVFZ-K>4A#qm6-lZE&_Xi%HL!9;N>B{0#!L-n)iBg=ZFG-|NSW01#)<(_TNB1Qa<0Yh$A2+_5>(;P zPwYQw_tCW}Xj?{h4pK}*$ZBwrxAM*!BcKRqmccNZo`p^KKI&Jz&XLU=UOaRjG*z|Y zcjZ(=%-W@`fUg)Jk|2H5f{mF>r6~XGZ#vt#Ln5} z+us`FManPdldT3FAvSpx&xD%kT?lB(#-UP;LGbI`9|p@gd%JKD*uDiCXTPqLr)_Jr z?dHer40kYMU5IsK`14?S=OZsdeOF08%(}+gYBM>!4|4{G-<$qbh2Mt#$MnA~A%M!^ z2p$2fG{W?Lj$(bsUGSmfxzsloO)3-mT3$_uZaNLIF*q#)pW}a+b4Q~e<#MoEGN)BF zcH=l07)_zgE(lEfRrvPrYLlf;-!1vo~M;On&3w;3jlap@noi z>HK>Z>i{bmfoTF$f#|MsS6gf?eZd&suK`B*F-9h%YVf65-Pu(_oHyVsWFj$5sG`%)| zZ`9i%N*ukyT!xsHouumjS}PbHs1Xe{PB9huaZORVq?&Kq55bfnl)}rJ+-9noaWK>~ z#~h4Bo(n0z3?&|&ITq=NM<)#hb>p+pd(C#yN?K0BzZz$aaC5j^?5Y(lh9T&U&fl*l zE~`b)gKY3;Ee7HcLe86rDE&c1%@j|&7FV@=GN%XxOYm4B55Q=d;iQ!M=>L~UfwW81LEyCnMi9e-elY zTFK6{{-Iam3ZoR?4|xHg|GU?e3meJdA9OCn?>&kt?xLcGxgo00pnnVBw+F^L} zZ84wkb1jD7&oB79lpJ3|E{^>-XQ#ManNkQ%uov^Gj}{LncPQyhraL~yZP0Fk5=hl> zT1?d5V3EEg!^6CL>DCx4- zAnDtG3)JuYSSiP-i|yZ}c5k{YBc}NbePFPUAa&^)VG7!((~Hs4SQquIc_JQm zm+Gipm~QjszSF5QE?Db;2Jg_b@tl~d#S0tD**;+8o3CbhUH&3zL*YX zq`A@%8~lshDe}H&1AL?Z8#rjVOL8iPUU7O*jm(VMKbyseleL@Kncc{o^P&`_YBRR2DqH@+U4Im)kJ*}Rf^LH|f18%`=ov9m+ZmdKGWXRx3JZm2>LkzL%Vp$ur>aaw>LCFa?0G8T62s7x6gSBvd z@4L;1vi1JZ1)rG7yU2kOlCx$>k{_KR#qgWmPdWbyw$f;pj>an{lhe|VN#Sz|3dFC~ z2p>b)t)M-z4R(#=>*`})7)F2<3preZvx^0Uz&XigjRX-?w3x{%30FZ~QP+tz8VmfK zHc5N4BAg(Y?)KtV1{kBv5R~_@1N6Z5dUuoEmtEZNX3AUR*aU-Yt8v`5q`srLX{X-R z@u=Pi=Bi1n=nMb*=@LYL||LH-w941|cbp?`)CKmGo` zJImj0(U=m+uGR!*vhj5cg^!H5Rvvt@jXY)W}c9tTHF$zg6-o7lZ~@p3@mDr!FKC^K!R)LOUgu!xc7M**ai zeCe6AI`HRl+%r9&^{1oJp(oQ)?W6eoCSd+VD3WeMn%Cmj<1yYYVqnSNz5*RE#{FQI zX7p?_H=w#w*L+Hd6IXFAj;~?^_qCq;EuBznB>dE=V;i38-9RWp8v!F0w@zc&-`Z5n$Yf1!7xtXwvuz(^YeviZ*29j|<0NQ9Q%76f zo>1TXN;XSGZ?p5dGA&J4EjXTDneL{;U8_6v-$G%a?g0Gv9xTY`|CTFC5_=hNscl?0 zW^6E8^?cV!fNXJN5)M|$@qDSk3W=&2m;rLRW9(sELP!t37IWVPF!jN_$CZt&Y?^YU zD8@&;Tg8A?zWSExFpT@yRZKvRna2X4xs^>}hny*5ZCJqhDxrj2gRAvTh*L2UvPJ=` zcL~ipx(A|6F7oyN))bfoWGX^2{>tICq38W*=q3SVjCbPWxLpZZS}u2rnEDrLE#VUWH)tWplO4HUIhm>DTru?bD18YW-V@TUGn6$9i{=XYGs*~O74BCYtL8DQTyE&&Yu9yecjhQ|`R7DVRz z;d}5vVrfGch;^ruCq5cJgA*s}SC!$yD0e)|R~OhnEd|LCFu5Yg~%TRoQ}aCbecW#on(eh@3> zZorDC6>y9!QycbMG%kQpElQGX%R$Axin1!~T3}XT+baNBLk&!`XzbCMEq^ zVs&$$1g*IcmRQ!2ULi$YMb%h+?O}vr&5!gDjG6_d`NHG;XXHi4rN+lk`h~|)Q6ZhX z#q^E61Ja+0+@SEufuL+5h?<^Kr?>?*pcPS|Wz>0c-Z{Fu-YQ`13T?3LIgW-`JsTOG zyFRA6K0^!ywF0JMmmW^Bw@FOFm3jD5{2^-*l`*ESC)aSU@oX^$~u}q8L z#dVtmEc{PA*#rWv5@kcRRiPfxrN@#gxFOR`C-3=5LRd$K>5fm@7kn099Y!%YyZB@v z7aXyGyaC{QKr1#>B({E?e+X~v0&!AZee8a1!VcdJC+&}}WpFT~4K|I`vqmQS^pk1L z(EYtv?6@W&!wi4g^7?&+9ddN^2BY?sMGb;EG8xFjX7gv$W7m=1nzvJZ4$I_Z|C?g= zih=JXoFuCqKQi4T_e&bgZrFs4cF!T2_h*_72HQdzJB70iD$dyOuTGp}MqWC#Yt++` z-~38n2RX@=YgnN+Vj$Q8#SG5xtB*?Mc+7_G>z*0v>&^D*A9buC{v$uLNw8%+U1%|^JWpqA<~kd zm<2EoV2&W>DEaC-2QY_-o(7I5XthsbpkyvgwO&31SK{vHEES!fL4%t*)i}&L@Kf;r z&$|XHN6-+ycZ4-Maz2UyFc)IbDQ-z>Qri7J-yN<%p#hN*9LeXdpHjzsFa%!|?`~OL z^W|mKzY{dgS!LBr3#u?kx_x>{5VXOk7ll;%*A<_I)K^$b)pxh7c`=yxDA zTOt4ioFxvu5|M7&1RRmC9!NX1n#!?;)}8aZn6Lp(z~9&tX~xNFeBb+1sOck7$9}aUft=@#$WW5d=DBJG;I;*T=j$ zwYJRul2K#~hd7F7U>IdqjC^E*v<6n4ke#WGRs2CAc`%#B+AA42e-z(q`8_?X9BBKnE7?)^bDWfngFZ zK0@YN>_&b8torZ{*8UZ_iDv7D{HV$cU%OxS?q#v7Pp4>08jJ$ewDT6g37gFWm#3)5NXEYbIPu=Qfa{EH&{ ziE4SQ#LYVa)}r((8Jq?HN7r&k=GHgGost<42v3HQ`NnmDS~HQ;gakPN^WU=!o&!tv zn`kUfI*02Cny>JkqUliD(Hhxs=VTX+O*LBU|EU=owrmdXBeJT;f0mnXB_{N@LELOu zaB%mj1DFpk8fT?&RICSG_8@h3BNL$~?WC1q>8(p+&KHK9>3PSQl@{y!qU$pRleohP z)U)MHAi3m%>xwY$DPx~qlOsl^H%<+G?6$z>&me5j#{}vc#8n?cU z2Gc`_8s~cLZ;Z6QQax%x`=TE)b*e}`Oc>yeFfAA%m{drDwMH1+5J~9KQGUmx&MdL4obYji)L)_9C zVLG2z_fgxss@-i)UwA;Rsei9*B0VobWJKu|f^4b4Zlm!FP!~+KfP~}0VnN8#pKF8# zrp4o5$yb1yrK8Gap-OR^;mo){e4mymBt4F~Z}i*bA6%#UP1lZ|oP9GiD2O9!IcLV= z@+jVk?6%uRV5>ljd7Amwe-Ld@il8uTZ(aTahh>SM1 z-$!ZQ7v}()$ui(Bm@y2WtUmq?z3JJ0Fm-s|%37krqu(y!4Um+C$S-xkbYCW90`<^{ z-W?fE>piQU>+TyHW)yiAANE=FFZ#5!$#8*%s6YNL8i5*YmV-%+GrF+xd$hpVInxh) zhLw-i0Ti;Z1<=r@S#~6M6}y0bt-?h>Ml!AN64GZnnMWE62w)OWwf~72)Ee|c{MXB> z1NnD%z4Yo!Z;Ek)NE&a(&FhM%n#tsB2qvgEiVwTtCwPkn8xpbt>@E%ZYlkUCNZym= z#lbZ*q6U9iKm*T)Z)$odk6Tcq)W$#zd z0f&nMi#UNe#3!*gx+#QH{}M!$gSjUUtud1M5D1T{OpSb=O(>{oWmLun*96npU#E*5 zHKgr2u&*7|3^G1weRH@VAtC?vNlD{GWtRR28)k4a8xV5VQEx|k#y2i_Z7$;jl8~(> z{4dcUYJox(1|$n=iP8V=i0csx+YlD(W_WanE&pzOZN;#x#j3J*k6#$157qWPu*VY+ zrVFuSbW&+sG?qxJs1Sj=9S3c9uVm7r5ukhZlCN8hN?xd>JAGlibicT4@oI?rCcEMG z;?fb$3vP=U^smEF-o_UCXRZ3le24cx$_SLtxC=7mlY|x0gfMQ^4p->DA!t-_N!tjM zsBn0VZeb72R)%2m^8R!G&CtMJSP()!oC!19{OQDK6s3d(MC-~}1uq>QlCNj_gJxydaG0G<)@AY)8$Cil1I=}*RFd$9{G?ZXL8 zh;c~q3Wx#qnYI9|AnX}oWpXPFioxLS!zJh5JEUxH{UR>M0&KQfNKuUV(juA;|7`l6ak?uYN`hVW% zJny;AxAWm#*UJa4duGqQW9=2cz4qFBNxD9w5KQb;Fji7uCm!No;Vm?gj{DdJRLzfV zKP(hLBU+V4jtLn+_)-5A!oY8b6*aUwN;d+TJ*}BM+X^Zm#YfCuW@)iqYe-s*EDp6P z1qmc?9PJjdIt4EG$kId|FB{mq^3wf&*twIU#ytxuFX?b=RZ7@=`NMK3@NHG1wqHZ{ z$YOHX6ic$@lkJFOnGv@}_BcLx_P+GNkbLxHJYBKs{fZMpQA|QA1Fu;+%@a7I0WjR_QLmY>o`x< zcsufAEDz*Jv)wQmOB@KH5-#(T8<;Vj@NvVC8XhHEGVZ|O%k+Hl8NICp;r{H|ArhOI z7*`d}8 zcN6wh zR}2esHvtKKzD&@wPgr9kMyQDSvFJVzlX=tg#>luIoc{NJY+ z#3i}FF5Cngv~tPfd>Dq2sE?rrf2E>C$nTir`i@G068a482(xHqu_jEjjan?Uj)0Hq zv5Gcsn2E^V+|Eb0E5Ke$>Px#1sKJ2`f^LC~FNrI|ty0$NVSg$FFvn0!!4DdsEyatj z7s(9ZFDHub9pc%8>$^w6&*8aB8{#Iykocr4*LZo}qaIY8a1-Kj=K=FCm#2ob<%9wp zF8jsfG=peVz|?v&6ZngZPl79dy0C`0V68TL@5&Eeu|E3|P4tC^T~7_u`n@nj68nCT z=%V)%E82T!q=_7aaaRZklp0)Zy6C~sE29z|VilCi6`74v-uExp5%Kfed9#m-j1Lb! zLE_8He|>^o%|cu@wBsijsam7|oj739jBtPYV=>i%Pgv;Do+6*l-F%KO8vAdD0Uxo1(j;Fst@~B=1YGcojfm~ z&_mq#@J#o|zPl6#JOkPsRm-tR>7KRvZ zOb!HWaB=FWxiPp`K&b=wnvW0l(;dfbCPo(1LcAl65?DSL!C0S~LxcZ=AZfDjdHQWm@F9iF@lb4kc3a%MQ* zj_mFB%#Pgd=l5TR!h)Vq!097|%ctd*Le5_)K6$A%SW0di=HbbdX2Wr%*T`hnRSLmx zD1IL7j}on?R)`;dK5ig|G&D;Sp5!gM(9MUigB0-L?dlM8;kO;>Dp&jd1Bl&)JJDnI z8Q|eo6bw7NPfld57=>dMFj}ZxD>vyZHn1p+hwAT>O#Vm>lIl)Gulwy}H({bz_jpHp zB(<{4q9h5vbgH2gDrvyu(AzMz#Jp3>!WS)DK^o*Do_%Z+zXI(yqk|9hKn_mcuR}$= zbOSzey~&>0`0lf>OJf((3l}6ev5zp?m&GUtG5G9CGB$vFFPpvA*|bOWIY`QaMAdI4EGI^JoBFHoK&wK1JThDn~zX76%j@346UnXLk&p1q|0Otez$GBef2{x zKt4?i`EKd0DDb%`{M~eT02AqZf@^q;RBEG7vBi%f)Oq;LLEQitR#qJXsr|j`rwN98 zsHwcY`pk8-jYBf?XkXZW@<6QoMzFKmasI&5YH*%DUF~;L$4lic=9Y#fik9^Wwal?y zd)<7Cx~I-E-H(8_l3%Vd9;CZ2Ut_z?%?4&N6uS*BnTNp6O1v&AI`4MT_QWaEk!7kG zbFVbFX@H2#>1bUQ_tif__M>|wK7~|S33*m+Ani0E!IsaU-rz&>ly-km(smuq3DT^%6Pmyc=!6lrjO=4odDK^?x2~JiWI}dF4L97cSwb1@ zSf|gHu^--lsO4ONau;Q}EzrB_B>{R1c8Uq+eVG(qu=}$gefE|9Ix@lV5d3n=xmwXg zBcUSMzSfSv`#tw0Ut+p9HS!lHgp8R+2M1({nJ^~mF|>z01USB+%6qv{xgPTJk*pUV zpU^*zihUvo_Z7e2H0B71WcPcp%Lpop?H22KtD3t05l^&V1hki`ry@T2=Z*Wl7RqTT zq4fi0;0@zZIvYo^UKV5m7iI5P=X<`~T*zzaSZHDQ<&Zll zyfpH@2rzuvh-vuOJR>K=V^x<9rJU}61mDA=Vy|E4X@m~3k>*@%$0~Mrl^!$Q{*l)Q zlCvSNEXYm~Jukrg1MFLn57Rt`L(>9pNv0jsxQQQnu%Gyd`)4b@r2j*Yg2Qo_ z5)|NgF1Dw_<;~wtJ_Dl%93+404?_Q>v{yte>-Gva5=kPW>(7WN zKMy05(n2V+r!G!~W(+dq&B4CB^J5$6{^wm`?`WC|M0=9m7QUUxa&Bfs5LmzMQq$w{ zO~j9muziSL)7fTP`ICF`5js6$=xDUxPb_M#JdCgR)cDJW#>f)cc%Ymp_kB&y^%!zV zf7h{>tnWrs zD16*UDX&B{c)k7*_Olo=sOkda)o=ob*1u?liwh4n>0kL>z<|RfeoDiVd6dN&3(75S zsC%OiL5?dbsy1ofDJ8N=bMQ^ZI~%U4JuCe6%KD@@%0@$4Cshbrq8x*4;lVfa6Mt#x z?Y+s_8h;$;u7H7wMDT}c;k>h-CMJh$`a=2t5LRXO31~K^uy^8cIbGK@vdpqKT+Ea_ zoJ4+xr{6@Tf9fQlosXEK)Dn7V56D}D$5!>~{}or7=fM@FhXu#E%vp3(Ya9>h-tO5n z_!};61vfo#TCni)<3d_XLTffNW8;NP>VCU#0VXgjR*`M0_>D4;p@KqQ7Iov616dU6 z8--C?0UgBz&C*-fCiS}NzUtq=k2r#p25#88H{+f$o_{9(Kw)XC8AFqIUmkwDja0J= z-Vx$IqJhp4{2Bcxz{X*7yTZTP|Ed1(L$K6KTzql9 zh|#SW7nFoXH;fzTqVYGPBnr)t9qYy_NlxHpal+~0qenEd@>1fOX2PYj9_euj-TUZx!?O?3T+=@igCjMWRJk3QqH-{6)%xNrZ}&iwyb zqlbPVC)V?a1@FK2k`foLnvk->YX9f0dn*19HU5o5{=c9`VK8^hhuVDA%$N_RW}p6( zi-&{6WMpJQ9j0*kg1_;z39!nY7Qgz!_Al zPRBS~{o(C8^eWER#gR&WvB`Xu)VrGjzsA{-N}Q}4i}1dpm)t0@;~a|rR5Wrzi2Had z&2BL-{_I)(mq3e2z@c#Za6U!+zm)qwx~T&mtipFN$Zv`DV8{PWZZVYMax0M;phW&} zv+^Ll__1Q65mnCs4*Geo7Wz5N|Fa{Hg5T3TiguRB@K7TDZ|~mYj1J+mul)b1aA;_P z6O2{wc2);cb7|MsIBa;4ZB>RV&j=xNIHGx~9^~JuW9Aw1Egp}LNY6B%llfR|pP};xm{$8FQsh({`QDoq&(a0^{X* z6`S>qRptoPiqpoVZX(9NSQX$NCz8&UxdECh=<$pw6a3D%ZmzFB{L z2`;nG2??Grl)urw0l6QLaC;it5vMPHX3}vU-@BNnfQVYLWv#qe zv*As^c0nAW@Ex6l6C};Yk~`P_=Bn_!>PCN_H>`GKf~|I>YshnCqEv9x#K)7d$N8Gf z>++_aSjRq-n&*gVt6v|(2Cdp^ze zJnN?>Yr=Kf?d{&`7}z-Zaxu+D6bu+&824~HIxLIa%9~6cd3#xn9lv`z2N4@V@S2x?yRP|qPz9F!R0f>b+3iS^~F7T*j?z7Tlo3<}e?$Yzf z=B?>d5^R3;X>lMjnfTrQ{9a)3R>QNf@K#-MCy=n*PFQS~ujAmi8#_YCPxI~fg=ja? zeT(^b{x-sf^>farJjQov3-i8T1`P^q>*MR{x6kGXFR$W&374fD$HViJkc!dR&6;`a ztNQ(Qu=Q3Z{(86@v??oy5B+g;sQ~xeDyi}2qaN7Y%$#fItUE@S9$lu3mNJ(bDF5*MC(SC|Z~RDgNyEdodFgn^elb5;Xd&H)<>kc%+X0 zrFbzm-{qjfyg{lR;F#RV$r5PsVy6fubEZrxdBNkmwKJW@T+t|?is1$&N|WvVkD8KL=%2RijLBFSlay`!Flu=CYu9xv&i|*aS<@zxt97 zOD{g1m=xU`(r7k>Zf`zsIoYtCft>55@|ixb(U3SGfnN?%33ple% zjSkmSArhX>xB1VEvBCl#XN}!)?=(CvJR5JAS zv9@Oe+%yI6?#+K1+?!G+iU+-46VRzF4k3QFAd>?c$b-qoOoCU3TKR26ST2P@ZVQIc zx%pTjLnya()Kv-$W-R$-VB9;h4rVZ#M{p1EWfnXM0DGgMbPLnEh8i&=5oO@=+na)` z!^EiVX;$9kDWkpv{?bV8=88DmRDGMN_57uxmg1e!L9bH&18lV%p;UJb-tEy+(e?3( zk@X^5*9%`y`xsS9T-@M~svrbFSl59VWkl*oeW7SBQ{l&7e$$88ni^^H)5_ZgQp)Ts zr4pJj+Mg3?+&(Tro8)bpwYD z0V#`=vLSXvkUM=l?j83&;bW6K=CA;-=A7?BB?GQWLl>XnC(r&Y1VQ6g&y{y`kjp%c zbMwBN^1I_oQjGd6D2u}MBw4)riNM*k#soEt>U0B-wUpy8Nwi+oP`XsCP^q~h%)6*Q z86Qs?UA(L9=7!kn9UZC*EUMbr!q<4kgfnJ^_TzgNfOWZ|Ii_K!ptM z3a|&+8Sagd|4f5D9?P_Mf~}pPh7%xPxVTpL#LnsYs*{h>ImS)p%=wMxj*+tQcNz`0 z*BCnaKU^2c;Q9vO9&Y4Q_6gLqCt_P#Lf-08@zwm~Y6Ps9hBwO_MKsJCjkyUGKH=>S z2F)(UM!CR;ltYodF5n42XgBZDQc3*wi0%fUuW{1UBK4wfLA*4*zEM~;!WC#v2RMmk z(FN*jj1m!Tnz_CL0AWv$VFubvCt^kc>SWwuwuwnQ?&|o4Lf=?xkxu4213o$fsSmdW zpeq4uJAjxtH|9O73{aqJwgK>!t*!q}q6f!W(Z&(%4iX(Vo z_3cdneY8(3v1ChOB;nnSau~`{PBC=kva$6T^2KU14<#mQ>%_pI#^|)2@%#A_0fgr}5)LNv?NnX!f z>b=4=jAA`y>5fDn|Rah6kBn9)3X=Ha~B2HKG|&;Io)5H67gLV=FajZr~^}U zyL4XuDx?VHHh|g^1HaYK5*2_74>Q3c@i7kPaWN(OiupdzhAVlUfn3JWO(wu@+_|1* zLh);A3KuJ^Q}}IMS^o6vK0wv5>%r<^-~J7J)c zhMcI6*K>ec2ptL05w)Ccq@M#T;|B-)yh_b?YM_J?$OixMt>`A$8|?G(Ee{N!aQO>3 zmZPsDkhLrPOGh8Cd&d-^tzl7yfu*n=fHKzkow#8`|E9p>3RS>hd@jr1atFc3$pho$ zcEOslkv|G`ZZ34;5+-=;vW2oetx?L@r8|AFAZoK5Ow35?f-?{49*)tO9DCzHFfvA< zWl)g!j!5x)vqZDU$qU+L?P0FKK=BbZ=x34aJKY2cQO5d@J`7x50nFADStwe8HuFw^ z#KU?=IJ((1UmI4Qk1!Gp{F3^0hIeP`>uYdRxC&NlS{K%)>ioEIKCk6SNc7bhN6A%m zvPNf{_=5kb!;C+Flw;a4F7~dqkoARYh8)UQeOCn*qjbF@?!CxQdbIgC#(9(KU{zj^ zft)CNbJjyaWVN^E!aE)qKPm zSX}z8vhf}9VQACV>sm!-dzHJ3pQwy2>hEFc5CBMl<@$|bYOBbBck{Orqqu_Tc7{Zp zw)#PyZT7DL91SEpw;OysgF9lfM69_(P4C{sA78>~KyJ$og8;cQF!&dLl1?!+PNq_4twS8CTxu>RP;7$Gmtc-+>&E2DjN1}$;@k%_Ku8pIsz=WHQi(|x8 z*;2`pE`GpLMP=i63fI-SdDY#{V7=8~%BFz4$Lokx4h|e0GCaTFE^9w2aSZfGO7v_h z%3(3lQyndCZe=cfis9NtU;M`6#-iPp%*rB|&e?=Rrgw{8H8$+f&NTI!Z=qj73u$W4 z?V27p59JU&VUS|>uAE#jfu!OPaGVKqJt&5DnXb@?HdrDFVCOVBl|RW)aUvbJx*_2l zh8oX~R=6%;F{`A!Ty3`RnrRu0_Wkt9jk9Off4{*^{_*iMXQLMJ+X$MbJso{ zYw{zc>y3*-_O}AgRa%X#ePmto7#`^?e4oBE#O4&sAsPY^rdO+3zwKIo#85RGP_S4+ z;n(*_T)HW>Vqe{&Fje{LK9rt0ZFOLYGi!)%9EG#MBo!hwc^+p#nla~%9v~;>WBv3K zD|}Bt*<;-Bhm|+cL0U;gz0YYlaggZKJOiyuZFSVT%jU;D1}B^wvasQi89MBz!R)HU z(LvYkd<#jRQYNT=Cq1(L{TsQiw^%vSb=kV(Cy|9{PwuFMaV=EicZJCN$s}d51SK|= zEyL0p$dO0Rtl2VnkA&1l<){6f!tZ#&7g>;Y+<4CKLzh-Dzku_1g73SH(lFz(rd&3Q z$njoKo5bul&o89!?RhHbtW}vma3!l+RT07K{rr$nb5mz z6~;v?@-jvIXt~F~g@+h0Gq?7y{l`3;MA&IV^>6sgP%C0Z!;EudgE~h)5;i=+BpEw^ z=-(Yg>6j^}DT6qUM^F4IUQ?iC40z)&(+Lxb23+ytxL5po%X}xOVK<_dK(s48($4?Y z(ps$Ti_BK9FoMfLTPWWGP=A>T#ZfgVcz>ti;#jZoG>U$2?B%x&%7D7lZ`wMtl!UE~ zsBTGYZ3ia}=j}6lV;4DGN!0B-GNluzv=nG5xD@%({j25OeMbzm;c}lTrzBHXOq;lr zUBZ>)34C@5n}JL_Nv7^=eYj7Xw89kaB0#tl=|_RNVOxLwLO0CDYA zoe@RC@;Y9V&Rvr?o?%^gi3TsS4*kr@7wH(hGl~UPz7M=670Mj-bT?V=>rYXtpq5rv zoWK*GT6|iH+BP@@EbH3Js3ZB5=T&_!`8&V~$)oS5hX&(q!18mbt#Fw}63fqnyJ_k> zqlEmvJcAs|B_5laot4T8Icx0kf^f0XI z%!Hxz!n12wy4zig9%J7yYs<+DP7nXOnCFEBdl9dZv0vY6cTH{Z@<{mJ?w7Drlg*f( zA8Gi=i#zYq=_Zk+_4hv*fa%G~6IU<8n5+tM5Er+-z_ai)D&#^4hV9XGvhMXqap!GF zCeco+Vm1-gikLe4hOLP!-c8yz7uE7n)zBoXN#eJ;Cwgq)s2xmWOVA1NZ{M`J3kONc zpCP-uF)yr}kOn;2%9Kx|IL}kBz{zc(z1Eg1|4XCs*`ot4uTr#|bC={s4b`(9{(NC4 z9U1vV=mDDBtgqgJTpe0s#s`Y5G7NxBwOleYRT6+g(V2 zt`^;$qRpt@Bp;9PpKYEun6oe>cNWQhzB-AjOFD~T>c{p9eS~D{G=<5GghDTd@c2y- z;?r-B9v-n$#1>m~B$HUUvj2SX?`5%%kd~KStU&+#_@6R%B}Ai<>IL)njg zbaq^Fa?cHg|E&>tSv$=SF|V&nDJ=i31OJa22}V=IbF zd08Fb$1>dhls^Pt>Vy-n#S2<)eP5-Y{F2{7_Y|{ud7SOhLT95QW`*X;B5N8p3)$7Qbg>cH=(JJEFb*41$OzFD5AWVA_ zY?{MycahK0hs+0hY6_`cIXxtm({_n~)gAz8W1@8X!d<7<~> z#GW7e+cvbDQx8wPi6vx+*$$!#I0$Ze7MQM2p|v0p1`o? ztHKH7cUEDE&3uVfsA{n8taECyx3lTM4he~!Ir?I9cy|#GWqgvDat`*+^!cbG5wPVXrOK7mm;ORgAY>xXY?r-OU%$BCSx22&l3VnU* z-()QH%;L@_UgeEIfkMdDD;oONdK{Q%a?v_RG`dEzV3j%3snM4-6f+3OPPQ|GD&6Uf z0CTbwF)4Cq%DqH8=?kUMTo4mr(uZ*0j(@izgJP#h${_riwl8K(dXaXrsP|g-+}m>M zA3~K@{;gSm6v25mTxS^16_*g)i!9qy_Mt$+;w~GqID!lxJhoetPYLQ&26gYE6uQB! z2V5m`!>!p^tAs0cPR)`1=q#5_J~xkB1r`cG7K6QA+s`n+%YTjxaybvv$&hdrd+(R@=|zg}d6UHvhnr@2V|LaRf>~Z} z!0Fqy<_q|_O-hy7f|4schFK;XJ$?Hl_-)&**IKGdjAr?#TD)RVVE@>LpR9~yhXl1#R5TqQ&7eV=ZqU;sdaI)5~3 zkm}>(x%9JNGZpH8G=>(Uc4-!op?2PECDeq8Qb>ZO3Jk{Qp215il6lWAi;BQYqfo<} zJ;F*7cUR;F?$86+uvpi+qnJ;JRPZGmvy}eIKah?-%py1cN$}+*OY`6uol}VN2s^ib z^s2N4A7J=Geo?3CC9)c)qSG+qTQG+_dvCR=`}2JVjZEk#f>Am43zLTrl zZY@L_pG*Xtpr;k+qI{=oPtcFHOew~Uit`~qH5gK}`3hp{+8QMO&AKT8tfq$%Sc+lB zUTyqj_)8Er40hbpyp~^$_flD}icEToKlbFayMhulu4IsIVb}YKf4RG85(bcbi%Hvb z0bP)ok;!G(m5YixRn3hBFN#pPt%n}1Hr+jh zc0BtoOi`oo@JO<$a_|Bz^wtTW&GL13`^lzZasgAk_Vm^-=PA24vxV_|^Wo(IHz1mC z6M*bvKQzvKIh~z8q8cmjb?+tm9A;+heh#kWm3%Jhk+C-)R(gxkKTSAd(R)WLosL;d z2dRAtc8wQnwBu#Wa-wnPNv_%H)r9<-Lx2^AVr*$B@frFNbat~b=u+~Vr(gDpJ}`h7 zf-#{9;)MQ24$Prc|lD$HCPq)PC{XGAp#egXkjz> zrHL9Csc32T%kfRkvJsyQegH@p+@zdb;sBU(uPa^vJAD zPOd79n68C=l4-Ai6gWa&dMvzqCzoDK(=@?es$72^%^`~fg}JosW7^v)N_5v*t!t1a z9O|bE)X)k~5U>wrJic2gvshxaiLH(9Dv>%Pv`YSTp7htwQ6t{$YF){-yv2UtgnfzN zT-~Z_sJjE_KBRL2YIh>ua%K_DS&$TWjOt+Ii8ZA`Vj-|2J7jW*PFBKMKfdggpsf{1 zGpHYPkyY`w!aXikR@s#Nwi2wc6PX@&gPx5%ev;dy7C)^e|k4GCtVKpbDjvU!AcjtG{w;2`GCYWav;438Obx_N&K{W0ZXEesmMLc0+hf#f|!WbuWOeQW*?M zs(CdHD;>H9+n^{Gxv+X!ii_D3ZskPKTyMd&*znj3XNs+v6f|RoKX9v+=9^vp$XfW5 z`LO;eTKfuP>(TAlj|I`)fiy0yNhQPQs|`%oH?S3$E4 zo$`>H#ybdgL_-td!Rx~xq6V+(fl>%dRwE|^ZG zWY!KlL6S1xo{-g}HFTTt3scbqo~;#T#4uT$ubJQ#F_sljtB58YF=m7VjN>7HId+j? zvjt_(7hoR!fE0gpqa2INNMYeJ6H9zLjB2b?5oZ-Gc8BF%v1P$V8`bJO=@9_vRMiOZ zFX-J$dbzqdf7VFr(oTirWhBKB9^hVJPqMl@iBIE4XW ze^8@8{SU(BE9d-*o-iaW;$bu^OWgSR~g`JtZtv=e>l z5T}D?!)vzgkMSauP^^RO1^@_(#?{T z2OIbAWnRH>>&G^Q!HoFd0PFvy892Bzc~y@7pB-R#zsJjisr+jHftQ1b(62>Fi zZ1(Q%>aOmp=c%egkd%ZVEEEP55D*Zoun?a#5YVS?ARyovh|eESw)0Eofq>9RjCgsa zgn4;!rK~OVjZF1`fE4}WoFJLy))(G6?^lL)llPVMX(tG;*(Z3(7T9MP_@g3W;&g@Z z{P=JCck6^%rx3FFrqicY8m-<~(;#cRSlJZ<}ArXuDr>$*=6&~pQx_yLP+w620>xa!N$neGctmD8>puDH6+;e>y58?S2}-J zIbdh!T?O`}Ks?z)Ji-p#Z~@9(T;(0q0@8FwWbx2d97I*;rY1ngg8)M9@N09R-bb}x z?wVd^t)FLwm7D44u0a27Qw3cl%c;wI zTt-81Ex(6jTJ2Yau!69oZ|<*HfM2${yr7)J+qMa7|;-d;h29#Fu%Tkh>|G_Z(ihhP5pU&kZm@b^C2N z+$aXxlYwewE}K!xCvtCw9#>OX2U9J<2`RxDt`&B6yIXtaPV96Z`wo^KAzLGr_j++C z$jX;8c#SI<>dLhwg9mv--!Yu!H)_I8`#oUyb3xkrl}&hnpRdx=nI&(I*Y`=_X3wv_ z1aq*UpSU)bITYvuT$hcezzMlOuXQ2|y~Mv%S^hGI{E^Oxi{UB|Ndoa|QqZ<~1c{IE z*spOoE(79d)*xW2^56px0moO9bDuponLj9lTvTnSS38Wurk>2zrhQx4tjJr+DSrci zcks0Xk{;^3?-+Tj>UHhB(OT2_OtYP+&ty+i`5X%w+=)Zvn>2VurViT>VzAFG9r`@y38w7960KYr01f8;w@>! z>|Z)S>uh>qwejLf1hh_@Y5sxV*_z2%(@@jhTw%vBXBuzt0l53 zv_lts!^OH#;!3zLSDTWB&9&gm!`oC;d1QK=`nyG2j5{5!MFkVYH9J;)9iFW>Z}wpO z8mCsP@?g-(kO|5TRa1(Bys`kWXW6ye{=j)1oX;qs}R|MEI|$tOjf}ZfHqZ~id^Q-6Fl9E76Tw9_ zVZCv&_dB@6k{I%BEnM!`iphb!Ssf!TW0snNszWHzxHAly8=h6}czSAaJ)=ehclAgyB;iVI5)}_$|#5 z`;qaH`w@U1h2EQ9nI7CQ+sNARZ2D$$bV_;>YU*e*+EB;n+VI#g!078C+JWu?#R15n z#FScriwF~MM9$ajg`E3L$y`9bMP7!cC;9`rE!qk?5}F9w5_&#bRcLppK&X4DQ7997 z3;KT2LDEjrNfI30E)9n+hJk;QUt(C|r^K10b82{+ifYR$!z#z>+)CN%n;ME5>q_Hl z=j!}w`6{^T^6Ja#!>Y#`y~?@DuQ;H&7E_@3H z@Cqpo2prWbc`cQM6bE?@a;3s1L#azDqbzGJ{Vp&OvjhDH@uKjffPa=2Ru^^{dKp%r ziHPxx3Q|ax3StqJlb(^Q=1$! zViDC7vJ-lZmXCysq>6H<SVxD3UMAD2ym^C_}+4J6!YgLiC(Ie^1k%`wZ8Th z_eGwC&D5U8oLHUEZ8{Qr5{&|7gVdTfo1*J2&NjEj=X_NZ)o4}!JLq$+*7rl;yHCL2 z*x0!&LadGpU$&1noLBAAba}||DVx!oc3Z|;jvF%^-gmMNqjqVIi_V0_XOzw>J^t@zK z#t}QE4eR~Q9(o1c>S6Ff(^%6p9l|rp>)nHXAtC?AthO2X-Vz)OK6q?m^FY*9{solwsurx`S$nrkL4fa_w7aQH3_ao5G%vgx0>6Telx5w zJ~n4E^0-obw=xNk7o0)9C4rTc%b2$#bC_qBag|@qo3wjx>b`zn4PKq&5aFn6BWt^L zb9useW_ZGBn{>N$>@?h1%zV^bk6fJ`(Ms67Ouz;hsDrgWF(T1a-?5&-?uj3fo*Is$ z?JG{HOf|@D2XDi#AGRMmTU@{0`abs-3{4K*7WKmS!n4e<&+O7w)4|p})wJQ# zt!Cvz9hMYheYD$wPGpYNRg^X1|XWy#yH)9UJZ)5FT)#2cwfD_9(H!i2_GAES3_B2thYT$9iH_^qHpC?!Q0Bfwy*#|A z^b4Fd)xOl$dGE!1y>;a3tT-2^;28cf0-PR99Zm$| z4zh|pS8NZqFTfoZ6Uri#G-Q+=3-b%Yscr8;p>wEo=@v~KbJ;fWB^+dWkQRvqpQWq|?a}GsAzj8yM~2#{B5OlpQ(|4R59dT|%d>YE|9z{@V-r|hLNZi_8g?VC zG`%{zO0^V}h7K7r`9k}dG@=v1@m|_+d$C2s;o0D7UDH?RcT+njaktZ)=J}^RaCnR} zu2N$P3o>&m$Jo{ACFk0)iq*X;ZE%x;;h4(;j0FerOSS_H?H zIK}Y9tN4l}9-D?;=KMGm?ZVq=G4zvxl@FI1Y@O}6j<;bOVf4aoLQln3#C=BLQU;U> z71Jw4swL`$>p1ihj57`pb`Q37k4sx5d-y#%uDu3*7RwuiD9zK(POHj_c4(;xj z^vd&g+GPWBH8O$Zi6kv&QHp8u4-t-Z``g*=86AW6TtK?oxFY*Kfeq)e&7JCa!ZjCQ zQeAckdKf@?&`$r8*~RH-csX04&y7g3oH3qJ^_b$>HLXJXJ?tf=q8QC0XZ22(Kt3V>Ct?tq`g|3H_hFRj`V!d*i*6Z@}43AtE111x| zX(%Bh4nK-Af&F~&Jft(eZ?lmQS%G(rw?4Q|;LbBwyg#>Q zR~!~~dixdJEE4htX%|vw)3%nm85}t*u6ul|e9MhAmsB1iE!IyqiqFJY3Bx#}D4xDU?QRZFHXEs5IMD%=Gw#4(l~FOf_z$kKL)2#Z{?g`Y{F+A5{2J2RW&qy<~T!dgN@yl|`-6lv3Bf2ui}`aF$id zqRJ0*@=7Rkc}t*eJr2Z=;nCTtM`^C;QR%Unh8XIody?vsNfgnPs+HYqjcdniFRSEg z2$sSY*KL`P3{QG@ZswazHK?uMD^5HdanCZt$=B*4V$b)^+XsAxb$YLp7w|0f&h0XY z*O|G*JvP>{EZL<*^&5>=;Xih&42laNGb6v*bKix8wmbA_>SSntU@A1S@V z_}JX+V;h`}q>coS&ks*=X1D@-&@fn3d!9y{W)17bkwfuI_u&({DlCc|mLm?64jV-Z zNq1~IU+eCU6@P7*KM!27Yj-$v*MQY2lTmYjwfwY~WthVgqjFs(n4pZIvR-e-xpzG2 zwDo!;yMJoHtA@Fn&GGC!aUa$e`Hh+5LGRa|-YUfU@yXAkhS_s`k{SJM&J+&~<*Fs+ z_q6rRdMJ5BPcg!YTCT8JB_LYkNcL-BAPP_*iXfn0#Xz$|BA&E`ppoxE9rxhAgz1ii z`?{amxlslQlJdV>3efKN;DQa2T6hXf!gG4s1!5!l(MP>dqp*hU_?%9HLjAbRs{M&w z7$X$F5TWe%s71$&LFE%LNa7j@oUky|tmw6#cL=A1X~=5uZJe*X zpY{BW1?2kWiB0k$_bgE*u#ufwm?;3x3dc9eK_Ew48S7qyqHo2wf&M+Ms+c$!3m9F6ahoK1LQoULTi zTNkGX9D>d|-Vs*)uzdzZ$?1x$NpaO|b+57e==nDd=o55;HB2^6fa0ALbyi1~;sfiU zL585K5vE-70Sk>LhfM#ENK4Bj#t~YwlPfduj2<}T>S*R;WHDsb0@H`C&eU?ekT~Hw_+r)2=})G zU>VI%+Wxt?I)l(6p23-dlP)M2M4`0&Q9r-_EGw*_CNTxU@hjO7re|BiNb@%z;6}3> zMI8M!$}0aU#nUY~r;lZ`e5-xC?jq>{9>o|X3B?A~O?PbJ`?v4scuCYr#-4>gTE)bL z-4dOX`-AdR~=!RVq&PS;<+|7XHy(GQi{l=kOf(%2Tf~P{Z9drmUqGzRVrfq2*lfEpX zH)%ae_z4C_#InX|l(;J7tPzx%*DWXzh{crd=A0JlWR_%;R8*L1YC2Am&OZbT2OPGt z1o)-raakAWEv7b%)sIwnoJ{UKj45epq>fyFS^@7-by;~>ZDX(IICrsgk$ftBqR1L-)?omh|at=NOq63x2I+1ahg7tN91TE`Vq7L#v8?qrXTTFL!)pwGa$%5z! z%iIS2UISMB<5Y!~%=iNNOleYQFtwXT!tB`Xr5@`BsC>+vxYGvbGm5o@A{OQQQm_1< z4M{7~ON`4|uQw72Vh---hVR{-4Oxc?$OLp2sB1sYA&^65KxX`FSZqWzWIST2z)Ni_ zh{_wxDPEKp)jd$EpAUhUB7t1IzFdIN;DWD!sBIAN0SEEwBcKoaC1*vD>c>)%!N>(; zcpYjIFe94#Gff&F7}|0;f6MU`?t{_cuNtVzX>4o6XUE0~#mSV)BE`*waQEdAQ5y!A zC6kQ^jPHe?y`*=gAFHLR6}2@uBs$2T1EoPiA4#lDTB8=MvaGSKN!PN`S=d9^*4aTUi=|WUx`yn%O$5lx}*54 zelYF&{^DMzc)>*G$PfF5Gt@%bG~H~-qQ)`w%<+Q$CdqL@_dZX3z`g(LtIuIiKY&Tl zytN7~%?XpU{gTriDLpttV2sd}&x4imemCT8q43e$qM#$-ee$)c)zWRs_3}1)VtU%T z_v%D4A%laQmHbOd?_A{E8-rinN}a{Az%l=^31&SelSfwvmdENZU&d3$3X3w2!i3b5 zyIbI&MxQ*bv1gt z4_s+H^Nu?s2t5KKS~NfK)|p`(Nj_5b3)a$Ao z3H;gp<1Y?;Lt9%*Rsg`k!GX$wj>^K?06@dS!UCYC1<=w`ezc&paWc2na-=l3A^5*R z{yPqzo{f&Rk)^GXg*omY<7#PJ*x7R6s&&j(Vnwd`4z^<~AQPI9V8J+5gl2UvK`F_>Z1SfA^%Pru%2lf4up# zCp+Me1^i<{|EI10JpE7?Clov2FV%BGsj?jt00D6W3G?yDIs%`hLAuJ$J>KK^Re}n2 zRtkdT3xPI~o5cks zG_59tfFO)w(1ILv@i4f8VTxI#iy0Mj50Kn5-%oWJzlXCqvq|4gwO`arNOLvmZ;6gg z#1$jt2Knt`kj!*^_e!?lgK5~fK|8~25%|5uM~65FDTH!gUlJit(BCfYBp|R&Mfc_t zOvp%}|Gl`0Ku~V^gc}bjg%G}5 zFw!3Nn4oik#ndKj4g~k;d%#6vP+IGGj5z7!@MU#w+{I(N9nZ%HUBYI3~ zL*mmdJCDE8 z8Lpl>(&Hqr@6egeXv<@#9MS^`JW56roDD)0$@V%H;N|Uihm`3+!4eqUhzoZi-Z#*2t9zJepq-2|x$( z^fAP5$6HyhekHo>vWfPo)bFz$w?9_vhTE<0z|5>wyJd{%c)SW_0*_buK6-FNm?x7Z zD4NB)HPK$atq*^7p}>3U3DCp$-C%6nVQL7E9Iuv)SA2bZHkjw66I2?{ztsGQ-JLMzqS>#CRXvWz{ zjGW57P|nKBReAhSaTxPB-GExaEiCE7yhDw8Oj@H!f7#XWgO~QIoc{VfW>S0xUHCn&obe2n=muqMwT?+C`{XB*5sLv1Tn-t2X@}6d;rOA@h=oBl zV>h*C$t~sFk=u?AIjG~hSjHjeF8al7aE5N|sSA+*HGCjQzVBXcE};leJXa@-u%SjY z&eQpMym`xm(;y8@YUHug(fZT-!?HFvhb|T)s-kCMqIQOcHhZmHZbu*)YB3uU0EINIt*rx>WUhU$0)o#1+p`45+&W7U`ihhZeGWDS4#4Y z{>vjqNo4Yc#|g%gRlQh~ymlCT>$Xt!udVtne6Nx}PJH@HnaZ$VST%KUv5ktUz`?!! z<4CoQ2!ur+(@}(3pJ^+BCDn;{Sa+z!j9vb$7L0dICm-1pOAv2#5DwDIG?kn@P#)5+ zc~!&qH5M(cUsN233mGyD%u8ha&dDOPl-LI2^ssAmH6QA*%O%N4+VPD=#C<9!DKow1 zIXtqUpq1*~$-GrH#wdo4sL05l+Qn`BmscqykfJhCF8=vAka$eS;PK$Y zq~zWfrt4V2?DTX^n{*T4Lgb<7Uz*H105KfI)iDH>tM|oZk(VmOMVOP}mLaaRW+sjW4E88`6q$sI?VO4tJ8@Bld}JKd`7hqk4#X5@A>RVfbfb-JonpU%@y~G zz@aIBReC}Qmbl)-?je4SnEpLZ*mSOBv7+(@`{T2QM&z*zLmZKOii2_!0CQbbxzv@1 z*j;;WF27=hp*SFx#hfCjXoyU{rJ~`5f}vae@&f-~CydHVKwKr(Woc;{jl@jaWvDHl z>jFTFb+l1x6|=+C79BQktKP)D*nVDi6}p>pEEMl_Qz<)8{-vp@jR>pZV7}xDH;r*Y;l$4co8EGMUTN>}*uS*MOgXCk6-!buTe4uAVI0g%b+?FwY)c*t$(n=DeqZSer5cn(MQCfM&j3D--J@B^w*GsvF#ejk`{ zBMJyiE5gvx>)%j~`Uh0^6%2F!2Gx8_UNIy&Ew6}c&QRTmZ=S!{|5u{dk|1t5)iy@J zb&(7L9EsYz`0rAA5DaH4vmolac_ObXAi%WVnQ6ZMemGnDl zedVyEqCHwXg>h&yi#HE1Te~Iq{q@|VTDuQwvE8lZ1E*6dR-5f&INhEacS@hMb_{(* z=SQHC3r7424rGg3ug?>9e8dGy)_CXt<=yIhfj~DcI$q^b0qPVa5~+cO>Hw8`F>4!} zywcKA>6X2*6a&zQ^Nx4*r8*N5BO@aL+_w!M_?_4L^@`YzViJ5l0#nHL3X14glB2<> zD+9Zi1IR@MD4s@lx#z*Ff31%kVz$d#RK8fDB0bf9$tdm#X4CO>sqSh!f=lFjZ!FJd ze|+4PpPzqEIY%HYS8pV~pZ#%^ZX(v#7nDG@g}lLF0-MEh!!y;YAHCZB)!C+_T&)G_ zhmDD^h0%V3+gW7jOHt(q6t)AY6-~6 zN0JJP*%0v{EZz+BO-oIR`pW{OwZ^-Z=OLFin=Hg)>g;HD!Z*K)`V`+K?*6{bnW#w2 z_9uz#k2^8d4(I9y=QA@idN&7C)h_quPEUsgY5R8jdnp$25^1bX6!#x@tl~*^qXRe| z6!083krYbRe0$?*B?V*iX_oEgtIbdg`4VYl4XgGkp%@Iq-(mK5cl+XTU0_4-56 zLCh#>>#6eup5`jtiO|u}uck!-!aO`)vt=p@t#Kt9>snoGju)DTb0rDvx3e-J*QOB5 zlUF}3+TP!u#W@ga@j_dO4c+ieBhcykZ=(_YH5@m=7F(Tc@=WAde)eE&u3oL9n-IMo z(M;1=m0&eEOP3ENG^0+0;Y>tPN=uewxWo}ISc;Z_B`v9)lv#58-dt2~ z^rtILGT~S(+P?EONQr@NG=h8Gp>xM7N8-O-cz+$ttaZI^UMgVSd9UYBdqENxO0QZem*VWtMcrG+8j9YG*6RI?~yTyKZG~y|t*XR3$(NQrk&$o>C7s1L@q;TFOV_5|Am8y); z*2~H{oRO^O1LvXi+cI4>4%ud9_c;?vgUuatu7pj>_z?|_SCRbPG@>3lq*S+i!V_oY z$TYvEfAM4*F`%x>x2uE>w3wX?_opF>0^2UIuY*kMt^^lD;vyo^{2{35G272)Et2DG z=j=N8uODV77}j_p-G+-RJf>kPdHW` z>Q9MQ#q#r3VSPpY=@d5Dgm8vsZE79--<2Ln;%PXZEW!0^yZ!!ro$;>9`M85@;Ja^9 z-Xx&hU~cw?OU>I8VmM80=HCg^g-SejHjEcdhjcWb>rGYP@w|Gj+wBW^wP=um4&10z zqfgvjyEjL92LHa1py9p<(F16I$o{C`#p8mmJ0F)#*PE$~VMm`D_4W0UNGeYy#UM!< zsqjkMz{-JyRg#!l!7>Cj5dCY4ctF6{2U9r$Iw6{hLI~lAr;ks^6-@erGx_3l?Ol_o z4aRfBJ5~zgsu!2QHYSDvx9aKDydkh2JLm1svDJk;=Z*|=E7*`*P`V`z3L z5QsF#`^)tg`ua%_cr0e>VdpIko9__M13^u{hkm$wklf@UMBzxt$YB~5rS^%~4OLIN zi-7+H*AEzr>_j(6Mbr#B7ty zRQ{z2fA1)MTBo)s&|9@-*ns{S=T%PY(<->NMnA;8({R1e-As(`qP(bLc?u!=HwvCc z^Lzq}pW*DUeDy?9ytL)y-xES3xx$1Z3(nou>+uJ2*>*119EN!bl1G*$C1d?CL&Tf> zzb6c~!H31x?~&4S79xg+?sv*Za>qfMax}`EJzNQ2?<+56Rrg%|DpmkhA^yjq@cKnm zX?#wckv6j2z@N!do;s|%$CX~`s~P2$a~@U2D?@0JbNgmknkGv095qz^lUDbJ1Ag$p zD^2&g`g^sl0tq^;b|#@x9Fg5QK*836(nQ^T1<89vH&JeYBL_S=*Di*mJ#9}8EGrq?U|{XdU}c4%j_n*h^sokD;lR-894 zm+ZJqyuoNn%i2pO%-9iO(kJr-mnRi`bETv>CiQ0cZ=^anjcV1bc9FF=D54}3)me?Q zthcSo(`7!fqW;PY$)YAEY^EolXgiB@%#@p_K+l)9CgBv?jmA>5jBLj&_tk`PcR-;c zPHg5OyO_#^G50T9Pc;%>AK5x$Im@UDEyt3A*#60FeoAKY_vl++Wa^Ed5HMIw&hm!) zPt%c4IhFD1HxK@R=*AzTOmf^JXc!_)nW!Z2lut^;8n9aD6E;o9bwb98I;;_zjsPD$IX5iAP(;75%D zao6ZZca=NlmJscOl^zRJjh|GMH&q`8aw7b{!Ve#hTDc2PaY?pKKSsF*FvB0D#P_6LndX`eHBvrUZk!Kc74#-BAX1`UnnNh zp@BcqYLuirpafE`hYfqlTx4bv1R#$F19W^3`yjno ziK`NiFBV2qVehe=%q+^&NL+ZGPKfXsB*25!*m%s?y?k+e_0kVA{I-LmY0uHpTC(Y} zv`gPZ;O)ofaazI$p1&i8D*la8|FPYy@{U_lmDN)~=edr193Gm#*sA&tmP7 zkRrx~Gv=l=k3$-nQ8ya9o?+uQd!P30pGnR_vCM}9_s4w4w$l1+AskkB#c`9RpUr7e zRNs;b6~<5Y#FUDN>iYXI^(_5H;y*XeBDrpdO;qGDP=%1MS457mR+cNt^|H|_KkJRV zEu=D7m0r1tucX`)et-p{7t-+nY!SbYJ~0C7Xif-ahbBaqkejjtor>#_ ziVT9i_9oolr=l0dEB7hL+9@SuRD=3kNQi@PSF!9oQn;krB_U`|tlg<>e;|_g6l=18 z4+t_OAB)$^)y4fOW&f{(q{6TdU|Q%e80Q?o z0)~wR3!|2nkM5B~d$!L)8~ERtT59FZwhk0_7kJ#evWi5sY^B65j|b!KsuvkX8DT!k?p2bZy)?FxL` zo%5Xf_#5G>^8@nHn~NtIbML3{;?)c1RyxR3JMeR&M-?^rjolOqd3sy&W3%IXedK4P zu6&HHSs+izYrpMH!Gy{IL&Eeg6RE zN`gIDA>39_s5Ea&nGTEGA2m=@?q78U{zrj_zRN9Bm6-dn1LO{E@GjARcWlVCK<8pWdHtkEJ|Asgi zuM=^c;V@C0SadLGAz#q!@DGLXuM+6^2xsDZndP;N1%>8+BU9f%{Cn!YQQU8Mf}Ikp z{+IHltU@4y5gCzy`m;zJ(FmdSnk5lw0x}5{A}}qYE=ou~JTHy_5Bf55OXoIHQ;rm| z|5`Z$H0XzY1p1lx%Ho8--^7Hby#waddLd}DHBd!>&Gky#$R`PmV<)7wuox30;` zJ9D5}SOo*g2jTUxlVYMEm+%6>u<>$^y+1jBH23KLRmR{$1h$*_5p#WK7L*-7U@wCQ z8sYfLo78GrCj%i6;lcVn-rlj`CXHF#{)7u<8R6yN+n;&eKv@vuV3b1Km1-Wk8KN-e zZ$+;|rLXf=OK6&9g>7&TrTx&z{X6fL#al|KVH$}fR}>{Cd?ufoaFed@i;-@=>qW#n ziOS4*v`YZ%5#`gC-s4EX5zf>CWxvqX@SVyb>E2)w>k2EPqVNB@>ar7{7eC<}RKhKJhh8!4#n8=dEwhLQS}RVQf}RsA$Q$s(^wdEw~0@*&YC2P?WVyLubk?v zgh8gNmE6OkgVXQ{qDmsoy&haLZ?t+vLjBZD`{^k}Mm@Ajb#00w5h}2S+8=zA-;+WD zgh7}a{M&fv{Eo~<+n^nC4B8iOin{_6kVpDTOo5GXrAwKUwc;!u_Ui^?<5yeBNayQ+p!c_s{CqgFF)d0BID)<#Dr8HI{W#?eIghBtU*Jvkx z3mL`+2>$uOYVD{s1>NCfuy_R3h8A|mXQ0E`qJov@NopAbQchBc0B`(bTrUj#%&zYr zUaZDnxpVn1E-@6csUOF*g=YWvxQX;mPNK5*(G^fYWnE>3B%QT z0Yq+@LE_-?46jcg@?dGY3 z${1UOi)1(yi9}8D2dtF_Hll^&aUNn%gVJyjaRwT$Kh<3?B?xBY(Wh8{mLFsgE`N2! zrQ;fo@lUw5G@8=nV@#UzQHpU#{UZ-5=l!U#4#laAUHUYuuA% z2RDpnA@5vVCG4K6ik`Q4gy?e7^i5yn3tJ%f7RRW@O-aQ|YLE@gR76rq5vUaF2QCPk zsffBNMNv3Dihs;pNq=7G+h>^A=DrcV&Hioc+b?RPO*a%H-ybielbCJ7PS!hk)ZLw% zS#W|sbe)ws$SRci8G-_!Y;cAVirZSp+^84hO=&Q2Ez$KOSz^)4GUHE2pB?E5)%Jna z@#8^oVHeGAjHj7%1t;P*%zF!+HR2YFVyJ6QIVxIcKQ2N#T2U1Z(W9o=(YI6+B~@Uv$Ah-y$0b?F8+3LVK^KBkC$z_=&$?K`x1r+ z;wfrtKsYHg!66}%>7_CX(MYSyfzPBkRtXKJi}>GIN#6oZurc{S@bEl=zT_ml&duJ+ zpLc-D?ypS~OrK^fdNTyhEF}zqGp&3#68zx7}TAE|*5&HTp zv89W0*XwO(r-K)j#!_imj+xVExZ&q@^Jq!Noso$7=XKAUJK9DY@WZuo)nOR97K$)) zBc;^dffnSX)fLc3B6Y0A5iC6p6v`TWiH_Ed#!xJlPBN|UaZIh04(v5z{hVw(v?Zj_ z1X5p-AtSkC5fx36GH}ykCOJ3aB^6s$q3*D2hv%YN?d)=UbO0k`^QD^lBBEX$AE>)5 zS8TI`X`?41kw#ZvzWB~vh8>-U zS3E4KM=DgEd;AY~19?%;J6uo#QPmZzAr(fo{#}YlThWCkE1jxYaU5dw0y4{almj@h zP3|BXE=WEgU^T|?QZJDT+=<=;D@~KsuxHzSY2o8(3*R_#jbp4;ry|o7e~u+ z`pE&7jIXJm!YZ6WMeHL^YUgMr`O_`>rUR`B%R;;TWUhs8yYm6*}5_~zMQ;tB{KGy zfma8} zQMV--m<}fWK$g@-TR-lDdJ?nqjo9?@86|mHK0?qr&;G=q|MKxaR{3jn?lh>b`B%rmS@f@FDpvTALFo5C{a#F;lItQzygjcAoy> zGr=$&i27mzTX2K7BJLv{H(sOmX`oh*C6pIJ34V30hslV{uAk%x=r-rxEO(l^vI zd8F2Ga9nD*F0d)4-U+K5g` z{wj5Z{@;6v4Pq>wzc+EFTpn*ysjGvuZ;TEwPbI1><&~lU#qx>60o76*(ZXq+y-8i^ z7X1kI93QLgks!W%brZminTC88e=Sz1>k33ki{H^ecldG~nrCcp2V^_OmTS_iI+K^9 zRN^?eqf}jQ*fNy8$9eY>wvE9$mLq3l+A;3--&oFTkkHaNyTwkIk-A$MtZSb0^8F*V zM*(}uIwPWH4+Z|zo{Q>_ukGDcY`uci4h1C|GtF#=8VxCkWK+Sx6(da8wK4~**Bc^A&L1Q2?G%$uKU%KyULH{KXVIA33?JxPsfEjk z@V4!ykw8AMUDVMyMLBxTzRP^=4giA>+Sd%&CKfJEIN+OSp&4tcDAz6awET|^OCi8P z%gc*^Y~p3QGq$5D-{--?<=XiPe&eCS1uY>~PLRHmh4quh)bMNv- zsZ?vF;)j29tUAbz@ul(NzX8qs**e(;$=Dh_$(H5;Xnh5wwOSVb7OPYkth9#a3ex4z z4GjB_gl>+eCwFQ4Y-2fa4HTRerPc+6!$*4Gyt_05^amfefsCE~oJw7LZP(gxv--?) zGn{=p2#aQLhCeKkdmq07+}JFoefW8Q1*;{82Gi8N0-MB<01 z_l&9ng7i)QV3-g&9k!C?t>$1-j;^;QU-UiXOp4v>(v>$W7#rOlo$cCvr5GlD+I+hO zzxuKvGE#It^3atqT+&lP?NytuZyuS08s-AzEB zBbd^_ai%db;5UcY<1UUluQG@!G)+37IXG!QrY7_2(0=l+F!4^RHIas9e}!vQ-?ut| zn1xl7EB$b>xQZZTp>Spk3b=;jb>3tsD#wFms~vrhJw;A8;M1#ZAXA@@BPsxRvaiv0 zl|H!PNk)dQtFMJ7Ei16mIaHjI$&94{UX3(L1pla|v>B0k)Uzm!4i=fW2CInN^e9rd z33HO9A8JLJ&H#C;qZY8bUsph9Kg4qj+T!= z=r(wkS#6fJhrTQCW1ZMC$}v=*Dbr|OGVR&QEx^t<+c8Ry*;=ZfpHp_mwOA{Y%wzcL zqY+Od_#-c*6F-Y0m5!u{$9uPdk4=jvzt7&;HaRSb7!j9&f-rm#&;R7Ed&z*^0E5P7 z2$|GQ9L#wIO4bHL<#FQ8+zT!nO%BVrY+d(Ilr4Kq!~hIox;R!TC&R zO4!T1fSr?YxND=9h$@pdcH;l5_IVkS0P)PMCaw=8QwVy?$%{8b8HFM|6E>0TJ&k93 zw-vZnIbu&h$lIllW^>%4qdJoMR5LuLa?}Ch$3K3gDJmJ`79;}VG=Bvl#Hy&dek)j+ z>)OWo+-x&wAc>3$NO%buEtDo%xSiD!2bX7 zbQTU#{NLAyWr3x;MH-|-Lb|)VTUtUX=?r*1`Ext~>^4IqL3SW{RY6bxU%%>(xJ9FUnrT zXj_gH3l1>W9o=PIU#x2zRb8}FvnuFQ@+%~%dF-vT4k4zsB2E@*5Glus;9{#x6U>wA zFcAKcuDl91-}FS|+bmC;VE&vUxxQM^(lX*OaiFHq%!$R>bn2kL{REvktpBKa4ToS< zKcKgi=sf1_3Z7hSyb0c97kr;@B#7lsY}tz18Aenyz7dzCMXUVGFb!M*w^`OaZLX?| zwG?=r3HSA^PkuFJSJ5Edy!1>(h zg;hMYzI_u=iQLV&<{_QaJdAFapKukREhDx3@gRL_ z*mjk@3KAUS*CFdBk3+<9Wpkivz;2mg~N4wfA;(wlvY>g{p>64NI%SVp^1G^D8HKEs<8c~ z>0fW0sk$-wyBX}Vc)4&ZXp=(m6omHl@u99=qBV#VAp(qxZPD zyh#oyiTK&ElfuddFvSi2vvauIKa7@+uD3fiz5T6;Us7!xyJgR8HP}+i4nr=WC)nsa zO4%!YwX$3C2R_yRXdL?LivcZ;r_hLeQ4=3?2kh{#fZ_v991E`?3UV{@-bm)!ZW%Wm zi?J5#Kir~uJ8p_;8oc0Yw%`y~aDsR<-*gS>^?Zdl@XrHAF1muP&5dzgu_;&n+|N^7Ce9Yn?xqc`{^e3bIqg!4tRx#IH4}ZlEFygRP{x<(#Qrxw zyDPD`lKyLE$iYa3{)Y!`Fh1gwOom$Y1qUnYfwt6-61BVM zwX?ELqzT9OZ1jRqvBKfo-cyIfqHkeQAb)Zg8hOZ%`f-#{P zC2|Mcq*~rix4Fy{0n5i^8Zz)izG1LuvOgstL0`}_koGk4t6AY}Y`7Xcw6f>dod?U6 zNX_+Qr$7qwV(2dQd379H!TwZjOGmdv^k z-*aZA_^9Z3&pT0}RxIhtjIoik|I@}2%&-#3Qok*51zjcRhLeBjD|-y{*z~mYS1Xj2 z#ww4w@qCI*xX_m0hBafv4?MtbY*1mQU1)MyFsA9I9=pk_GMbv4}~Lh9oS6=;<09scH0>#g^$iR zb=%nqw|K)mWYs%Tj>h=W)F_VT$d{E>0ZeH;FPo4-kT(>KM=b_)A)5KEtn`GXi79 zLxij97dAwkYr<_)y<)N9u;kM`sH5GZd?*k`zkPTaQhi#|G+RWjJNN3R{~K1aX%qWF z(R2afTg$(WR8>}lE@+cwoA3O1Df{i%`%xgTnax*@n2efd=Ncf!aCaXvfDL;NHgx@@ z`TD%t>r^?uqbx&!-LLM-Ytufr4oyl%lY+3DrM8nW*_FQ6SsgOeIf6d4AQZg=rLqBr zzoPu6?l993`WC{JIJ5L0*02=_>X<6-iJA2s9iTZ|5}65>4~AN z&o$Nm5cJ2HE=~B3A)7TQ0nRbvWua#U`FK)_R>^k5|(t{Fl3V=!D=x=a!070 zOlR6-ie;c$M|Ba28~oo}*y#x1m5gRLEX(u8yRv*6#r{#My~hyA{QVk_i;r1k4Wl+! zY_0{RVLT4?WvkkOt5V=ME`TV$SaAdS55mVd7X>#upu4XdRf;;#u$9xF}< z`&)Ex#L!E>1B=uB5riiLdez|^WJpbt*nii-ab?;DVX-kV54{alat79G9+#ISEf_N2 zq}qZyo7dO?`im7!jF1xpZ_+Ssk~qd$?-FIm_azhN0vtHL$$byJP*d9WkC$#9ZmQ8n z--nt1-oE>5MRY|k4Y$Cke?sys&0mkd$RARY#dEn|aPfOEobkA}yBui1O>3s$r-`?!$T2`IHwJ=ZnqRJlp%9 zp%-olu&qZ$b0C);kib*CYZ-h}D!(?J=R2%dN7vd~NMczc^Fn)63h7~F*-m~~R8HF$ zuNI4HTx)RU0+s;1Mthg1A6~0Bb9&yrc$+5d8?fo)#<3?^`iDs(?Rm(DP!9r2a2&1P z6#cQK;qFIV?@k4Sga=0md&9p78P zZiMAH`bmA6G;w6&V53yc=d0lWab*4^I_uLjYZTFC0F_1cXJ@$kucA2jH(`{yV}k&{n7Q6z#}& z*o-BXFJ+zwdZq0|?1)IgKRSOXIxF~eF}(){I1JC*DR`tyqG|roIJ@{5|EBoyYq4~Y z3&n=kJ8T5YYv?a0iW3 zmyQqpG#9Ok%F5pt)4GJ&|v5dYVfMl;}een4Yt83_AI>Rw<4bs3=16D#jctW9oeyd`Pc*+Le5-J z$KG>}OtGN$FcG)U5w;wA3tlfzd$e#9%3=5jWn5f(pZ!H%C!%R&$;knP61(=smc!)! zl37T0#|I|(4EB}CnZ(rMb=3j&%((ZPk0{P8eo4k^ep|nB)~uq~BhZXZCGvmP*_yut z>oU9Um$tG{me5BiV6Nw>9ST0(jD_>ckW&Mbj*K=@WAg#lQEX$hE_A z`ds&#$IkhvMoNVvi50SjEo_m8m{r8ItKvo;%6tMh)to*Xyi{+*N6L1mo6D<`m$jE^ zt}kx}s>k$mT`tVFB@Gq0eT3GA&-9#thX~q{`A~!gK$9hpZi0!-DadjAK6kd)B(xKI zQ;w?$ODfX<@w4)CFL^X9Nq2UIY)BkII-QRCozDm-s?gjfSmJDE&Sc!K6MGaOP8hnL zoPqL7DCu9$CtLx*H}p+b8ILf?rl{pyeKqjpWO-GWrgorQ{$i22%B0OwSW5J#x~XB^ z#5l5eE1c=q*-R-SQ6Bflx4Nx1Y1=0YR153%r&(Ch6y)YaLnR!1GIC3>_X87B!HswU z+6BIAnp_e6Uf)?{Lk3r6$|tDK=Bk3}05c#j%AQMVOff)8RWKXeYq|(xT;j6(h&dUr zLNOgGDQ35pX8CZkif)s{c4k*rv1kqFmRnwfp4HB|fHyq*x2yI_pMTFL1F4WPvLtEF z8R@lLBrC8Jw7y|CA-t>$5=(5UG8)S_4Ecy#neI9_JjGk^hPe&jXM@`s%l(e*nN7W@T(}La29D<`?TyD; z*=z#yb`Nb?NNtWSy#CQpfpl4OLfQ3$)hHU63_dhygSC6%R64nWewschs>#Sp#jp+Y zr>8GaA_g`{8j#Cff&(D*d zc8^JcPWG<|m9Mj6B4aslsr12)%CCNH_D^1w{YH(oU3?#4gZZb6ZDDaY6g57~q`DaQ zVJ%`??xct!%*#NxlH8{5+K_4P-{SXD$3OPe|LeKF(q`c_Kj@$GTA66OY|U$CU1&rg zWV`Up6`tB<*}d^<)_ynMAXCVi-h!7}Y&4;(U#4dY>U>@NpnmdnNdMP!ueP4&vAi!S z7IYZ<|GNOdl2nCsfnJn@1$GiAD*C9wTSUELq)xo!|9A?ykE(s5zY#+xW3AW7TnjNbwa4 z4V*(fT<`EMR$Mo)QRbjI(8{;c7g|*Bn$Z{5mLjuo&Jx8ikyCz3+o~P=gNw4$`DC*# zr{k8~>LdQ^GqQG(_Jb>=NN)-xnGgAsR2V)$qclezJ9cj%d7nZeVBhSg{0j7gkzBRZ zRNsyqmma%5oJ0Qe>ukBNI_7X#1FyzfqDzV_*5Enlvl1Oc5n$5@461D~1b_nlzdTi!A?$lpVXntYsS?ZE;3UO{|maOe{VowjxfigI%SCtDh z`{XcXq@()hVJ66g8f=YsU`1%(*%pj>RKcrWSU3|Zf(n)LYI50T1jC*yZMa2_v0%lP zmi3>S<9t51$eB4oMYmhKUP zCO^K)eKOa;>_pw+zem|F|7o>-K3f462C9qv?&?#Q-k5-Mwx&)I*Dk<(vfd?3^&ZN! zaQeckJQgu{NxVIh=Cr@$^r$8`%jJBugy1-Ehu-8xueP4mO%*MoSMK`J@j>M;{0y=A zpQ|~;0cZy@IDVL^)n;B~qqXryHcq%-JFGKSza@KY61jE$K9@@A!z9lS+Mh(m5bB8H zCHQ9EQt?uRPj(-e+IglaT;T;>G_mlW#E1g61IgR)2c6%;F1&~PGNn3#82DKkkw6(Q zI`edLSJ2sh-IgnC?#6^$eJtwQpwxkkbQ|-|%CALsBLPQN7S5?Kw!(fo^=o>_+~oqL z-p*6H@sjfAe^?YbGH73-9OCF(sc78f)9{Ti@++Z^<-5sk+qY%ld6imacpKMOq+0u% zCx${Rnsi26cCJFNoFO$*J=63bS{+ti%Ylp9%tE6%J*0$ODK~vzQk$z*cyFSB#X&GY ztSCaz`}C^xa+E>k?YO4Q)Bu!`$4E%BCaGfdH?P7Z+;+Z~zAZNwl6Ic(ye5|sw^XR0 zjwJK8={>7_BwZxgq5nYm)cBWGNyW3g&*rFb)*Ai?RRniq%4gi{rP&M(NP`h=%0IgR_<7#C@!tck9w5{}}A`qVHmF*_nI_<9?&JDPu$`_87>#2FK1GBT-`=l&vQWC7l@^ z)|ldvIwfpY1H3Q8h@|}z!1s4JcuRFk*3`FTyq26lo_{Yl8SQ>f0r_)rp6N8ceOb&_ znjGymUU}h}k1^4D_?`WJ<;LxR_bxr=;pnsZ_llaWY&QLx^@zuR)q0aIDYrW`~sMho?x7uy@p$gE5?=-m!pZQ3U@}KL7*a zq5rm@tn|1OdeXn&dE1OPI=sx+IeB*IE^F0z?DKbwULyc!Im5nT$9WOf67BXNQ`C{# z_U3=sj9DfO?OdE#wq`Bj9qNvRotB!_bieCkJ=i{rtlV-+&uAU-Lf)4xkgc{`p4$;4 zO=|(PIyV-?bvox}{%F*MpJ_Se#xUEUEZX>>bE$--B*N8V{i{Rxck|p^ODec$(V4-} zGn=CEcT-p|gij6`;CT2JT8cUA{!xz)wcQM{TkvL3l8)Ain2+0ifZ*+8vU=;BfjE)- zI?c}>=xY29KPX2B{B>vkyF7U)b8m<=VSaG*Hf`|kfCPS>ag7sn|pWMkcQ~MH2FSEYx-5)AsgguH1g^wm{@ijRrnPayh|vX0tfdME&rE^KeX^ zo%86P%7W|%7B z<#8F6h{rAt_bpRGb@jBxaWpoO@GA<@#jqDGW=ezS*QkGW(h_OkD@^GKVHvDj!cEM> z*!UD|X4WQsiHeg9I7SNbt86o7(5YA@Yt+&iP?3m$vO>AhxgY`JlWz{!?)42+lRjWc zL^-CbdwNh<2EARR8h{v`O!Qmq5f+Z1t8CpMAISlICgfgKoz$*6KTJ* zV7E75p4_sC!8jBX9qoxbBkE1u!z zsz3|j)*;DBsOFnHyf1c`J>n}E%tk$H5GtL_{>*NmEj`Iyto;74R@o)YoGG$Gzs@R@ z`_MU8m(zMi0m^c}U-6g+L6<5qa_%RNGkMw-?@p1KI(l3aQxbK%!wWGzh?mVbmiYo6 zPz7v4UfVVe&*>%;+jeYE=5k(&WTU(21r_nRj;r^=z7ZmPO<54Pmg+`*g;#E8$zA)& zI>Bs6QR0RVzDzj};}x9yxpr|zm?~&lXfVgCOU%PZf2{5szgJFT5?Db2N_&V*4Sf0+ z*$ICKU}!uz^<|3+iY>sm%;u~J=_HnWH91b}@0_&wan)K;ri9-DmbBxYoh-J)UdgP~ zpj^qM{LGZ6TPxHAURA>^*L`XA0%CVEy{qje%<{H@TSXFRZ_Wb{1af`ebTO`NHP$|E ztJvEv!*8iL8{}bl#4)Xu$|~`z$lNRgBhPcjhWgCuDK@<{(9JXXd}Pgf>EcsIS3E1^ zA=QE-9Hh8nfjS*K+s?Nm#lle>5UTDg`7$IIN-5)F&O743sxH*bf8!`}Zlr}VJ68pe zq_XMTGQCZenF3exwGiZm7na;$llb+g#x;%X5q=w=X8ZL!g_;t4-4B!BPE_<{in7XR z*F0ZE8Y*wZ0yPQN;obpPY4$zwG7lw06C8~??yeL}8Bmk=fBti^moCOsZmMXUVz-!p z9?v%k;qsPd5%}}QxiGN8XgbvawKRn$^gw{KdGd(`>Y)Zqk_=wxoneXVQlkz3y8YkS z>8}t4vM>b{$CGbUg!HuD4F53Fv;hxPkGnJ3m6`d)N2d~sufiutkTRqke*NxD$ito* zdCvK}U(CmW%F9JmJ=HcGinI^|CYGhRVV^}cr>)$}BvX8;@%mSGJ_{BsnT)D^L6}Tb z8Y2~VGVq>1lk2V&GEXtsw16j`=kM^8b1-)95g*xJwVXR;Trg>k9-&IOt`!$7f%PUAdI^SWDf=+P=)=Wp6W{!vg*%1sS z2pjyQXXaN7VYrz5f;rr3ydD*JYNrl`(V1Pz6KIGVbFX@_rY%zj1qIHqYlIyvB~4d< zZvV?7ifG`ZR5lurc)u*F&SH4M$ojlq%)mxg*y5L+TAo~psh5H+AJsw#QD(*D;1Zh| zPfvM$)>V?J{YDlqE&PtzBG4_I-g`Z-+-5@8nY`+I{;wP1`G@=gpEPP|Qq zDww{v=h}6B43Qo^aL(FHc0oS`%_RLJy4vVq=P5S*n z3YRyX@rBj>eBUj98LWnLq+Gi1A2e`NTNwHbhW91wANR=A6Y~KH+O`H`6L3_jk97gu z^|cyWZ94qusVZ5p-Kfa_JwXIm*W7b=j#a~w0`Io|pV3mKQGYd9uB)q8=UJ_GpB{8AhZW@79r?am8iZY zw(t>Et9cTESor?OFa``#W!WM2v~s7g=l=4_6RnszT2%LPNrkerEgr*L0x>#o_}Yu> z@L0^4-ne?i4~3D(YknqBgC@6VXd$gB(sS*(~} zvtUQtuS`=@Tw{=2fSVS-5yIFaeLzF)0E;d<$so$YbxY2g?|JnlHt~j-Xp)a6%`d&N z5sI4k%ZEdm>%u$Jb`{2_js!xp>)I8Im*!;a1l|=0eHe)=7G*ns;1%;qk6Z$jouGuX z_=}i$StSH#&s@%SEV5_qj9pqHh4cS@68O?hSh@6$cwTXNsrj9rbDpxMIjHU<8qq?tg zb%P3}CKB*5#`2& z)+9$ke&P@rf*a?aV=8w?JTG254zUT^ixDyW%ya~rLQZHq-!d^1lltgl!peFriW22^9$e%I8>fS7-{GLLUEH+Et@&bZ4UbZIlvo zdqKP8r3Z5N)YH|%jV+WNyZ_FOo}jz3lF*rpHVq01*C8AWO1z=_U2mHdJvf%FM9l7~ zFrfe){M08WCX7imm@6>`_cxcl$ z9Wcw}q-TF1Cy-4T@^k|Eg1)tsu$5}T5}M4;DCX+^c?qA3blE6e*G`mxk-~{4tJ8%O}Z6Hes6bS$C2ze`Z8CQ6QJa@R3W+CZtKOxS6ff! z+hY=(kL>C_ZP#9(LM{E{VEK30;ORtDyP8`s>=aF+!N8SVs<;#2G^mDI-A$sr|97}T zoH!K9-WEWqrv`xTpuH%@Q%IZ!)xnZW3lb_YR>kJo;Izm5vyz-iY6*Y4w_1k5W$0Jr z)=gGc!#T0B3{~_Xf*j}WhCv~x!;c`&!D6h?vr@f{msii_A6-mrlwufv>*ISdFa|yn z-&se)Qnn)K08NMJKM>7d8Gw1>5_z7wsE7d4O!*N71$B?XU}@H>z;BBQ;8@7VnrE=^ z<0PQRm{4}LPy;2#8QNt3{sZJGZ;>xvblf!61i4#mL_<3gk6r9FL>tomXtlBQTyg{F zBab~0MHX0Mlt1sI_@_xY=e$e<+oy;Udv z3>)nAJ!m@&P4Na-2L&=s0fPG-40%Xn7VegYdEb6xQBIC3MxLR%@pTHcJI(-Vd3X?u z?ZcK%`KJB;pk>{V{C=G)fy)jua-}@{ar_DBaUW5)%SS6#FCZY*kI~MVVhJ?pcZfeO zXRDoJ8w=D*6Rlan+T#6Nr2aCpD4`t;@y+J&&ILLg5pypk9#@foFt+c5ONYCEPOD}0 zk_*bpce7Mm;*{wc*;&lC{)QDYz9YK6y)KF{xB*GWmS-`TWmjTB7c6ksmVVF!S^qGM zYSy21gOpMNm_qd7bY|=O^*lx&x<8S^twBwt;d#jnuFRWMD{XY*cO#mZQbMRdOI|pK zQ;5U~eaFF9I`Li-7js;(-8mnMv~Tm8>bG9j2jhiNE41Ff)DTlR66PkZtSJSdf3y%Y??StjFh`cy(*X*oFu4(^ zpWu!EJdQOaP@(a%em1wOr>GEEs2K!yY=V=%w076{=Vca$!36UwKBScmwAVw`91ghs zLVCnCayiC@^cXkP7;0*93hKOWh&)DZ=n{^^g|>2({8sy>m&u@{_pnJ-S`sQkuclrl zTD!X9`F}oOfIrelzvnXgAXk0fM*_C?SiDA#{N!_zP~piuv{S8Of?uj_F6HKD zI<%hc87(T)J2wz&_WX=|QeB2VFr@z;*W=KMR5op6XHbf4Ms^UdF*C>G~zT>uYHci)oS|7iU7R zrOb=mJ=>G&{>=n%*pe$8-^Gom8d$}NrdC^VOu!T*!{F}a@zD{~>Xu1ftE!l5>7Z{w z-+i?LT~Ok%0B3Q3NTAT5;dC?baah}@%Saoo-nT$-Hg^dKmiD6Tz5M%jV)6lOEr6yI zEGzIAYuT1lnB}(JTkl*r1?E2zgkp7S1Q~d+Y`)?dvvaE8D5Pi-_J7By@HtR%*f*Tv zTv}F3dw01YJ)(dG{F9?5aUi)n*FtVkttj->s#)Ori1A|l`|QPdt%sTuH3xsUR-A|? zlE$p`heyT%3|e0aqPHEz%DIO-?EbbAN_gINT91)ukfd+5w0F4TPN5aQ$4Dz?rhV=I z=EEGXUm~2F5IT@&RerTw)xCGOyxJ%BglT1lp5iiSJ;3QNz`str9}^wRG55)V5=(!L zjhu3;2}Q=GOwK5pZeK&E)_BVcko4C3#|Mtnk5Up4ns@?0#FY_Bf{{Iy6-jQ8ed#| zM96&=W=O$--hINrBuCa9d#lbEjU%~Af--4PV+ZlVLJzh9-)}D*D@dUNy+drneH8Z| zC}10WYd`v4%&FyaJV8rKIGXM!t9E9XYmJ*+SH|Eep}q{@1$&aHy%@xCM$^^Y#inbf z0`waS6V7}vYXT!f&MWvoAaH{^bsUgJSh{+Ng!-pauJ|K;zsjdvhwcEd$8y`^wz&R& z77K|3$`IYb$R>v*pVVrsnO3#+X<6XXBX9cfUlwH!D~=h3fs1|aXwZWT>e$S#7*C23 zvj3DM^9E{TU5_RLA^1{VG&IIpq)D$Z%1Z_Ku%!=Iur)*4d9V0Lx)XM6%Cy z&-7@&JCpkc3&1jKSTbF>fBKJc}=IM2B=o8~dDS{_BQEH{C93 zDBh7FE?&8zUx&3=u#3EHipO9f28cS#pz@;Wk%r??*Bh?)K`bLDTfc-KL1Z-g%iIN+fHbZJ zbL%A{&_nIBES_}5=qLG7g`1JS&VX|DAGv*4$e8Yp_%Pm5@9AgPB>B{5S4&vIm)xQr=s6q@5e6slA_Fw-&8%F1P6cI7}h zz)UPdw_j%Hr_z5cSvZL>nPYwV*K}vMH9@V-pZelg85Pt6xk4(2eDAy*)62Bb8XLYq zCnKpNI?9A##DC$FfJy9kOvdwubo6jVKEv}Kv@KHi7;)Sf%ay)V!(CO zU7sl;YNed7X_aa?^J{#zb(JEp~<)_GO1v5{t8NVZIeJxkGBZMf< zC5#st2+?zp(dpxX0(K-J3Q({r9gd!s?c3YyT?2Il*lO!?T+DXjq0%4%CYac(Mk!*P z-LxgeuIDB^fF%X=FZZW2!!Uqf7s>GGbsHA%0bKjc`Rj(i8I$>G?;4b($-_hAH{Zkqg=D{MR%6 z{L5bgy6GLu7X;;W$9(CmhRbRsZ`xwgn6>|AjO?Ar0a%n0G0+5U5kNJkV8z8&)U_;m zd63$~Z@f9$*)7B>OAj6)t?qLYN7Y8Psq&N-eof^b-lbOBjAi3n|*v(oEis;HwyVWNGH^(B@ z6ETpdxBDYvhP<44zRiSbEyTHfdNq1+&-sQ*{5jF(;~8IHw>>WQ&*u-_ob%oD; znL5aKcd4DD+&Ia2Cx!kC=x4&@T4{woi9#AC5l$dmFu*vF4bZk}fdWD5Im%=;jJr&? zkY~V|Bv3O5Xv#6f^er&SesWYU|G$l03}ahE<71&UoR^H=0X-RVBhgMh#=OCi zn9rweMBcD#w1$4B13Q*tv#$G_hcI!U4&FZnuS1<8;3%={xknu=)+1p)@QBzH&wDCS z+#*c!IrMzhiZVck-2c=fYpC6gE!h(FN5<>@b2{5JQp1vo;;wMp!jP^krbBEBQ#!UJ zx8Fmk_rCT_wQF<{_!OTo?%}@Q|bznaIF&gTsD8|?rI{w zjGEnmsC#!%T64DgKva{34&u|4eu^EjM896;8$R*)b~6x4>ohF<)Kh}#T94iMGoK^+ zJ{Jr65wmUIM-6uVSTTbx9j^D1A6|80<+lTh@2d;yxix`HeP@|fdL|eqQ?!^XbvFn* z;IH14O*iVjVC1MgovqEW_BW-MnGBQzQ z?4)%s2jeLO0FTkmm;oNr`+B2o*oN_=uztXznP1PKh!y@01o?=lnC%Ob}rAFtN`jXtkR8aNjN83ej0dYRQ z(`Nm49XrWi(T-H<6Uy=Syz9f`tis7Y1Xpp*NrpH5F?%|e)7RU@Rob%EHIYHPNz8c9 z0hB51X<9u%a-eX%ZS~Y!5gb&Ch2+zP;J%S`*lH-2SF!o_?1mbX5Y~{RXi!gVX#Gd( z%I}Y3t6kT@&ldgOv+^P#%QsQ_{P3pMfwKa{9j9lnX&smHs?Pm~_7tc1sIc?bSwTfq zY9ssZ_*$s#*kYOeJ*>1ct{P2aukIG}=hk^ImTFE1p&2fjfk>cqc0A>xC3D}* z=`Uj$Wr>`VpX7c%2hNLXzXz~8%V;9i)$%n>+r2;7g}9LViiVK5?oVcSSG`pc(Za53 z3Q3h4I;RpE10S~;S9?i{h_QLQWI%V$6m3n3wjF6gDWIHa|B3)3{+ZjOdP{dacvH!4 z`(Uhhb-s^rSyQTLXhxQrn4y?&!t3d}o3$C!f*MIMyIA?(sV7ki95+Q>TC9!U#Y< znaonfSA^-?&;7Q)Pv{yilJUXPvYj<|JlTqNggQm?k3@RE3^`?*YA3w4im0`PF%Xsn z_$evW^0eRK`Q%ljW458>w5(av(CZmPSzYrs!|Q~%L*Mn(W1=PTSch(;{w>~+tr?9u z_F~IpY*CHZYQdb{dwIyGp{^wV6`w(#ot9fvEP8#an)(b**E?fNoCVLV_l$gnr~EsK zz27;EXZEV*#JPXa6587~=Q$cKN$)crByvD#NMNJX4!Nu`f4kH=c*af1n6QV30OOjSq3qMe$UYi&b&blih-nO6y6 zr-^=^cVs)|6vt(%naTDoZJ_+Jo%-dAe9vX<`+LXC1cG&zL5F8EBUx#5vH=#JXqv9Bv$}EI+9Ra*WglZKXxmzR^UAIo0dFLA zfR%jOUtH4m%(F!2a7m1EB*bC9HucU6c?aprkSBnwiSyAOZQiEK&I@V|6a%W7VQ_1k zdQoZix@mXu<}jIt6h-RpCnrtM(GRM$-maMdwR*d6}c`>~v` ztF!zX=aP}*C_1*{5xl@h;BHt79UqIU4%__SW^t?M)k zei@|?O}{;FfG(f%+F5rv_uEcGr9GRPI^GpMz=P!+eD`(Gvnft7tzmTLb*<9dl+r6G z@HPJ(tU_xg7>yaMUBi+hoi;d?4pZW+!z`ui^?X1B{VX&JV`Yfkt9wTkWi$^O5-+GS`8lMbDs!(Z%oE-PBy=xH$*>1q5S z{UG%Cb=Pslrdk8_a48_k+Kr9fH|8`2dc%oopMN~Gjo3}iwx^y>NBWo8xG1g7eu0b) z(C{xWi1QG-)!~g5+h-`-t(eg9#EPJT7JP!*K;1MuLdM3dmQ!bFMpa%NQwZXimJ|UR zbXkD`zfR9M-ZH^l+b+x9X#T{`yU3~l6WEsGCb7~BXR2nXbE6~pcs5hiUo_k#2F?blRk(~Kx-$0qmNp!2jW>~DouseI&cg{F8D&b+uOan z`&w>11cR?-$ajG=ql@KOZu~{Qx0)n-S~1Hw@*?VV@GZZ5{Tvz3bOq##T-$lQe7fG7 zd#oMwk)`fZ0HmQ$du{`fU$aG>D!S}|>^=o?9(kM>|O|!@nT!Oo6&;)mP_XUD%f;$9vmk=xj3GVLh8Z5!x-Q8Wj;d#$>ogcuk zyZ1~_b#+zs4#$A~P_`z~kj%IebrbU>f;m!NBz%f(1u++VF$^+`VWJZ|pXaLn zhw&|2^6@aBdjClcZ+c@AE1l*&h!CNhe@R z#{Zc~x2D&6gVKxq_ni;v>@xSQxV& z9P&!3f^>GR?cgRVNzsMN}jQ+0}YsM*VV( zI5b6iQN5=Wn-k`P^+9&9dXGojG-YRz5>tJV{NPPoo)ly0w`lXU!@GI8bNE3*rPVca zhsazK&~i)Gq|sMssBd@OEaHmnuzI^v6U5Q7A7M~Ax=Hxzt3&KpGFbMCb6tJ(Mzf( zcODhejWCDFaa;ntGFU*A*A2PV=rVP+BflE0;@ufkv>+w;Kju8^ z=$BN+xJjogtMfFxZ}0Yh7WY#c63#?zf3*aJrWdkPyMylBGyHWU?1wg6p+Ie-M@Uhv z-=norw*A@BS}(EU?%P)C^9uW0@RH_!_4X!@1cXNKj<^O1$KfmeyP&wSHkpO4qF9!j zGq(5h;cV*o0SHu7?x|2ZnGS)Nj=vSqc#s{TOYN!JBk=C7#|!VxRT9 zd6=GXJ>@6YY7>69ujXL8}3|hmVIKP$2v~*biXL{Wl9VB0Fc@N=dGlu2aKi}21{^$TJJ(NyYyWK1@}c>7U(e(| zwqIZA9&X5cjgHjN1ORU>ugt5={n0|t7uAyt4y}`PuLc2^uiRFPyz}Z>@nP*3sRS}2 z-hR_`b5Wg&i#T@}lV8a}(GSnoqPqCzElDi0gJ+!jh=DlEO=DG+dKrE9dcyr26X9^)SI5Hj3%9pyhp&(NC zq?Dkiq_3(?f6eStj5WxW#=ciYNY7YlNa&^hJYDBgsg3&1MYhDYPrwk?33r)2>N47B z4YSqmbF@`n<}eN737j$+D@=KS{p~_wRtoNL%&SMcOuq1A>X3nKsR3 z{wt!DSbn?=8v;-N9?Yn|w?@=EiE1ztA02PD=_B+#lR$^XAc|zbt`a*|z5fA01VQkx zbui^SYPD*tmDVF6}rm1^y<6J z*9G#)ZtlJfJ&l5hZzx5-Er~;RjK+l`1oRmWvhPv1TGC22X59>*?4kV+%o7nvyZ zgOH?0zB@nOFIn;sR;FRm&^f~r<%%MIQI*^Nrv$~EQq1XO8r0B((GL8v1kVCC0%llo zRJGS`yHo$Kgw1bQP#c`!+*E&z((U`S%RKs_)Y1(|>LW#6AT< z%%dRre}D486f@VsccIJ>;V5w8n^rdaKjM@dkvhsKC@R0kiGhV({--X zUoq0;LAqpnuClj}==Sb8-`*34UbcU(9-ab%!P`y| zEX>h3=IR-ZBLN>m-nvCjdEZ>UFx1qMv`8(0E zl|R;*hJ8qKBdK^mWH(CC{xYauVLnS_dQPP(7;w7X-!JsBfNb8qP*(ghL!6W|L;F|R zEwPgf99i9kKQ8`)0H~Kw)qQ2_i~jB1Eis&~x6oWLUXmi!7uLUHhHXshyDTOiG~!e~ zufcJAvYvU+1sW>T$5{sbPxq|V*28d82q)9^@gn&8H*>B-s}0iL%z>gCCl# z`n(SLHBAC&;4F)?Xf(R?%di8|^mZv3xzj$`>hRo7bu$K%biWyM5y>%hF_c6K+~hFn z8E${S*LOJYjA5%qi&FhoQh;lF2Cw8WfgNQ13rB`L62V9` zGV1+}ix7^1OA*5HBz3NnQ?aR7bgx=w8p&u9NjI$O(fI0DYE;`84Z@TL#C98V1w+3- z`X{M3Pd)V(PadF`9v^P?{|i(*Y}Gno7k)4nhezr{`dLA(B5zcRgNQ{#T*fvewXWWD zdspOv)xAJ_|1$_=-{Er-*YuJuiTQcJ*Pd#{+@1?{CSNh~Xk)VPz-Rg73WMcmOf$9;l{>NjR^l3ubYiykjmqhZPhMUJA1VCdcDtFU>c>$ySdIiQTaUdHRQh_ZR<=O-{22{X^U~ zB9Ik?4(gQ!sIXoHl;xNjE_y<1wsNYMUtCgGTI-*`z5$Mv&l3w^X0O8uAtz=Bm|h#| z7!WAH<4Mcsyr^`+L*+wF%^&S5sp1m7!R=u2JkK$zI3wdDu_**2n2)Zjj*}|`BjVYl zis)DkuPZm&1P}|f_F1#maZ>AwGr_$J!Jx`nzHKcwase;7TW<^6(5;{DJR z9u0-{6s)IlXT$Jsm7iMqWm}t~`M5^H4@1CvVh_W)DFsl~sDhSy=X(yfI?O5@jWG5pn`-0XXG&6^w1x7T^SUq>Qz%^)h!`1T_S!{)UF`TKcT)W<6CV2!~%HibC zYxq1g8vIMjF#8HnLbh!3!5v$%#Ym$ao83Nwk>n`Cko;$v6FUSB0}O0?;XmHkT}4yc zAGHMgFGb-N(Gu1oh*R~f@IJxTbuGY8iN^G7R6F~u>)g&8Qj`)`G^*Taa>ef^J}~(h zp^+&0F-j1Pi} z1yk7NA^C6MeL?PF@BACRvMtQjrZGw=1qVr5`l^(3+n2yxDq^dRR_5v(1gxHc;Rf%z zuWxEPp6$VTAob6tMzF-Bw|&?SAJ_gT{MjS!D~TT@@thAKx05~~}1rjB6&1z7Yi^@aKU2htTl%Ln$J8;BzRUYF?FQw!rRZ8>f&5V{)+ z`m#HJ?rtyJA>p#si6B2+r1w6ka>%6k2424t>ut)x`JVfe3@P(7(%*yKpfx8u zOX*sIy~J1!xlu9l_7i3T)gNhE;%bqe2#-6nT#b@FuRJVk4onMrJQA+G0_9#-{tbj$ zLXUIH+onI+5G+tyJdQ^7`ulmqyhlC`cwl;Y+(!S4V)Z5Oy%4}&9o~}td^BT8SQ#?9 zf`Sr(l9dqED1-MFJ9E~~i2vxlJjvV(ji)U7cG(CQrpik~Hsqagra^hA8NQ??@fyzd ze&_t0q&?4%qAo-@K_St98)g?#hd>~;Beu;B)?;~s-e+a`*C7)W!rJJM zCo}gM<77xW4!uf8k#bbnzSdd4)crVF)<$}~!8W(;VNNT(;-pU&W0*U~2w@!2BxK*W zxJ*>VKzqm7?QIDOIkn({8+}mi$gsIzdT8g?Gs$?hzjNOd3O+>$(qx(tX!OXM7v`Nc zODJX7(jwM1Q*(zDct=4)2>h1=bkO!bW)0OipUdjQDM;Uu0jfSVOnaQ578Jq{%vwyb zy|3!jM4nDD#5tJtUO`s6gJI!0G_%(Oxx{q*I26$41Z+*+9~hp7?F0M8X@3T7@}Qv; zv^3QS;v>}y^WIU6*OHxCplHS9`fNnVxY`@MbFpRQUR9ww*3~CgT@6SJeFVsJ#9FI# zs=#I!w)>!6yCvCma@h`)W*Li&C91#7C-e5a4k^F*mq)9P>)QTVetT&~$AABLddNOf zAsYBf^K!NXZaK|7pAM3>I4}41872v+=sImRyd}QhdXm~7Yi)X>e=UDUcYij3%fQ

    zn&65!3?a$TyS%QtKAGYQWL8~-O<(UFE>nvZ(J#q(? zn%GbCJ>b2aeDXMoEa*$8O19R;D|f>d*8$3Nsu32uY&lg65N1f2-<|7sk>SePd!G+Q zYLA~Xl&ZI4Z@Ei-XT(nGdp)?Z+6txVh&9C{Ev5sBL-pR`F6c6!NzMkW!r@*;(gd1N zMq=_r`;Vdg3o(-HxHaD`kALkq#OV?uBXJ8QrziTJuX~}rMg&O7a;euoAe^r3VEHU= z?fg4yC;6iWH;b0}`>LSGNzGsg>?HT>dYuI)ijQ912_LrDbiS5W<*C{higl4xoKXeP z9{|jy`{r+Br3EI;KSm5WBy_!Nc&S8H3$S@n5&Uo)Dpeh{%NE2XO-wVpyfn)de+b` zP3M&GEAiORVlo?nzU|landDM-{9LY;nrH@v5c$C;ogD4Wmj5cDKz2lj}`2)*^Mwi!2C#<}W0eV=6r zRv7O2Mo4=@{gjfhjP2y+szqk7G-7bAMktP3E@Q zMMW!!NlX7=gffkH0$emK)h{`8qX8Z@)AEYaE>>D94holeI#^Osbi5r2#v=n&fUdUd zK>LT0XyfW8I&xeyn7|E1Xp@TSu@+fPe~A$u0VpgqMIqZkw)*~=)T`kZob%kzP8fkP!2fKh z{iObR>H8I7;b}b)k7qN}mS?>*QvWR$4D*Clmze0KsW?>MMq~;b@;`Q*APQ`0-L-BL zR{L|0zYim}!a5%uCmKQnD$_7l<3ujr%3zHid8b5CY5IMtwbg7rS2P=TPnq$NV1pc4 zhKc$`)DOW3BH9R{sh9?SDY%4xOr<>Yx&!i2D%G2rI>@cB zNuK*};dB@JnI^Z4XgU=2vCXcpfsG7@Lgsm|_S2iD(9(EB0gUUORZ z1vo8yDtr@1&EU~>V{2EfTx-G>Fv#g*bbg>iqE!P!dNbbn`Na?w7w+Ku@HLBhN~g01 zP}O$ZnvUPNyiYY7V2yYZI|JCZcvNMDEDY<=KexaFdp{&TK7wnvq@ob7Zu&DUQ&|=m z%O5Fw;4{c-KwiRO&sL#{?`mhO+sT%RPr4ahu&#c{yJCApMRpJMz z1n#1MkXwps^)gzmXR+nj^sjY|;QS@ExAJYGM6)Bk~-k7L=tRW`~nV^ZIU{4s% z9@Kb3y2|CEVC^IgImraA#wr+Jqs8;5ABU*^?`hbGU={XH5BUvZz7e_?h8h}T)RZ3! z2DXlU8LCcG$_7*wBj-b|k5O5p2XsGN#(Z1k{nQtmQNQ6w5|%AyUn8yKt?ILW4=_)Q zI|hWE98n^ENqV#3?U(C|TgeuQWYcMz+7-=NjznN<8Uq}V9p6twB3$^c%ht9)tV*eM zsV@?~B5suMK%DVMC<_v)sXsa!$ndSBuOTbq+C#QhITL=HzCIw*Vf&87S+OxaX6KM* z(q`(yFZN8o6BlWEt%y*J>-epLuPS7C9KFbxEw!n$IqfuGMpAH0D8e-Ljqz-bL0jJx z1i3@%`8<14KnUjkEo|lT&Rjf=BOaV-(CQn%D|sT*U;I8k4cA%&A!L;ormX9k0jzpy z{(~V&*wcsz>=qqQUb>5vM?L28-)Rd#YftVSB&$2-@3f}!dX}#kYEBn5LrsLA3VULR z!|*v&BX_&RB)n7z};)0B@Jr}6AtcN@=lzk}hbwelUI!67#T_QjU2$B_9CYrjY zNbSkW6ZOwvlXt}E%%)qH?=tu<71DPtoxTi?2P1Utm9W%* zao|b~$h03&@Pk3l(_nskq|OU>3NuV*JQ(D!Vve&IFA(^h&} z2Iu++Q{O}RLf*DMzYWyMmmE8}x5-YDsnxGeqf&B(H_T)iE#xQnuAZf)qEN7CQ6G#9 zm;Z=^#F%KYyJ#mK*y@nSUCgOkP(G5KpY@7kxYJ9&p4;`2A(wRhBYlke81cnPm!A`0 zjQX%tYzF3&|5g&EuoYJDns)6d_Xs7x{|WJ9;cLzd?9y>wRTh2;d@@~R`t}+^JM3fU zdPu^NmE8UAgrP{+30fvY(V+ih-I?4bBZ}fS(8{_AGFcp~>ztV4+MMPhbrUZ3w6o4V z+PhgU-njegBZMf|<5A#0i>@)|t~yTG!ymHVNIEJMay0>oK;)cnBY_0PTBrR5o#sQs zQ6I-el`Yi|iFGX_`8($GR->Q*#d3hsjFn3x)WqW8%tI>;%)Q1a)VATJOBA?nWvW#e zHF_60eH$sv4;&^@>-pb@u4Wr;6lE&89jYlH(@_CqlnRNTJO|Ku0W{$fIs_>(*!$*BC zbSo`iyv<6yk2uoerO5j!VbfpPLT{SyP$67exS$ubOH|<1v4CZN`<~@l#eiXjPUnEi z`nN0@--h7EmxVa_OXvjRWmBm_Ec!=#P8=YecJX!}5d5HPFPB%G+OD$7C)UN9p)+%* zP!5Y19iPX8WIBnU?{+CHIi9Y&Q6peQ5wNr*CL~2#>@WK~4gnRjxCk7Cu0!5N8jo6h zTW9*=0ST2p2;2N1lR$ABCX%lGb`7T-Dp*8^4O#l365utP-#N%h2^kD!kK5JVd}zRx zCtoX;^{)I#v5i^UqovF1-2pjA-7c|e_#}SD2_y-zB|LyGeK3D;-2(2oKW~GO&)z#R z#C!%TXQ?`Z+ac}4M7FyLwUm|F)@=mQ>J#oslN8MbN}Z|iPPS@7;;+oKyqF}dxKhG7 zqIy6jRqpg@BcnDz_csEX5>|e`PPJ;X3)aH#3j8kooElKcpWk?V&dR6jpTC6kh5u<) z6y5Dp@C=uI$4q8$qqOJr)c*P4O|kBPy&C;|zlzf-nBXab(!I$zMS;LaM-I$9Kb+0k z9_p>&e_93hL)aX^e=mpOQ+K7mDFlZc__tyt1u6-AS5F2Xc^dvrsN-l;lg{MT-F`v7%b=a?v+vJ^^vsDOG* z-SdJ~U1muu?t&olQWhmQT+gN#BkUwg`TKdp^B?_Rc>ca=U4T8%j%VWTTgQpH+SeHK z?t8h0W1R6%uNHp4pG}d6jxz{d+k9D9{|k<3MdU&1Zjs0Rw>q1axcIWZII%mx_gly_ zGWDaN7457-tGd)Ku(I?N8E5ozxbEU9`^T|E*`~#MN}#4uk_3s6YiSp(7H6+zlo?(C z#NY<#O4Pn!uJgFrQu|}mWhQ^To<(-$c^h$w@6i#nH0{mK`UT%e_M4}fgv9gX!QCD@ z(Z}YiR7ZRN)l4K5bz3)OtTRUXWwif2wYj3GzFymFr*_dgFyX#Qc_;aH;Q2?d#|U{H(<3@T{7 z+8Np8<`KS3$Yjvsu#eNM9jR+?hgV0?w+bc6rNv3vxWfIU5Tg_Jct4VLV3fsoj7;)9 zV;h^56nTl!ix8NX>qX0~9YC9^{Vbp!kY!)nX3m5w7tlFk5I|4=*bmTbEE8c5K|IN4 zjY3jO|5&Nt2+GkmgvEV#i5|6O{rZ?S>J5jSxbNt#NkwRjM8hxq=t?G3K3LA6eayY# z;l{vb)kn#_1EmzP1a@W_=|xr>oVlOY_c$3f4{#v%2sLeUY|3CS=3h~rGa(n+#p z3C=BELS*@rukt!DVYG-f&19wbb;39kR&{`S+-{4DN3m{=e#Bf{=t$CG4!Q6)+XJ#A&9U=^#exJcqhclI*6u?J232Lw-Gg>4*@7$_DcRuD z_GZ@L8kKAynUPr-!VKofP;b_FF?p`3el{)=_GN|H>9X^X8q~s)IQ1uL`8(DXKhe58 z=pn|oFy0hUUtx0zR6IWrrYyM#MtSUC!V-QR_fOk;S_0$G8Hhr1iK3rs)!_*~4w+$U zI{=ZDl&re7BW2^%Rbo-_#H3CSG2$LMMv`@M$#`Q1c*z@^ICy^pB~ND~5G*QsZh@jx zBoF1&P0tanbi_I3>BbR^JpGO;O1#=ipeK*a0*U6)ofhT7`&PY6Kv!MTxtg-qlexJt zJAw%?biZCLdAFmWe{I%h_H8_H_h<)MU|XI&Z&m)0H(KkVI*McT z%P#_cy#Km7CVwc=xdVl|Yi)H3&(YLhnup#C>$b2D$p*FyTaQak`jdZdlFg!#pmDxb zG`eM4D3dvbVUv7V_v+7Yhc6LYsug!LPjA?O;8a-e;{}}i8H+H{{L#jgVS>gI&MW6X;8?;qwf`XtPr5{Y5ov!w_KVtVUnLw4dui&?b&^X z{p_({;*-cG%=Xf$^_@CNPCVrWmy0qG$+PTL>F>|{g+-+v+rC?A&tZ3q za*i!(81H^-uvkUgUQ35aP4$O9Dqo?SIhNaL&USRIM+yy{+4qPm{oBcTG;X2+WxQU% zY)Z>efCtawK@$;v$67EnKT(wk#@%S zz7+=Bkl+~rqTxmCFJWsqVrRD=3JDf9!oS9SpHgIf!+4_g$lbg^p9F znPo-9Npo5Sa9dEvn8QE^R61i3-zx6gAR{dw|F!$;cSJbv`3GNCzJXC%Cj)EQCcUEy zBo}4Nt@z{U(^M-Z8?Kh8-5y7y=ow?fH&p91D;Ek$*nlq-Mt#epKIAxb)zl5%Q7Q;7 zRQ<=)F%O_A0Y1$j|0y~HZWz!fv1FoO-6QRuZp$kJt9dnC_!?7r(wkrjIl1ScxOoBH z=gezfk`eGVK2E@5Mx$E!>+z~_R=hBA0=bN5?Rmj1|2s9JnRTKa4VTM3U{;-Cv-$ax zYS1M24AAh9`%BvXTq&+8gS0C`LAU7C=Kn${ zAUZ?z0&?x#A;Y%3>eJnPSE)KQFC#e5TSkZIc_tARd3K9R>l}m4z@a+;VP}COGk0p~ z5{{Oh9|-bQzXus@^_Mc~*`cwN*6oG2<~37I>J$BlJ-B3H3;t=Tye@w@{R4kzyhE=^ z>HCcJ#{dVC!7aA})nv{)O`f&@_seE+&)mBAy>y54zyi^==es#NDTH*r zl<+T1bzSS9Vct$D49ovr{lMb{9pVI-VoC-Zw!TIy zvi=rsF=VWVXLQ~f&v;joQ0aXDPoi`{eSDNyp!fG*g|7`_m-KU$PtqetT13Q(zW;zv#%KX(H76lVOy-FFuHfOE zTT)>fu#8N7_E$tYXUsmo-jnlDW;WF->8lgS7w;ge2f>PqI=6RkP;kVkY z2uwyT4Ld45SMh81$cFU~&=acGvciRgix#G@4efqgJOm8>)}E}c|WuGo6))Vc)RoC9M~YVJ=e0?3oHpt6b?H5MmQ7UFd(NC@ zT_}3>UyQkjj?f*5`%#J>$&eiT1swWR=&emONk;l#QjZcKAmvw;owT|6MRa36o)r?Y z2AHJa$tqVx)Q%~ALQ_= z&=BCsAH@h_uY-7mk~+YSA`K0lE#RPw*|rE7C`>JQnLK}$EczCLoCxCRMYFN z4fG#H*lKi3zSkz#e8pY57>&VQYzJBE@R%~XFyns`CS=nl&)RFVaH@*@Q`^&tt}xWwHHQi|t@JnE^nFSmxyOuVKP7PBq3zHkH0 zs=6lUp+R(niJ-H)itnDvZg;={Y)wLvB_Qs=kEfp-lZf7TsS{RnEflgBeycas0l*qC=w|;W~3I-t;Hj*;n-MUX~ zIUH^6Vrgn&IMauIh<8ZO3sQ>IL=SU9bj2=%1+CI+Np1cM9`v+lVk{_*OGO@~& z#?hfKJC|PQ+OTxUwjuTCX0`e5e3@JUXuIo2xJ6|@SvZcN9)9dO?n^=7DaFJxPmllV z;abnf2VJhg`1#4gBU#$&Z#IreI}c)bbM1Ju!QN|tV#Wg%Y$y>9kLqUhjQjf6$ZY?9 zjBRk;(fR{o4vD^u3~pIvvp^v-=wVOmC}Mg125VLlzTw+9{{McnWFQm{T^lMcLtB3J zT7{jr#3)!#ohTTq&EbB;+vl~)2wf&$UT)-Y#30s>$X{O9nZ4yEO>j$#rt~=2+cWtA z&{PLIe=gRTArD-`i9)xi?Jr0ca+bAFcpNP~V?MJKTI#Efx9nelleilfGx~R#qVNdw zGnkz=DWT^B%*)K-t@JK8&d5FNq2&QJ=PlZj-7B$W)uKV->3{d%^4tCed&w#>0Xsv7 z?&lr>r|GJ{KlJUAkJt0LCYFAJkI{C7`!o%ukCtR$2*gD=yZ4in@fjkf%0PHdRsq8~ zIp=u+l_7hb7%MQFLJ~R8#&%QnRf^(TSzJ8%7#ROcnnA(@Vs19FA(*)q2 z+EM-2;|t81=12}C%bRULzXS}Q2k2q;kLW(DokYa?6?W6#IPssi4_MX{$#o)5SG)@TtdGk?KXo!UDv)Io5-TUkZHnDxwN2N0fnZfx#N4x zN=o47CH&vdMOyEOn!P6u7hpnN(o3O)b-nC{FVyj^-Qsrr1?y1oC@>8LU0N~6FSQGc z^^~lX?~S*J@uP@|^65Tcvj6=kO4uM@TojFkQAbFbj}23B{96n3|Ef7xjORo&l4I#wE~CAQHi(V zYhSepI2M3^W(?zxkK8S7GG0pgpUdgh!y(IB;In>)#kSD>zf0D?S9pf8!UA^BIKw}M z!PhPt>G8ayFVRol+M3S~h;DR*K(mU!+b zMm@*s8e`W%xq0{eGN<}ao*LTIGQBzo-HLTCVg}DbI=sg}L=c73l$B26f$1m0zg;fU zG}iiTy-kWRgmHQ|b0MxXv`e8WUm$nB36#;4cK<5&anlD=#sntmYSZA|u{K9GhIk7C z`#*1-0)?$5Cz-*Vm(I;*Ln{_%pA`9_5?kfELtz_WN@lXO$*xI6e3ZO@3e^!IgH%fC z)8LpUu_!3QIRy?QNKB*ZnCFszsWx*E^mRr!Js=`3J_SgYln+@jkPyqzHx^cgd>ZkO z3YC`@8Zp+s?>~Hoy?K2pY^Qo~TA~Nv`VrbEr#Y1-mpyWVWIY;wWv%D<`qk{y?Q7rz^`L=&^cZ1rc}6&Q8Nv^3PZqlZzRA?)0lsSX z6$LC`oWa9cA4QWLPTiFIt1yAP{s7nlD%;jC`IWj@|ATG~4dF0NE$T4I50StF>F?TJ zA-{_szVwyEeLyK9vufP!_4diy?)WYTC2a1(3M6rs%@LE7pXeLHbQ@({xH9mmCRWlUf~QN_|9!)yEfL&HDJd0>0ztUBW3fkpyLoTBFNRy~ooso7kzjgG{r`=cTQ z?}~)&bH84mr{6%k*(j;QLy4f3hQS$>SQo4eGDrb6HGVHs2e@^;A9`_KjfWcAkh(6p zPky~gKwt_~5M~&Mv@r)zV2X7F)Kb225axvNa=^SxD->Y0Z;%Or875^iJ;PH{z1)Qy zd%K%ngRk*so|^?{D$Gi#8p5j|gFcZMoH6F{hQ#$MgD5cK>jcZPJCJo5rkI@UdY3z+xg}eXzq=iIe@I9?+2U&F@L;7<^`0n7Lr2%T#2gCMMlMk07xDFW;?%KQusvZk zCOzo&=mNgC1nS3&VACD!n?4<>>%0*x0FOvb0quMn=G1-4Ma|jpx$<7U;e*3+lt5=? zQdb6xZODP2)PC@9YolUH1TrvjZ~|I4=NI-ro9z|(?1r@3?v)`O#ITmu>&>cdR&|&u;r-TpN&q$#{b)P&P?_%%KQ_FResZ zZj6~4-wog)uNqlGfAtMM^67S4;i}&g5Qn?!tGyjUT6J}@gqZF`+oJHlLHd~Vx8szz zYC5j>Dq5-kldJ4;eyq07W&GpENiuUNS6S?I3fg^`7U?h8nU)ihlvyg2ZTtW4m;Ld( zHcrR6Ein73YnTqX>w^j#Vzn|96Wqy$mWD&UGOz`wiOPOmT#kK#(nL zDKyy=KsZbVzai3QeL@YX!4_|!qyV}!!JA!T@Kj;#9 zh+Tz{5uryDa(b>#TmAUw-{Wjr(CL4N8i+U-8JiQ8WwRQ45obY_S6wYv{Vn->`q6)` zS&#Zuf2}nChoYOhCWZ765xw?L|MSd_E1!E#52|EZaCrC)qQjU%UBobWYqbzwO;@+R;kN&T0s{I>eFg)|8 zlIkIbAI1>GVdM9DxmxzQ9}e>N+B!11^nWgX92k$1X@9ML(NV?HYG{sJMuQO1ESVvD zi}g@tSm`VO>U&~Adc4M#hyol=O&n5tKcr5~N!onS(}_LPY&RI+V6(A+KfYzq;oQz# z&{}iZ;Hy$#oRjrJxu}_@dL|ZaQt$rXvSbZz9(p;VwYZRgy*z{Oz);PdIM|4F3LFX) zGHhrGC8_bp%$fczvRgv)OpM<`zmXK00^^Ndx|I4YVki|2H#G{X7LO97*K6UJ$zc*H z{0O|o+F~y)AvmR+id6NLu|5}sG3vWST_Xh68G_*C8)mTg|N0FD3Jg3^9!*HGQ3)2w zBacQ(Hdbeo)|M4>v!bx{B@PxPXBW}za^fTp_|ad?{v7>uCx{-O?d@;R6pSQ~vx!gQ z{rwA12zK-eQw*MBcW=I^sw#eu)x(jTr2LJJ_8NSF?!xm2i~wmtVo`D?tYKCg&a{Ny zCtAXPSSO2+3^lOP`yEVlA-aw(J3;nz}^~tAe0h@ zo=w>0!v}<~FyPul)UU;*7V%CS+fKywkZyd7TvZ8?;Bev*oWBxiqH1ecDBsM@&FDcb z`tNP&{D-JBi`FT}c{WIkB7!sV@WT#vx`gDD6Xok^ILs50vC*wE>{zNudWE4*vaCRPWdI&ZSsjHhetC~)(PkGZK1mh@D@CY7dz?_ zfa(y%K2{GCZv^DGexOewe$B_GLw;>&SJsiN3shdKOWz+5S$XfWiW~vG?L`T51->Xw zcX>*tu3Ec3fK#eCS}Kb^Pvk~_cFPXc(AINL(Eq!=%=hs8gF|L!jH zLp+qlYH=!ro2dSc9e8vJvHbE8<7~`{ogGFG?WdUtyPdYRcO%_7yiDte3+L(F+lq6Y z4At)k4^Ib{9_bFt8`~D(yudd`kea3M6;ROqgA?#1>Y_Zqd@H>%*6x5-7?V*Hyr}8F z=wsV(TO1DfNV$P8pJKL~l*Wf{U$Nf~FSa{rSC#X4Ooe3ErtAe{Fpqv};4psk`pR+6R}6O;rwq%R=#o zAF;df)U&Q}++jmA1MAe9C63o_^ivBtg2e|0&t;jF785pmE96mF1S5Kss4Xdm>SOR{ zUOn#-=PW(FURRRwocA2a+I$_);dg%f1&_J03V{x_i;sG;d2pN1wMp>| zt0fFa*WU#P|03c;P3V8O^4^gLjN(s@0^#^93dT#8av>$W5U|BzbM0J$g5eF!Yf{~C z0nnlXkWkuqb0BWt8;Fk1rw}uRt*HHw)=rHP1i=j(jJL$o;L*C|dvr)j6{s_guiK{_ zY}$jl-yPQ`%1DbUOF{DT#XB|BJp9(6=iy>W6FnnjUX}g2w)(;{R(!OSM#Y~ehKsmR z#CQfxy3|%fZqu`Nl4filt&|Eiy}r&593VW5EtXGc@io^y-M+q8oYT^6OfrH`w>{#x zp9WRgGq;jC&DZ=68pZ~)L`_34%2^jwg+%R&{X!o<^1y7`UXRgwMQ*b@O2;)RzmI8C>HywmsxBra? zoao+uIHZzrb}i4WSV2pRP*pxI)D!K9@WHL6=E0C1@>=+zfR2-AUwvarnQX%%J6dUPzWSta7`orz4$+2*sGKQ9@ zRDZYwWF7ORHEf!#UHUn-_<{J0Fco3J2#2fPRb8*hvPL`P+RpZ4rx7e{p_9N*GhMQKXZjv7kR1gGI0`yM3HNjfTVrt986ie^P zopC1;DLkCNIue9>8q5G%NpgP;N_)r$XD%&o&2K!BnGYA`l6^zyyWf!A^1PcWMjJhx z*S`MH{478Zc_+9&dTD$`_G(Hh1IynK-+ie3Z=j?d(9K%ux%CEKkNY>ID6+Nm{XO%w zOS9nU?P1v`uUL83zhny@WPdsHyKMezEsVYz_ZBxy4f<~N4c^e6P;5q|`C~a7TGjMlED3KpK&b=Y4Ivr3c^3Co))^_VJm* zmo|l3AMg>mhj0)CVRRi=ABxRj498yv|NF>^UCEfB*Z{NE=e0hn!EWKP!)wL^DnkFXTD zlIrwH=q#_ih=1`k>k~O?hK;A#FQ4Lmt}4*~qB}YIkj6iYFoIRu*>Km=)}Qh42&jx3RyDA1DZc{QL0v>57yRK9C{O5=w6WIdt z_OATu?C?ilO?9)D9i_T9nP=nZ4`tn_uP zK7%nJ2c;q(-ZUQ~FsQ&n(E6mkKXA0;2!2^GmH8^>VZ-jo7hGsBlCc8p9ZTPTEl7-L zOeYl%6Qw6n0ucF9m}fe5tO;jTE9pp-e)i%tQ5dXzS-N;H_jbKl%rV3c{mJba+#n-~ zP^qpq1~zZ@S@KfqWeAfGvO=uXiH*?3 z<{5ae_b*>(qCz?T=`2dC`YcPf&xv6FcP29;-5ngIsyMxGV@Evf+(cvLe>xHl#6rZv z{N4X+?=77AXqL8NEC~{VJ3&KmcXta8!QEYgy9NvH3GVK}9TME#U4#1%=UYflo^zkO z>ir8oYKz)pi{0*-p6;IRt9w3~b0*5#O{ z1kb3nwN$wCE)}a?DG=ueo2yI5&C^*h9nwquy`f4nV!VOQ%Eqx47gVob<$BxhB0$hf zB^{l?8#Qo|3ana=ABAwM)i%fyDrVB$v<)vv@c-Q5*W|>+DDcwWutM??Mz|`=YHQ_* z)~?bzZbZ>m(J5q5 zNyN}A76i6{zn++xf(f~C`h@|3lE*jByNGe+`Zypgf)a|R&9%>@gTBij!;oiR;1xF= zqFUALQPufOdj-Kt_BAXQ0+a5DZ3joEI7;ya{ine9n4JeaPUGHOS!g_}*M{Jo94AxkGY|g!%$1!m#K*|4`MqB042td{;NwekF!vUpSAN8 zMr)W}eAIbsf;ssPdrl{Q2C+PE&Gw8D!O`gJFmW2KZ&AIf(2j4YGOxq!wN4z%MuM;r z4TRTu+fi;a&9e#wgSmn>Xd)=a_5pfP5#31NnHu zvYqJNmo9f`3yJF1vbY<`FU^U<^VMaDR{O&Kiln@{Zro@A8P$UP*~SJKJpc$)M6A;M z^`ymOD#V9*h%5khWywS1Y!m%>oppPS%?*Lg1~&~3H{s*Cr8Sh@L0M;R!CMEXxWIousw9Bwiq}M=mgj|aEgqKVr z6e{|@FyHs^x0e?QjB^s~%HKR^P@=tWL<(ydRkUSA2SrkL2?vphww?Blt4Blm{rttb zU_ZAOOm(aQb-5Osn%a6=v{$2L*~d=b?;Min?B^Y8x=3vrVU7#$C&k(*FdM4_4>GFC zT&Nooe<-HI04it|3lyH9|9A~~4U|?~5yQzOwQA5jBrN0L>(QyI)+{k)g%r(g0oAH> zp~u&O!~uyPr5Ym2!bri+3{dYMUe)9;V`$))fe)73WQO3Y$W~yGvQpoA=7A7%qcT5@ zNx6m zX+sX72w#aylA~A}`1Zz}FH4u=E8mkZnrZzLE)w^}1S$o&qf0c)q6yw=5x+F}J=SW| zsN&K*?_RkWtQ@4GPptA(0}WEP+`@KNg+Vt1vx^H=$s#^S;8XxmMpG&yD#iCXi7wX0 z=@W|n*&%0o(0&M>7*C+*1&lVnqyQokL-~eUB6u;`BT$X7^#K3*$5XIXT#(P7-y}O3 z;v5PVtexY~-d)E78aYnegNV1;E68HzA&&1m%(xCJblbQ!{sD&br!xN$5RivNiL|*! zI6N&rkYAqDNwhF2drH{gzhEmbyBscThs*I$Or;xCYM)t>E(bx|`}3OI_qy z;;=BsK&ENFv!=O@(Y_MK!Hyvs;o?-eUBKXmxGH@FpqD*BUJMp};!%HGqDuoCZn<60 zO=lHSOy8_xblJm*1c%Nn7c#`cvj{ah^dd_D$Bg-z26Dx*AXzk}LyS8@z#K)$( zV$|b5VMT#8?2I_Pglji}6M$qgLA;4fd;=FbN;Q|Dv$ep6zo28|c}^UAspwK@>iF*7 zKWQ-8t4X-DaW|C7T8UUx+5^B$Rj7a|>;lN(Rv!}iciU_90Pqx5a~me{i^Q4mIvLgO z-Dw{_^qN=O7V_`94BZR|&S_fq zGmP-)&Nj&=w|`68^2~w_w83W)Ozc;vCeVn-Yr#mw2118mlwLO9W>W!d<+LFW)U^zA z`&UGu#a3VO?#qIq=TpKXPoa3&>!(B8Eq(D0!4%tMdDSG>7ST~hSuTkYS68%AvFzlb z$lSLsaRgH@#d2iIr@@q@UlA~ZG1X--Kyg+71ikh_#Qm_-cHQ=RIH`+{lBY`A^*le` zdRv>wgBBCH#n!1i@~elTt{1lJk?3P2 zpt{-Y7M_OXUuX}yz%hM@p@C7{0#ypYscwKv4UTLg1{@XiHHM~A9^)FF%tL^ZsR}XH zPTju1I2RtiATdV320+8MM$XBGozC|nOZvr$QZEnbyqMEb0nNXF*Ij)QJb0Tf-1KWy#k9h;` zM%d5sMEgn@p9pjZ9kHMbXhn^j+g19F=crJ8F*m8Dlg7LH#qS%ljPrvJHG_$Lz$P!d zTwC+QYv?wd628lxfA(DL-GgnohT}xA* z=Y|Fp-WbHA1X^MR{8n~d0`bLb2sA5ay)R0Z6Giv9((pjlkn}BNEpWmY^O(B8&Zv z4oq{}P16v(ELTPT3}L@3V+ec<0H|(pml@p;EK;Uc<08rzR^*5T$AM>poJ}T=6qH}% zX%S_0lIjuwQ>u43C}x-qx@>d3Dt^BRt$}G;VeXgWOTF-_|MBpn64)05r5al{er_si zdX$yK%KBNioBHO!L<5~1kCL!IQm8dI1L^a2Z5^L78YqJ+wT9~%0RqhX?$&ST^gx83 zDceE19JB#L}Y~r zP8>NeN5^J}r5zS(;*<{KMv03IM8#9Z$AT}O570yyYzg73E%dx{}f+d^zL(JU&`Gv+)l9vwO2UXF5r z6ywINrY-BX*m51DQCuEHL5ZDLYnr#AEOl4o9t@j>{an|%jj7(8Pg>aMkTD!vJ2L1M{y*k(ZN5wsK$?q&4VotKlGYAM*{VARt3J-s zJxxobH=|GJC^WI+t1t5(ni%n{o{^LTBY?T`4d?aNaeQzQ@2qlh*(pxT%3}L&r|-nyYb*`F2IEQT9#-lLtyV8~FBxh{LYsOZ zGJg#Ipq*_7B_{NR%MCWk#+0D$Yi_mx7#(UO-HWh{#3em#@oPtYWDmi-_RNcl6p{+4 z^wmxWs#b>(5*%~8C)E^1UTbM1AC9lrh+2vEI;gps73oVnfy_PLp9Ot#J zhb}Ly0;lG~aZkk58Fgh_^K*Ixp)9D)AVP+9%rV?QQeqzLEVx>=hy;2+C<#;>wH``4 ze7ZN@cO9pN(g*+iLbdl zR<*{q9Y^;|VP=Mg>_=h24U+RF=Xp3E(0Lg#t_YkPAu3G&=$P}q3qW) zu5Vwg+SrUH#({iOR$QJd{OJ3 zmCb^N(sHtt^w%-*KI*}t{xP@BNRZgE`kcFZ7x`pfSNg?8AX*K%m;qMh0i0Qi{*6;j z?ogiMy^@1%+5n`ig1%eqj+|zxSRxI60hiSc1U&JQAmWtptH~zD0O2tWn(pwKR`qZ+ z4VEFapJqw& zT_n9P9i*Y*+H{gWfZ+>@^-jE)^Y;;(@B6v_u|K?`s66DHa|AXk|G4y_BUD$nJp0b; zaxc7nhX@*DUEyKO77QT7RP0IVOEZie&-W7o03bkFnH5b)Vt^+jtJ<1pkrvV671C0e z?0(AF?bdIF<^>xwrC#3{H z5HWm05kGq+dpXHNd5f1y3@`mrRzk0A%v|Icx5Mi#CQX`F@wUe_KC%=U(a91+m(am} z1_{}P_wkL_MTwy+oi`A#v7>V^(Wx;&Wk{?`bY?um&xD@j1aB#WfQaq)j+!CLd%8-v z!XT?9-lrf?&t=G@rUYm#XSQc-;-B$0;8+W2vdo@y7_xOetF#U?gP~*(PW-WG7cv~o zN@E9FIOO&jFjE1=@ZQW`kN#}`SsI(5_G6sRz=71>HVZ(f+8o78Sfak;%Z zHne06Xtbo1QTim+L9fhoP^36z{eW{K4W_}&5le(X3EL5GfL4)R);p5bw6sn@x^L7p zx#af*3dMmjcgS`2c0Iq-0VcWn{UHF8V}bmd?4&R7D$|$Ro!Sp=64P8QQ^#Swxy&Pm zWh?fka9bIki>Js@1$4rE0i*K?@&&e+hFfFVr`?Ef;%5bOPRPg4=21!_`W)jyLK@kr z3$h96<-D^zz#a&~w%p*SATVoL6k$n-{GnJ>qCYAu^@Z6=-&10CU@9jQw$(! zECPVIo;T*=B0W1N!UVGV4%%rax^MAA`gYA>s2$2PvGpejUKQaSdCZZLT2_m6*1$=f z9O4gSxIr|vR2UC1KJm$y4YfmzOU{l5Y~r65#7Y2=z$Q9sxKT61Q(~Pcq?Z=)SwE3D za@ebR+Hchzu7^*&2cs^}9qcnEsG4Z`y8i@ERYOS^VJo4fef9j7Riv-UoE%M%YqGcJ z3EOKn=&8O?nKFDB_L<8J>HriH7zUH412)4*de)yM4<5RW)>*zx04CHixhKQaYNZ({ z-8$%68aFi3)@dM`hOG00(v#FtxpiMRM%aTz|E;MvNchHgHkw)XpO2jU83uNz+U?Lv zW3?SJ70iTT%>Z*eij6ZbE(I|H0kloYnAqoXlX_wjO9&8_*9|kc6i*{phlDx3K9?w6Af0>23Xu<9G|pvFHMP>_$9AD`Px;5cvdxL*Il{Tq;P-Jy9(v;6sv zHz^Qn;?~t8`OTR#N{{up`omHBtGm1upqE&7&MeS4o)|iU^(a3405k!pTJr}0+xc@=G9)eUlYkv12QxBbJ{E&xPMzL}2v zEs~)Gk;_}ij6==U5H=#O6a9YA5%7s17+{*{+xtX*Gm^u6Dly^C#Yo2uWso0s0NfLdMOYJNHS!KB;S_7Cv{KCwi=Qplmf z;C%K%zDe=j`%KEPeK)l^|2vXVPuS7_i03;*Cy2x&OrOfjCw%|WOqJ!rZ9B@}Va#R$ z&z1Rbu-Coxoc<6&>aiT13(%Tlk6 zRW&)Q9$2NL!K;tH|2Nt&2NCz8X!vuGZB!1#;Ye2Ac^L-@C&)T_aq`8O=y2j@(fC7U(#9?8MNBh^-DX@)h@tr-C&|1m z{L=MhQl)5OPf5fA#p1=+IU~^iYz%<`7jVcT@M6A97(Ou1qvHS}C%^5dB;xZWCI?Y* zhE%#hYG*_p-#o@2ux!&TnaUvEAnsp0_o?!YqEy&N@%RPVnPH3mw=5Gk_R>QWD)`jk z4WGcPcqvHBS9r? z?t_~Ij)o=7kHtWDg%-DA#HdPU2J<2*?{IF6RG%k-{>Mci1@luHF@DN+-tt<$*)sU< zu6DRL#5&#pm1$V<(&q_`w+3sL3F4bIo2PI^L;Dw-V~<-WUG~`VBa@28 z!gCtbP8+q+-0|Pe4_%{b#?HUksQhK)?+rbP4Zn-a^|ZG7=(qV=)3@JI6qS_39iWsRr4eI@pPD)`UqB7FgTeBO$%e!{>1j|j-8v)MXwMgMcK zfHwmI2~@kZhjh{WT~vQ{f{^snsmlA#@BSm8f3-N@Q(Zu^FGiRq;F10osK391k^ytw zGkYTTzYhp}P~QP$yr9Kye9mG1V;B(G0sR~GeLdSu-H%!>YhQy&G`A6#}$yyyMEKOX=86ZQ{j{C`haB{46PLpYb?zRIqB zJoi)N;~y7}PY7+x%w@237C(XEi&bv7OJ_XZR<=|*l47D#jz$WcHXjn6l? zSN>Ou3C^H3WVN&&E~Zsy$6BQJ25`7V|Gjh~KLPfAs7_)C{C}4)pV5=^GdrD@^;hrz z;l(GEIP&5riVPPyh8EeNU82##3i}EoR3YF1)WdB~)-ER~&R>e3m3m~GQ)Z{~P!?%< zNoi@Fw=~s0F!uf$Q*Nj&kPz3bu*ztXC1P;r3%ns^I}5&BmFdqO+ zgWm_|<)*y!=L_zxQ*pdGX8JqkcFq4}~Swi>S#6C0o_yH}awy zGV;{w%lC~W(p~EKbGQEeo%cCsO+6xH#td%$Xu#H6(V?1c%^{~?>=?%vs+FUX%F=!Vk*WjVqB`&q=hX zI;Vs-Y?;;PTDx&)FYR%hgTy$$*>u+$I5!HOb`3afR4HazbaQ;W=nJWIek3obdB|~R zZ<5w>cT3th$0u|O!y(w5SL%a1I*i#JGi12Em+yVcF7jSuncuP0S##;31G|iQRQqBI z=X5WDCz4SSUR1l@yrQY%HufXiK~-U6jhkM|-KS9_Q@cUTZ?^M@>~)s)1GArM+F>YI zn#ti<@S`)$^j4G%?eE5fI( z%U>7V{W_2@s7%El`c786zE}fPt5qm!X;ewgX}iQMxT*W#&S`L_*&JB6o!p5*AU*AF z2bD z!iNxomxNXahbk3X*^7#&i@Mx7QW1mWx!`rgCo%YK;0dMSAOewQR59*K=B#3k6Aw7P{YVfc!dxUNnCHdCyRP?`f7j zoStEOP|nZbE^9n%PVIc&;!^qA5+!Kn9)z;*^mgXMu7`B|?E<;Ek6`wFI%STGviYdB z7-`hH=%bh0Ha_ZR)Yi$-(f~biD;g>k6l#ZxY_ueRZJmYe38=9zjpN_y)1Ex;=8mGBUJHH!|IUAc!~Mo%jV znK~+IjE^*yTdv3ubM-m#doN}s4hFdPiHpZiF?Z{pjDu{8A(DyCFeA>RqQUH*6?(sasvE2;tzP zr^sjwpFiO5y4r}8F1=}s533}_6Hu~38r!VtFCnz|LXaM2M6OeZF!CgJO2QZyM)fg? zZ9auyD$->)yV;kJ%ZQFdCXOd9$Q#^Ja5fSsNWMSywWg-xo85vik*?sGy6w!LQVl~$ zq9*e?k=tMPAbLlVlMMj3FxHU19L_;rDU?Lnw|^O&Q7cZx#>}h$+FNh<+Ttt(+9}`q zgl@_fYU{1ST2P~g%(Xb#OlcRl{nT~9RX?WE8ocCl8p3&aDcZ@Ab{;hGQMP>xKYz?n zAXD1NS~n%)BFRu?bZEVrmkn~aa5YFJL-t*Kczaep?&Hu(gn4wC1ov*2^326ef=JVj zg@u<(3vU>xr$Uz#*Y@thIBg}ruGozZ!R6HJ9SiQ+sq47^?evc%EunCu{8GUo>?$$; z53wXlTa*KHy&Tg`k7V-JM^2e05?EZflm>IXTwg)>llj284zGClYX^N&UKpW>XWSwP z#P>Jht~xk0^wg74q4^*F*hM(*s>i~_YYMJH8^-bAf;QQ4bk*5N9{TaP8_ghNpuG;Q zF?CKJW!F2V8Q@mdufVujCK_vlb;PmR^v?NpW->~`JZGd`Mt2rxj2-moP;HFYO>3C@ zXERcBNwVub2`r;!`Lq<;!NAGVK5pN_bxVd%nx8RUM57ZR9OMwP?nS$T*NlH26QaPQ zwJxJD=`KAuoz(mw+IPO;Y_hW%FleC7iu@(G&6)V|&3=8(l~k$oac^Z=->y+}THKb{ z_MUvjKp&+k1mg-l%mNAcqt~>iQJX=OM4MSefZ|?zJLocFH6Zl%R4T++o3tV0>C_{qEi%ooj>*im29BSWc`qSti(S@aZ|=-AC&K!bGq|fA+9$ZdGxoT)iZ)l-u1*i=aPl-PNEWH%7X{+rD`k z$8y)2=b!w&=hWZmL?c*Q&{G$gV;6hWV+-}B6LO1SQzJ5YJb5iR^Fgk;-HGXj!7go6 z;~`o&zwi#Z(Q_FrWIH$1yRWR*x575s*2>m)DAFejV)xBvva)nR4gUC0;eB^AuZmT0 z@&XP4smHJG2+_UDFqj`=(RP<#O-z&%kGB!bM@Y3$H)*MSs_!EsPcN_^M@=51DmuNC z54G9-xVM=st~(jBwPa1 zeuKE>RF;mQ!xm%Ks?1LY0f|F9hwN-u%KN8F+~-?vZ6U+$Q88hj%x_%RGA`#BhFRbI z3it0hZLsCQql}(?RM`0>`YI1(pRQ+(f}RkkcY@NtVNJN?(F~E7;@3l8?HxvGrlOgT zvFE6GOO49YjJ4OfsrX`$xXdIg-c?8ECuE532y4MSw3*rMhAaNV?FDNyNM6ChBcf<-&RV7>JD2a*^mCTvB@m1T$*^8dv4TU7 z_Gv2Xr>eu~Y`WoT^{>!Z@=1RA7fRw})LuxPc3~JL;??8jD5SADtssSpCbuYD7ksoS zOR|Af1gR;tA6bOl4{4(Wff+ZOlL4-;`$I-hmJ+xxDAMr0E-ew?Z{I`LUwzyhgiBs|MBJa+5-*qqc?kHAvC@jiO@Fq0ZazcbDyGhGHJlv(uiQPC&-B&p+ zEdu>hV0f<>|DF%z&R13x`V${OY7TFs?096VPD(5v9(3U1?M@z{+3`wHK&xVK$~bRU zQ`S?&`n1DShEaE7(qr~`*Tj{J-lux^E%&?m_K=}@-X>7o87R=4;+hRaIU%^+WS4wb znzg5)yJw!=`QE2URyyi0=FlUbs%qAXrTUUynfqM&wo!}AGqNokT1C6j6O&VcMUn!E zm=_w#z^>?noUfr^bc&&WdBXx`v_r00z-Scs$H~-o+4c{wnaPA%VR;7_rt9Zzc!Oqf_T@n4pXtw7U+%S=aHV6?}7*aY8dFa0YQY}<6sMeM)+w+c5OFG|+ zCX+!(*eBTGlY*e`$5hIia0^;`qUYq0zj`8GAc#F;38&*rqOd2R6*EY11lFjP#c zJ%+_v1yG@hi_kx5mykGZNH8;55}dO*b`hJ?5FQoVFI{y}%Rt9_S9b&+``p#Zw73qM zsfrIswGoEz2pz0oHSi)+s!i~3<{grYV}ftKeG?m&jnSyxJBo2<%ZlLx12;U^CGa_c z3g8?mjmL0I>9E$u*i=A}b3V*@OQzL|uHAY;?g?tB+x*buZU70}MKuM$iz>T`7eOdI zY7NCQrs&g#a=uc&rY+wMaw~Qqc(~+!G(|sVVr7DrGJW$1?Y_CVt~`lR8A=Qr7#Gr2 zAurAMA-tkGmcGKRWhn|%<6=8mwYpN$C>qT7tk-4!fW2WHx#Wt1FY#KvD@)}FlMyMs zO3PX-IVQ6#Ln-gaccYbkoytMT@}K{p!{kah;AWGClwjG{i8b&pk1 z(ecG?_r_zV38wlk(OowR`RSTjYS~f!>OHq_K&iUCzf}|HRa&Pei=FxzM4!O%>*BJ< z$BEol8ywJr7vIj<%yTxw;5(+sgG`GD65(vL6#bebC)0vyQ78i*Jx z9y1OaaU;GR%$w^Qh5I+6?6nzS>R*PkPLFSA98_dy4c6P3^|gAA@{;@2f|>HLk3v0@ zD^2d|GSxzDtxj#F)BF{Wf3D@dM-Qc|NInx`Xn~kxueyW#fK-_Rp?|$;y)t>sfoIlA zFkZ$dTHOLC$B=8(+-Is;v2qhEx)q{o+--H{-#4=+dg)I3I zt!t!bbB+bfM$RYs&1Rtg!3m`J%Bxn#hXedRqU3BU``7R2U?3VulR2Nu(mfRxjq1 z;$>;I;Y}-aO#-EH=3S(N(kikKQ1nF{OT)`tKpZMVeav0+j=Ir{dFzW z+}ze>JC|?#GbrgqxR+8ruz#bie{UoMz`R7A8LBW7{t*&Z&o`+ZM`1@ZASsbI*b-=V$3Z+u?!F3Ba4jR__#p3}aa{63;&+OT z4?;npjf13~LE#Vi(EKa59`NBcZsl!TeA2#XO6p8{nLg*v>OrWWGh5ceLYa0y7c#0I zAt3}`d3McNr)3$>fsjArOytKW)Cwsif>MG0#1|qvRB|^@M|UPvkx-f>{?iFOaQY2jn68K<-8!*Xz zB<5A$TbxR{y3mSo(X2#8OW8eEuXN6;xOxCM2=-k5UT#|%-U}}ONy|hDdY6tyjmH7ItVZ29h9Fx+q-EF&*G`yP-#gUJ6`l+s z^=eCj4TO%br*JkQnXr?7I1Wl3NXU8Mw$Ff#Ye1&@P^U8OR(g4Tgz&j= znxd6aJ9K`dTJz0z*eBD_yu#tf z2QDsFX)kXFre=;5zk^n17+w$8R8N_lMRLP=rP%B=5XjS5Lpe?UP`kMazSaFfE=x&s zIY^I(1@l6Re9k-VXeRt^|JDu?MocTd%Bu*pT89w+ ztnwfs2>Y<`pGt-)PijyH8|8TSYi4m_XHlOc2hYqJ%0-A$Q3g{k=h(&BxhkWqli)%M zH|tO4S+Qx`4KOu`N*hzhVowH~gYJ2{k;$Nl#tA7n^mhiC&ZVqT8lQT#^#qWM{fuRo zcYfBZp$)(5!8SUnL5Hi9Lsu7}*|$y2j5U($f_ydh!v9+2r5$Xf{w*@~A816z%z2^NuO^ZL*up* z*Txq=LtrA?&WQ=E#t8jn)GtLw+}~?yL-fBcgSBkeSMRHEMjsiU3Yenw6BEZ27_R-M z^4*9~exu=g=^O2;%T+x~$Qz7JY<97S58s;Ayim2PK#-$nvRc4tqEj*_TGhI$7u;qG zwMJy)`d{)z8{PTG(7%u z*TE*H`=Wlf{0WiTT-uLYul{6|geU-W6E#;^jPa~M)FA?hT;x1i3iNE4n^-2q<5{p? ztg$RCZt^$pLzE08z-waxVnl+Fk&*e5J9^#Mut+wUFkW5mvj@Mj4!7miStY;8E z%}eaWFdX^SsP^Dpn2lJ}wy}K2aV0|d&RG;mdCdCLm_PQXBN1#GI?&;);`rJ8ctYWl zkaTfq4og)z-b_yem2FZo#noEo6-SAW;d0szEzaX28-rDC)Jf_d8Ib`?R(=+fZI#eR1`J$+dTPD}=>x5w1lsg(kOt?g>q%w;tDV*{xjC$CB zsQOAZb}LA|uI}UXB=!dzfuZ#VhnWP&{dJFNg^=mKL_Nh?^~ww-M%N|>xvo_zRZ?zU zZj{r`xcMG(!d66mr-n;zzvchvJaQ<{WAp9hhYChR$Y|s`a*=E%iaq|PygmXl_#iV- zRrRjVPN3@W5Ld-%`Zx&Fo;oP(+f2tVTXJ3_xcy66Gb5eS^@zY^#jC|tmRYcU;aGNW zPt`VD%1!$Il3#6{S9NGttCXCw1e_Y6{$V{m0UZUYGxP9Y}(07NOx^`znZj(;p=Rgu~D! z%3Qb)=-1=;zkhYS{rRO*H8I_M$>9f0tvt!PAxdd_BR^>(-gMUEWyPIm>`a)HugXt- zl*3X378qDS%31f~I`}eFETAY;d*D#3jP2_|MAE=P1ov#pbrv z;RlBVRbA~jy(ta9I+`2XN@u;r_q!kk1Vu+f5A;p9WW$?Va`3eOKrEIyeN5?JdL317 zLH^0Bu6QELe%Po>V`A{~p2UloKt#7VLsf8BjJu~P8CK9>FnaDH2e79g;+2_j<=@omsO n198`Xlntur{hqP{lH*5sxw$zwnY}hJ;743oTBuY&$M^pOTo>Lz literal 0 HcmV?d00001 diff --git a/docs/reference/search/aggregations/reducers/images/simple_prediction.png b/docs/reference/search/aggregations/reducers/images/simple_prediction.png new file mode 100644 index 0000000000000000000000000000000000000000..d74724e1546d5da1fb798f1129f5a93b0f23d075 GIT binary patch literal 68361 zcmZsCbzB|E(l#0_xJz(?ySuwP94xp)g1ZC@?jGD-4{pKT-QC@tkKMa_-@Dn5-~2IW zrn{@Frn{c1r-uUj82X<2SG);!ZtRJ>`Py=m?9 z@CdJf`p_UB?;;=K|5$ei$y`_w?$-zT;)cxWZKTnUp)0^Zf=vVsg3%t(>ddf*;k5K? za*3;MmJ50o5=35-}!w+dj1meu`j>r|HV>NL97~$i` z)y^wujBihf{Ht$~^7ZX}lB8$tE~Yd~kQGO`I4A-kkNk3dZ@OUMop0Awa78M7M#4vB zj3ifTy98F%0Yym5NZTehzF*uR_%CkeXV*GO_%;Ha+Hdxjg&m_f!G}RIeV`=gAb$00 zXD4;NoYZ<5YNEIqxvRYXqDa%1zZj4X&gu+10{1l{#MC!^Q% zB!M7_4s60aF+8g5LY=|bu(XZ%VgYd%__&nxJWCLIJHhytBaBeLx=G`+4oP3#b`yax zjDz)Ps$H4OZJr8F{hhVT!wTNnN?&|TL41mTnTN;m#)+c?KV8tNowG~A!CdRDP8JrX z^0^FA@A4x<<+CCG!a=Qh^Vv9jFq|3C~qu=WSqw(%A@cJj9~Np;$8 z^$tdr!Cb8C#jLdM{ScuML~3&GvS-Hg`;}3Ps?CgR2MM@!Q}|mAZYrBJgiHCk81ctU%Z$h$2nN6{i#Qi=$Vr|!767A-3=@GX^9_>)jeaHXI&Y_WO&;>T ztM57E@4MHMqZKPVPYqbod*$*5zMhEg@$dG7Kd?O1fByM#Er5Gn{9L&Kv>deME2=h} z^Do5u^EHX2;n9v@3Uzy?9A<3j@|KayZCDIA(F&Nzfy2bKni);a_N zVa+Kx81iiNIt2$pi~tS?T=g$e1?Uh#>;{-qADnRzeBZux?0rbTbygHy<#p5~DCsZo z-wE+V{ILkxen6r4cN1w!U@yWQ2k23`Ynx&Ki#&Z57SOBabtST%VEMf>nh(icU zh+7DL2xSN>*eAiHOu0(3d*ldIJD7NXr%v2XL@B93oj9sEwYavpZ*h%r^%NNjZ8_Rm z*##&$lUa6IumxPgj^V}0hv+ki#L*NY)1+LeT|Z2J>~yPbVqK84f&)o`$UvwY#zUS% zi$kwNdKNU6?<@coD6?#HJG0Zt>+#_UrE%Da!|@n1L-Q-MBeNiL%mb`_qkWovumiaX zodS0$cHzhz%tfyn_$;Yr|0Q^{uxh>R80U#rZjT&i;`RjRLRXlm>#EvntB z^Q+aW5UR_oFRBl!9%_s$XDY+1a~68$%jQWI=&R~$#=bGw6@F{5&$V0Lz}gV`7V-^X zkH0~)d%UT=y}8S@4!hCRF*C^8H!@r{aMG97v(nEy)YVaw*q2BiS`fTPLXTR3VGCkQ zXN!BrLqMKEq{p#_-e%JV)ah(KcIr7RUSu7VUsvC~+Z@}p-N?XPM`l3fpi&@WAeJC1 zq}eBN(XABzswtt-FSwtp5I!EpP*NHFwf1YkIWBVckHCJSXyRy4#nQs+!ge!XvkELJ z8Np$38u?OjJhF0%Q)*qp)q30-Eo0~L)9jTl6crQz3Ni{h${A@gnJn2BnI&nv1dIfy zw6TPv#7m4?6hahTv=^-~y&(-My)ylkj+)G@4x7A_Km~hhykzd5r=5#prz;90&vsUhn(i)Fzrpjt$B0Guy32aWB917HnA0lK z@|WQP^lI~f1B-XSL`M}z1BVnxeI4)>l@%LaH(u`6P&dQJ=IhoQ>gzptw2u*3EVu?A zWj`{13<_@uTMi!#-wlrsGYngjDU`92p-X9DM`tEWolVUe4jW+_SxtgVfl8j!0D&V3 zl?hdp)*lj1KrF3POHgxFBPca4jRZK)GtA@FHrmt}l~||RJT1y~(=C_xr1!4&v@Lrr z2+nQ%>}tq~GYs2eCU+v&D*%}OtZB6`y4vJx^;~$$*GAKh(GI+YJL7M8I{>`}2Zh4N z&*hZha+yQlI$U>KaZEE3q#~wm!fxDY9%(*m$Z&q!&N_(RVLU22bse=|lwLXdITu#n zF^|bP@(dVceXT-N++oI25wUI6ENvB`OH_9oh z3gZe7%wG0O#K*;#BWv+?xZR%jo3YB0sH+r8wXB91Uk!u%@;5(j4o09xJP~-~(|_`& z^CJ31l*JcK%}Doc)Bjg+29>Fg9Rd-)L?76s<81j+X6NyMlS;Amku%)L`sG+NZ(sehO}JXS&B_k$g=PM+{pvi&>%DU&jTUmz)86 zFne8Nk$Wk-v${wPUvE1fMKePETo+wcAAUYiJ`6$PL%t-k(5qtDq35HQ>)y6LUxxgO zKP1k|{%K(KP2?g7^RKUYt}iY2&N4QO~#P)XMA7w=FlMv(p@Z2XVHuX4;SefL9e)9hPGBF zX>Lt-z`U~eOi!$L>N)eg$a==Xe-r*}ef3mCV8vff+fQFh1E7nbg`_K^EqFD0(yYsF znXEdjqh?e zsPn~)`UIo{KgORgYjGW-7G4aMI?S-IM(G@BnsTJ*IoP!5$ZU6?be|VH1e-jmM5r-=~_cH4bap8N}~3t&GP8Yv$23WdMDS=m5SN@P*$vs|5!jRI&MMNX|A zr)nPHW(7aH=~7oc9BpI?VC!(+6ykRJXbNuu-xo%N3aSjw%zxz&>YhMf=C#PMG`OHS z55@J^K??d7 zriv$5W*5FE$O|49);5eHbeIPZ7ai%up?km3EljC&lQEv7Z0qv{0!(_aKDivJaT<+e zTk7P`#dtP{!;^yp=8UQK44q*OuKJ|LBw&gk-&kDp)AwJZcP)kwjgaw)DX@M#kTbar~NCyMohOiD@RvZ&y$>{*~eWdM4VIp zQVSYeDjPbNxRsbix7v}4mE9@>D9gUV*oy+*w8#vg6R+FqxXw)M67FKbl>D*x1+i_1 zrGh>*GYgOQEz@TFF0@lFcrbkc`&16Lp@ku;t$bujxVM}7{i+*SdSOa!s&?|=W95_Z zRPD|6_HeLf(ZWEaamM%E?OhChbj zmP|{$w~TU9beyN2Jl|T>?^B#wclFS6(KeCQkQh-M(L+(jmOZtyrR~IUXuHUp#YZ*y zWQb&|M2h4d8VB8H0(dnXBU+hoOj1Ar2aEL%ZjSs%Tks7Zjl-|QPGpv4{e}@z`v9aG z>6KE|a&?kGUgJcIjD4h?{Vk)T(q{QCQSbIE-+sS^@_Gqco3zst{5#&O)(98wdxQSpvN#u{^nfQuQtx|-fc+;J3rnjaHP1|zm({)BQc1p@dSQI(jT@MGBvekP$$>ht~64 z@L64fFE>e0m+(*o>1H(67G^dc4y`*@mNHsK?#o-(5#Pys@4QuO~)T5M2?i^{U_x0(!Ca&4z!+{g)N(XIp@zqIj&7E3dQ#+4s6nU21*k-eSXyY}Sfr3GsymEa z?|e)SW0YAPo~1b~&9;)UIyz>?d-)lz9lzYeZI1bH@ySJ}r8?E#h?A|Jlyvs3NYC?gMNwMY{JLef~jKot`NJCJ9GQ9+Od^l9w(;k`}`s&dqDLtLAp7`S#Jz z{(2AJ^mORwP{`=)-~?ZW2dFnAt8KN16H{Rw+?8zJC%3V?@YbkL9K83&^Sj0?&}&D>ibeI)XT;=FCfR(c-g@3+bNG z2Oa^mev;&T%CBNfdtHQ(0~EGCV&jN>K8`=|Q36<^pBd1&!ngfS#vx#ZF0yJB@e5-m z66PWS0T24j95{4-K@%>uR9r|pVO{}QeMyAan$#Gb76x0Dv>YEtv79mPP{x7~JLVME zDex!;Db!$0{HgtorR)cps9550RjC!Dl7yaxtfg0cjJU9CeQuFXNYhYt5L@|Pgg+Pu zT8OFks(rT1huQs#K_iV&f@DdX%DME>ohj&JAA~7UB5pm$oqT#<%Yp}f^Y%m4a|Prn zY(5+$bn1^Ep(vtzJs)}qj@w9)sSk+rh({=R>94eJo3uk(BEMH<=c2?;=3kn|GtPJc zM|n>Ut=+j{wo;l{=%Oun>BEBOnkz@1AHPU(Q1=q&Pdd*?MmN(HCvVPu%wn2KeE6%6E7U<3gr>yj+oi#Tb4HhltEj>E{q z;KN*M;Hf^IAvryq8|9mATfp<=dqgw~v}81U2v4JtIm%CzXGF;i$re6^LM<|~lAcL! zDZRn@scIQ@$u{&lsI3T|pY=Cp>Br>t3J2@wXhM6VVh)ObP6*w@^8 zlWtQklcS&N&6x=WYMDwDZjd_H4W!v|TZ>(G^|1N4Iq@g;Zl^S>iA9`%yHekL#rouB zrA4--te0!KL>XtVGqbnOj{2;FL{t)HTZ~nqGicN>Ww5Eh8cusDJ!S7WI;c{I3bOKg z8=7apg044O_0s_edlZPfFZwwcBO%l>n9e$h2xzdd2@>{TKuT65g-IM8)koD39N&X4 zBpk>#f$ZZJ`(_TjZl7`jBzry@idOYiT{UtCe;zJs{i9V1{5s!5&JgOT$h*IW8jG@*8d$7Q z)Ks{l)JJqy91du%Wp+%yp@RPzz904^L!SbrwWv9?7&gzjFSTp%SvY`Br zxeIA2ys;i?Ok1iZc6ET!`sK?|s7n~gIoNnQews-No3`S2aVDC43i|Fj^-Ar=IC1GY6TVW%$VEZ4~{b4GW0x_w@~m@j?@UA^?|;x;W+ zSZS`fd~FweUMT4y8Db-(9wOdcY1)U@t8{6G4ES;(_2|r>0tprl#WdNK{{Z@Y(fAdvmHg!vTO?uGwVCRsCx4e(Sr` zdlDXY0SJ)zzt<1a%vCfTG-PGC3~jCH^o?u{jOkpgzrL3gKtOn0xZZEAjUDs}U97Ea z?73WciT`ZD^?v`yV|rr3KbtsM@)B#vDi8|W+8GnF(y`Dn5c9zj5)$&*8JTb?iHQBx z{rwj&v6+LzS1x*bXJ=_2<)fbMe9Q(Er!;e6ZO=kF+2l0w9tif+{Yc$7xU=Dyp}AW4S1VpeP4f zs0Xc-7$Rtb&X5l1pmS{{h2KI?>6XxR?%GeAPOCpay64SJxER-mhrkXyj*Q@K5}L_OLI@@DTdAR0fGMQn&$M4eSmc23o=U+kg28n zcmxKD1oHbu1hSznu?`JJ2=dPh2?#;>EOw$E8sPh{fuTX6+l9cy|23FzAsNU9yG1*I z<|Bjv$UiRuXn}il6pruz8cc-teKf_V83X;_rvL&b1kza%)q(uak=_R;w1DXrJsGTI z_Y`XTmchzk!YNwk7;yNGJ*27tV=K^?$*j9*WnLGKk8PE_;puLl z9K#S-=G$oCJ1gjG6)=An)afHI6I^zEP%LjTYIFg0{Q*9czx9>j#zI5&$_uHy8|_nW zJ!VC=@~jEVefd$B+I_%+JyM&81I#~d2?F+u^hu_2efMz!m$Q86(uXaC<_50KgBLrlOxpX{+ z6b@0n6YTh0qfLfi!!9+V!ek4iPg_z?Kdr>d zmEY697|XE%aa<5(+nu3R@gI`#Ux%7imR|vG4v!EPN-eP_FQ+hz=*~ndZ`&(;<_^^H zCXXY@@)MKRrYt|zc)f(`UvA|N=So8&JTCRztX>5VJF%R}<~doibM-<~qt z#yB zjkR)Rdv|)fTp3{pR4nbEu{a=q+mOBzpTC6LmdZw586Zx!y&Rbz1oZ_Qpd37Uw@dOL zp^5LPw+Ywt@MMJOrip9WHe`4$HvHtH8O{%PmscnVcb}fbwfN;abw^&ztAE!pLaE`G)WILXbll4SO)9C!d>$c$_zvK#;Ac+;Yl(dMUo&Ws815nmF$v+U53{8mZ2bfus9NL?Ud+we4PTk_X&@q>jIpO8@l`4F|k(sR_JMq-{(6XY2&r>OnAsDl^Hv<8(XJxf^s}%hcGp-8``p`ecDApImAh) z(4Z>VU1RofnQD~^+5-K_qMJfSLDF#n?))W-CMrs8T?#d6?I(#(ac_23uC|X|p|%q( z&TBb)8PCisj!P#FF9EV<&bLH|ESDH=n>XzHjRgf$H!rxcKnwa2k6r2d?;LmVHKzGK z|I!Xwq{9H!rHrw_yjKTRhM8R^#QXAoWSZx5u$Px28E;$u&((&rdIvKp`Nf;XAeDTX zm@Tn2ASEx8jZP6IE!80#wOl*GpI#UjxS{`T_3O6Ox;CR-2cGvcJ-%WOfwrqWj1%2Y+UXNWURqI zT&9nwI~XK4Mbwa$n0_KI}bAANH!UFo6j{`;eK?8>5MRN4iX(jkuoB#sxObD_uhiGQ@ zuVwj*F!MVTWRBK#(R(b>!w>$uzR;*(7vH|=GsW(we(+!0f0C;r{$~?kXtHq%%HYfw z_|e2574csiA-cbd@1O^ZfA>TxNc+pCZ_xNxb3< zm?1m=em6}DvQhsTRr~*=Jkz`KImd84cE1NngeWj^6#RReo12-kpT5=l1!8>i`z>G+ zFoXWd=b3^*r!Db`%OP`!u9=3#&y90%5ol>XSFYo*;y9-m6Jm0r(RRNHw^(bA`iaLy zVa;Nxjusw^!AC3%YeNI5mT{zerB&^ho_2xYsV-%dX8>Th;&gLYcP+yR=>7dWDr7)c zo;N!lsR0`8$W+zT0%s=YN;QSoyq;*|6X)Wt5@`@ZVO3uNfH2H|R(C-=_I&V=h;nmDifZS5Sl>t$}+T%jU~#YA=h8?Vhu6Kh^>u9@JXX7g5G zq_UtOXkLE)ieD>JR$d%<%jKh-8X zUp94;x}rsh|M?ueOt&?~1z$MuW7S;Ih-4aUdr(4~@Y-jVY%H>KwWLXPO~b8PjQc6} z-O2pMpz%W8kUsi?J>p?bXEirlH@<3jCsKAu-RS^ddr zzkk|%mcL}%zCs6L`~Ep${nz|5rb?Dj4tkrRCN}rfRlrd(?Z2zFRY%S4IOkF(BYBkk z`UUm-)Xsf7*w;LAAlKffGTDQvCv(>JL{~K%53I5JJYsygJuqFOPVe^muxI}E`htdu zDSo<0_WARnle(TKtB%8j==GWRTk8CE3Z&KSZgu5T^V*BtT~{Y7+|BUU81jcf{b8KXY&tp~j7m2I28XFsX zu_AFKtx?$*MUbG^2(xBCUI+cz!srV@3-CpR6KhEsn7cf+5*J1zk<)O8=VNdDIb zHum!3;>n`O4+wY?xa>Bhw^g#z(mCkVYG(GELHKSz9tqgELfDxk^JEWMo zV3izTgkiFvy`ae*(J>tGltA1`OK{e>ho8b|_`&mGClRo3JzJU_@sZYXr+=%a{S1!2 zB@=ReFWp^kZZA%P=Kl3Eu|3!6WKr@4KtMZ~(A*^GQr29WilA|IBt@;w&fHn}yCX&7 zgU~3Xp{1Qk(L^xo34k<+n0&6uc#%oP8cSuDe|oG|YcL*4Xo{$DdJGKGvyvJCe8C=4 zb(~eJ@%|9Vs9qb)7|Mp2PZ1^I9wTS#+n&vFzdgzpFfI(tZevrvOE8YJ^tWo zy`b#xPUW3d_(Aapq8}c08vPEb?}+|RGRzTJn1TNuMiU|_{=qU#&jk$pn;5tk1_7;j z&I)AvmzwzygN5Jg(m4el;>lPj8cA4#t+Dc%;ZJG_p92%v#r_2^x5L} z^+>9!^SzbhHv=bTYIC?nuG%4bTY!?f@oT*{K@w9w$wbz#D?JCt=(Xi71mXBd>Crj* zx5tmoU7vne`Uf-^%{^c3osQde_vhI4A?5l>cT=z2lKOTTaw^zoKqzakkSWa${?cxl zNBnLX$99#jj;haYeR;p(av`~bf}#*7nRtAWWI>@GHOBuEFbee$7&|uE*o_NxqwM@joIQ$jt0#r?DDp7H-{T|3q268zXoooSO2})m z@Xv#Jm3D@#Po#S#BOQ>%q!z)^-Je!$X;ai_y=|5`vplOx_!y8ehlN7Xcozx zN_^G*yRPaxMLN-*3w4$z(5Q7cpL*r>imS`BJny1$~KFyHgwg^8rYiA#Nm6cg*nY=qJqq!;uOM1 z%_&xMd8ei|SFOx#uDijv=#u|1N-A&J0Cn5H6r66ndYG;g{z&%PkZQLAkH78C;Eurb#ev`jLT&Gi8N-}* z<~v;~XU)J>=@j>k@wDr$JtN z+%FaCj$V2eMmiPz1uXJeI?S&!XCrmyQHF*TM8As9&=a*N@`s^ojb$z6f|pFkSI&Oc z-3f=IP+Tk*L}dtG;o5CuF{HqFI9W97;-~UR``-RW?;+8Esk0tzRvne#0Q2*|h4aix zR{{=#fO|4&hL;fv(V`DefYHfjZ_o7Ed>v85w>wU^)5c^6RS!tjxS~`9<1_oniI_9M zq)sGn(IKz6H+F(VRX||Wu>z|R0nu~dN7fG^YGAhLr}gwFHQzlSpUY6!4!6)HX>X9* z+Ht_W{@1%e1a~ylf}-K3Zr9xnCv>f>?xkW_NO;m)$<*3>9vK)Vp?ZkkeL1prOas@Y zoH7j!h*4geaD@*wAG=I6({#n_iK`K;RiuJ-o{A)wE)*(?q*flyQ8R}>m`2Q)gtHt~ zQZ?XAiw<%8>VK{UHO^=hhw?*mysiCsYiLoIuEMy`%+b+ar^Y#}6q zoj;}ZPOzH4wBWQZolKdj^kD(gZp53O=DZ8gO+OkMhq5Nq{hW^-oa2*6TzB zhNz`0L$-4fH_fn`Wz88+ac9t|tE6qi?_4O+)6;`CSD7UjtD?{CT#8ye2JM1QHHk%D z43pc#mRXYGIM+!J*QR~XoS3Th@%`S?yM$bLvi<_@pF2=2Q`7^sDeFPuMfpf>3s)&7 zXSsyi*!N!hj2I%L+3vJ73&kU^5=lD|(qzn0lE6@$#}{ZCCIAAC1I#8G_*7D6YBYua zXfO5ivQ;5}Oq&Ci1@@mImcTxYz&$*#eHc~%M9dbBLTa~)_3QAEVXP_b_2Mj7mA<8C zg0kJT6Xb6pu&=^#Z&+BkFH4@IHO%EMA!n7#WO6*qP!i0w~3fgQ(1e5b$V zlMo5-9Xc*WY;S&a$fXhvae68K!vG)@T40Ka4sB;af=(rQ;woCTaH^xB|gQ^p~}uiG0EH9*FSZ zh2MQa1Bn7aQIh)hQ+{kuo`Y(lgT*TJAkHr2#@tFf&N4VCJH#bb*t%-W)|{;B@?Rm0 zZy|Z*FAQj=L(CA!?*dq;g@wi07iV+@JxBGZ@#0Y5;G!&PvE4*~g-^J>fSB*Vwj>kKsc2u;xCtX(1%)E{Qxy8aifXynk zhOh+};j2%z>RalV*2sRjod-ex>pKuCLC>cL@vMZK-cDw2%SAKU52He4hjdIG)lDqQ za_dwaY*RJ?%mZr98fMMYmT&CMd#R}pT=D`oQMjk%&zci^9V`W0p>D_T9?%^We|bEv z47svYdg7NopE78wre(u}b%T*R++%>6(^k~2OrQQ=pS)uV?udDD}6Y!UlO_MNV0s@!#Z0RCf7JX&mhvTjnd zu%nJ(5G>+kucLe6+(~=>6FEZe^NIBS*qC)G-*J`vKH^w;cFb!m#nr0h^v?BXx7TP_ z6^&Lk0e-=IcnUcOH9crNI{v)GMfNIlPfe!TzY93{Fh-cTbb|QQ*XA5RZ?^|=Y*79T3AmH6Uo@nqjzU*(Fq9!NZEtoT}Q z58n2HaIfmLyiu~Nj$C4mk|n8`U|QD8%2MjL^3#r?R8$mo)x3G+fP<(qM^RTs)mnV% zw^Y7#912dZQ9J+Hat?_TKhpclN(T43Uwxnu9z=xEw9RC|%VNzfERuSFjWN;*$FvMa z`ttUShXTxBPzEL|u=zzo`{QYQM*;@JxM3DQ5{PdK1XIv8v4vTS{cMY+b~3A7)Ra~N zGwwmr8=u@1|C${79R?Qn*aylk27RKSC_2v|sI~ z%$iIU5cfWP){|?;Z%j97x#1sF8ELG=bK+i^WlLDCN@w=0TYT|u6a&*-SY1%tjQ@gb z`lOv-n!4mXRFL>~O}%BKq}TKmNiyTgNH+f%dgh3^;yj?}W^&3;VNa(Xngd$@5=_Sc zfY_;j4}cWh5WW?>2S8cx>|KzH%13nqm`W$d=T%4+pt-Xfn_;y&G)-h%rU!!@sv~{Q zaw>lIbYLLNkDA_`J-R%3)xt;d3S3DsH`!sBHF6Ag;b}g?sCN&?z3ykdDK(Q|hu% zsu6~Zf2wb2Q8VOn*uNxUX{Vc5{{YQFPH2wj#(sH<*sm1P;#@4?nBVfTjwQ2}AiaRH z%WZ1x$sw<#sjh_W2XvdqMf9EeICt?$v83TL?9k+|;T+5B19iwJ!e>}Cy#Ht=B=$vv ziJC}l!t{{yTGN;4KG5o%0p|yMPGnqKzU92Jc2CF)EWI@f%mhfvoP8H zYgnY z5d0pa5q*&_=A(1VvVGDVqf6>)pnZ>5NTx3Kx$;~XzX~)Pp+Ai zGpLW1(Ia{!U%d(C?=(@s2c|!;tcC;YJy)5`DdVjn@5l)>b;g^QhHDvwmt4;2)}i7t z4b$`$cp-VO0lj+eX?&xF?x(Y1M>H8@o}((OfK}-MEtxM}ShC9$EvSFTmJ&tlGl zK>YQ*3$tWfNfi_h(g@AvY*`xR)wxSlv?7F^UtAW6oTgu;7{b{udJ&irdj-Wr1QK=3 zo=Cs`UC#q0{^C5NXwr65C1DbfidZ_%$kDB1u3={yBWu2pTOR3AX@UeT{>~1<7kYj@fdxyo5RDglVUhdRdegy2}NygnWg{v z#4M23-hn+K%$MYE926GNb|g*pH6$Pgk)r$3_Z0oAx#<%HjKg27>9DDakDd-OQs%0k zgx1AAu_s-L;JB>$M*>U!Y|n_J&hy`C%2#5-MsG6AuN)&~b@m%0<}fxrV^(b5yPdw}^I+0! zdHmFz`4wB3?}4Sud!ZB;e+}FO85BZZ|! zV+ydH)4L(QP}DFxTZ8LT5c>c`D__pI{?o47XNo@$llW4QRBm270bC|4*+Q!G0$%J$ zb2@9M)33!6kB+ZCS;79GM9%nB)^HsE$FT+^yLUqiN8o{rQGqh*Sm14Oxf$|;# zn$c$@Qi1~S^%v+S#-E#D<9>miGuKmkZ`$O!{Tz0cE`jN&UOF{Lf&Q@*jPz$(DR7VS zWiH{u9dkMvghmjDEKl)12KITFGzZL7W1U6*9cuHT8VMV7JS%H8wmdh!auV|`WvxR% z945VIm|U-53?BDL=cLdf2Qf)ZoTDBqSz98{IWjS41iQ`W)V`IZykK`{qtgION2rWE zJ?$lN`5qdOns(&GKN7Z_8od_OH0905wi>pE4}bE^?{s^2Q$abzx`!5_ggRw7fE)|A zadQ%tZ`*<$)k`lb>|>0+zD4R#@JL2blD}T}THzBZJe{f= zxn4u!@H$iS)_A499^5WA*WRQ_r2wxhr(VDP=Qto?3yeeu8A24tX^XE@hH~8@jB50V zc9k0JOoJouC~713HQEREa=K%#QE7kuZ>e@Ikz?;^NEZ1!5L^#48YH10$sWcfbeSt% zfx0DRW5;(Wqx=~B1_Oj~AoCpPiY7=f5F@+8i!ArQc!%pli7C8qZWqhUB5=z4K_gN= ze5wK&y7o;yo*gXGZfeVVlulO3f&cykFB;qGpFY2~ovH-z`y=<&WA{l>6JL_k0%a-{%ZFdB^1 z-WBODzLOrF4kj_$O9JZ4RrhE^!Z8&Cjm)TQ&Ydo&X9~%- z;$XJK3^SNYv)Kmsu4^W(!<;m-K*wZuYXzux+;(~Aqdr8^x|$FFQ)XDmO% zE}`LH=Two>A?_ct%HpT(2TMI$Sb@#ymF@Xsp6#AeP+oEH`tl`-9ib>Q3(7BM4fHs7 ztMK8au<5hif;5f&?tul5f1EBv(gqVmelJSLB#d|lyxy)YT;EHFnsr?i24Ps)1@L z5Kw2b9YCac_|EwQ18R0N56DvMhSa=86!vF!h2>n+>j>Y8ZX08M}pJV1g6cX!v|?oM!b32wnPxVyW% zySuwK?$9{w=6UzIuJd94f$q6h&pE4V)IF-}m=AoEdjIbpmxA-KkyIqpP);_slFsaZ z70n2lsVvQ%V-sa}R%X+vu&L^Y`A3kDu|SeK7dfdfduEFd{+ZYTeuOX9-tN5Rd>OxX zt(y{P&76~XU0=}{!+%AHs2vfsKMhE%Y?dKOw^+01<1PQH9G_fZqncDv(5Yq|7KZRe zMvV@NdHn4xFCoeU)vjqF`D5?FC$rfZyV{$k-b^L~QhWHk?Ey4GOAk}-R&WOU>L5~^ z0a6~duFm6+!9Hc%AB7KsU7gW^bmE@1E9-E~`(42r0z@*<)yR5#cSy$*fBL^nZ`L_% z85O;jJ#kdoSRQM^q_I_H(}Gy1FFWVz5Q1=L_PJ2THtN{O>_Y~mT8sJQxZh z7g1fobj?Yq*dj>Bad(M1l&i%b6fIKOg@s*M-<8>RMaaY8l75T=HsDC|+mnn4g^HpH z{8_oOx!c-7vdkDRvm|Ptc0@N@UX0N#mWoo|PE2%P4WHrL;FjXRXGkoTMz~>p?H;B( zlEjN*XRs-b-LU+C2K*0QK68C#^{d9sfQ0?-7pO{0jA}8FtqX56|Wh;HH zu{5zg2kXC_jh`HjeYGn7YWdc6rMkXPh4=lCSk(gW0OvV{z18>ImC^nr>Ve=n2bUWv zK>c!`8meWFInOUuDJ=w+u10|1N}x>}5vF~`jx&?}Fka2K+1$N0w39)!`R7p%YkqAY zNWqS4rc^z#dR`rfT90Cl=6CV_V3Fc_XWL>E?1FuTQ*@&ga%H`1jyE2I8lqh!$4{^V z3($H%=TDHJ%Fk~s%rIIt9qhg^)l^10CUFDC;W;k5>Q0Y5)*d|vGAJpPdH9w8BKJt3 zi==%Gj?{=f41KQ5mZ8KBYd&4B9ehDKAds_w+~FdDC>X=75wB@8OnS9zmRWG?RmvnH zcZjCY4Qta14qQ$J!OL3Dq-bo2G{7HNV~xB#k_4QD4c~~RE+o}YwDC@uc-W-MFuxv$ znLdG&-;p(vITkBIJP+%o9gL6HI0p?;J7u;}72h0ubc3ilXJXQW(K@;2p$A_Sj5C5|fRstd+1ibAj|f_CS)yK5b>hpZbKe^}%$ zfJZb(5a`Ua>KuG50Tsvku)s28wT5Q`Plt!5+L9&}c*ytG5f>v-n!vE90$)ro$@^LN z3+qB&wut=hMAkxqMCU@bOt&DF3-TPDcC2NefthG*>%z$A`SJ%oSpF z3R$bK8Y@nD6N8|s8JCWH_`=&t5Q*ctCE#a?v%IH|+^HBKvz>&XKrOOEaV-N{2gNIP zx5UD$uu_qqUM3%Od=U4ZOx!+GJcXSxhU6Qs_UR^h?6=sTKt8>z@JTH z(OmHIc{IcscM3Qod5&}87@H0LS@q-P6z=Y^k}G+y(`2XwwC*13p6oQ$#GIMOomW4x zd44b&m>L-;pS2$9Nm+BRbKQ?#W$$O}7;q2wK=pNW1ASs=(CR_|!KerUD&;rlgBh zhvz3lys@L$l96DS^?uC77fm;x?a`w|{Y|^{qBVhQj?s{zT1i&c zar8&^>ppR!gn*=9Cv9B;5<;zm#{G_z38dbgK4Nez7EI}y5aOSxhNZd;s%-m{p_;0; zMthbAm=KPlayu0~OXK(~Cd^|uI9d@ECejyGnD*x*dR`K_8{4bf80kcgZ$1mjqW6V* zx>|fAxiyZ!tw^44I@pGE$S5nDno+W)Uh(9&Vbm%f%l9s2J>Z!CX<=kyy=08P5* zzy}diZOUl2ewa(A;8cNV@$!|XL{o^Jxfmp0H;D4#B)hS7UpG9t z+0K@7yi$f_B|sntfTVr7?u%&+2U9IvF%uay`AeTezS*Q&-D6)A9yQ!dB(C#&w>QOo zk))!imKV}Y4`e>icDG^)=8R~Le`6eYLm?wTwuJ=N2~03oQGd0^BBZTx80E^|0D_!Y ze!H6KWoo$uyhf`(q^mYf78L;IQ#I4z<#sA)HZ4;1U~lhn-+rYwB}-@Im&5LzK9>dj zcJjynBYB>ZHwu+2S|M%5gJgmq=B1p@?p6vY{^m&TbgPK6oJ*Y_*x z*bO){Jqa}!`iYnfR=_5dC)b=6S;@0vmM(a(PMR%#;{Hk^ebio}^~7z!{`$K68i*Z^ z2Js-(ul_vsqGk9q86IA>EW}Yojeq3zl_m~Uxp)Vpv_vFRBCVn=4$h=dqTf4hEnzNB66^ zBzPr;J#n1T`u=`MU%br1EPfUugrdP4 zhM3Wf4sEi8GAnWCWV;nU4syt%@Lo~lF+tmNBA3~iXKUhoA|KQ@vpj-gpgbZ3hlKPD2`vGz!tE8gil8@V4*v+Av?zA z@%O%to{+2b1Sfkb**)Cg^4Aad;qSZ6srfm07Ta>xf48S`@)WePJd%O4F&AnKItD!F zl8WFY3mKLkEUZI(4m~Ff;$PT~cdMD5IJsDG8EGT*hSC_x7&*jdb2uuWdNA z99t+B#P#C!$3J)TMSfeoQ43J6H;ciMnG8}hU|yJ-E~U?Rvw#KisSnW$YvMb}7;I8~ zN3s>|jk&WgtfLW|a_!?*%WR7q3k4lzb>?O2*AdJDFUr&iR(IY2z? zZm!jJ*1@>&Pidiez!z$|O#p_gxH1Kkk-QzQGCFg9{eagG zmJnPf4FNZ%z7YjgvKibk1wzXylig-y07070-z*G7!qAljA?EIiBb;OcsKj%#f4 zi^#;KU)t*R0&TT$Sc-wo(#MO+-e1XPOKI8b4-#`m}R8Wtx~*yk9X5xa3tmYkNQT3J=xuhbkekK(0}n79!To%~rcU~#`3 z?eu#cEu4pu66iJJY7dAeSY=9T7$22P;&fke$Pdr|d0)}eWN z>%-+xumC4M%|ZV7p4b#v7YjoT%+$GN$>#xa&j%p4oyJxFwgzRgk^&#^%PfUX+zl2T zwVSO3nLL>b4{z4s&B6N;5Chi8KbUV60lft28?G8Nq)H%_hVDjldHI4Wo(Ff8uouW2 z^|#!x$ajci#?T6I`*$4 zCXde};K6j?e%a5C%V)U2NXj-B8?EgeoPvY&NtedL52Tqit~gnG_#R4NAckM2>#701 zZy5oWOYM7I7@RU%;|-sz~r{F8sRipyk$3FP>BeNz|pWrUvhi$;>RxHI)pPpb>8PPR0bBW#&{O zsZN(DR6mZhj2Qq769{l1oY1AUf<7c0(YiqlBuKVOS@kz;ksupoW+uZg zSR$~hsV7)sqDz9|!E`_~Uptw?Jn(k;C3hcSNvY3zdekp0m6;eMK4;SUsBKS%%EaT! z0skGx@xRQ1;)720c}l^HDu7a z{*jhTYC#=~B8c+g5Rri`yyL%=LYMvQvkoD+|kc(X+<{`21vlXil zT%NBetq;CPes~%&_RCGy(H=+I{qL&Ef*mTzcqs=J>e@(WoO!O=zlOrm!&_`{3!R-w zlQG(;2OT9W7KU-GI2lmM-+VcHr}`Jye%>`2nsc_G4zT>{fpDI$UOQYXfl|a4A#E_q zGzwmvB1;`q# z2obedoUV>B0U*uEWj@Vf{lUsUQhK~xu>^3Y@{VDuQ+V|?N@h{ss}sgdpzw+p`hMpz z*tNND81@!t8XpYTyo2}H()k)ea#{uKIt9!-u-dH}yPsqcQ=TSj=_1TwgJqPha+Yu)5Eh}%mYiSF) z9z*|ckt7*@FXIR##KZNHWg%x8GT&@|D)_c8DNrQ^5NU7~gkBqSGthPBH=^t>uNm_b z5y{qGGH>>RolE?eA90h{AY4uqz+U69T3%~4|`bW%~ylSixAL?nS;%GpNScUfVFGXDvK+3ICe!$Myk8(jg)xdw~(lw_3?s9Ash_dy;TTQI4$S*thUEj@UBKQ#Cb$rcjf+W zS&Ih(XI8zMGMe%&>3Gg0ncTI1JU3uqVlB?d9ul=~0dr&v>k?H9R^Jm+(K-lpCW zvu4t|?eeKyiiyrFev0f?a+7q|K%a*Qo_1@^^r`4Xa@XP=aWO=x;gI3Qy5?FpSD_F2 z!}m!-ulpSTZpP*`z_Z$Th!5CN+*taXZ2qfC*1VYGfZJSUCj+JKG2O@@PY#~IDXywe>zxAZQC`f z)Bwh1%-r!_*yBo@9i?wtCyX488ijnZ57@!=4GO$nKo3ZFDTh>*6vb6u52>WPr{16O5D|Wq$#Y;D~V-;*+=j&nIA`!qYVL-zF#|cJr82UwhZe-J!jh0*u|a zzE?aJq~7d5uHI>*^cf*42GuVvE}NIq`qOdBtfN^hSH&Y}4T2uN{{OB@7@toUs16zob72l7kJ)`@2vNuoF~f-bDC&%8}vI!Ks1Sdfm{x{OmO;cSmJ(<>C z2zXD5ND41=wA14pFjanNGjV!j^ren-vxv^GX)HUhdgV}MK_gG$ixKqDy=o0?F#11P zH0AjA-!#|_HNGb&-;2QADLF3iU28FjR4&Bdb=r9(?r8RsCqz~HDi(?$S`pJ=u7xH<_4A&C@*wcM(c;}2q_~?|EZsT_NIlA! z`(In>7#>30ZclavH`3V>qs0R4)ZEr!_u?)vhe#*a1Rk%ov$nk5(!OT$Gz|sHNq1KD z^T4h8e+ykit5?>l84c#;Irg|18+G;kXuD*8B(HUC3>q|WLx@$S)`*GNq?4X#sVyZ6 zXDu?ZUGhKTKQkq9c$OkyiQqF@6-=%wzYVQ?#^q2l_HcR0VL5AiONq%Vv=>g$_E6G| zQNjT}hk70#JND0CpUytWPfi525~e*Cey%a0xND%l8=47Yj3*f+1d$uXkknZ+~A>*%a@slrGoyTK}xvf%T3Te7KSwz`RHq}j9~JIR`tpK!eC&(`** z%1)OV**U(R1_(cg4`n&##bkhUKLq#d2o3m?NQ0gKSWSSx&x>?I=X6JSHXDhjl-=Ia zdl4xsCLy1{cJeb?w(MVx8y=H`%%TSbEw7Z7UawUKE?+gPd81>V=RIfaluZWh*HzQ1 zdw(?W!Oubg*K*8thwO5IYpT*z=#cl=+BXs>pCl2EQliCMNr+VP;y9jJI$Yw!lQVgH zB+hFPpig^?ib3dp4rX)HePQb5x`OxOvUVY1G<25hR^F=`zX}wuK&PK0uJ`+%A>N^B zQWAcckwy}}F{-W8ZwU0N!=-S?B4hR-+W&)Bzod5M%LyYGN&1jek^p-)H7^k$BWm{* zTzmx_4$!2xiIdlpfVW-S{*B3Ul8&mLrJma{KPzXyp~kT!4vg1;X(^57F^|#3q-sj^mY6gwSfNsg6s-#Cy)dujr;myz0GPtWs2 zpdL;mmGAW$t)-Jd_auRv=oHD>V_FD>T(gFvKbuuPMF6ZNrRFu%jmqk;FoZLx1t=eBn zAP)bNMqpx#T(blr#foL)aKz1OhNrhG$yOH5SXNb~-dt)MroX{ISzfD^qu8!3P4&3> z5dC%A@qY#%Y=SL12-nX~B@ug_Q8-b)qk9^rUa#qmwa(X;31wd=j)z!OeGwiz0C_QL zDEjT_!4GJR9hHNwnqL<>@s5`xBswG?tvzw!@4(yO1x#HmdOMr`AcU+zd#=6te2Aj` z=({qzL++p^DqANyMI|^)J>rSdKcSYQW_lFu+d)zTKJ}a*LaT4z6y-^UmGp@bhCke2Secj~+ zXIxt!85fS^?KfgbV?|>))NN|DKyI=9?qhRKH@37_!FlT@R$nLO>J06~bnEGbrxuS5 zS6#3g)$#*Azf-4PA+vU>x%N5zNkl|fZ@f*Ki;j*4b-Yf|-?%081P<%_C7oov z$fo$!q3co%PcrqBk53COcvc~Td@cqlquguhH6X0vOALgN?QQ=a2Vk}v8N92l!4KmE z1=H9Q^B_~VO5UGG-_*}3=*ot z8g**YzCZJuEz0&I!`H3dXK&okOk^kymkqcY5!7pn)U9-}P(AnI3p2c1#}VBhmn;J*>9> zjy`0S`w8B;tgFVF@CbYmh($~vLG3HgXHcNeYq#m_x0+;Rn2wo_YzYFer-6+eA}Q>l z#M#9F#O+YdC|&coZ@N90DxMW73rM1RRyesv)8=-GDF}D16GRp?pv@2A3XVp)AcifrJa(z-JHI1z!Ev-9MRpTuh$U|8l6q+y;-2dwJE0qc+}R zm_r%aY*ru5uTQNyKRyUNat2(1_M*3c4*v9PVvi#IGxI%n)~T-j-QTh3qVh~|x;7{= zHc8ZkxPHf6MzUbMPgzPQU8P4PvGr;rXIpG^PNLl>u73)DlUFS_89;?n%LaG2BV{p3 z#~!w_DT2`GO!maCem}hO6~s_sg`7C}>+76@!41E+XP-GWBIZYTAPWPa2K&JRy8H02 z$6G-yty(7@eD`*-L1cG)O-9r z1QF9ia^}x)boofbwNY~al(Y> z{x7ug`y)>4*Tb#Et<}DS#w)1+6s@r51)$G}V~PXRDpBq=!Usr?Tto!v@6sWth*VCz z5bB0KG|fFPM&FA&-&cj0vP>Nuo4*r0{_nS;A_4S^mSSY84ibtAA_njdu-OO_n^<5>E|M1$UylM98( zL@GgyE$>M0>RLTwbeHsQbrfY)%Ii@-6-H|tuY*IPzK-=}P?+B?$s#9q{PXvw< zVj4%*axs!|P^)MVs}!JKYgTQV%r~}Pb|ERFoBSQVBXea&)E1eBH6QWU7KZy)sn8(G zgT&S@SXz-g04&i6L*9ya@!F8yfoOa8Ts;y&8%mY6oQjH^C)FaeRA}mHHhb^j)o*-b z@C2F+fveo*RIxAI6TEsG9`&lW>{md~-ZkMtwyUL*V*)ein<9$3g#;l$fv|BWqTJM96 zU6zZ}K}J_kPL3A&=os)npQSO$z(>LF!hQXMp!#zA6e-QKYaUn{R?*fmHc832y%|EweH_Y558LjS{bff%0M-rd~Jr;Swd+njzr{;_|8BL~Lsx<0Cj*WEyspM(VR zK8lvzp;`!|NbS(L6$J}$(&kDG7M}jUSv3v1U^}`}VH8uH7qQ{mp5H$7H+9P6qDqk$ zb0emnwD&~MKVSkE*G=DB_ihv3LUy(aY|D!KG&Y?EcC#pU0d!?=7T;-83J$wevqun) z%w+yIH8G~YOA{6Ug7w9d%&3Up>$lF5x#SFnm#!gzIqq;06=7wSr0I*}@Y#53C1@Oi z40rUGKX+rA(sbe-fB#jKY;%GGf6|9hx&W*l?{oRu9Uzqb!+_Ax-DJ1_ zRBggU8tXKrYbtcV>a=S%xUc{#wo|NXGM(z`lE%`krS|FV!sI!)p`gX zDc`-?EAihoCP7T8*x`*QPFgv6oN*?|n@Dw!H-HKJGDg5S(`6uKu=Q4ULQ#-ib!(7x{fyH$o?)`NiFL)yJFrJH|N9D`iFR?eM*`-``d$N&G0Ne z;5qR{8`C{SF`IL@gy#qaT6H$9k}be@=)*!&(^2WhqS(ssIBXt4zI zcIjl5T{78vXj4E1p%$-urm0}1_U5%jw=_+*gmcua0W}M=AaN=4!bHRr{6rD7xe(8( z7E&xBCqtqg4A^5t%Bde8SxgG!>ZEtT{$G=&3N~1r{{A9@^!6inh(Gdd1z1Q393^Te z^o<0=T+VMU+9~9)xYSAM|8@c=j7P_cK-%JeD>93YD5+b${w(Jtf-Xi ze*AkMRk7=)dV6xUw*OL9y{<(@yw&Nc%o*+vpsp7UBOs!n4yS?z`9)vPD>?dNaGLCo zUB;CtF{JRVD2EE{ z5CkU^TRQ}>b>7EeyYUYnjn{$I)4jJ&!D8ol!99>VTU&y|9}@DPppSG$zlZXeI+4-& zHoNRJe-cYU&i4MJF|N)q3=K|g2=lY;E3=hP2k;!Tu&4^rHJta%GqctFXRAw?Rj%O% z?JfzUA(R2Z#|E(_pcjVgq(%;vJG5FD(fm|5 z9LdJyZh5bI9-t?iz&oa5LMR8xlo}@*%2(gWEr$d+#`aC=u{pwX`wbebJJQ38SDSl3 zjDK{zT;g@3vOqa2wsEZ5;3OgNOGqktV*HWB{VX`@pgp{3f$^s{Oxe*`({*RBk(2ON z5k~cp%97FwiJLIKXKSf1f$@ajtYdb0iO8iS5f_(xz>G%gzanl6IeHCbg_dG=vkDeT ziQlf2h$Y&E%kP!=a4v3`cI(5%>%E+Vm+Ky6xim}FKjD?yk4EX;+9Uf=}RHp>ublc9H!B&OsTM7IFaadX-_4wh#u@prfR)=bp(AA#&eXN2ij!C= znrgJq@YXH&(90xoK7@wgHKxl!5tTZoc(vul$`T1T>&0?b88>1W_XUi&O^G zjgk1Rq{F$exX_(1qw?;x`Y?m%3%LF289t}W<}AIa`2Ixi{7iVq)z>DF#fvhfLfIPM zRWOnS29OYLT}Uu^O(>N}A&>%~=qg^@r6p#k!#`Fz9iJ_;Es&d6F7;}%%l|tdlfU&v&~-ZN7tK5!NS zKlJioT1Xb=P6ln-!r#)RMb{5@Bg()@k!R3!M$Gq6D;Easv~SvBgXTS{KAS+@lW>nZ zdRN5Ld#-5=r_@|-?@F(xTWQOZC@?rNJ7~N36e^ycS2q>w{9uHHVKAG=zBcfmUlVNB zT$2#eTxKuuT{I9j4<)L^{O%XJ@b^m&-u_&>nK|sQ-j5p9NjxhMxG0EVc6-*DCzINd zHx3iQ*CX9>n_ucyUp(rnZ;5;v>Jzy#L^>IAwN!lr-#GV9W8GC8o8yE{SM{6AtCtR;H0voJCJallu_~@b%6BI0jJYoE& zMwq|lrKQ0CP{SeX(ib~6M(?NN>7S+g`zrl6D)?kS#(^d8#?t`O+LJt4wrPrC^f& zHxSJjQe88bsQ;g^3;vi~j}_x3tU_v!s}#e_`3dpskT<#L)?IfeM|ii_ zTrei4_ls&dPq9cpzSwWqssA^`1sM{uLxX#H_KCuxaM&~AW$$N_vGCXJh=3^Sy=TH* zb>!knW_6g4_RI!K5z1wWtBH~E)0y?tPXFSe-XbVY&^4}I^Rc~nt+Snr1yrFr2kEPU zTJ`+Q3X*wfH|zewqdy6r0+CgeaJtTNfEkLrAIP8dZ)r?>zN$SD$3$3uyfNRxo>ktx z??4f{51UF1e_jH2)h7Lz`pzqe6BxCU-T!nSB@r}_>&0` z{VQ_geCCHjaPv!hYkGXTR$IR|e$BfpeKc}3*Rz~xslEKC!eyz#FaItT3k@#l?$W?3 zsf-~woEg5Ci_%`MbKSI~l+RpsI+280C0TS(Qu8W;9qKi&!YsxES4Ar@)`YDCv`Vq% zCcC#)`%n8usgp!-{$rXs?ky?LdAC4j+5IQ&Qa_!DVS2!CRVO=Kz+ay}s;$X1G|iVC zjuv(pb&K?=zr>q$b=@cc50Wkn3eK)vb9Qxg?rbHpDcWhEkStRHECsp)5gQ8Zm3&j5k~E+8;c@QeMi5plAfThk8;qE zR_y%HSpli&lWNXYz}5+da+h^O2%pLE8qM%!I)d84&tr0ug(}jaQgS;k{YItUi+lJ9 z>vbKXZSaizI7>}Vk6M~AqR5IYcZN{mQU&r&WMPZCf zA9ienZ3YLyjtJbkaQSd{OM_w9PZ2i39omt9AEj1N-0Dmhbr5PrJajVLNrfF5q?nP} z73~%}!i9_i>3cDW%hrMXUgYEn2dqvz4 z(-+u3l6nr)^6tvDSyaVJr)*%mZaqO2&w=Q=8#s8UE8H&h4B{B{nTa-i;2BPU(;gdr z2~H5tgL3zM`Woyv2$a|kFzZV!yvKS50vT|Y6Yu@2^_4}ZiS+}EWzj1tStCl%c6}85D0MJ?<3@N=>&EhO0 zv<(953X)QYW@;6=!E2Tm?-C#+F=s2oCs8kVJUn4>%$n(BvH5@a)D+?|fIg7&Ge}uv%mQIin`Zu|de!*%=`oU6JY(Ln|S~ak#$KCoa>3m3{-;sbVa3!KW z<*A_bJS=54(S4+7jn_vaS2$m(Xe|c|0blT-(OmclYBCo$GZQ?x2G_zetwd2Ovx6a# zwI7rZLyAOVsLluxJ~Qk6)|3RU{fI)vvoZ@1Y+EEuJ=68OMeyOIEY}1qw#KIaq&qQ` zSUKRz-vueoH(sR8_p=~R0peCaCw|PRNad_uU~7FyqhQ`NxJP`zWQ6<11i_GupRl&< z(LC_Tz3@_E&m@&$QFV0(*-C1Qg8%y!OvHTx%h+HxSm4Qt<71nK|C3(l|bx&{4Q-<$o(3U~jo4 zdh!NsKy{RGj=y_D`pOaW3kDI;qrM!*SE}($W3=2Ip52z1prhSVJ!_mi9vVGWS)!_Rxcq5q zE;luJJgc92wwfrd4BgjgpTyB2Y;R1O!Mhd3$L4{hxn4i+O9Z~fzrxut8s*HO>GHJA zAAKwk3Ezt^L%MkUd<42gACv{s%_avO4%^c=j(Ao!=u_U_T0p+d%HFcIn^Xm*c;Cfe zZOo-#tw(Yj-(P4wRqR9E?#xS83)49SS1pdGvx3eGRv&QdOcb9M6IJDhld~lbRY(^& zw2M}=)0J+|NwK)IqZX6aTR2jo1js=>adI&4E`Rbf=sLf*yz<)xV5->s+(EZN|7*(a zk&b%&V{p|_gq}iPaHtA{N4s#5sI=KGg3kpU?ja!0o- zj_{Z{059v!6YrXF)#h{BZF2S<&3BGI8y+oyZ!oIdQC-tFE1{9xV0-DF31WN8#a}eT z*1wi@_lM4xs`5Kdc;>5!gF(9{5gY5Rz^VDvH>rRi6=C~UqOvBHu zSkt`^I-x^d;w!`S`H%aI^<{lmi2LuV;ES)%Kb9c64&d>3b0(toZX6ilmXz@n1jl@Y z|CWJwyee31FsX0_>hdFhzS&?P&72A2%l3X99lgEw?h6?bVU6*bZ=&qGf|L}6+3 z!~2mU^o%*V;IyHpD&yL?Dfx=6GbeZzU`}GhhAoH$WLqGe(xfG&d5`O?ew`l_OT{fF zP<{d1ZJb!=$4R}G09;z0cuS2%6y@l6V?VipThz&+-SxvE9zw z?Ao#J_NSw-Z)k7QZW?FHp6Q?F`4DnTO3e3rQw3tG*)BRlnENf4Kr38_E{hqePPBjK zf4r6s@@Tu@ULn6?2pDs7qnZV&u29h_O|LI|27CLgjVSD+4I0@ev>s9DzULR;r8AfD z;ph19m&5)g{JXYPjX;yF{{;Fjm1zOZ1C1@&XVT#mx#h0TLu4BWSQZx`W@z0nWzv`$ zdl5at9|=23z`d}zS)^m_6utmP-m51CFXx$aOj)Eh9^n;et+yxe z)$os~>gg}aw^|#;UdJTQr~5l=Q6vgJJw$HM?YV5798NQe!Qw0`x>LreSQ0l3f+xSR zDq+01{zP^8?Vq4+oipQf=Czx(v9Wf_-47te3>N%*swT#}Tc0m=zXonuWN`*W-Z12_frorX_3WWjkGr5s(jr{izm-;W~bZf--l^~{apOKHKU@IMFsBA~je zzFD;OnRz*`k!`lj^5I7YFrGLNi{_-JxJ+xZwtwqRyh(ESorn~7h+>-HWwz~G9#x>j zD}kg^KpnGF{f8F`c+kr$DjQ0KemQ-UZdm!(03Q-{DjJiQJ6xHD!5{SGJzK~k111rP zi6HRnE!-A=Ixbr6$7NLH$a2Kp9MH+tRwQ~U;goDCg`!2ryA0?}+L(#t1{m&#^%nTP zN#&Z9ZIq?*aDCIzrQ$mclI^;2W%4|Ixy$oBiL2yLPA6#)ycG5UEy6A;#Q4Cf=x>an zWG-JQ)kS~Z_i~D4k%5^|d3fEB6!$@CIvOtw7oiMkpwX5vrx52NIUs7kOH^FgRC{sV z>d8bZyxz1>YJgH6(4Or*Vw`yVNk9>`P}2XWT6ooa;n(A1*(D3eLG-gKV06SQ|RkK;zf|8_U<=AYyGj$Fs4)m_|3`i%&lm|0HBm zy&wRz$=l{PV0a(#=e-8@B@6=g;rp>cOMCvm3zzPz@{BmqG+-pWpLT!*+8x+7{&CpJ z-e|TFk5hb-7;nNpCt8^xa>|5w+BTVH;tXO5$1Y%%u0u~fwIKH#*}{ogi!ylMEKWAb zl;7>kjxiIx9q(YlJ`fSmtzKWBRnk@j$nb0ZNPA)DfW^R^t_j?5-@-^6Ft=VF=t37N zl7FZVb?px80!vB0EBPfmDkK-A9{uN;-o>mSa84GJ4u+Zyy?Yos_|6n_rNqHDck(bT z;a-dMFBbkHk~0^K=+QmdRUs61<}gO~oNts#ZX5(X#Tv-bdctIMpFt2f`k&h=fs@2v zVA+wQMZT#k*%j?L9rrzUWpY)1W`~XRIU3ozs&1%5y>TeqB$lk#IJ3tpB?DE4nre{9 z#vS}#TrjsTUP-Edd3u+;TTXJB3;szMMy%ShrzE4S(qw}y1`Ua~?V>sj{#)^SbR|1m zo_6-~VCTY~^<6w786QvcrYighQ%9o!WnKFBVAiD9?KB;QrWCTNd6s6dwJ0FyH3{wT zUwrYPR?UU!*x-OqhyXu^yrF!*)k4~Ja9EzYU1`uwHd-_^Y&GZq;pr+EqTISJsUV7k zfRr>yOLwSrcf-)#T|+7zBHi8H&5(k0Hw@j~F$3Rl?|Z*rFwZ%2&e?mfz4lr)x1?XI za+=SwC{XCh4tp@_LFB)rf3YV&R!_~;8Ov4+rum8LcVS()8n$+|yh{}vA{Tg_8J=+O zHGnMw^NI0K!aTc!sAcksUaO4GEJzpI zurM9-9jPJjDyIo=Blpa)_pwax9}gk?Na9+eF=F6nh+gat(_to192sTH zoFB>8QE2BDNAT>5JCzYGIRwvzONM~8!XqzQwuPhGj!>!AWg@BZsSC zEU0aF$L%uqHH+Rqz-9$Y;mK9Uc`&9W9aSv)JHkef)!gzzAsrgyZ=-1+;{J8)d2OH$ z*U4g0z7fsujk2Z(B`NX#FoTS`2P`KhGnVRiwR|hqlV(B5Cr0h-56Xm5)&_qRGn@IF z%!*fEJ@~4scay+&Jda`x(?=W39`$rmLHHR|Pqyyb)WW>7SP7F{vx5-M?|L2>V4PiS+L+#*N7>q1H)gPq(+^4<;r0FeY3yZCZr8<11u0?m{b5jITp4JD5%R%b zUzXHs=i|N4+>roQarVS}7@&cKdc~wx}i56mdpO6cqsU%Vjs^|KA7sd3O2kaVGBFQiFUQX=+ YPa#MRmA<*>UCsl zgSyt!al8Nz#ud&Zxj<45EQ zCqL0a_$?aP9wAg6iB%IPv>y?h|5eCH64M+CiO{K1x=ey)1F(-jh&5oJwdkLAiXpGi zbc|qxE!-003=3u&#ZC46x2o1$axc6@tW=eH*wqLe`jis7Xs#u@qzZ z4(!AnpPCF4-9S5or2_Lfbseio_8)|hUL|8@qBX3ye(}kWf((n4Y4VyN;rf^X)n#GS z-uO>K_(EL2kl13(){GS?8)@MG=B(#3ZeDgTh9AEx%~yX3(5N@cyeqncZDvKXr}Nc(JlLgC+L2B!bRGg zI8zKB9uEO)Ml*-ssh(bS+KkEC^pEL;j&YxE9mzx9z1cDkAxN2z)QHb3Qv2+JfN7l>R?P?%0& z#ZgKlXH_M@ij{2qp~$sQrTwec?LUARA>8nqN$cKIv!Bat5%(oZ2XCWFMzOR%XHnAl z+XsFKU88_6KXSIEXUGYi3J-}=$1*7yQ>3X+@xCFKb+`E>z}=71X=E3;IgTQ6;(qVD zR9v5g6CPTloNhn*cY`UFuT~WM8Lh8ZOHhb``}-l`C^kyPJ73OunkCC@Tpj5bBM=GP zmq_dh?5S=%x}&m@SYY|SovIhh&=~m`!VXV7qM6Kk86CC~C*fn<%b*qT`^t84mhD|s z7N13sCyPqH{xXV)`^~Q|Id)WutFinpOEcd3sj08MyXJ!NEr+SdNw8{l(#~}xYL7BQ zvm*nyd?5!BZ^U;)@Zl=cmWrUH4m4NFfL0=-9`2*_n#vnNZG8&BY^cUSJ@|C+s->ZB zc>pwYZR{|YB3KBrxTi$xu!VZyf@Qk!wE$d7F+9(NCcjU{*aca0v+!UqjXTvE1*D;) z8>P4UI`%o&_Yuss$IL97%f*?Xyx4~u2i7cC%k}d^gb&?em`E>I?R~2?oI_O=ETU!1 zxY!w>+4A=cUch3bQ*U->{y zgQ^5Vm5@66?1pS|txRo-puvY~KJkL&jpL`)Yl{bp-0OPUOZD*^Ba{N%li)$gJLd!G z0EV!Fnc^rY5nAD=^~JUUw2rZp_PlDa=i-1lX?VMiY>wOzhZo3^f%dnCebB6Wk%%3i zOg{ghm35P`sNILFmyw1_*bxL+a{c&pU}wH%DVLcYJ7Zyaa$P)^?xw^HUOe{%B&kQB zO0b)So>&v>4Z|vY&(!2_MLoD$fOr4qZmT;)!b=Uoq6ry))(<_Po5W4zUD_G{+T?P!bh z*EDZ(q)n=$YzT1mYPf)DPbjyh>L_e^a9rIHi;-8wG^!yxf>5n*g{1!8#+`Q@^ zGgM5PwTdB|ILY)@=Ymf>7~T|e0)Kh8W}G}iowQ|v47I5pR(fCOL{WRxgqmiA2qXD} zByXA};+C4}hsK|ns?4FwJ13#S73`j$Culkez<-0)Vtdg-c0v}N7b3Jf?}g`dS-!sK zx<4!M>+#8!(Qgi+rLuY@dPdB3rM0+GSJjUeP;w?<%JG*JCgjykJt@R>*?NF|hwXr% zONhT;B(itK)%pAgkI(pzy} z*2<|ZToSGTJb^e9?@t;OMY-2>{__O$1FN$gcelndQgf!3r}LSOXBh@J)DId66rmuR zR3Z81T~pw($M$pTO=062;*rGYeO_-IhO*JF)}IA^pu_$1d2y{eeq@D!EDi{$(4Y5l zvK~i~-UQ0hG>G9>szMzh&l-Zuk-eJ$lt#O|Pu(_U+l%k*`*k^6?uu$yZddGBsDQa0 zGdCyo`X8n}hUNNS1r^63KI)!w|* zYm&4Gc|S|@*RgY^%w&@+pXCW=Z2X`TTljD$3 z|HOC@p2{@-(mi1v1L`W_uD zwZ$Bm>IO_-aPgJ5kYZU6h5Wc*k-i;;91~$Q&bUC`s`Konz`&ePd08BGm z!Kjxl(-&2W6W1^j8rTNIE%-6Muzww*{rGOP=!8c_nBL;gDuv3&_p$QbJZ*Kpq2eDI z6~nEa&&zHiTBN;7IIG|2hvRSp8{QPNzD{pBb{BXU z;-Z;$OMUQBHa(wRQ&VD&V|;{5lx@qb$z%!8kIbx{p7G|5^HI)bdY(n1%K%ec>v*gO z;_+)(0qTsH1=>}%wRd(0sHCB4?|8qby-%a)W>%w}$WSUY=}H7~-H`w&Os@GWw8cH! zdcHv0J82#N!0o_+(KiI#8|D$cWn7mXSX#4&6S6j&kHq86&l8 z36M333TWCJd_6cvzmbby&PUScO#f8m?Q6yW7IrHU#gmNR(&nollgKbR?!cf7!!4`{ z>EM&|unG&gNn@{Tn^Z$?@I1MBx>9){0-9RRd6_T%5yD~)INMdm_0|j=264Iu=9Jfe$bB5sWMrf(}oz&Q+eQwXfm< z3|{f&TETFK=isgVR*6ZaKVzziyl($Grc;>Nl}JkdNV}qS-|p45{h@LpJDp+*Ce_`f zP2t|tr!ROeMn5mBX-F}(c+Q50;d(Bm8DHlc#7f{sd46Uyj~l=oe~P0U_<+XRrqJK) zq&bh`vR-jE{$@MHAgT1?s6%Z`&DX8CqxtN1F3=`ud?{Fc9Q*ZKA>E0i3n!q?{Rxv+ zt>_%pMm*p|I=S{Cjy;DO&)ogAbT4ML8Ga-knDWTeg~mvP6iy*5uYj3F26^z$XEG_R zQ|bEfB%oC%#;An>+9G00`Rwm@hm&qDb34k5-sgOMpS_Y--=PMdsBGs@&2$%~+_4V9 zk3YrC?UDB7X~G9Qt2L7@;Mw<{&eL%NEJFoewhPW*0{aZpr?zrmK3N)XyvD+AypgB8 zh-IkT`O0s%{y!~%jM2=oysx+Dx%5{Kd4>K@Ig-L^#U2qZrczP*{k0E$4ut6yT{5pP zU^wPVhv`jvHk*PF|&j*d!r!Ae`}Lm+}@bw7lu&?3-lP#=XEsSXxPuvht$q zWshFC1_a-8R0rc8R*bg{y&$FtWqBPnOm~hxXOMPjAd+@+cwPOY+vWAffurt-tbKyu ze@F4{%TcuPrBy8`j<@Fh{&;&*9pUKbI7+vUx2TU6dk3C@>2gzp&f-5ma}wL|48WY7 zLt9r1zwegr9SVY`%PDmO^X%40pyiQEtv7of6ACG;oa>iK5iJd!$QU8M2t9lav)#-A zdZJI7p}&UEtaL8oW+TF90z@;vJTF(>Q$?RLq2Wg#t83{v`9~pQNv=OtD2F?@jdy*c z+})U=4b2O1Deb7_J~)X*^t8whsrP$d(y2Z~=q(BwY41{Y+I>28&Y08x!6Z-N)K6En zRJjlx42YGMp8YW8OchuAB@M+y(*yN<11{C2*5ZCk4X83+Rjrur$u{M-WP@>din*(r zp#Uu?(Gs;`)vUGK((!S4R*L3(U#v51_>R={0J5-Q2slfR<zTj=Pq|+sX6qDGkAdol-X&PNda>#M>M)fg0N5HmRcuDJ@y-gI8T#hHFMM z6HiY4*9z)pby-mdeSSXyH_=$-!Atz}#`Xc83$d)Hd#?Mp^Oo%9 zEmn7!3!l~h?()gO79KC%6ZnehD7r}xR<-Uj}b?lOl&;x<0pL z@onAPjzqqLIDFyIJ^y%S=Xl3vHGU=WBA)seNNBtD7xTWpx+OhAi>AQdyVOy~Of2WW zyP-&#~M2^CqxSJKNf=??V?@O%_XU!L959=2a;Z(?td9SaBFRp{|TTRGr z0)G$Hb0#8wt)cc2`Fof#hK+U@%e#EEFXf9K|7Eyqu4!ul7YJRMi!ZVBT6f2{ilY_8 zyTOM66aTz|FedkUqmwPC;$OYN_H!|UA9rZg9&Z$hL}gGkDY^-8d?q$#+&!|L3A7OG z&p1%GSPloM86iG8*~8B&`=>NDY~syCytxBki0j(S_k!*a5gepQHNO5RJgwHBH>i&a zog$R3vItELNT~@0ldd%As6ChPdPUZT@7F71*O_8-4fBj~*N{zJp`sjWdG)T` zrYasb@wCP_%~6rllDIj~^A<_2=O2}MGwHOpO83U7#Nq%hD@-WO*G59jf+aM}Y75LB z#I=>DNY}b4p9{vUMnsjKd=KWr2n$hCyv;cTYP|$;c7!%u>|*!_%1+z9jio%%(mgZb z&E5?@gVcN-In#H3e#p2aY~E}WG-K`WXB`VR(O-o6OIs`%q+->VJI0QpVuz`_T(?Po2!{!4KIB%2 zfvv8?dc%6=)g_15d6Rw-8nV<9Va5I#Te|puMPFMAG5|NMhmi+=#AWKus0mS4)jk_6 zdx^S*wTngM9g;jbb?)4ZE3oLAA#%t;8$ZKCaHrnSwdj*Wg*5ZnJ1i9wMiv20D@){l zz|Lm#cV!2GvguGw(P~k6D|@pY`mXwfpB(+Ra?#{C1FU(P1}IY`|0c|ybpxk_1(6CcIM=^1Z?naAH-H)6 z43gt|)1bc<8X@qFuJP*y2iN0xdvuJ*vgsZIn^(khk_5Hc5H#?p++)^e1B1Zq%sNfU z1+6QfB@fhtft|sd?gR;BIuo44d%EYEm0sO3c};wCA-($ghr7kcus_w}a8Y8rFBEOq zMd*z2i{82IUcJ8R>JK+x4anDf7KFguy1p*J^3to}W$uM`% z-M`jaJg?>;Axrf(eO|8&S}XOQ+!1mhK>`H*_-&6QvQ}e>2VLnM_{8&Y@f#=(Rqd(A zKshQGtgu?jy^%Vjmz<$y`gnxKFyX!ul-!#lq&h#{%o$L5_5AE8CMI&W{nUHJ`HLee zg0Wz1%ntl<+l2iz}hWue?gaG(V>DM0=kTQ&?Kw647}*kmvxu z@JCj^yqBF!_Q)v(NFxTZQa%%6#RC431*Zir{*hksr$n6cCZM&yVBehAwFRt>1nKo1 z8$p;A#kK7K&hqle;@W!9&;iF}un8XIb=z-F=2Tn`SN+f_)CYCq&qeHu;r-EXrzv;lsI(fN zMyev2ZikrIk8TgQa;#RfZR;%z{o4uO{q=gc&HE)HLScDft`(qBGp8pbelm8}%$Nn^5Uu%YSa zx%)mpM1U^$bb#aYrfd>S9?M!erIQ7`qa3yadX-urJnx3AsyX7EoBeJk4q0t=W} zAXQk0vtP6&pY>;Q20C+i?#7O0jj zxcN1KPxG>L5O%Qwto|Ag;TZSHGpYcBpk6w4XW*r}UXFRpC0^nY!Kt zI=;_=ZX3-${ZZERUJelfTAAX_Ss)*33)wsuYW0l8)EAOCg2s$Yx6DV+52@}1?}Zf> zUO|9SI(7*E%o)QWVI}xQFI*yTJGo>*lE&WU=Wmanodo&*9urLq;X{xfnvo8f{8d$) zr5e7mpgwmb>S*mqNDINbAKoS0yU#y-3)XY-N9-3F^p*S5g^Pq0(+Q`1soiGdMlL5* zvBSXfhLXu2=m;|)TD+{z3P8khWLl=f$!7Ul*KyoX-S zg<+)5=}}1cSdrz*qQ`SU^ZMP%K(7S4jIwIfq|NW?iIr`0Z5&UgH2-+jj%sL7za ztc7>r2X6sH7^mXpM3I`H+@XN>b%V!s8{?HdJ%PyUiXH$#T9;i^6KJe+-PzmNgMf0F^^%|&>BXN>I7Ejd=q{XnF@mXYel zvoC^`Jo0ZIuaUZ>PL`quSC09tG(Ii_9{<(PPAZUlunx3U`$7t-6JxclYb=}CIW>x6 zwS@}aa8;Q5!Zda(kf83G5ze(lHLKEkn>P;%I@V=##?R_}y9X=9Hm@gdXK>hM`-=Ct zVdJ@i6EU2Z-(n%u`&#&>Y$zSw@3sHQrQzKW^i0R{t%M2!xMmdsA^ZpFnqQ>_pW*~{ z+R|3Nd<7Yqe}XFc{pj}EOJqiXcV@#%_NjB;zKx{bFs zxRHX)+|@&0@#2>n>)(tm)CLs@v1+w$=)(`nedvD)#&8H~u z#Nh>^OzGTq4EFX_^p)nx*r)O#ek_h8uzo2wlE~rCjnc{NE>M7XU)ss7^J*bd@yo6# z_CQB!JuOJj_U<8y&{@@{B~()z^XB_G$L`nqNo>?79pvGru)Fw~Y$^cFn-`ww6x zuqGu0SHrUx_n%`5ht!6e)NRL6t_>6hSS(z>8Wd^{lp3AMtxNgid>HL3GS?`T8-9&G z)pESsroH)@38gCZvQ`{)fzruLq*J`>;w23zX9n1NE&N6q59dtJ6SY$D879p&`Gbrk zgMf4p(mC+SbiCB&JlnsN;%)sWHC$q2O!74J1y!TJbAw>@c%irLoM}vktJFFMfYfFB zQX)=xro{%Doy&&z8XoHr(740ut&BuxZ=Grd98IKGwIj&=| zE`I&rwmi**8yGr&AE^7f_dn~}FR140jX_KbGsq?;BD#AqEces)x9I7+w>nE!=Juu5 z1iV?f6+TCG`;VyJ4x($DVY;4z3~)jn<-e|^^H}kn25Sd!_}IVbeK^`l=S8^kf4KSs zv2*rCHVW<;){N~W!LX;v8ey3EhFM(4d!FF=2RM7~@{_|yr>yIyJK3OXaU?@n{TvI4 zZX9S|Ld^7=gi8Tqr(2poZ;_72hW~r$piO!q5afovnZcFGIZ0f-J^5orw1wdD z{>f{IMDZqbm3=sE1WxUC_v7tgf}9d%e*>lpW~wI4id1r z>+Y*%Y-Z___htNf?kv;cWLT-P88@C60uhe#?jK_z`s--KK>PKyHd|UE9bkoAQC}o; zM3rf*isvumo5XNvYx{RMd=v|u}qvs2v+Mu%mu5|Gb%0HOA zVLy|%+wY@PN;$TX7s$gi5G~SE_K>_=$qg`Y*q_Hp0VBB|C6T|xwO(X+Mo2+1_h@^I zse){~$w?(*q9sxqu zGjpf>qCMmUiG9H!ss|Zl93Jgx?%>l)I^e4}D0u|%=jtDx`}9wJ-Nj9ON(RHAbn{qJ z14soVY`DjNjDzh;%4B`xZ_-u1P~JSty!Wmc^5(Z;95zWjhXQLdI&ghHkiE zO5$ep`7}4QnR=?Lr}i`gkkPrTt4HbUbMAUS#Y<+P^E`*&K2+(y(T_AtdmEc7H1Sqm z`Tm=62$N#KNAEQD>o!isN2&^=*47>P5?&X(Wk|gz+|{}Ui4qT~T5taBe91=X0;i*e zteiFBcJOFDn(b2y^VoPjs>AejT7UYSZIIVbR!nM}%oL9GN7`EjUBaF*bR;o{ElpL^ zUdb)pVA?;&w77Tko~O@(ve1*W~>3{j$-{a7-%NDgMq(I;5bwb2y+9{ix_-#_2$?uro?8(&3+% z6ZWgr@HpQl7_NCIfX{c_=_N&Z2N4BvGZLuZ1}|?@35@ zK$T_QmqVLJZQ=;%IZo0(?a>duciA=P?1;%6&BF4lE>z^EukwQ5kgmz<|Je7UC4f!m zUGHq3e#W96;~kG5^cdRyi!^X-wVRE|S*!yh>XiJ!??v3WDM@$W)DfCRPC{2T5hLuP|wDn3JV&S?E^P_XTr&M z<&3)$ov8Hy-<`)K38w0`iB@R~kPxcLNO`Khhw2 zIs>?)-BW6h|k;8Q96uv4#Yzz@9J@ol&m#{43Lcx1YUl1OQFCH95@Hr_AdP;}Zz zX0(o%w0vF$;uDB^^s$Of{s7Y!fNQ1ei_%xM zZBULE-FQM2gGLo*#S5bCz7PPc5|3_&e%23Hsnq=Lf=N%klv!V11aiL9q92y2!wWT? zg$sWXRFlVrZKgP)>Qg(62r$`VFT~tHLZl+3gNKatJKk>v!XEcH{k1|xPl?G&GW{O(f-k~AO1ZF60l1{%PT=yd7Z)G(JK zqOQkz`J4iYk4D49VI4|>9t|=2!Q_>ucYNyHZ6;0t9zFGk(Ye}8)`N$tfLPf1j$T4{ z&GJ+J-(6hGg!yqkdv9DEJRKNiNF=%;lhp}=Ov5|g-;`hE87rC@$NM;z3hGVdY+e+w z&|*lyA#k}`=bGJn1X%jueF94vs$$tJ8;)9y+!7olO>q(mRzzQOl79+b6wEjXry6}G zX88eFC$A<(7^!EH&FoX>sg(2rO{FynYmz4KjLN@1-fgT{61{me;hcT@+SbM4)Bn<_ zBiI%#hE)C4Y)cG)4qbhqF}r?iUNTI7{8Ds3j4kSM8Y2Uhf>W;^u1Y-)D1iQO!24LA4S=Bj7EAN^-cJO1)n3hh95QJqw1 z^{m^0B87y^mv5z`z7`dAUI-USx5LbIM{E%|&P%~?!L-s@z@D9lRjMIwILX!~$_)@KyVebd7I>Yqmd4xS!nA)rQJnJGK`)YN|_S)&^k z-qTzlq)}Bc`1{%MVcZHf@y?9#yu6Q_dC3irP%I^k8&C!x{LOSiK%6VI{)TbOW062& zkqp<^&;XvoWAKsF#w=CRKKQTkg*C{?O2nvx+Qa$y`l#RVf&V*le3(k;v{|-({kjjE zFOs(l#kKV~j7j-&<@R27D5?4$*GjFAWW=>u?)^R0Rwz_VZk43r4N=wsvd;pyUNHr~ zRV;V#!qO!*uSyp!tmTD)62TUz2mGy18(l!MbkIDcaSy%l8=3shM3m3OAmM|K6tSfG{StZ`!)tFg*l(TV+Lis&=BWV?fth!0c$<7!uEx; z)3D=uw_Vys{ou)d@ku^3PLmvDj6_lQQrl%XcS-bkSDc)}vBCLo(R}IMy!E0quub3y zSF>>|gN1J7cx&pho5wLOMDb7z1}#S~fqtW@i>%KGnEl&M*}h&xuhC8@eZm7afBa`v z{~}n}J#V?nv1v>@!e@yRoj~W(FjRUN`@Vd3Q)tRVrPE_sO(V}q(2=Px6wwR0Ecy-X zI=+GrtsyKcE>2$xS(bZHId53Gk#_KE8yzOh0C%2wMEwL;im>2*W&XeoqmfhB{l(bU zJXvbVS~#t#c&N)m|E=;(X%6iA`~F_L9(2=O(3ruK6}$odG;y}rkK468;@xhZI>j0l{6^U;p^eO(UI!{a;!kc6?6bjv`ll`Ox||y?S^??ww9|`%$;oAuml?{{UcB z=mI&qdkzwKMqUu3C)w~bhJ-f)7LZ#*S^1IpP+#!iSpfZ{ho=EMTf#FPuQg(NDl#6- z{5}Dnl&dT~6SqL4wD=><_c*gd=#0+o>kZ%JeX$N5Nn*<4A7w5pH?NZuUL!Bvw5Xy! zKxU_c&W7IIaSeb^vs3RMd@DG}`Q-_Ih|Xtq<^Ge&`Uy{?DN9-QSNzA zW`&7S^m8>&GKYwbl!NlZPGe@)c|tNGh~cpW*&G47n0xx zfr{?*Ncb$y7Eawm@x2fK!ejUdto@ILpkRi?fMv0@qW~F|(emO>6?@*ZZK>Y&(Yo9D zb}{Lj$CVB70b1yTOD)cR3C1}+eN@dbmmnsj``UC6@MhY9(=iq`yAaDMJ{4L<`Q`%q zF(G*TyK$+HyEBDu`~KL~(L|+lzrQ14O@f7f7lW=_TG+{3?Aigr_gAkFy09>&rlPS( z_hu$9${!j&4ILFUZkQ3+fWVps`cw#dU##!>Z2zVh>T=-xpq+rbeD^rjqP4K!hihU; zggS)iA$C}ilohOhxqD7kx$i494__s}MLmj=0 z>k5gS2{Jowv8b#T?I;$NQ_AO(mEEqT{4Sj;f302Rps}E1PFI>s#|!8B&@-q(g)=An znLD6HRhEh4dTM6XLhEjpp1s~aaxq+%$Dw184dW3I`mEj5;hQn~ZFxPB$fvr=YmTUW zarE+z{T3dyGuqaj@p$k$gFo!&?OQYl6&0At2xL0+xX!?_3Lq2@?=NqX&xl) z=b=F*-RqkK4^Mi<;M|kj(tuls>NoBWKQe_t0NY-JkAhidgDA>|p~h}Q=YgS~J9QI@ z;g$vu!?^H7`Y~~{iN5c_C;Z=T|8_Gy$FBY27jo@)(CYo=tb+Xg@jjL5e}JT)@Gy$V z@)1vD%6LfsC-v?_MJy}|>A~}a5{oWEtD=HkV&_0d2UZb4&XuRQ`NH9) zxvVH&$`=d20~StEnYOKMJLk&^m7K{>4quXleHvoY$WnPO!;#V2eUI29#2PO~Cag$K z{hR4{IbOrqXh$Z|Q4N4UEU*Xt7gDcVb+>aMN=>pl$FXhaB3Ha)kS^xN0#rYx_?~!{ z8~_eP(!&CQ6@Dl)qhJcIA$Q=HzI&c9QxKaPGnpYp;ipofj^ zhJMIsK5sOA8}T?YFonH{PImq^;T$>=Tz|#?n(6OOD&uW5jG_J#P&P0_n$GA^CYh!c zTnk61FOP8P79MeInv>+)tvz=x!OS`JW~q}ajk-)DMLrZcfubZ6T`youFJF-raEz+G z)@fJn6dRL&g9tP`*&4cTSASo(e{0KN`B|a4`2FgXtB-w z0UE{;DKdM*CEl#o=^s1)LoNRzlu8~SRMEhLFNSoYfNLa1D~4S6+w^nhW%m~vTNlLN zOc2yoCGmzS*{;O;tG_3(Rlo4^Useslt_sb?OsaZg%Hj3k{Sc+4{G4i=Rs9|;ojB$2 zP_`8(zpLHBkbhZTku8{@nU_u@HMIY2g{`D$Z$(znrp4t@Ozzp6F392AaMRXnJ+|0@`T%xy;d!bfgQ7BoqIcG1OIQmY6$3XA;>t8cki#OI+0xvXMJrxS6LWGS8b?@N_A* z&}%G7ZE~x};)7=PzN_o>iVgfcB}{2b$#*!z?Jv_VB{SW-I)9wr7~>2R6)>>LOm9C2 z>Mv&fV@v>Byhy5_=uyP50tY_h{&JV@)e|`d!WOJIuA;>p=~zQJ6=OFy)y+5uPry2pJXSBf_o{H+^HR`HtAohN7lBb zRNgRFGA*Su?@O2TxioFatz{ok{fjM9Bxv!Bt!##J{3DW5nqS+IouJESV=QU9Fab29 zhi7p)kG$cGL%Qu4bJrDHkrPGShrHD)To2v@wnuNl20MHX@D-pp+{s0iUC8xt?Mmz} z1MP(6Gu}rgUJey|#x_gIs#5xqkSRSpM53x{YN-WsQ{Tr6u;nQF>+gf>Iht6EET zrSD^8gV4U6qaI&GG$5UWgctL5OCun%>Xq@ux-2CDcCq?WIme1bma`atLjSbpaqU|8 zz-T=X!TkM8+M(qo0|kAuOm?!SFBeceu;7Fc7&*wx2n*olG#R~H#@q8cK$|L|s&w$G z#CI7O$VtABo@{B${LLw=_N^+Z%Tx93rEox~q~#I{H3 z$q!DM1Usd?K^Ez08~U2LzSm7eN^_X+C+s5OVl$koSJxBSZh)JK60=B7tAN>{qvEKt z9rejbnb8CMGh7cUfJ{2s-K*Z8Z%8q)5o-KgN?G|5emTA1+&&=Xj#ZeS_GgJ$q?655 z)2W$$K5nq!SfW#-BRinc&j*rzYlY)%cixapsb#pk`i+nNsT50xLBT3%9BaWy3CYma zsGqa$N7%m8Y8@Dx+4~`3}E;=P5#*gdOSSw?&L08C4|Bf*QAieZZhJfWG0RU77^La27 z7sjnB>n@}jEWc>qGViNqYO~)}JDTubIS8fp)^;%lT`vEr&6Ib+)+&9BOpgR@#t6Ix zfvF7g_#7VvuDCLE=pkC7?wn4K1xos?&9J`qKd8n=s<6sW@Lk9CnyFhlZA6X7T3asR zTiH;{gVTK0rVQp8Lu|Hv`N>F~7Eg<4K*noarg3Im0o6|vyW($~oiV*&Cl`~G(&XAw#($v>aqi-ds=9b9?YkwWxbko|DFXe9s&EB%$ z82?GJ?bQlK1|Q*p&$lk1I`{WEFHcN{UlN>E>>6onBa^-Z_Tlq$ATt+hlDt+6-N34) zx@^v@ybYg%8>JS%SLH=IUIg4APk(b0aChMm-I?KOI!EBUXF=?P3`OJCG8!a*;=#?6YNv* zRS|I|Zd3%9@33=W?2NugNibEwGpGYILqF!zkhAV4q*oq?K|!JD<*wI{@#Y;+Cgp2{ z4^N}?ieL*{um^lfFpy_PHbY=-8i7HVzZ8!o&BuxV+L}*jeB`JFiLo)j*Rcpk=OFKmZ6iEQSi)c_!$4d8uv0u} zwI5}ux@!YS4|Gi+hlyWr-<`@569pPQZSLHdS7g?1wYyR2T`MeW!S<)Tsna&hR?%xL&Xiq7^MAz%}^ld9nFuM^KSwr%f?K|?Lel}Mj)yqs*iMu6MJGUiR{%8Ze*q$hx76ZFsjE%Fz|hEtHd2u$PgGY;sZvQ{6;RHhT>0VnGoJ_1n5_K#P>? zVPs^UExdm;(m*p(3?C8mFpxzGZM?K2vsBOA(;%cR z4SE5S$a9`+ZB5-YEz{ntkGMHIx3`0BDd8wf4{-nAju?Hp9Q0j!8b-BuP3KC#bO9Dt z#Bb2rtUqxV_l!K0B#BAucEvHlwda{-)h<~l5|`=c3{inQWV)- zIgEGQ*;A!6GWYoBZa9gOY+-fGwSp)c(VWd{*UPws`QZr4MH!V0CQ9wjHpUE1C-r%$ zSn6pO1@Zobx##Ms)B~1^s#azzbS0N+PIn64;my*hGB>4%o{#wOiT^zrwx}<=B4`%j zTl3~k&rj&$@}TxgzbA6z@4dpYa^3IE&*#(=Hv%H>;!SrGVx6C_kG+TQHBg=^ZErDX zrz~)r0E?g>pO$q+oHgH-Kd!f5%_(wOe7)VzhSrTAalTK3h29WmgdAEU_{4DU=r&LL zd`xZ$sL+^y;FJ62bx?kP@xzstgS}Rh!l?>-3+wAraOKtZ`FAxMV>0=Qg+^p6u0v&_ z&JZoSDVutCnhJ7=E{E-2{lf!n*6-^si=aofRHK8o^;Wo6s&$K({<%CpmI?dZ@J+fv zMWdeYG+PWZa^+RoN@@DqZ?V+Z*)u2xek|6h1Ll&lrz+I{bnR;covTPw;}p(`ym zj&wOoVQ6A+5$~BcMY$%b!cZIU$h1L^%hW6;$p(WDggDECB@+n`ya83L(x#Wja(#bN zHYDOCtu4!?{IRMO5wJDfCeK*hN@?MXsFBYmGMubSPreEE*g_Y^o(r2LU>&f#uj5C+ zjen6P7*g_;lnSQBB)-+n?4;aEu&5!3P7S64EjuWbZ2ufYvQNC2QT;jc3?opcB&4sD zM~fony|?zIt|Q*?2;IWl>F52}T~$ti4%@1OUw*O4#X#P)vB!m6!2XnzW4{*@z#?3q zzep2rtiG_ab#fl;$Y2rkRnf!1vCD=Jw2Gba#^8@%9 z&dDv6;z>Ds<%^1}QSuRQYT&YNS(1VPBQd#x^k`I2o&0Q*q2xEc@#5ZdjETB#<^F#& zh+vo=&IhJ{bC6zS|F{o6RCj2puWC%UluagwscJjIT8OgVsy^B9e#V(QRFl1ZYQm|$ zCrM40{G>kt*X%+C6Q-9M*?ciK?Ztx!nHE@tsO`egVNgeh7%$08-wV&7^(*{I;?*TX=B znJiP-%`3lj`D`k=45G+{&yNJvzFDm+cVlp^i|=o`*wI5c?FSBuR~~ zm6!+mA9CTvGqv5D?M`Dl91?_B&A<9E8FE;tBmIP$mv`1$*z!#X4fUqGbQu!JteC#? z>4Oy|r*E#~Nl9Yc+$VKf*;l2VYWOrRE-ZY6E6Zn(PKSYGmqX{XZQ=$eNAIyJ{Dq0I z2k1Ry$D&qnVJHL6$-A07|p4yl{2BEh_pU(C?px zZXZP8K4{i3yOM&yv|RZ7;WKtg%4_t0Re@eZYf#|56L~YxAcsjF`CsCVzdQA>-`%IY z`St7MwBhcVBbiVOgL`ow7@@pEN?A--UZ}m>ecgnO#b@8$K03R+iE#igSa{Ju0ORT9 zkEDTTGwoDPKebOILBlvwBg4!pkNpDU1hJ@*TCbR}?gOyFnBF|GEfKy7jWlhV(NMy2@yUkmudm4sx)x_ZK%fBkr{io#d6cVyMK*Ue$)I9~m$V@{ zH0!H7+CZqBV*uv3S9Xx=&|dS!vZL9c1YccNsJSYI%9koR+!jkQ213|xM=R?}DY$f9 z2;Ze%CnCgIHyEvzKC4v4&2)>LoVL`bx_Cq4>y-g?S1&i@ar#c<)LV*D;Nyq}0t0-N z$&OA37xYJj*GBPq4;J68Rs`GGIw(!t5?SCUCEZEZaC$NzA{RqM?sxJdC#-M; z?IpJsn4cRr0YyZc^Sm*tX04G4CiYHUkhU6@)|RIMmuO7|?d8ZFn6EsRt0K|2Gc9`lA$~=;U~Exl!z5GVuF|H!>um0jkDMK8QEz3Pcx?$ee0vU`{Mk zZRF1Zm*s|0HMis&(fd7V4IRO$okPeGOjyiY7CO(#q7d`PrtpYozg8DUpciQI-5Q9` z2;2xon1zs&zFSte*b*BWrj@({hcwO5SR|A>p56@MTnx#x*JE#uc-Ftlh zL%eVe2ytlu9k}=N$MRJ!Sb8;;eVfp)(oKZu1Gkd%txrCtQ>_(7B$)hVpZt3h7j#{< zXY?%^21qC)a*Ah(WJ+7V3lqe=kA>}J(gB^(V&&&XyT_^4c4Xf<9Zn$Q$+Qs|Zqqpg zOm4sfjE8UTloh+oQSV~PS|9o$-CFdMA3E8z!GbP53($~NE>o2`L$p>(CHpjC%r3iG z5>r`d&KmA@CD4GdE^Q{GPa!F(R>nc~?!#G+SUdz1@BfDR{pHe<({StVnjYNVnV#Ly zFaO-s1=cB#B)RIX51;Jp#3IU##NA@pESB+ z%-EnevKhiL@yyFZ2a^?8(X-~%BQ&spwFb^&t)$iROPP|EI$B=eg89pnF9rmCX?g8@ zC3APG1}{=>p%5Yi%^? zCm6!CZdmOwzki&UqEoJC^Yv>AS36vk8ZVlC%SaEegV=iH0~1v}6|6FdeCTzdnsYB^ zqFyav%EQ7kJHLwvl@gNZ1yU1cr;@& z+Fjh)+XSA)eGF`?X$45tk2do)mGLVMKxn@+i%t7F(auF~5yNbw6oicB1@P+g`S3Sz zB|sc6-&dP%e8cauBgbmbccV1mK?yS0xZnE}=o~(Xq%zSbb%4LsxpaUMZ6k%oGi*rAMXd6*waC7>1CGH=)Mi=LL3; zUzuhPjQRD8HP~`zD%N<)E{a&%)8pHm`!y)mPw==oSSnW^ah`nOoj;Sy7N425RtZ}< zGcsSBu&E-sA^9EvkbV5P0=hJZt8%eWnf5{3oPz7``86TAR~EDDSG$HotAmW#%9ysn zY}Syys6l^mwNGZg_o+|NTx*+YQ%HOca0V-6?pkZIK3|<6RvnU98yRXuPD@lDFUCH*OQXaGAX-V0cJbDpZwY`8p*` z*xO+MPYSHgN!TGO4gslyynOvm_Sn4}VZ+0<%2$1 z4dCLshkWQR*D(34mgiq6G7bOq!Em8_#|NifxmicLzM;{qEzZf!KOPBa3$CEbX6P}& z)NN9=A0!i`@>TB~$S-4v2K&%%5zIVum!@d+Fx9a4#D%JMK}5a0rJY&xs@T(`SC2Id z3Lk?QvBf>}Rg|B)y08%Sp6%b#P=U$d zat!5izrn`6y?F7FL|xXEC6PH8jq|lpj|=C_HTu5#9PI|_-~>F$_AK$?RX`B7jnyzf za&gyTnFDdR-~x~Qw=n#C(Hg%ph2JQnK6nQhSS8mOs^bpGkohTa0in8TyozOA>h!CVW&4nrH6=^=uRgFC#T;(!>G zZVk7_o-x1ZA$I_0-P+afXL9urNL0==tAw*z2uwpD79>4QgiO2{`W$#;`$9!D`8=5De6(Ie;eM$bh? zf?3OltPb#QE?^p%U|3sA<I8n76QBEpID4ZS6z%z_nJz`)J#8VH8v;{M6lUk#p@Ai(N!Fm~2!C!MtK2 zG;S#(e%43mw>8Xf@gPSKHBHramYw$Ems>a4zyYq*@P)q*i>syq7t;^}NV|pjGoE$W z<%z=cU5{js?_z>9-W_tB*`P3$K2gYWWQv69o^>=(33q8N;*HvU=NZ`@?4G*&2|QF| zKzyQ5us5YHlsTGGf6+(r*?yX;0T;UL4+1yPjV1GSBS$Rxl$n{8r8|xhj{k0Z;PF8bR-aKs zq%5)orv&JEa~eqk6S{!k`j257Hz@9lU$QGaFV1{7mcksb4H+U9KT{-IB;NjZv5((q z^Ioc<#BuJWc`5z#9k2v4M4vZ^cIA83g|81HBAps>z2eoMCQjoqA`+i_iq#yMglf+m(3XQZ!;t5wq~@Zle#wrHMBSt6b{| zG@y^C@vWnHB3oZ8jcx$D-zXtoO?NfI#w5g9Q(W&!vr;a)gBkuf4Pw%7v#7_n@qyl7 zTdA;1I8a%fpdz0Ygs68Sf8n(Bo8Xn{5t~ovy*IV<4AZjToIKty7N9T@+)d)0HgnKq z45z#@O_hs+b|l{=Jc|)5DVhFmd?bn&J-W735dutZ^I1V#ga=}BeF!)(nzQKuZj}Fd zt_?3YwUk8n!E8lM}i}7P1M0BKRbJ4mhFtt-Q^_K{EzK>R_ zy=@L&FfzNW=LZkSaICgZ5$Z$Zs*8`nND&0L=ZVh0Tb~BR;a44{g2Ylge*Ug+Wnj2L z#kjhNv&Sj*MogJVLU)|1_ZugI?}qlbx(0r})5{vmhf}<*iLBJBv1IDD((tY>mXAoT zZ+l$`K_B^ZE|vx3m4)V!i4nu3$W2%$1Eh5h@hrYlZ74vW#l@N+AnmagQt85sNgf1* zc1%o=PvPKwk~H-oI9Ab+)t&;qU)T0^tJ%; z&8$C7${n|Fc)TgCM4RPTtofB>*FsLKtqF$)aPKs?(?q@ZQC z!v2;!ZO94KUdwOj(0|S1LFk(qI=t{Wh}lU$q1874k}F8?C2prTLv{IEk3aQs*GW7X z`YiEkp?qt5(q)$%ZS|zNGzC8^?DiPrdjYy9_Kit^B)s}4I`OD^LMYhZ0n7g_C|Pr& z*I_ItxDXvAIxIY~X?(QXL>0YqYrbHJ9l44}6JzNnA6_eWXXLyS17u3oxyqgNhMORO zp`A_YfVaKCw;)+B1z;RWeyH5~=ieqOLx$IOt_R;Ze5R;?v-pVTu*P3mL$k!{p~B@S zuJILgU2s~4_9efUQA!RodV(;TRy8XZaU+cl7A&IWazlmiR&2DYzvsFz>@KkDfC?oS z0EF69a9OBR20p3`$~qxaRUCc$ljs?g8HhT;cu~b`k^23PhHs0#;pCoh>Nm_)gsj*@ z39a!`Hn^$o$_Gxy5a}&b$53CjFdcw_@+#MK)TzN zuI)M^CiGXF_brGI!8qYnf-5yHLfmK5NQf{qn(iauciyVYAo(+rTEQC<(pA6zcw^Dt-m;{af{< zH^IPz23Dv2gj@j3{ma7fGGY(QNF)#JH(db0BNI9nPWLk7?D(>E!Gj(eDWS@57=4R} z{;fbDP*ZTgxI%|c>`dUCO9%prig_Sztuv`R^W@=r@O>1EoH}Xzs))M=`a6<*@MlnF z5u;S=!c(gce!XA*^dBBV95C*#XIsBL>rH}^d4X92nC<;R=cl)c2czUTr>STbUw3#g z!{j@Y{!SrjBT#s2iplA{ZEsDiN#jv8m$#^xm^v3T-6|;Wb?#h;u+IR?sr_N?_kU|* zkl|7Ww4^r}7B6<9u|^Yh{21-K-R#SJ#~yO7xLl5~DdMrlg3!awCSoW9g(AAe8g zfovfIS%ki>qg4gotUz!!To1G1sd2(uKiK=qGVQeXW;p(mhNl6H`R*k0`}Rf|f)JQ~ z#be~wu%=s6JP>qADp3YEc*B1W^WW7#aN0zm7;bsA)Nufth5#1dB?!7p%_Ut|9!fS! ze$q<0eZA;PB1Tef-;lzq|4YNZ?^Ae92onKd%Nt zpF)SF$B&(qdVh?nU5LLp2!F1B`s6R3vid(pK&p?Fq>DdXB|BH?t&$a(%>A^)bwz_S z_6*%=K0{5$K8ISK&GcFg*!&$!nT6?1R&*7t^)_MBYm>=4kXjh`7>ADBbD|ewpL7NjuRC> zuJ;pg!?IF_N5%ujd9rFAC-MSRb8|e{ZFquK$g|+COLIti;mxnX$j0MvHd%XNOsj^ybyn5J>G1gpp?#ict zM|Bs3G{c-MMmkL^`Kr0^xI2 zSNxLlwN+VCnTs~^VRb6uVL4jF>h)d|7!ihieK0*0f8~2=PfV5k^W$kr*y8P}$Tg3) zC<*na>}2Iv+lDW-0v!3k05L`Zwl7zQN&m39e z<^T7Z0I~@eAUF&SwJ@;HWKxO_D|0H+$s$YnLm6iNisNVnGDuASRTUsw82#T`>A&O; z_@;v>Tq#q$E(E-~u%JMRy9r|}rTmsNnbRSbK7sjMo@9))$AQ`P8$-ay6392a(>FE$ zZ!b|Lu!!6H>TuI62<=y*WNI_aX|g+99OeI*!+&;B&YQPuI7QwY)AG}QY~No>@X~>` z#HI3jef!_E{eOS?2mmKn)~T`g|M#vpE&IQ{@!!Pb{|~)^$CV@;FtIaNt{*_J_~s%0 zQ;0urnxG5}4V|$!)4oA`0v>79+Zl~H2E$8?L%P@rw z^?!25n^ppqHJvA?{NsPP4Y)dd2Wsr0&xfJ^f9O2$;}qGOPZ@~&9~bPuuIcjTeI_gD z(!&czbO#fuq0F9LrO0@f>M5|Q#F+7fQ=IRGSz~|Pdo@r*RAsQuQpJ&ptvT5;pWUxu znepmjfwZkX`vX&xz3^6ir&)~QTXmXOJ(am~u)wZI;eMs#O8UIEd0Nb2YX*(dX;v`z z{Zx8ObI|40a)$HOEPU)_1=H%p`PXF9QmIXen5UsbxoqRp&mds44 z1^Lk$sq3_nTg#DN!xJV2F}vJmq1CFUKo`S%kn_dU+QFT%)g8**ZHn5lh#YLL%QND& zp{k$KlMBwL6K0pKS=)ebvM!Gld3JWY=gp5n7vsJJ3XHBe!zy<~?pjTW^b5CX(!&Sx zY4?`Du8C2yHN|GD%CpIMw+s%vN1RiQ6zj zF@V0@uzN3^dJj6R-Xc7F>yu!m%(YrD;c#WT&YAGq?XuvKhj=|?mf1>DmkRX6Dq(kI zw-uFTNnS3>qUF`OGg{s3{Y#Er>vLPS>FcjuZ69G=+;>Dwo1{#eJ`5;s&bOM?rFO1T zWh<|cW(Xa)n(p@JOf|YP_vcQnHWk_V-?S_`7A-bkx;+hZjX4(CwZ^r?p;|qKek?rA z*0KmbC3<{rq`Q86HasQ#+8cL55q-Ki30TQRQ<}|fcb?Z`fZKVD1M*v9o8cZCeya+A4?hxtkcKbSUwLpu;jFJBkhwYsn?#W z*gh1OKM~hID?vPj#ynQkIzDPeK2%)a0(MSa#TR;A4!>!StOo5IxvjK%yOY4ZYR;q2 z-#-s_4jE~sP|W3ngT*S>vZG@Q2#2;sI9y$HSdkqECYn4Y*DI(t1#78Q#qB)`y#Yhy z9hb@4pGv7JG`QgmvpGeGK9iPcoqc}ZrJ&eXh&g4XKDzT?S#(YaO)a=-?lW3?;CkdL zo>%89e*Yn7(F$F()2b<>co4w)~oby;>BDzYMBt6EK&sdh9`I-q68VGR)G=Z)M&(>0!y8 zCZw?>AjR*iA;DUY{pkiqm9+@mvi&VKW0m_mDt9AhcQZ3}G(YX(1ffGmmW;*T6}}$~ zVdEJXr(>wsx7FW34;961o=) z-5DI(ECnWolAwkN9cO=VvrY?!U?8|X<953kA>N9|T!2}K_N#T&_Ns6Acm->m7=4g! zYgU^ov|0Ac$@JbpC9hfLqjC+Ec2vM^>!|PP^X1u81>sWPlHaJU@2VK2ZFO<}V+rZJ z3ehr0c(VYY@QamUkMdlCGg2lRO-}^`9Ci5dlJ@7!nosgXi&XMLGFk^k&DDxq>y5>L z^H~9uJR4@nj5+Li_3R7ro8Fl2`w;DQCn4I{Y?|&XKIL-PWlIHl;%pbq!P_opBMWnp z3da}|k?RFjQaXp*l)1zG07oGJtZ&OtAJf{>y}lmu8088HYJ}*=GHwHXY(u39iFRu8 zQ0!%_DDqGf4)oc1Atg*tvH?SyzSCy@@5!rL?H;!PCM9G6i-5K$c;e60y6iXs;Fii$ z_^*Q8MSNr`QsW5`NVpvzXkcweqd~|CN>-KHd1W;d2R0bfs#R&$pDl>aCdp2Hb5yLl zQP7JcobW86yan`VJtR534U<*}Box2ECx;M03xw0^Ggg(Q$y%!|L>!*To3-{FqEYBT zCx`fn2!D2d&5C%M(uJMn#y&Azue;9;{v8GVi);xJ+u)g*@NL^+tE`5p~EB*mQ~i?Si!w$ zAV%m_HbGi_%nx%#sFOxdPPrZu{f@Es2D46|ueTX}>h=mu$$L>^;8D5hq05oiF^tMO zzD744-E7(B=k!V|!o`p(6rQc=Pa@(nfKi}{2)I^a#k@0;H3S6j#)c4c(4oi>KdyaK zqF#oBO*q(h)uZKA@mwDo^XEFe>DZvS6tJAhqk9e3Ylb67iIj^%Kw@|g@1tT}SX2N9 zDPf4XMu#jT873gPVaynrGvQ{-R9>o9>q|xX@qS6y6gHqx;dFk<)G0X0uL=UZ zhx@L~I8GW>(<-y^lRO&-!40&9>E1kSa&;|v5F#A2XO~}ox?S>_ZRg5B4BAm7$BkT{ zvaJAIr?)RU5{lQ9Y&!C~)u%_4uz?@56e;SpiQTCbwxlCXd_}NkcC65_qggh`dU@7v+>s-S=e=YMGdgaDY_De&HT@LrkO15-`~y3> zZBeC^rby2r?t5%cUbacA_f+4KXnss%h8WmP+eLe8+$g!8xOZKkodGJ5!5DXNy4XT6 zGkvq?eI%qEiE@}V+C|haBJSq&p|Xtmyn;4&@?;ZT%no?l^%$|1gKI-Y*&I6M&V`%w z$@W!3_t9wpAIE#yla1`W@B}0EWtFIW<~3)knRoXBSWw)~#>`|g#VF2)SFaf;XLJO^ zo8}3>CpG=(9qKBe<9*qIKl`z%b35SS7F*nzNh8&Ei;HEf>}J(C@WU^>GTCpD-&*!* z1*1F&KeHwxC>%efQ9%H4YIapJt-5}<{6$;Drd~McjBQ+xSg2m3U~gRh)Zpo+n>BC0 z2KL8*rfZJ-A_1w%$1+#iLc3tB{CL5@mVWkoE86qs)Z1$jvSL%sO z8fpWDgmDrm%N_;B`o6K-_R0naEg><5vpiVFMF>gDq$*?`o>9bT2SNQ~tEZfXnSBcV z9u6GsH-NpWX6(q|E0liT6XvIVr$RpK#TD{sv>c7Cqp-|o0oR}*D(${u@o`MP$&025l-Z@Vc=?%vo_c{=*w3HtAC-9zNG*GocT zC{{}}Z58;+;pJNK0Tj*8hMdqN{Dda6!B#oj$H1h+0=Q*a5416 zZf{f%N%vNEXhEuGJ4Tg*F6>;-*IV3F_=(qQ8Gf!Epz;z(^Hdk)nL0;GLsh{nix=9t z9d{8Mpl_Zxs>;?i)I0FX+O7f2vkb?$87XH3M?1{Zqa~ZKQ*7eH*`Z;3d%=1nu0HHg zoAW3JY}bad#(3ITewfzzX=KbO{IRsR|LGGJCF6R8NZNuU_``G1R`NNuY+c~`^g#2D+#Qd{Lq!fX9Z-#2qJSkK`&N{HI z+5V|Q)ZSL+RavL&U;9u^7f3Hn`v@~==m?Isjq>-~=N(beBa-mQOnla^!^O3cBV8c* zcPoiBm^8Rf>I3JUx#~6>v6%YGMS}S5tv*Ea$Nh(Qzv`}z4mAX+TrmPX(9Wr(D2;te z8My4OyLU0_N)+4mZhDh?iTnojGDeR^sDuKJ`z2*VHl*I zC5L5gACta^w8VkoyH>=!`h>}V2EDptzTfqfh~pA;ANKVt|7HX|#i*)#f%TK*BsXyD zfQly|^9p+w@o#q zHMT#|+}s5W_u3h1VEzSo08h8dvfumtj zWuTw`5iKWZPtNtGQ1vLq^Dv{CiT(L(skSzmg~^{{_NLLCo7pRB zM>c5!-_--nFy`a}V%-ddl~r=Dw!5XInexw7p~MdK_fTF%RJa?7da%MHs$MAi{6fV; zsIWy^-j4*?2kaD6H{%V@dY6p|pV@80V+lmETF{Q5nQg%34q@H+zUY+~gKP_we-9&f@jZv6I!Q z;-1Jz7r;a;W70G$0Cmouitd#?VO~i$JR`o|Pi9;nQO)i4M$L^zGHp!xp62wp<1p+) zc1YpWRWn$EJ>nU2%;1b!gKDs_I+T`lDw&6;k*|4os%PDI^~uPjfz=GDdyhQauwMCI zJ&z3)m!B6BMf0Ahv;FrDp+@9^3k0}*^K&D#$H5_rWJ?qlcLq)MGqa;=5l$ao>S@OmTX4u|H#aoR|$j-34yCYMVdUS zk)&Db(fQ>%f%ZVl1_*j@*XSN{_zYA!@(+t$2U|yx!z4@nJub>$bvwgyJf{8n_op#j zDcWp|Bo|iVXRbZwN1O50jx9VfCKn8M9d!r%C%@z2nWtPx(S=uBunob;wyP-1w1dc? z3k$aCpUNf`$+SV(5r2y!KxI%>E155-Lq>Fubda!h2u$5VWLARfoh%&^aoScjFCEj{>oE2qp6C(&xLqmp#b8&PXTwW5S3oB5 z{K-l4M_WM24{NcK870@7JmW>BEha(TX@G=o8HP<_ys15RvL4JSMAMy9{N7l8_4Qn% zC5dSdO;>@Swk>ZC{t90<`!81%4mQN|o)6}$o)ARmTfASklvO;q@cRJgvLoA-Ar`+r zCZLBRR@txqqrl^`-5C=5QnX6?OKe zE5W@xt`;6&!aTpc=<1;od5}~5I$b~9Wyd-Gpx9BSUWVP8Brf7`GEf&}?Aj)n2 zvguT_!e1i@u73jRb`N`YRse~ZHmma@=@(Fd>4Kx4v%;5vWt&p?<;z$8GX$j2q)ZX4 zrj2zZ_N0N@i;}^oclQ{N5zepVgoordcPc1Ulc9(5p{0=_*9Q=jBXcTKeXE5BT(4aX zNybe#!kAaczgERZD^`u5_>GbWDF&YN1jON@~Bz)-(MUUxL1tMhn?XN7juwqB)!avba-ztI(Ae4qBJDNW6+ zq+=SXkBJxFQ*P_KpOB%MA+~UT)k<_-$t@{M zQ+s4_I9INsQoYv`uV5m%*dfDdT+9+pZ1T7mKcgDw`WT$N3TpdGW28wSMag=x-OD;r z6y-8!{^X&GnUnmC#j&Ctgl(@0V0#x9NpKMF<5kSH-}zo()lCm)%lF6h=L&$*eQ-qS zY;PHYP3lRP5i;4QNaZVJ+pDBSq>?Z-dUz6^Wxv{4C>f_njdlHV^5HZNVjvcL>1|gz ztu?E>UJmH9Vv*I1kTJ{Gl}VN2w8l)0;)+9BCx2QiDnfz|W2Ev$-F2!6mi>mSfl>2X|-AwK)mD9 zi?#(jUZ5PwVTswGr@{`gdy-eD-d^WHlnHGKzFwT&q*`Z)bXck&Lf?rj6{3ziOB%em0Z7Gh-rC{Ti%L&kf2xYI1&28z2dnA~P5`rfh?+NEPQ~F57l8($=a_nwhG+W-B!rEfS z9Gjx)SPO4AB_ZaReRxdou$jVIvy9YxJg?H-(!)o~0aI+)BMyn|Gu0Bt$HxM>~`f>(HTz8jB*N73;EEd4&!a*O3P_CPeO*rIYH4 z85CB?<6wVZ#Ml&)ED3R>j9&K#j_R(GC#NOB*AGTMXWqv*&!qAF}WsJp%h`b zR&J}FKuKJ3naUGbg17i$nV|F+{*aWO&pfx&UQ*N7zznsu?disd%aMM{s71+$Zwp6* zqqo$D=?XNOR*Yu;>X1D4j+gy;Q-^WU_O0^m$OkAMxKdROxdMqLZ3_J(OZj!niY>X@ z4zn+ea=1C4UB{SQCp4>;3THL3mkWUoHpE6Xb; z?hZ#z0khUD&}EmdEGD*^lHZ|eL5bTS;z5vf!2Wy~BZ02cq?jh@Hc0<(-~Q(k`(zM~ zR!yT`qF?{H=ijessR%l|wW&*_s8RoqcH1Btq=6eWC6$|z|I<@tpvo>6Wh{|{`u{sI ze;d{$X&bScowpJD-%0)b>mCWj-2E{LOJKLbUk?6J1LV!C1PMsc;ZYsofARO9U$2nB zj-GC!0Nr}@Z+-B<;kD3#g~j3Uv3?p=HfE5cwN2e@Mo;tR_(XYF?uXsf&FDN4t%Ijo z^CG`I&dT$m>s{&XxAX=_qV<`7eQ`www!lpU2iy6F3Gmpq>L|$aMipPE(FoBGAXV(R zcg|)A#j_5LBg~3qEj(99MX2CC!SrpJi|R+s@6D$wVL!IyjL*38TMz#0r3XCFZyt|p zQo0ep#{)d1Et^YwUBcGQ*WO`N#OZWEgOWIP)}TCX#@LJK73$^Y+0f@>(D(8>bQCG2 zJCRU9aTTVUBx+x=Og>|N@)Q{fyx!^HBd%3l5orueDsra>5uKS#2|6+OIdb}P-A6Hv z0*kCz#xpoZoqm0N>y01a3W2B`CgNwej?B6*4Ez3+OF_esH0cAzD3>mNm}g&HFgI+V zQ{Af2u64(uiYU6b$&nOZ-e*Z)b=C{)+DsPs%7}7KD39_RynxD{d@tjjQq~fn>fc0) zB=tb0B0UVJKArc0LrbJ(9RiObViCiE>xhy&ugXLE^2;4W8@GXUAamxPV88vHN(zFY z`A41M8$Cfue?O{SN*Hq+r{|dawEG{i3gPA0t5`Hb;Py~}jWA4<@|$p{xKyjc-HNMu z()o*bp^7eg1DOW<8Pz^X@p+GUsaH zI3{G98w*xnc*pIa(4DnA6?C^D<2=f)U5?8em$MM5WL+bDI#lbwrSk$4`B^<(pXYzAewz$occ4fLH~e>g)uBqK@YW)ljv z9tSO2PNo>h#NDFV8GP}8y-^;604+E9!j!hAKupb)7vQU3%&Xt-IA^^P7ZfNHK8qoU;CFA_L zv6`ao0};miKnbcZKXJAdsTz-PLV-HP#DhR7T2S*7=!w#-4YLVkiRT|#iPr4)iqXv` z1gh<`H^bPz_DU3a_6?v(Y9k4wCk+1VkV~Ps(QobX?p6%Ebo-I0qsgwOe%*xyK z?+?%;mVbFSm5$6-+gD!mUjKoV$JNHe=&z}wO@peE?1Co+mTLH8+V<=qhU9qBh%3to zRae?CprDYqXH4ITj0ctxj>GzV?t`rpO3AN=QB*4Me~CF-_{pSa*)MC!=&awLgs!oq z&M&)ZcUPlu_vJ_4cNNJj(GsrdNv~RNh=zky8+rzKqp&$18;{xpH$=%eM)rOaMUn1K zzYzz-m$qM~xw}6zdqbjO{D=wqH_;3dEZ0oIX(ge~Y6v5}GFJ3+MMvz<=4{4{js%W} zKOr+iYt>@b(qMORoYo-oX8u@8h<~Juh?V3ZArQBOF=RT!r7pja6#wp_3~jnsFu)Uk z#YtXjbjKPjYq+iTNhLW{${Y#a1qN) z`XdxY$=k#1M8CR{*4`&bZAwW5?F(kN-(AIsVjBn<>VBq@ug?=L&4|TeFaHdgG_BvKcD*>I|Ol8i1vpFO?M3plxZdRw^^z9ly^2#mV z#Drw0UaW0rWOwW+ki=?aSM{A;$H13*4`l%-9j+c)B;XWIBK2WR?ufj78P2<)O0pnsq(J6W zZc6?gS5Mf)rpAngL2?s5qnA&BK)0jEZ-oKs5iJ66ZstHZB|q%?ZG@sA2zSffX3r ziP%~l$ga@T))lzdSsH1O#EnYC0)(yqC@i~OU{Wx^>DF3>Qb=L-i*!NhkQ33vzpvr1 z5C=8h?^#%6d8@gYh?An}^PqD9EYNCJYm zyFoY>8*@Y8-<1BG#GB~6?@2qq#7 zk#3kdx~(=QObSy!M-cuf*DeWAz~-VBl&SWY()_9Fp)0D*$#0&?s1M)5twKr7Agw`* z@)@U-jzqYiF+K8S=CsSiUv@Fgv?+L4o#6{1Wp<81fypxEz`-^VmBozcwmG#&`ewaj zIL`pAZG^Ib!$f7h9jq1`Dgap{P~1kJHMJ7U<4^5Vl&%S&hIa(5X7{zddN|ORQy@lzc%Yn!wy@hVA zS)19FO5Mp93#%Wi|6GX=5MbnhY_Bw^!@nNOEh?ylKG*XvLR%Pcx327=&;`KZ!M@e9 z>Dq1~f``S((wr#D7j7Cz$ffd>&``i-OXsV*=eu#()ZWrhRCS6LUNrri8ze!~TUL)C z+Nj3mYt=?{<7#2#L8w36G&R&J5giaa$`vq-u zee2|}zzel{j%a5i-A4Y5k)+NVf^o7XE-QOE@ty_iH6AlMt}IEdGTV6O&aKTy@_Yy7tyOXNVON1bpD>Oi;0iByUManmrr zq!g(b)jp~M!e2fiC33XM*sCQvyYzpaF%|}8y;Y05OobMNDsPl zRuE?e!tz%h|G5djy|85&pf7u3G(`1p>ig$D0|$$j=FRE+|JI#rub{XcwqH^}C38T4 Oe_}$?f+hSq-v0}UKv3NP literal 0 HcmV?d00001 diff --git a/docs/reference/search/aggregations/reducers/images/single_0.2alpha.png b/docs/reference/search/aggregations/reducers/images/single_0.2alpha.png new file mode 100644 index 0000000000000000000000000000000000000000..d96cf771743e4e3d23b3c42b1fef98c79ae2b0bb GIT binary patch literal 64198 zcmd3N^M7T_(swwqGnv?$33hDTHYXE16Wf^Bwr$(CZQItHbDw+8J!j58@b1t0VXa=> zT~%Gx^{uL2J5WYS7!Dc}8VCpoPELy?17kCNARraLSSKhJ#r64juKSh2-K2e01G;hIYmRaLZ}S|}jDnF7u(5g~1bzZR zLh^k6F?vHXL8yF%3jPvK2u0`T?{4qs*IeiBX>MoxWvz2d>8S@E<&pJGhux`o_l4Z~avazD7j5em!yJ@2cnT>IWPg z{Hws8lt?FgNJqH8He7%*7FYQPbb!7)Be8nusSKcL@z4-q5I_QecZ|4#vK5ut4q+A9sKgPmD<_UV@mg;}k0s-xMzo~>NRN&I%KQ5&u zx>nl5Gpq6|L|j4KF|hFZ?hM9#dAl&T-bKW<>2KeDyT8J37r_cT0+itiAvzD%IiQi1 z(EWN^PTlDJ|2P$)h4XKl0EreUKM;&*OpwpzTHk zMilAa^yO6dxS|_n7H!kiGVHqv*uDSLa>C0TUhv%{?Rz#qeEr%MrRN4XpsMXQ41NR? z{mD?HB8S~L8I;1CvD?)Q&cRGacw9z!ntO$V!|v9er2{vO*S?*#Tg28_{k={S8mi)@ z6hZq6mZoAY(eOdZ$afTP`HhCS!+sCg{al!?ZemWNO*zj?E9?QiM#=Ml)@?_{hA>>cC94t)*rVOO)C5)TuL_p1P5O` zAi2Sg`}X0t${yE_8=W=XPqf{@4YEs61;x) zNWI@5tRNmQanm2sdOWPM+|9P=$vGz?6E0NCHQ`k#%3RQ@$k*d)(Le zo^$sw_a<{5l+FT32JiJXtmPMq+YQ0;wM!VDFTb46JUUOjA)e_ix^JfjTt#RqJ zs0{?344R_uP&XzkE2#+qdzN0i?LR!|w7OaFk8r*ZhzvZIQo;c7KI5stA5b3yW z;w2(hF)u0rHRzt=N`);%X4d1P^YB=}3w3;I0Om&I?IVUk^}?J0bM8P_0aD)pgU7Ev z0|h~vi`*b*fsf+BWPz#b6qJDs;>BoyI`hPw0K)a^-@rHk_t{`X#**JaNq`Xl4(E-J zE#Qle&-4od*|RRm)R=8!ipf^i3|k#9`~5fvWC4>^P30NWO+2_%-kK9lVL=?3-4 zM~nA7L-l~YCdgY>fm{gnix?z{pPZ0!ri4sg!7I7t41of}Hzd1HvBBT?R(TY&aWm60 zKgn(vNEZmAcf`=pk3fib3{}9J0kd~&(O`u6$!}seK$iI)N7lLnhj-gtKw}0Q@LXq+ zL_9DEEN8Ax224~Q$R7kB(AhFJ!M%dK!gGed`4RkJq1{4P3_s|aSn4uc~LlkZpE5NV*%Kt2OG z`&0%sAD-Ox+9SW^bW!E#)^=tgdF4? zgd6lVh!F&bHz7l+g5&`y48rCi;Hh#O%qQs-mvj~KdLq95oTP#%CBN=<6! zyNEIKhi8Aun$Nz^kj??*S>~pH_r!R>uti_NKt>lsU&6>kuMFu55ejh+F%DtIXvWx2 zJV@M0JV}J7-=*c$!!-0y^h*d$08N-qJf}gREw8ewG^%v0%BfJOx~ZnDwyrR#a<0m& zQmTZnDyzDzI;?!G)~}eY2(8Lq>|H2bAX)@e)>n`Jq_Hme*d%P5M%%7Na^5de`@dQ0OxRbM|FIEJXvsl;64!mr5w!?$dcL; z>zo6RES*4`W&LxTMO&?ASM!N|?|IP@*XM870l&0&FN0lTKrqFS%mJlW!TNvel>rK~rtZ>_g&#eI=? zelw-JAv;DlWSgGMo=iJm%`m09)u!-zi>uXb@i|WeRU=Bn{|@GyyXE~5_zn~p0v9)j zRfNrP{`2*<5ORq97JKh`KH(vA3mCgX4XTBc( zD84>~Lfj!P25BaOcz?dkXMWRe^ho8@GONf7l+AO(JE0xHmynw97MFxGtBK7r^2&mk zf|J)}mxE^X(gcc1nG$ugVcIv{!2Y}~*sY;3l(1(!4_p9_2emsv zCqX7xBn2(?&n@51z#!ryV(Y03(b{Og!2`YZoio(N7?`i^a-4GP(>c@DQ*+Xd=|>z? zHf;Addl=>PtA{~{@nb*6Kc(UfO$?B;3p-FhtqA0)8rY#(Dw)jj-F2SivtF_K?L+N% zkB9Fk?agT+Hdx(tJqe}<`#3E*DL$q=etjGU#|3|lX80<4xF!~$n%a~?`5T~p%!#uVL=j1g)}?z_=v;C& zxi%I#R=Fslc*?>{eY2ix&Aye>wtq)zSaY{I_8#nfcipfdT@6lASaE#*VJ2jrZT4;! zW4?B#cy`?b|8YYNRGWW|KQG`g)VHsvZ#3^9uXitEuTgj{oKy|IuEoO649BS2aC?$RXY?~+dxXT}&X(72x4zFk`GXUKw}m|jJqWDR9Mik>RrGMxPt~n>_iiaqs8`13 z)Q9w|`!4`O?K8L8mnFt?7Vg{77xU}qLOe6>GO7VU4W$}&7!^2mDOLWP-m_|5R?Af7 zQ3YEoT4R{{_$uOR!Mpy|@uW+H5NE!Q7QO@;79Jbz9S|RIVgzB3CG@IfsU$BL zvtYA`H@IJ%Y)bC9j7gJyr*c#~NQ6X;MY%@iZf}4fiSZ^7=daL#6h~@cV6KyqrRdL60uzM(ZRe8C(-G7Ym7!JVzt_3d!c?VU= zkt4AO*BjsthXrjJLLNNAfsOSU@zl2Gpujmqu4IcgmZfx?^b#H_El`I{ida9DQnW33 zDrG5_$@b{<@Q^-zx;qvfl6l|LX%`-h@0^Ep$)(`)THo!2HbXQP*~mhz{iPdqLOEjvtQ zbRg*&I5h6)wqo}n?Q=i_0e;NW*%-Pex+s>?;l-gI&dLvK&LC+8Nj1qDi9=5n&-~Lh zw>LW@fvSazgW>w=-g`THXxz$fKi|dfZ{J#WUji(J+pAqYUWnWXj3R4x{CXyLOrHIB z_V(_cuKDX_+$$(KeA+?-yV~ds^49EeDXMIi|vKPh`V&Y;&hE7JtnRn9x!f zgc$-*9kemXvbZ=M4J~IW_qvftmoddLX&h5NyQY@^cn^I^E-ym2Ok9@|k}8jVj+(_~ zbgF&5O@O$9gTPBOqBJ)#vT(I+-8D0nP}g%=*}e($ChL_3*rhq4t?6HDWMQpxPTAx$ zmyVaOANj3$u4T+dEL%Mc_b8JI|Il)2oy5?`OT{X7aj{;xOzm-bc}74fjRuzw;4+eu zmqZvr9mjn>cplUp+qc<>k0>X&CRiU>Cvq2{5}vnvXX zJiYw_VIBc>gS-o+yJ=g)(gcAL8rwCtRkr0uolq(#zr0!s@EpJoOk2MhGU#=iJrO#Vyz*0MpCY z3+Hexnn7$p5|EoFGC!;lKN=ex?VuN0gVBdd62P|raQ4ViA)7uwMxys#-wJf=CV?;nrJC> zaov{X$mpbJ=Vq?aOq<5~z5K+(5&tYBjAE@eJm!4wysh77NVn%YX`aAR|J*K}be)A; z(qm&C+loVm3=(Gs*OgF!VrM@Sx(dbGFg`%lm&$iv=p+z5ID~z~?m;KUl)>g^ATlT$ ziZ-m0uu{BVZJRFKrT>`p7fDrGh6IQx3>jHedcHz#nix@P6n7{)r}dta^O5R1tdGsj zKCa=}aLRDd*xb-0SGp^(2Q8yzmFH=cS>}*_ECsZnTrVMstMa1QVHwg8`H*phh+O-o z^R?dYXwjGYx$|Euc5M!4?%Hs=rSh8YuU4RYnMT>X(dyTg!trXD>g#ppTzkh8PFt@x z3j3#q{F+#+S)9+#ysz7um5ggaRK$KuWlz~7CML;uyVxDvbU=i9TY3sjAaHrw{lZ1|V~BjAL1hcw@j0CUgXX);tdYenh!%;P z4_EVh)S+j=r1lAzbgcQthNv0h?w8r0fRCX{f!1a6W4nTi1$GSG0qq`nJOH6%UUq{V zn|z2|3Ch5i!dG9+W{B<^LoAjeg=|Cu-wU6)_^PKK8%B-i9pWi*DvBmTE7vRkCw+et zA;mr=Qqw%BJu5Uyark0HQ>tXvWmp%w0N8#oLxOm$dX{_XwEmVwSKQ{E$I6#-@H6N< z7;wnsU%!Hp1^0VD_2Qkh5hGC?66O+)l5+yCHSU@;f?C48E3>#~7ykhz(c?M+&Pq?6Bl7!F+M#_GXEiEXgAu|KP^DEvEWnf>zO!c?u=Rvm{K^g%a zVN(K4_H+x%?q%I9+iKgcy-0jOKs7;4M705P(;J=tiu3iHAdx1~#It~}MM6^4Ex|dd zFEB4zDZMVy0-%Y~3hzed1J_L+l~_iXOCL?Y(0k{_8RRn{+f_KoIvCq& zKEgjrKjadBK0>YSZVs&ECGCyiHwNtzXcP<`G#R|@pi6uaH6wR3WlQIn_<0edQRh+0 zPdF$%hAmdR*i|`uji}V3c3zoCBD!QZ`?Nqeqd1GKyxi>jcgG3xxrZRpfWsEn0Kc?c zZtHyg#gzKdy5Xw!lZl;&QB@u7l;La86^L#Pmz9UrR*ovpa~C@o>8FxM+UH^Lg-=3Y zny?n|k0J(8vk1YE?P#>t2~0zjY_9{u=$Q)%x+wjvC`Ksk(OprMU&*E?@}tJBa_S9w z4A~5hQ^RTjGPwSn}DA(c(S=H`Kyz*r06IbMx zn3gkNZ=~WS9Nf>1-n%;LGY{iYi0CcR*7(jLQ9|TFrv0l~ZN#+YJz}UKN^HwX%IYmB zU(^=0JW#8i4}q8?fLy#jUx3i!L#%*kZV(9o2l5*rVhs5uWrmX*#87{ORSd%PI{Z$= zf@I;(JYjNRWXtJ{lkF$k3#%(w*u9_LGcAbBpj{DPp{P^xX1m{JeYRxtDc8Ry&;8H>lLY^f6Nf7Ews>4em3#`iyte9-A3;VlQ*F4@70?enm3g{^25F13bB+kOEVv|tac1JbG%@T}rL2VfSqXsJX` zb;9CmyX105P76vG8YOlW@L*%Q-wl47FL?B}%x@2PpLnfov2vSqy}XSYpPI7nxjK=K zPv@jyqxf9hGaE7c#^_hOQfqlEbS!vmidBcj?9thd?XkMx%XG?AZdvM45T9~#cMB|Q z4C;xoyz)W-26ToEgoTluY$}+XjMXbrb@|D2&olMizU~}T4d`m!B2%iev%%xN|4RFr zf5cIZhsgKi{2LB&=@Qi4s_(wtgH&+>;pwWGP!$B_aM5Qih%$E~@(tq#7UxtWCx zn&qW_!Z&v^v& zZFH@Tt!#}gE%5)CSLcVNoh>IJ;U9$l`u!iD`i{o`qhw+8U$H&}1pM&^Kub*n_-pQu zp&Wm-vdI`b>YJ$u7@O-`*nH67Vr8P^_)q_Tz4;%-zl~J=-$;5UhQE*e+nfK6&J3&L3059TKbQNEgnfAARZu50bT`1;FDA+SB2TdcRWcxJ`gB$CITK6 zFcR`$u+Js<4PN5lv0|5sMzX<*$4~WcrojtK^-86`l!CInKUWrbDxwvZKtqxz`%1?7 zj`DaT5(7u!zdy4_8(Pyh(kF`24UX(MywKRh+l*he{ycLVt4_7sjzBW>^#c>}{QWwD z^s2TF-%Ayh5-TB^-enN|tp|P$6!sW9+7f7p=VU4lu+0YQzvmF?umnN%eHH2rBIW`4 z?TX~_^tK{i5ghaSo$~)bqwv{~Njq61fS~?y@n{msI2y&aaO$>-gqZK$BIlT^SsqR| zM-wl=LwH>Odbuxk&=Yp2Xgo~IoGQ|~a48W(K=m!=3!jhreI_a-@}a?q@id;Q z60zm`w%{Y>b^-7--KjIi>jWT2TsLwA95DPoIuLW_Hcf2UiM^uhy}OkJZ^2 zRXXh!tTUk5BHU(2K!UD8M2>PKpK))Exr{q09nJcfmb2?Us5{!#&sX}+;8IiBw}|lWJw;uf z$20eZMQX2s6D$;k{21&noL>R>G}Tx6?!h{3CstnFJk0BD1)qzA75i(afB3Yn9oCuvR>ov)(5S)l zGA!h5oQipMW!P7wXj(*ZT5~bi;qfHOcp~9JX%}X+8XK*5 zq^qu(`rULuXmoOux-y#nkneok1yxLZO*kM)X!*NU^uH|1pC_=yxKL=f++khLVxc>| zrTgkwGGfzBcgJs6knWN>Tx_p`VUfl>3@IkMtgRb5M_+TUq+a9mb&{*Wk}2`wer?qTG8Mn!u=HX`Mr4nAm%|ZbH?ct-Nd&+{Htp9U zwPp$V-1S_x5D|vEBw3_tUe&cQANyhaw>UHw{lSOS@>{<074IFVflaKH!b-5NNS7_mrPx8OCL=A?F$w+lsQyb1Aq?UIVD@^6F zk)GE$(|L#>9mD=yN2fOJL;|9}{B}REPPML~Tse{*klVymIxuQZwb@`C-bS*IIhbK> zFv&7IcC`J5JZfHu=(^uPM*KNHS(6$Nj-@|lNR>!*{rv|{^tbkxVX(t9Of(bL5IkX& zoqP``(NDQ*TXuWaiCT1f!$Kj^lg6RB!v#=rXbVZ>ULTbj{sY1(NT_?san$V!v(T|s``Rh(PuQTDxdR)mNA)Lun(tkGqZxMVWZ!5%pYzxV02DX(m z^nUT*w<@s=$p^R>`-Xps{tecEpnS1_p}RKi`&qR9;e!A0$X?%&GVnD>Tw4-;r&*NP zkw`zgf}ea%%)`474SD~2$dQ2t)gvH0|Kr>)+Z4LXZkx*{?l2kD%7-}d=Un?zzb}wo1Q7Tpo8vUA{_itG3WAVHr9^mpduJeoa&_?v zkHG&PJmEiph2e63!*jcdtTA6!db&Tydw~Z%(3ve#tkUg=$6&K9m=RVm3%`2YPaj^Y zHPv5lbK8?BRw~UA4#%A;*X9mSnytpd#N2!-y&Cexs5&JkRm4AUe@b#~*;cq6i75S7 z41j|V#QTtJ*%nD69yf3}ozHL5N1<^C*|$$Uwq+5R+y z=jE~=Z`kAQI^FuVc$){($d{_~KD0N=flW**Zl!ojK7ZE;7;JOlD1m7CH!#Nc`bCE5 zXDhB$s9c_wVmGI{y9cxBc)d4z^?JW99!)BlBOXtE=G@rWxTnF#=gnX`H?sC=` zm5^ZMYm$EqkHbdXABGccGsY@hXR&JXbUas@oB$mzJWUhnztw90Ieu9IbIdV1BaNH=>9d^0tDEar4F&Sa(OtNt8i z7EKLE;`5m8B6ck4Jwm&}R*`O>na}SwdxY-Q)y3y@*0{ej5J_~F=x{V6Din@O1%ZGY z^F_ypUbFsdwN~}_PLRiLyf^$08Dkr5_K$R~Pl;137prnd z8jYv(23zmfD6)k@tzo(`FfbxXAitbzemgf0i(2;h893r>^Akg4GKvBwZzR&LUu}5c znH((Ms~hKEHs;w8ukCK^yTR-8X>d1Qn;Osc!ixtBrj$yeP}coJXX0vVNiBCvrctr6 zB$JbqTgOccD)Hdw8=XG6mg{X-FW2KRBh>_E{KGJv_aAy@KINJ4r#Vy2xb>@WyI*y1 zNu%q!peS!4WOq2DbSNu=Fp(v4m3h1EG&r=~UY#N^u18g;|2nxNUejyy?9Eywel!TD zYZ0MT%Q931vUmI2#wC`S1{+IZmCqA5Qk2~Y5B5-l$7aDNlTKscGyPCAco)@3G67^Z zoc4Ss9AtB=^DW&xZ-$^u$;*o?(M?whhQS=UX0ttGXT?a^X|_(czmTK9Oc|zbjgO}q zO{>35Y2IAja8Zk@DdFB_2w)e;|NZj#N@9?KE0INTCrt;WkcEjvF==0Kh^%@}tbUVI z+@zt+{&Y#vV7+a+&CTVRy8d7Lehf(_Q;rP2T|I_?-yc#h56}I9*V@{en1n=-$zsK5 z42Ro&nx(<}CLS#8sozR4T75gjta$6CZ`-x>_7 zHYkP{>pM=D?<6uS58i}?(Q!~saynRcl>vYk-DsKrHvpdV4Y8D!07s7Y>_*@!S-O z8`>G=7;A=%AY?Nl$Dq7vN0K@HY%~^{9V}H_ook!NVW=BAAUjdWq!{>Uz>bdKWBlAr zB@yEq1dQ!008ALnXy)b45<&}_kbHsP&sM&Wwhb5d1_cy09?}T8KD1SE+ zL^LqOQPtr^!*uf(kMg!BlgAI9forzM$1gVCr)xFJU-E(nttB$5ee_$&NGtEcUL z?Sy#O)fQv~pKHG!Dbu;XI6iI#144jS8h+}c=%^hZF+T-(TpX9TTlj8@Mw2E!iBy9T zJfyjtMb4F~o8Fo{b5YzIttIZ{$eEkg)@s1O@e-X#s%yY>>%{+yJvlJ^?`dpyLq=vu zurT0Vv{{_%&c~&Fuue}mhkeZGOIv%R$;oU|^NA>*T{~7Vmvl~t)$NQbH^W~}>(?=D zOcErBT2a8RzoSBTb!H4P{cdyk5|H@#v%`RP4Iy0^s*BorEi6)cNY?~ZUr@t7ihn73 z3zCN-7*$wQRJ5<0d5hF28L`#EoTlMlF}Gc82kU-{ykVZ!(V~W7-bmo@X(|xZjQ$7I zA78ERul&wa20Zcy{y|u>)&$EtR#I*u}h2EkMc@0ML`5a7cmpy9TJ1*`Tt+OL5H;mKs zXVqZ6;YtZFgv;BnZWLz0pZ`J1r(*`kM&E-sY`)Xo&N0~4`Fkv^#cY4&*jlcBL>k~($nCCN2=UskiBJ42LXY@1 zktc7Po{M`Be=C>pJfJ-64TM3HRbD<@;vA!%CVlmAcgPjlH{FdS>l0}{fn_zA4e^3h zuhDvAbBaXGg{vOp#GlU#EOB*6Iw?&joQFFwm<@`v0QDCG3^O?oEeuv^PU+ncNe>*D zvJbb#%Ihy`^;7eo;e&5RG?QQ>LV*e4A3N4PgSbqTG(Kwr*KCcIk&(G`kf7AN9hCLM z{S79)E=VuSj!o%RjH5TXL^o<1>lq|SB0=@psw3# z&hX$%l2E`K=N6=B2L&)#mv;n=4l_Yh8@rLxvagN4@g1AZ>bB;2gmS$*xeGM>P{_!* zp30G;*77#gKUl@Ls!O5bR(pT^HoAp5s54618ghb7) z%5F?2yUmD<=2l*ZLp@#Hf2gB~KDAw2((W6asq^Lyj{Z!aOPfw7V1DX@6KU^(2V z<+BihKCMStH}v2wAp0{*Mg|7g4R+8<59NqP@5BmIS0Ba9^?=hx1nKZ`UdoH&vIuF_ z97!hKcoHWiZEg@y&e4Iud{m$okY(P#Lw3C;-!oi$YF|_#n_rk0f{xPWC^nVt<{G#h zjj`&mYs07fEdc8f1mdkosc9V|u z0;vvqrZ=vv!7f(DBxe4bic&s-u1Hg#7&)M8wvGIR;(g1E&h-&U}-{^s#HCigaB zHg%npuVW?Z#vdTGDaJ!2#2ji$IeL^%^d;S<&Gq$FNtbYyw9P58rRz|#Y2XD^+y7XB z)#7hD1IF?aPqvTk!V8wfMjs_8$tM8gcA6wNPMy#|hE27)u|Fk#y0A!OG7r6JEcdf50#q4orSC3)dPHU#2h!;oJWg!gvf2}44yjmnL)*6%UaM|_|Fw?;Q0u#yj0b(CYW75rm=}j|~5mS|UmE6Zf z*KZ?@%eemX_kbMI5=5Ed+`@O&Zrw6kQrKSX^+2VuLKdR2VGum)+mCPnfzfD_s}Ab__oaBm8~s(!rKgsjVNF zYa~6=Z&Rez_=>+2GWNNHfK=Lm3aL<=cbSZv3R=R=jQxb}s_jl~jpB)g!k4ZP3b8=c zT`Y)+d82X`Q$OyH_$_7b_<$rGLuw8&5CUoZ{g~k%46mvaa4(dtQU$T!qm3~za!7WY zO<80}ClCx_heTRu=q?iwoGX+KpugjjNJbnajhh^mmtj4zj3Njt32!+f?b5n;OBeY! zfoB(-nkU1#wC5-F{omMLT&;6Of z25FXwuYrTUCN3AB&rrV zX&*SkgOk@;C=C6FWY%P?;cSBV#XU8P+MyREaLc=g(#n`Z;NY@RmHPo~y}{8Wf67_F z;q|gr6{nYcNT%VGz}s{s(kYr~+Cq@XTxz4YEj;5 zVuYbo_8Ho+#bTWob#rx$WEu2V81W%0xTRG~C4 zzU9(~;&cQ&8yg6ZSJm}gapMlBv3TPRpHRQxkaRnz0HOo5$!KN0jyQw$mbUN3*M7AR zx~)dHj8}Zr=xyIoF)p_pMZ@p4N|(lV%%hakQz^0F)r0A7mj^2#@M649haEI(9Q-5_ zXe8_KDB_+5Q}qT8Xs5R~U=ac%7hroa1~4bX0J!#Fps_lRHVTZdiO=;i zS7vH`TIip?*^D-=_UqqoW`EWihyKfih%@pg>+|4gXwYejuRKuhAXj27d+kVkHz>5- zrQ^;hKYRb&)4Mcwa*_03xtkM~vwi9ZYW^DK4e|~M{0azu@~B*AwAr0~cT;6TwTqez z=fDS5GaL>v>>qL-|A}T6Rq2uTtvTv>AOS*1+eGI-3KP+kZ&4%82VxOVjG;0g-GcDySUV3n?-i>as>?WDxMO zeO-Gc^R5+ETKaeoL#ID!??qt8P*W1l_3c${xd_cX>dTu$9I?GvR7g1;*r%&O-t{TDVOdF&B(*zn)_A_YsL>#Jt&^f za43f9u5w(Q3+I}?Khv>N243}83PYNaHakeh+}^)wDml(frxhY8A-|A%(GXlHo}tlC z-R!`)hM{AwLk-U*vB{sIQ-*GDkJ#s5m%NB7B{n%n^XO`zfFlflq05^UP(Jyq3Xzcn zwUcJ68rdt{E9#6g{~VxJkXhWjs6E_cj;CZ(9yyPV1`~&)26!k7yIMC-{LEDSz1Su- zOR{`ePaIMTi%BibafkYlvUzvac$i~}BJM=@d8fGCUi3*5VIV=Lb;qhNpYHCKF;6Rm zgwN15mP$Fqo#za5Lew6ROlnF=RqA0LjJjhWZf{?Ua-WtCD7V zeM(-=d(v3|pGUcv+kbdHU8g?pgzU1r&Z+9F2OIxP)L=iM!(gi@CTZLg6FW8T6@^PVfg*iijShXAQxqt=Ls0R8 z$W-yS-(rc?YozLqEaO+f5q*!vo%4|yOFtJbmG<8jpE@=^<3;bBO|7+B^8TP2i#Uz@ zflkK^?fc!qWAB%{-f#`Ks=@Sgi62&SVO?6Shdg54^ac}@FAK28tIz4uhW%jZO10d2 zs{LsYv5PYQAlsEsh^t{)hb9716zZ_ew7>~`T`alQO z_)Sfa<$>?Ol9d*wLcwY{X1^T5?(tFG(u zq*cM)VZOq0LU=*SXPFqW3k;uUivY#Jo-W8(T2LV`?k|kD?)qv zJ^dF<;5oO6@z6r{eRO$?1o77#RR^2dC?D!}eeUG%jDN;YUTVZLR!=#HoLozvO~KeP zD{PFC->&Ml5N5Uxy7EmM0*nXQ(n^yRpWF+y0hKXbTH@JHl~*tTYO7wsIEBOnN-E0! zpR2XI7&7mc@ak#-?(4bXuD!2LnVywx;E|}0 zizC9W#lG=ZPUV$>G60}H4_GR9lBO_H`cm%%NV_HTpk(09n0cZ+0EMZBpNj!w@2x;O z@1;Mo*Uz(24v%7`0Ishy{U%;>D!x*GX3rJj+bzxonEWy`dwj==D9gS^z({rnl7>1 zfk*?|vn=K4Z34xP2iaJ}yTduG{KugzSq@&FlC1>Yohom2*k{q}+vU+leUx{~T%Go# zVIx3e*jLDBuQWvwd{oE+ZPD^5qfjYbwfggnY|Dz)vOG>te^qd8%Pty;WCZN-X#RjK zlKe!9A33d5+RG^QO@MSUuGn|F8&woDQ#uM_zZ&2uWv{7{bu_o`~Du>}nxiN8%QBOehZRk9T1g{wCO@u08<3#w*{la5#Rt z%nj|gjo&AY)ga?qqHZP3D9t$|+QEw!ioqy1?l^L2xm$h(&>82vk4`Zxc_@{Lf_!ZE zSJA1clxqm0&r=bD)g$Qdj;H!R?&0bXT8uS_s$t-$pnY{_guQrRb<~FYRwz|lO1vnw zWAWeBs9XhDfIYq3M#3h3?chby~h&t*b9~ z#jCGBY&PgM{0gZ&@{3Ua(a-r`~xI27YH(g13AD1M#3 zi^et7gX@u=HR+3fSoxBi5r}DGDl`{Ue8SIViya@Fvo-^>E&)Z}u&W)zcj}kVLOz!udUR z+(RS0E3^|-0>Mn;J(EaksU9zHut&E}UG%!>lo5-Dggn!n`)`HN$JkYkd^d4DlE0}o zk`aHcRB^tq88OK6VDPF%)W=*h)D~1`J@uxsM(eix#arzfxy|}tuLMa2!b%pN09**P z&%oxkcKjS9pW0*%+vtt7GD8hptXi{Lj%mli*q_-?F0|xuYBv0em`A0*D-)FuRwE4_c_KAKmFrW)#YT79_m5z`*9_u{QMySgWWBu%~|H#m*>^A@}Q59 zQg6_wdw24fJ=sL1)oi&VHnK%Fd%9|~K3nf}+hNE@3E!V76m!TQl=XwdS$Lw+>89Jt zs_&{Ll&g)>$hx``Nu)EqZz*tP8S0Rjoqv^b@j<*G`4q^}+_av>f^(9_nk-Hk9oogQ zU^DM4Ymi9&a<1fqSyprv#b)fZYG3cAcpK`@fu6~{pdop>gFvD&#{&?3rNIWgBUp3P z^P$zQ-Why#UVr){t}v8UHmWH5>A1g6QGx2eq7Z>68QoHRxDCy6K>R8DwuGBN_L%FD zGAZ4yyw6jT?gVtPD55goYdhJ8U-(0(h>c)zdgZa}r7ntlXX?0e(XeiKa}ix6M=%@> zig?kO#%A@Cd#`ZWCs8wNoGNPljj}UGpTIZOddJ6(*?=J%5kDy|*llGRhs}iaX+aoF zk~3@DroCF#niaD$DLEy_LUyIO_aY|5zZo>J51_qx-tVPFJv`;_zG}nKOcu*RfEy=$ zw}$IS+LXrV8v)sHl*l?>)1P`&KWH+abr=;0& zH)p>V$oG}<#=g!1TRY{oRy5KV7B2}8eI3ysiF~GPgy$1tf)u~{$bHJv^YgE=*0K-X zM*xldA~SaYZz}SzFM%pVut{8&gJ{|;m{S-vS|KY54R2f2u?M9MR`OD-ZN9rZS+=(^ zd%c{~y>ng=ilBeBb^FQ8HYb)#9UiuP<`G=RN8zXVcF4V7qoJ|}~ zzZ@MA;|_fT>t?{}O}@kIqN(ZNKD6PZHrWdDH?l-oR<<|Ml_-RP_K4{n#vDi*5rh5nzIyRjqSCRQAm`HbI@ETQ4HTP;0X&El}xXWBDD z3F(rr?z}|Vu(r#Qxr-fpEp{qe+~>zvfLr{On`^Om_Zzhl_xS0?$AThDs$ z!Fd)v82`F<%HF(pfp*VW#Hi?j%t`y4Y&p6&Al`J8Y4W^gW?UqDBaFFRB&sBDj(ttZ zJy*^$zY)X!Nwj;x=mQ%_(#yEM?!TPrH$L7b(&^7KTVy_DB77OTjW!V1nN+R1pTKxL zHZ0pfNqhbI=T{zGsh&xK_oi#plw91iuNK)jLZAF+8!>9xINcy7GZNE$Ip0R;BoAkwHFb<$fhgJ-Se_f|5amr`E2M0?~r{ev62}T7ug}{E0i| zRqcl1_O#dsvNik%b29JQ#=F^)aaefE4&SwAD&7%Z3HOwdI^&e{SRpq~y{6|~3B>7b zWjiOO8(@&xV20&Ak}+QPeC*2(nb6o~d|WAGd~V%O!ML~8{^3>GLeAxFuK&WbF9op5 z_r-N^Tiid27|GXnr8E2l_o1Gt?Fn4#Aux%=G8vz>E;U!Hccyw_!3MAD@jt<=0bi!= zk1ma^*Qu0S|50NL!zw5A`iA1^K@*gR-BFh`{YPDwH%I6eL1+Wq#p@1rH?Iswz(;m* z{CO!X+#0-mh*LG`S^1gI;~~E|S|3~|{;?JiZE{OcCoFWs^by;lgkan@y|>QhD0>Go zXGs$!C|&zkT90<%?L^%~TWdkY({Pw9aac-TvivCq32<#k)qX;+zy%i&0n}dI6SYh% z6yg8l>aC*U>bj;;oW>h>mk>NS1b26LLh#@g+=IKjy9Rd+?ykXIf;$a==gIqxamKlY zo3-}ZyJyXsRka{AO#|%9Jd;7*5W77AudyT5cprcPankuV^|S@pKITPZU#jSKQA(`& zu?hVn^*15!vRGoRwVO|10`M590( zapPfmykzF|^`d7YU%fsO+k(;QfjK43Gx@0cMb|gi0b;GRVAoZXZpHqZ{=U*=(~e+C z3RTyHHj<77y$D_11I$0w?X7jzz0ULT_$W|4X`W7(nw_lv#zZZZhL6*4E^K<)* z{}G6NNj)m`HW=ajLk#y<0xZjjZ9#{B*1hRR8Pa#X$Y2B5VXUyk{F?;VCWJ{z+aAx4 zDUr$Xke0N5{QySJ)<|Y{fGoQ@ZuU{#584tSnde{dB&2dw+&HHOkbU7$ka%eYbCtX5 z#Gl9t-Wqy&lRg01qb^q;Hd9=9qV^AT?cz70&_+MZz0nViMLKoMr8Q}xaw;UOfqw#m z&L&v*K~cpLS7mdK4LwD$bWR9vQe**uQeXR&LRV9>BEdo+cb1nEj@_o}d21k2xMKPz zp&MjjqIa^O)d;Kj#%d5zle+zbDUMCI#OaEBl`&^lx{d`x?@*-g{BSgv)E7Pe;{~=& zz31B!dP@qMkYStueu?~RsUhU^+XU}y<27ta0(I{=bc_Kq>YuV2trO3u)*Xp?iR?WN zx?8?S{jXa8E%v`P$;r&-_P#uAtNpa_kbD>cY$(C{cP+uNE`TW~ITsts|GhO-0NmlX z3vph3&#b~mTWZb1-pYEVaGPDmjdoE~8Bu)JTb1M3bed-UIXNoNinAqN17^Uizb{ba zT%2R%dAcbl(6aIQlY6z*+UZKO`dF3;+svT5ykPE|vKF4j#%sg+e0^|h2NS>Ec}I)< zzk03>Gf#BC<231(iPzBA#{K5ANHN6wMl|n+Fuwrn>RT;J?0sPvQIExaw^g*0xOdq@})ngl`W@MFfl;$gH(Wh$NKjRyw<84v}E^N zXLr{>ka3C8UlcDfvX+k7c-nob@@|{b@&;g%O@E zeA*heCcwTs(7$e}u6w%DoiVV@m3tGenc4QA$W9Cg z5~X&2OeSNmYJJhwkLT_w?~cYbwAq6@TKxFq=YzzG{0j{3JfzKix+iy|;k^}_Z1^vU z$j6Ii>*`u9UAd;vs*>KJW9s3#(B(Emo`f5lbgF9pxydd`mVrxZ(L%8>+wr|x7gh2q zItJJ2i^&kiYi6Tgg^(bykO(YH- zdd{?A84$Rg3WwCs`F|Z-5GCXS)rr%jCF*ykiox&4!YP73Rr!fLJr9Y}e2q-;*{rbV zoym1^yE+?<1*;F1zjqaoDZpRDHe1ds<)Yj(5Gb9RLt{p+u;cvkH7OE}+{@?5J-@K= zpVX{b2#yn~F$axe0;0*h9Ai2J!5~E%^B9ycw%-&dT2xeYuK)8q)uvE+_HeO%FS=J# z>f)BZ>SA)SRbrN}_qh({U^!9E5>uBpXPoX`&>O5i8#vCXaZ>SvZg zIcNmFOnN?qLM`oFq@2|0lld6>cU>Ot6T70FSXtlG)*l03#LYaRK8t6=fa zB@okQ@B5p!*)}24)>~qxs*+0B)=G>U@Yk`>6?+&bd=(l>sSC|>;+os{fFxA()*qCO zrC&tlm+9FQCmMe49tmq_r>UpWe3z$2fRO|?HAsA@XER+C(ey|?i{$u%FDYB$v_tK= zrlXx0XN}PAxt|oKYU-l)`FO5A>&&oSzR-c*(sY4dz-Hmxq49Mplq7lck}fUV7&n)m zN7X{8#kUp}{jH_VqoVIn^+vC&*R9yCES^*AW%@!C+)B#dtt{*+Y=ValGji^N zPI-LI$HWQmK(uS)GEE-+otxB26f|9L`(5LmCzafaBI$|iaT#_B^~u%R-{x!E9vFI8 zG5@B6WR3`xBfr1UEY*k~_cb@KP^7cqM0f6I1lPXQ=-;fTh}M_(F~Ky7jJ;egxWj*) zd==Af7SP~4fMfc$vMCJ2s>q+EENYdG3iUvn(!q-JXX;&^3u&WZQaBV`t5a;Mwxg&Q zP>3Hh%-mKB9tsd6;|{2$8ja#|joX>H*-yj$FiMETT4Wk?`n9KZ#?gKk+*x1cn!c|q z*q+VEHTm)_l;5;5Oe!SnG8!6TX^G(G&;g~})NIdZr&FalwJ_a|-{r}e)#|aBf7;y-hiPz5_Zmolua4D){2JQ~I zFJi!0}TY!GBWL-iJHaJYutb? z?kYFdh2p<WE<_j^KgRIE5w!L*x z7?as&N=*da&$fLWulGKNMMrS-F)FyU+eA9km(8`lGhV-pDQdqvWhb`;J;w81Ij;4a z$XJCscip2tfg1nl+k*84m`ZE}3yvNcu~u5>cqe}umW4|p;qbh291GVg`yfuOp#qVz znkIBPBs3jQVO<5`?lYGT8_GVa(b+SX1Hypr1oy_Kn53l{+#)Tsc|1&ZWVE=GEn{9Z zf3$^36I%ZR4`lysC%``{!3QYRBMqpieRcFW;NkcLZZ_+UkRf&Id5wB+pCT>)7$tqT z^BcKaoY2;w3BevL#9!SaPQHw|uBccW4zYHVo;3axmt$`Cs(1aiNIaImPpHpgjwYByj33E5#+z#XNbG z=Mzd+`HfaA07jZpk9UzF}pI&`er-tl6YI^^pW;gX;lUO;h8K0|3z_?6GbHbXWn`$1U zuNN4JqDA&Yh`;!Tu^9iWZI8f-cdTU^m0f6pYH}UAa`r_xpFZ5}RLbWXc@|pjzSUj+)%4)IPDac68Kero6>(Fqi%VC-kc&sq|#uB~=jAD(rIFLBDOmq`|)8l0LOO z!Jw2ee_P-x53J|sqX7Hb4?XwNWSM}iyx_k`RQ%@pnLsdpQBgm^IGrav^eRl@Zj8|@@VU%CG$UO;b#Fdg^> zDC{vsgnb*Ru?6)I8(se;L7E2Jyy0Q{XaY3TJvL3H53;rD`F)NcX3YPrgoVKNrKB^y ze|h#g7Bu!qJ1k2#x_ILp(KC$WTvn?SVDAd!pV$HSgXwei-C;z+Ev!1;ra{5*C2pU+ zr)E4}Q3b%vRIm9siBu0TwQd*lVf4Jp#@pJ}m=)UQkg`6drMhtz>pPo%IikVc?o8>p zx@86H(yDx)t?b=|*hJ|~gd(2P5qyn3EFL~b6}ym>=^#*M=?K)5I`8&5ad7D8KRQ$` z%8%cuSV#P6?aO<7Bt|WRXpjZWG{KrpgC{z^j>Ka|^!F!vOf2g=&1&`_6Q=HAKCZho zB>KwKR5sST=u^`JW}g!Um-)BjCDXmnPNpNxo?j1s*wk4%RWfXz%qpEgIg6X9N3eOO zi#P75+#H3k0zz)ulz^ccq&IzEU~x#4jP}ian=2y28ZARflfb&cmf5546;ZaDKV}mYNisMd>-nqHid}+UJBBuS=A2@sm0t%#_E|}L?0arv~8wOY~B_c|dnd^9ExZV=ktz9|ogq;SeA9g?#H)V$S=PY!>s_y&r z@5$oyC(?sBDS|cKt43}gNC+>HepqtK6sAg6Xl$m)rohA1M+_ihb*z!6rgk2AMuYI0 z17*SXNc*Qc7YvpeUydavD0z3b_u#VeX)eL|m`Xnk+V2(}Jg>a&$2gnmv!i4hH-soJ zh8Y#HfF_R8Uy)-=dd}l?9lO2dPl<8q9oX%bhw#6K7ofH8~WSdzBi+X z5>jU}$|ppaKAL`%Fd>ri!m!ySc||wreiG6A79+(HV*n27koRFjl?<2Ks3x#9`m{suL!X z2WfI{;c&}qH;%wy_KO4@o>o5#@juY?VF>j`bFHK zOS;eK4fjK!q#wcd)=$$hDkFf!Go+o4VmlBS<+7zWWvLVxDa={lP}ivbiXE`vylTrD zIRK@vJ^&5ZwXm(@D3kBj+{oTnp4VKn9CF&h{XYR2E~Mwi$|#~s%j8XK89tU#-#0_C zkX%>w)4G6K0ohJ|L-u?Q$6XEANQDQ&-STkv+xaY~%=D@&6X0<7n@`1Yvtig&lVD)2 zbs#z5bJEG)`Mf|((g*NI!-q*i?r%lumy4$xZA#FsSRZ~mQ^)VPm2dc7xyYXWY;=of zpmipK6gjV=E$LJ1cs+p_@#!;)is}brWs%0d`z>vSrAxejkNOd)#Q1FujYit3YvlP~SqxUn zF1g+7n?Ah8xS;UFA!d}m_;LFs%io75$ z6s)L!RvGO&#QZeMzH28$e`hIfF9Ars5AgQJAt!Y(4cAF6o3^uqMq0Xz5r5ke3eXct z==#jorUA2k$k|Eh^uUNq%c(?HuavvG1?I^$N-w8_VLo%&!pHnw?SH2aGOA|7%FIXX zlY~Qq=EGud6p|+guT||3nrC@ebCc3j2ksW3yLhD_B>hj$n{yG_vnx0DM006C7o5-x z(bP6OZgkRr1PEBDHv68Zrs-^C5$Gc9{q>>_Jv2*li?>`y5@oj3Wx-i@>h$X5RioZ$ zA@+IhzK|nl?4$#9|Xbe-UmnkI3oaCfJ5HebuZ#r1oa#|i_MkUwx(#2@bVc7FMD zl=7xi=H!*u-Cyq#NN$A!0T1fFG=Oe-d4_Bl6Fi|eG-JKD4){r|5K!C2cQh811GO(~cRAPZeGIb>){QFh}A^ESzYXPmY^%flZi2-Ht z)fPDzvFd47#QdNjF4EsNKh)CZ6?k#8GA7kyPPt&Vg;Ypw1z)qgI{L8RO*T%v5<)cB z%8%_9lHvOf2L`MoXZuNQSPo2l(Mpe>76#sE0ZRkzlRQfR|L}mX^7`i(LD1R*D1$wB z{&z{(Nnn2ITwk*c9oaqmx_`){cqVEhX#0lfWc4}?sh~be zth!X{_~APlEtqwLhovX1ih$V?ggPgFs~Y)z#wLaHDh^ynpbz3WCS3LKZvVe5fN$N* zRzCfc{Fm2PMaefm^ggCwvf?2X1h{nfk(*kn#My)53fq-7YA zL6S&T3@pWM#@p_dGi^C1KFyn667m_Ri_2)oX8Rr21uz+%q@ty&>fjS7Tbmex3xpuC z1xjrRUYA7KrTa2}^I39+y}@r6tiG=ayRcs6(s~(sFtMcEL&F7$ocY(E9f~^W!ir-P zWeoy+jbTuP@wM$!J9f>n;Ov!hVa?i zoYJF3tMu4ljNO7Xe?Oe?Iw-JnO|2$9@pVxDp;To3^8{c#CaqMX(l6YhOH{p=pNoOE0c+ zD!uM4Xs=?Ou^2($s2Wvxxzi%6dF=G^OlLd8Fy~`Em(OW6k}>HYdFwjIDCt{KXy$nK zF}-#J{JdO2OXVkCy8f^8?M%YNH@~3xWi2p5Q74QY)Tn^ZFhKpQe*TCF zA@bLar6-)9NUR%X5D7GowG$SyS@2!x7_R?Ank%Ej9Ke{0|8LFJF zN$r;YvCDgXQ$EcVF@l%}g!IU70j+X+&d0K`Uiqu9s77RIkKCttBg`* zw~8z!iy`%|7(v-B7&X~) znt?7IJa=hLCy({lePM0Jrib$hO*iQ#`nrnuL^BPoghdrJ2O8+K#FO?o~8 zBAhSMditep&|w*9rF6+!NLv0TNXWQe5{;w87^J@TNqB3iG6IJ^Y_T0@brTExX}#Gy zbI3u^G{1r2{v|RSh3^{6QTj#M*Geb zD>(?gK4!t%PK5P!T$xGW(5vA0kDllNsa$gdkB;?j((G=-MFF1@9>JY>)ao{*ZX}N$ z;2iq@bOUtqxw(&pt(*d*@8Xx~-|dc;@=Ne>E33*?fq$0P7pf7f{QYztryKoV6U=^( zY554xMLLKCR!yllz6EG=H1sWKLn(hB2Xkdvvvl)b$rTCGg^l9E+_QgZ4>4IFv8@&k zXLA;0IU+QClfrq$cV1B79V4Y1o~t#SQKmj)Dn-?9a2@ItNyarn_=o|1`NJm(!lthZ zMMO)!)6~vJhj51r%=53QA;F{;7=HsU8*^SvL=qNnX+aJ>ZKC{P>h#1(rLm^)C_Y?n zRs4_o-x7q}I}`V3IaKry2I`evz9JrYLI;?rHv=ng=8g2yE0%NI3nuH%T#BvUG01=W zWFem7Qy*vBD?H5?YiOzJfWVrOBn3;;oDa>`Kfrh`eS6%X&(H7qx!iuodiZ2%rAgRw zL(}dKom&`lbE?ZzsxgDp{gg{32A;qIrdZ=SmI$m2@0Kfx)x3K5zN>?|zI45QpAC8J z@ZP@xd8CV5D>3`3T5X%=>bFxI`#w3i^~B=h)`ZQkO}hUz00?D~Ltq+WR|Xhk*uaOu zYIM9GQB;HU)SDOhbeynX=13~9bFYGdRU$PYUgDCjkZnDdhQ4wK&bab(X#3?s&upfz z{Yz=oUI6gaHOF%x4lc~^Emg$Azl?3XATEDUb0&1=q-Lk8+I%Z$l&0%p(MO-JdL~Kz z@-_sLM2;oy$1#?QlA78jm;Zui)S{lUcji(zf~OJ+->*)P{K`cB(%Ga#&1*~tRc%M? zL`IIQWN~KO>U-(o+jyAEKjx-fL5T6k*;`AOPH-4=w&r(R_j=&9i+AoJSuKUVh6^r7 z%)Ml9t~WE|44k!?HGlOcLEP%xo(^tDg)DvL9|iecx=BbrpRzG3-QD;jttaAVFcSLD z7G)jl_dNReJTpi~u0d}sqZSu7*UyIg$zFtGQxKb-<1(~+LX;2! z1si2H4QYhq7wCGy|D%2R5R4%88H3bXknZJx3)*v-8s>%s`~(+rZ(3U0Aeq3gWx#VD z63>jb9zWjRX++Qa^M)AnKfVl0^jO6M&J~A z%+yXQPHaLipMoxEf|%hHE+xvYhyHGbQK%;<3_l#M$gyp)Yx+tmW!tP5+86D8T|iY} zyMk${l&Y`D!^WzH4xcJ;kqMo{eBY$rY18Z3bRy}JYVfBo)s z3x(Cpp0${(jXf32BJwXjon6cCa3j}yMXNh#thh-$MM&@qdDt4{y7E8AoWHJtftRW9 zcXN*vKb$%ndHZ@C@Fc%l{PB$U7O<>}z}IP9Hv=PrRN6&>_cLAP<=MeGO{_g%Y&Rs0 zW@f>yMt!%OU|C4#M;Ly9a&HPePgYhq+?J4_7WMnuz(YpqWat?m*t6pX!2tR!M95R3 z*9w;l(O$veXX9%bC@`kF-7Y-Hbb1keoYfx<~%) zf2?AJe16hH6wI;p36!r+!>zrb4K`W1E+CF-l3MW;P0880%+OR`*Y;FG(Vwg~_Ni(- zHN9Rhx{?h!bQf1|d}YexhL?p0^R`myie_@&{LfFV}s2nz!z<8Sn`j z97VPnTST7-%HrAgAzxLr%X9pg^o~R%$oQ8vBeNV)tNB}GaTudgHWk)r%s~e{VB)cD zWG}zeGN#)?u+oMa^xZhk|_B zosQk+-vM?B*x|ZSRq6-3Il7}r{c)SYudz0%MIxB?B7owZ=q}s;M{>#mI6MF{P;I;Y&5*=P!-ioR;3u0gGnESW3=`cd8ckVheVJHIpn0pZgn@o5& zDRvpiwLmIa*YKj3!r3}lRBACnWVm@API(ByW^dCYl<^v2!hJyq0mp0BoP?0L^3K*P zYCsq5y>FIl3^I%SUO6+fB1+kyiDnvH_8^@^CdSpYH)9eM!7pOJ7#OhJEqiga>r8~D zKNpvO(v*m0dI@w~-x|~ZQ1OUQy6>@knC&bc{zC%KWd3MoIB)!)2vErh#@U}Z`76uA z0GkNo$b9~=CHskWIb=@eNzm!1sBS1!DPjP1U?*8@;Q?wjEnx_#ZEY152v{PpB!Hwx z)1BIVW~{6FPWJIYJJkHU>Bi7)HjnnZ`=I~Nn6R~NaWJ`d*=9&&!-1(JsQ4kOwv+yq zWz+4A@sFrNS8hS^gp>8#5$>4;b|T7Ml=@uJsB=IH%HsjWcbsrp0M4Jm>(ZG|uY|g@D&N6wr}cBfZ-#rQ4K|B4-cVp%|Hi)s5b+Zd zhh$(8t2{ASx>iZ^-IA5QyF~MGxFVp05H6j?;wNXR3MGXiDK*!A7ZzRT%o7iG&pH>% zWem2f#wmE>=hj5c9>E+6My20jM}*mq;naHTBaAOxXdU{hJLsrgi_>?D?|+E|wxm{A zwe_qcqkR*Lkx=NT{(O&!`_<~1saEiBH%CkDCpD7Jsc52`n9cY*U;#aR(4CealSfyI zn)~jIhh~b@AQ!(WPZe-Rd_z*)mvLpV`AEqv|v0h^=K{crw$yHt4 zW;u=S+wOg31e^+D{dxh2wYZ*y3|Zy)xe0Uo#(2HHA-lf~aP5H_3HL$AH&hW9YK^`_ z+yPe9ej!-l3C9u5(Xlc^24|2=R-%{|q_X-egMoG;&y)ef8^h;F#=r;1ohGzSxSi6_ zsOr`Jq2Z-XXc#k#_)R>gyxTw8WMIm@=z{v`dOqry{3k#JJSX|XKJ`}_B6JC`m^4Z~ z$uDi1%h<;s&yNd$#}SVsSrV4dF`g(Au6NE8no_@;wBUp-K5R6CA2Q#!*w&t$j5%%8}OvwbsgQK1 zHMy8^gD=r@&r?RBp=grAk4N{&Jm2JNCW3KgoKD-huP9otM8rt>$+SZ#zHDT|AV~FE zri2BiwL!O021>`0;5E8{b9c%!hl#1usVA8*J!_Q1w`q7c8NL$JN>bM{=1C7l+p zMjf%*%{Tt94{oz2jd$`lN2?T5f9Zk~UdX7OoaA{0BV#pX<$4Ry@9I*YmYjE!<{1gR zAK18h>10@e3BUI%>73M}YST*v=W9|d6_Rk?8me@>y#<4SmD-fCaB+*iZ%W89ywL%d z+oH8mH6OgI+w{^UwKJ5%X7kv@zQkp%42^RMN|CK2a92ITvVMS(^J_&xZ0z-qMN-fQ z$ds$~Z6#0b5LGsh&$ec;Q)0@-I5LPZ ztm2b~sd+D|h~fI-7^(HU`ij9ukFabq4T3QJCD8?B>vS9|>B4#A9*1BP7o+3%UB0KK zb(_VXw9==scmnmiQqn!}kf6AEY(Y}63M`;zB#F7(JbE|e(=5ni6cR84{e4F0t6d^T zYqdDyixN3Qn^&0mDjFe*qp3#G)#VSMUKZLFEA)-YP{t&R{lMBtnSI4*eQaM z;HU1RA0=bC6q~R47<77alWda43ijZ!;XN*j1V%#LoCqPlCj2E)ZpUZ{;kYVygNDOu80h!yASx{zBPfL+or9F`&gEH+*;bg z+dh_=k(^U_IDe_6Pr~@p{-|qKHR0TSD&M*5;md5&$wIM33yAnNwiV|#Rt0L4DnTtf z*fd>Qr;0(T>Jpr1`SL4If)FW13Ub0j(krt&><)e9HUpLyNN>UxC}F&(9t zrWk|QlqY(Sc=R7FCQ|0#^!jjU@e!5N+VtI8AJ(qJ``wC(co4n=r%8DiO5HP=>XAs% z#fZFyF#8RA?LokS6RD}p)a>fL@#>vq~;Gg3V%xp2Q!me0^eC= zx}KPpEaH!GiTr)3s8t^T?$T@L$E>di}8hJIlS?Emw3@QIw(6q<)8KSrDn_=wthQDrZdgWs$+9k3X!k zflsGRdgfvQOCb*QT70su{U?Fk77}8dc*Q?e!4CS|bXCH83IqNF z2sMjurwj3?-qvBn4!GS;{_6QKnBF!IM~IC6vP}yhoml;AvdenbXa1cp&b#Rxf_ySP z&FiRj&*rW2>+REgQJ5vF@j{_{#)~M}WcUsgBNAicg&9nLipXKL%H=X5chs9G;Mjs4 zFW6#+>1Iq7>=!%ly3y~ucyA!ZN~W1{omW8f&KwFf1b==}$(D90|0BEKFsIFeD1^6rrh0_PX_lve@IilN;kN+9Vz z;@6WhbO~$Ms!K4d3A5;{1H`(^URsn@o>`gwsSTaC(iR`KZZ`(KT=r|!Z~H+ig&4o? z(@pL^v!3FrbC*9XU_I89i(LEcPW2+cgRFazA=lLO7a>&LstJ7OBkeiYf{vi`GUV zSi;rX<8T{Flj>GCErz>il}njl%e_op)`?#t0N^N6(LxsJ5ia+uak}duI_9~YmfVo( z`tClv?L&*%)3?9&`4I7SOVYK2JTYYSXPloQ%)GpKmCI{eXcC<~gmNyw6SpXR$7zh) z*dw(e(re>S7Jpw9lY;$#29wzerw-&ljs`_9d%@eHxshaEe21}n7ZE9{8kLp(u!342 z6Zk$js-fy~&Jy#XIpx6gg+j0g(Sos=R#|Y5U%n5OSqQ1Iq!73T=2lZZuFa6*K*saw zdu%uBskHJNp|MKrN$<}v{Tn~EA_%)pAU-uoC;T^KskPj)lD?->6*GMabb$2Hg6FEN z+C=MBqDM~5e{tZ8=NjD}(bAc?Z zX2KR1#hVRDk0SDfSzZx_w2RXXnY{-_H+v~$ER@C-nyN59qGe)n!5Kf@$1bb`n`-D znpyJ3s29V-d5zgz8i@e`DKaS-uQYL0LL#c8#$~Yj;jqo~0Soo_hm)Qk>7xC&NKf?I zQe=7ni%arIuDRK>TewBUqk2o6WulkLrX48#!=U@^+oC(qsg1cQQ692SIwc&P{x-Tu54pNIY zmke0a9Kel2I}hhcspdK4@D7yX_;coA%AjavCwA6K14#T7wTK!KLtIZGuDd_X>At&f zPa#O1*<*1daS z4YFz|_xA-isz2%NdxO!yA#JH#55RvLXz{=c(V=HiBuw^DFGZ=)7!1T^E>seElY#TE zNYd?5pcfX>C$ulKvaUs8j7iTcH>8QK{~J;6mnI-H$9?hEtEy?9%6p5(i5Kt;NnZ=L zptRtQ)15F-+BKJ{q*M*jeZdMlBUrU9X@!-;%P5HyNo#O`_$O9K9Z;&> zU^RJWqfGd%r1#vOD%YJ}UG{(n9%fu*CbTH~5Im;HlYp2pP|nR0_E)MLIy;C4hTgT5 z4WnTIgrT(TH|@!W3groAt5gBewYF%|xU0at?4m!wOQo4MnvL8Wm+Opqk+>bGk&%gn z1PzjHGsgI;Z!*fWJvCsAQMXwKJa#^JiC^T^;KgN6NzxA1sAcs0Nurlrnc-#0d~9!< zPxW9yvzBs@j5wx@r{h2FZQk}RS|;Ho#E{O%#pWVljY>&m3p5@~Q^Mf-(tM=-1F5ms z)ZYGs9Kz$MZrDf5o=OyW3j*Te=9WnmLhv@6CbiXe1!bf3Nb3i=}R0k8< zbq`3*1%joXkvRI8TxB+4Im|v>uOFW?z}e)24HSRzKEvWnmXlz-qHFR3a&zk|%~Q^; zhR?Wwfo+Fdq&nKc3>)K(MMYL3ePQKKsr=in+Psct=k!QJO~dP$tk`6bL`$H9xb6~D zDy!rou{9&*MFH>(-dW>HWNV>{bhu9RbVB5Ma*?Ra{E2Fltcvf~9XPc#Y_1zZVzAsL$PclXr795UWf~DNHsN(*l|sf> z_Yk~4dG%LU1L;fhOpqH4S>Lx`y?E|$kO=4lGRzmj>SQdmG6{}xUQLJxqgLrQhXuc# z{ft${omgIAAaDyM*iL`>JYWYix^q_J?OMM0K}vL3r5V7D*!P*MEq_&?{gKK2Y~%CZ zo99K`nkSX+S47jBNfT^LUhuVzIRc7xg<-dEBm0hP!+Eo_7!I1>>h&{n=sgu8^xTIO z_tS63OmFqQ#T86PnEsb{guplLcThz^$5M@O2>4m^jHS@&2yF%HLU*V8iG;;rQ zFu*i{9Si|WbsC4iWbd0A>u4!8smil+M=G=6I}FUb&sSo|L? zmXI`hEf4j2f9tmTtg)fpaB&3LQHE%U2sL(0FbLzJZ^b3U`TtB#?wIcj`e|7?b{rm8 z(7!tc_ki|ZD+L0R%%=0Wz8JqQLxb{>*cFN!>$^W*p>@T+^>Vn^%q6trnGS2=|ZQe`*4+|sDV7M`+> z#>!0mYZXPEQyWP@edv#a0RJy5$aVbKRIZ97Rw67IMkGt9+A2skNZ-O5)i>u*A>->i zVTkFp0&eEZBp|cSn^Ue5Y^~tebVFD%OL70@fH<*LGb2)D8bl* zT*P%uk)w;mtP~x2_C3&qi@+&YR>gDZh7Ye5hDdyDce8ywj^(_6#Z>+b)x4b|T7sB< z_fj`nsURA(_cP>WWF(=4ikAuJs#)&Xbq()WqwaV4A=#7n)R&6MV^$YiPdxu7y`q~& zx8aML1%96;bto$lSpUaotg*{^RNg+j?u2j-Q7&nfy1fArn|*AK3OEbH z?pq|m>_Wh>4gq9A60m-4ccb`ak|#p8gb}ZF%SE(G05d4^^X1xjjYMipZA)?(eR|8> zrV18?Z|DlG@2Wyq5NrL^*EYv;yomI|LvlE(lMZ^;Vun4U8gXFM5Xlq(W2_a=jnH=w zOW^y(M$Lz+gB1&*)JwwO|n*ITiYdGWDzJvj|L{toEi_~6VFgiFy z_g{MBPc7m@0V?wypCA!)KofDmQm&X^qgU1s6? z)J8^+rt-$iyP-V0@#a!(5OEj<8JVpg_%im~29HMUZ#EUKGoJ11U4Jf`tnA)9U0WBd z?pDiD3_fZk4g_z(`PW&p3t=VllaT>qf!Y{yx9?ur2^S+@S)C?F92dQ^m*{z%%(C4N z(jLas+`FdS>H}Dy%#?u&O|z;u$aW*( zvW~GmP`EC2xvOKcyt!Y8!3|`jgJqh z{Gr&8io)C)@P`gkWEfE8zr}COu}X~28sD#>{iSw?nUB55c}^#vU%tG!96Zg#eQveq zX+^NuBD<@)VxKP;NOE`&K zu=c)v(dH!i{Za8-_GYPQIgW1Qss*xeT{a^?tKqjGwb!=<2^YW5Ayg1W!_@kc%e+qu zqZnQ*&dwqFhmkx+nwN(4g1g`>UsgVp)sEBgyf4C2zqJ^RBJ-EP~^!62~ijgieJ$VyUjC9 zgeHHe*sOxgZnM4;j<40z9DUKWKcZ}pe(RMV<4x14pa!Ina(#@5{>J&_8?kp3_Qm~f zpU-{Zbpq*)F-3pl_+!Fx=E@QkMJ`&|Ox9FgoGAJwgb%k5GXqD){morZgymA)l&ROd z0ju>okF;2TKDGZapma+~MHv&c*?D+Ll67A0;tA!x{(J2y3kD-tsNxo*q;|&F6KXAQZtT9q4EGJF!4HOMAH*-Wjcu1g0rlT_UVl0#Bm#41(C3 z4TJ*1HKGSQD5%=h01Z5o;Q>!*#JCt4x7FkY=Qae?ewz45Ax7aArxecOmy!UUK(uen z$rLxu)Q`>5sA|+J_;u~l@;#tf@O3x&P<1bRx51pnZP`xkHv?iewF$^6s$qVJsJxfA zyv=6{3qE}`j_gPXR1}cdKh$K87KUC9n3H5lUqV1HLm7s{b-qo>YdIs$G-U>NtxP^J z?6GO-f}VFU2NkJj63VJ3H6HSv3f^MI%DGS~-Z6Poy54UT{JS&Ps;;y8RrV)qG^bqr zkR=*;4|vr)m8J9Lg~u_&IG9=B<#V6L*Nlu?4~=ax`P)A5m>25tdKBs@fw;`}#AiR~ zaTbaRn%py60**ux1-PDLXSF|L$Nv6MaEw%^duMMKukD1hhKo6#`1qwfFoa$u$Pbm> zW$V~QhqA71bzvZf_-=LFg$k$%wap5h>lAv&(r#4HFu~oANN9fdjge7TZN$G0@uf26 z6yb8q1#m>ws8cy#rql_=^F#RyHjAu$(?YY`+n-lBgze{!g+n_fLcXhx6SiGR3ZT7J z@O!O8^tnpuNR!wR+Y3-1su9MRh|bz5dOX#&F_P!dD~+WvqmZ)2&`kFD^VdQ>9)}iZ z^Tom=1N0^ms@$2JtW|x$ukXgjI8*#4$T?D}9Y(H-fpB@BECM9?66qrZ)@|YC(G+vA z_~3A*$QA&l5<6l=-J}tSeNVcA(v5S6PlXWv`U}c|AgJY@!uRM_*|RhOJ`k(+TY)as zgYNGBEW*&h@W8!xw^4sG7Fir`N$wOS%C`eIGdkLetuxJqmQ?_Vd zlPr!t*N*tu5IdymWCQs?+-f|?oRu> z+Vze}e1eDgPium1ANqNI#(WeJG7S0?l887+M|*mFf?U@n^^-*8vM5G?8GsBkA^e7B z7w40;ddl=l<)28I``}yZ$7V&)c?#g@`{)K;*n?pC%c4eg#+HUt==`iA>RULOX1YSbGc=)W6c(tZ+<)8q$lG( z^Rz4bcB{$CPHM$BSBmN9y2Sik|930a=FVF+Yc4%73XwhkRs3uq;J9nr_VGwyj^l;Rnl6fWE?Wn2nys=78>fdd@6TN(jr=}svL=}zhH?gnX4xgTacu)?CkwXAX-)S+$2pWwr2c;bwS->oZ7jqzSEki8H;)%%vl3!K+bBi^gM7 zBRlsGe>N1)A?&BnpQ`jn&K;2(8y>|V3SbzgjB93X?)w#xc+HujAx1XacrT_yi}}nS z#E+yp z$=;>pimk&H9&I|sNRwLV!M?=#fFpVINlZM-)%6RkDTlVJt<{JLvsisRIHYlkxm0Wn9G=<-x zT2D0qdu9)vmb-qK^XL{h2{xl8VRBCuHT6Fqe?7!!4;%205Vd!%#L$-0fyQc!tF~5GoN5e5S^k)}WJKMnAgWTXwV5ZF& zu6#lMS0?%xa`jnI18VHDdU{5(uNia+#l9^pY+;hSSy#J>GNQc!}b`l_xJ;IDvg)9-~yOT6ryEneynZ`*xD|*&yct_?{Y=*=+rOy5Siq>vH`ap`K z_kgAx+S#Wytj0f?8t>5Wbt6M-*}3AtJ(>1_$L%Q7<`{AN6hR1#@L zzb0BmwbnZ-`KrH(8d$(se+_cCc(BXnikHe~{Mb|@$e^Yy(qn|d=ir@RC|)G~YG(L} z9x0o+3fhkZ+edLfaxe2%3;bsT9iRTTv7{4*x{9+U2BZ}7PVcOA?rb!Yr^d6_Nx%uA zXv74sX@Gv7CiGqCWK>T*PGSPOIqAxoa|bjUPxh<5!xTF^QkQ@LKK|%wyUHZpN9G0* zj9dW;O^weL)ueSA%(ow;#>i7;7^a1lJwc+lVuYZ4~D4r8V3BUXmy_!L|@ z-%oVeC2IQVQylx2z#Uu+HH z$%;>`1q1DIJmEqWcK|B9X;oNzJu=d)@yqmPe7xW-Dl42Kx>2OmftSauRES=Po#<3u zDa@+jB}QR!R&M;nW4)knYUd|s@bm1b@i&H*o=S1|x8VUR!H85E-%veV9&!UWsrV5O$1(|3ps6EOUYB|8eh> zYrJOZZ)i#)=kHV_QOsMy6CMzIFvo;|m|ZPa(j=zWYLMQbDx=Okfk4ob0U+fsJ=Ixg zQbl`I)wRk=?GuRk1Jyp5S@5l?9O}_tKbQ_2>Iw-ox#&)qyRqApOs(xK4%vFT)wfDz z46}q;GQgI7$2*p9=XHbQVPZoj>bpP1?qMz+f8}>pB>_eSO@1b<%NjE?=+aVKmZl=EZ{Y50+~wWbYmGLC69NYM4-YBm<+TRb@`bY0S|?2 zyMxn3%#p~Ka>1jo!JU;hACexDImw8QZoa)HchS_&8o65;8RoPjM8bs_n;HkoNfirF z`&$+@gjj%;?@^DJxutnqKGj}2FGquEwEEaCV0A=p7_rWvT-w#8@*Z6&V|is1JxwJ= z@9bsG(;{dJH`wm8tYm;Ix}w*dEXQr8u&sJq{OdP~9O`e*WO1nr0iUEDAc5V_P`Oul z>bhhL(fd@oys9HblA{%ijyKP*M6A6XmO%|Z9I+BP4LqlQ*6Lq7$#g48vzZosptW|{ zrn`P*uT4s_qu^1)*R;Qwv2FK?WNt+=%LC1o?i6hA4!x~1cstCD%J4BZzSh-r*~Bsz=LO&jS5 z@I0Hhe2T(=YEV!>VyfyJPfIkT5m=w85Zxb`&blbeGb`9iY^=4B(H0rMl=gcGlf|Eu z@#VSC<1M@>MPXW;QM-9x8po%`#HF?RvvKtgn|$K==IGXAer4?~{1UN;ZOie%rz1isXz}1VJ%)$uKC^^XQ!O)V3!FxqYw47S|>Ru{g#v7T|N41OK(88_4B7QP;j2fR@*^Op2EN{QG zt2S05_MZWjlZeOX-)Mp?CyEu@{Sh;=L@XYawSFwo+4jSqEEZ2>0@`qV@sgd14SgaU zs*fz4JQ?_q4s_jJWrj!S661^H;#MV^nFMuAX;7#1pCT9#PA;xL`D{n*FU++R1>rOU z`kJ^nP`mh2P{>8tZpSkPuRhSphrG%dn|C`(7}3cDx%Hs|;WL)q_r8*SOz$w%^d6#V zo1AsNn4o}J06LS(O8Tov*+PiS#hl69)ph}6jW*C+P3ikP_D_>>8@ETOc?F_S-n8dl zPDs=rVJTq0!eOK;Qs2He3xYB1#Y~J0k6Ts=WC=z#S2t{_^Y+WdsdtbCcUNOCsW>X! zobdIf!~>Dw`N!KYnPtY$SuhRk=Ecm8p+E393hso3+vb^^O2|^?!u51bKC7vxau>&L z;T->T5x0@m_BtlHo`W+gO4^6&`ihbg;)0&^QlB?@_r6O~1Yh`@#ucwQ8QX5|Lh2(( zu!puq0_-Z}$5Ioxt`0l!u-+-d%&PU*sXT%;(6WSOiRn_-V)cvh>Bf8`v8*TZ$?Y$d zMqaX}OqYnL!#Rj$(x=KvNdGZRC9Z61PB7uj`l83(tS7-jSIAT8!gixzR${nl%+oA0 z5#uN7jlnE8r5E;N$NfnP`_skmPm;h2V86l2j`XI*@gkSKk31bqGFsOqD$+#ziSgLG zzvVfhpo++%b_^PE4}K{^#7dJ0e4|o+ifh4`=L|XvmL;J_;Vt*_70qD;I`EsvWdHD& z=oc8OvhxGFL8ehceco!=nHqSF-kBP9f)^J44f;@T$_B4z#1|952igctk<_k8L*N9l z9V);=|6Ucd`w=)c7!7yQXE~-}5F1T8cOxEFLLaQVRX!eua2s$veNtBGxHS`VlQjeG z;231XjQ-e6Lk>FpEH`#@CgYCw2a?HbnGh+k-W0)K!(yjfZHRPwd4``}9;)@T1u}t7 zTO`oXZTYf+cXWN!^NcKlpU9$q+d70e!eSRjjlY034=0{Y!fKOZflC?pva7neB~+DR zz$V>2V(oMO6rRnf&dJ^EcGk>_ztaei*&k<3Ni^UG2_aT_OHMp0EJj+99 zzK(S2!pVqtNfQ@K`5n2Bx9huHxZFJoPpvSi)UykhW^gN8a|8AGynDuXq#li5x(vLs zsK`X>R12`r>8`MD@)GV2bFRf55b@LeRI^w0%%B2pBgQU&Rh|DjPFq~_%@h2`AzbZ> z2VOGA^ZQHl7zM?8JREX!U+&VyW152FdhPXyOQ7bp>unpmzaZ{Pu<`3@^y{`;BK4Q# znL>@RrH2mqAnKyG;(%^4N*qN0lURD`tyyxEumUy5us94jFxVQ}?T_v^Xy$EAM)vt% zo;_Z;JsI{sxv2uSSx`bekBuprjr#rO4L5-;yldyaX?~e|YItUM2Ybm56Cg%zyp>S8H%`ohr?K&<$&hT(3Ct~L|3^Mq_d(8p08{|6#Nc7@M@8; z+yK|qPTv|vo4omFTeq8IsK;6vLWvij(u%A$%d+LUK?w{Loavx^z(Pl?ucUrX$YU3= z$3_3(<#z7n$)}x4BWK`XIzIi=_Ah0H?ILfzHI%^|vMMUT?ru5$eO)%iKyDkF{A%}* zq{``gtlv}K)E@j=p9?4FIbv1#6dg#CcM1i|du0Out8n*kpXbw4A>8-s-3$-<11NFJ z@H&cRx0Y(r=kUSbn5{UUJ7$-r&S7uMNg(%LBGruj>=^eW^8;G~Z=IDKG0(6q=!I4p z2aoSFe)xDc?m)eSQ6XP5l2{Yy5Ept6eH+LH*YUV(DAcWZI9Ncr&5M-&MjXmRAYIdrqoqBx^-gHn&-dYQ8Y7 zt$B;pP_$!S=|CFXEw=0_Mn8eGks z$IUxh&J!6lC7OEBD;+vYHOT_aI0Fr(gG!J)Sk=vs(dimhdMHVIK411bX88kX!m?}K z-2Ej_82qAjvu*el5i4=L_<1s=mG1v#0fcuWVHugL6KRoY@HcOKd=wPX0fPq)4v`_7 zg|vjfmkOVq*ui5Q!5jM%47DO@Ib-swSo_;^Eq#6&PF4y;--W+{nGj?c`?t@g6L{@8 zJQc@g$b<9O?*pa5E0XaVUWTOLR&@S&MGCj&vYwjXV!J+&nSW4^ImI6AO5b^&gJ85N zUr$YghbgWQuz0{15&yUXulmdLa2*UV8{s=Fx#6-q*OAJ}2X&;*B}KAgb#Q z%l2^gE|&*rC&bz8RBG@N0r~i^$Cjhq@ z0`nD4d7%f*{dvNa^9y+3)095AZ}N*68xRXhxH35Y8iAwNdrhBkY{^uK5t;c&MvFDR zZGQjpaooXH=BG&tMi6~{;ltx#iwF|(rJHq-DURvD6vK!%)2phP++KV%s-Owq1n9 z*<2qN5<}Y1<)I(iRq;?!=-KV+{JmL+WM7<)P^=apCnZ|p3S;i6VjI7Hy%-YSeGB}Q zDAD6@wWQ3^qn_g+HK+NYu+kh2`X_GiuX~NNL-u8UsgmXV04BL4k?J5;n1bRvT^G)WO*|*P27PlUu4<}p%Igi~<_ltpY zH&ue7sVCWc*`+9N8J=h2@^`%@a?#bK*hafPvW(6u8R=Y}ui*)Jv|1 zbg!uup?K`(Zx+?%QU?n~sJ>zb2uR%jf8BT^qU?2z%5eUR)cm2}I9+{<`(NM%Am6D!WDaayIZVi=oBVRji zuNF6F;UqD2#uo87Y(nAGD*j|FKPP>HJ5NJCD}%&c^Ga9_b*_W~8~z2v&(T`}uS3VB zZ}l{+qfIaOeJ#Ex-V8OlzSxjDbrsyET~?JB%(;>$N97v*?&(C zY#-~|T<({eGGUVH+`&@*Mn^1nI-swg4bua3oDcOyE~-YwyA_9-*^^%cGhVZB?v#hQ z)>7kpzDmtVTb`XqGMyN2>%`P|vVC(ls_)_EEPu~};rHn8|L*kYYqN>Kg8g-KCeSa= z1M_Sie5y%P#o()~=iy^<-1WQ6SY7U{eMh>SAfsU%vklhJCwI@48GBsQph^T4wCAdC zJS>#tZ2~C%VT7DUjWwNjuZmHd+aE?;fMw$VsnE*`Z4gpNG=oGw!}8|t zz3FN(+F#|Y=lQZZ`?E1-hwTK^A$<%dX{SSi`iyiPsv0_NY3kJh_cTq<2Qz?p1BRlW zX{-egIDNAnR{K)B5Banpw;SVFJ@d}rV4)qF^PspO5N}pzgRpoB=BN z+$pDJUaT_Cu7UAq4xhbAYPs(ts$ zFm^{smvxW03{ISsLhpsXkhGlFY8~DIR%kL_P#h;L?0x$)V%~nQC=8FFO~1jWpkW0}#Tl_v@=z~Y zBlUB^(KjQ+*u1HPW4vs?w4}WbG13r}6xa&hX{KD^p-|x{$0h{aZFTfdp1b30TcjbR z%XCwmb+?D`3;#KpXrZ$R4dapb*YeYi_w`Rj9FL1D3p4vJP8zXtmo^)zXmaAkWp|~otUJ| zi4}{Zb99k;{)I2RL$h^XIuUHyq1XcoNhNdQYupe!txb{RDJ@2ci~% z^I`k9)ppC%YC=*x?q2?_vM}c9Cm8nwWOADH?E&0E;qvATgFrv@G`dBuoMu zR`l#>`G7R8xW*@Z;3ch1&1vz07zU$2`GZ2nODD&1E`;aMv%Ltz&@*3pa2rPhX%n$qx@EH0I;36H5 zC;#`{j?K1`GoGU#V6^9TCpe@f4Q_50wkRq8yl9VC^kjIKqM+hvs_0jj-n(QqryXsC zAm|&&bwLTO$9F)NBxhW=08N6?iJ&W{wg19|utrP3rby;b@-KapYWAC45upu`7?-gx zu?x!-wJ&Wc`;0jw>OrocGgTB;?s4@i=yC6$;IY3W0CT?s3W*uqo1)|Jh`oEt{9|!h z4{6P=izUsVS=RcmHpSs>(nuki*XYYdpHlUK-2Q6m5DDJtQ<`II2H$$rk5r8k9%d#0 z?-gLa2`1RQGIX4_F{HnSN!-cY|C>#o&XbeePhXXT4rM9{&hf}XdI~(7e!6cR+)k$! z@j==Y1KY4bSMR;+jg)^PSeY+yS)RJJ zD<-o%c@&?qGgQ(+TJrd}nSA8_?(o6%DrtPXZ>w>IU(+}XsbQ2ri9&w}wFJW-!@JyS zaHgPk&iDR)8*i?1+zvQ?sp1Sk2jJdiq>HVey~>WqO_j+|A3`q9E>DLXmiKWTd+Mu( z3#=`Jv5pAm+xSjNmhdyS8G+Ive;Z{S8z#!w&<&cwuO1avZRa?Wj@Z%CHSO(E-@c&1 zFwJ>BSjGbPTSdek5jq{RfIF zvOA1Fxyo}kA9P-OS6xOrL^ z`t=Oz$sgFaqf&|mAh0){Ln62Ce?H3M&1`9ir~c8-5oPi8QJQBMN+iEsrPV}m#XORK z1~Wjq#!uem;=zOrjvXOnO>5}PPqKaby0jjMuQNR{;OmI^a`8QSuLX zJB<_a5FRG84%Eit|;@@Si#Za&SoEj8du3HsW=C-R8uWc8!)!l)_wob)Xb;&Xt%7?Vr+L_2i zEH!dSMb%Ifr&a+1R8~G z-POXLD6XM{;I%lNc0qXeYFbtj||AavK#SI|yXZ4jO`z^d5Ac@<)CSNr z)7C+rS*&%{!UdsHDtuMctb>fnx$MP|vUUmk(LHpPJBk&Gmy`q`&{&jx#Ak}iVLgmw z1fvmOj7m+uBuAkdwxJUgAbf{`iTf!Y zC-hY_)5u*y{}el_l~m|I&mabv8)P_>`Q=hwm98x&dHBEu>ezmQ9L7c{@;fx7%_WDE z8-qzl?6G>~bKqx@ksy0mwt-tl@ShWklo3iUI5>GQFLYNP^9(&zk((9J-#f!eC~;W* zee$UeH>*zF%G-su4kE+uHp3Lf3Z$#)PwxKLQDy^AO$aeC^Y4AQ8oB2WLqB47sQAh^ ztkPsxM$*`33vmZU&Z_u%3a$sRf1yRkPOR9w?>^;H>G8?2m1pr7XnTg&AB|J*x@Nv$ z7Gp+eDBI;6(~PQtZRp7gZ%zO(NdN*jXd{W0f#@UWm+jwOf8t%jW62n|dN&AaA6mMG z(!C5gXCxkOa>$TbhcygZRY*ze!Y(0of<5k6`3XYrkbn zzbQ^q&6}~maNB`lZ{^?HVC)%dl%7i^UG_L>H&`pzFEz$-(K}aLQG2FEKfj0+RaVV$%SsMzAgv_l)VB_5K=cTZe{1 zo}&fJVS+dZ2VKfv`{bj&{qoU8?wp+r-gF*-zctsQ+oY&247Nbw1=wQGW4NS9c7)Mu z!srro*!}PPr5RV5e1Tv=DwqJR!0H0C zVMa)YjIZ-xh2w;-+eo<#I_ZNf0mp`S%SPdHSr<*8MNO<8h^#*EbDpZgF%)#E`4^B` zI$;1-KvP^SzTjZ%xMu7qFd4(r@3agvL0$&KrCap)w%AoiKw-(_Vc%kj9q&q2(H`Au+{{GX`0ZX zmZ}UK9|Jx>VLh&B7jf?UkEr%@2Bp0)O&O2p#j=5W4_mN`($J zw!a^fLsS}RAK?$7x+r^bJRea|RdytzupVqH_A9$gC6dm5dqGINa4sMUp&zXO%iB4L zKgylT<|8T@a$oL*( zqbQT_Z%2zFWR5>=X;W-8bbUf z;QvuIJ89LLZAfxOW2RK$d4m%uMhsx>r4fWvmjMCT+OvDT_CKZI*ruc==o38an!0eM z8&YOVCfINbC&R%}2*}{>@Dnt@$sKgEQJ+sLlu?g*b=1N^5eO?{MKN$bVXKcKPA_Ly z(ykoTp}C53vw2+>&(Up<5$pQEKfSbWj;+Cf~TL6QVC+YMa_AUd-JjZewmR~zj zUa-Da*!u*M@l*|$cQH5W$6ugAS0{T#*7?#gmr)a&PFms$)U{*sTP=NxN2%sR7V%ygX){N~=X~M;U zEZzXU?%1cli=Y5fOB{2v(G^S=z;TOGgW% zSYpgAscc>@l-e!Xb&=|;mJk!$V3rZr2TLl8=+J1Vt6D-~e})dyl||C0@zl57E0xoN z4)l%M`N9n8lFyPOyKH7zt*E1e6t%ZjHD*F=row(2@PR|2=lmfC(`xg|Xg` zL%4N_-p+0SZ4bRhLmTFF!csS&m+n9uIx=a+R93xqyL2I@359~x=p4+4h?l70>OAIu zJQUflRhxYc97P-ae9wt{8{)q&t3F0L&2-DR+A8ml#}y&l@KWQD$qul3wraryA7v~S zPgP%gXIwi7nfo(vT)NusvcBEO>~7FN^xX>7B*+NY<9E+>pn>wR8p-ROINx3f^XWn@ z^X-w~pZ6FzSj-;ao_O;>0iu6jfX(`Y#-EJOT_T676=H0}B=PbhTuNSluBcg4MQULb zgE8EB@K{n()q9LZck9*cqwLYf7LNTVPcgLN&`xNs{IBo%eNiG}%`D=p_v$8Q1WP%g z4YSB!4$@;}?}=kpLQ> zzYbyYN%gW57;p5pcqt_vqnJ(R_Ha!umz+`EtzB-?TPb~#qY;lTteUftk+}# z#R$D2mpj9)38;>Jatp(MQQ^1dcO9>{#`tCuNAA&@Xb znnJDYg6;qd5F=n{e2u=J&^>kE_PGf10BP(;yRX8Lq0TI6q&P3Vgg=UhQD=iu$aNET ze2bXPb*m>|L=~LP9w3n#d{ji+ISj~rLds6giI2nelkLOZB8a(N`! zt+7!;!%V;jmIK60k1z*yz()}$?DPTsgUDU*(X*`anQG#PBDbWd36b?*%1Sac8Ecvn z(F7mxVhEY?AJm4DYS~OinhK@JDoaYTl5Afx<9<&FFm`3Pspav#9{Ak56ynqk2#vRl zr70Q;gQBe{a8g`)NZsd0^oQ#Pf{A{A1xtX27h#Exkpuwo*(@~8bcnIO6xj$!1*Wht zIhrQ}wOU!4KFW*_gqHHTEYwDJSNI{r9f-zxExq}D>S@thpwI$&zOflUyuQr|G^53x zMC|hc_p${39`k16?PUS!q0HV)dO(_PDNcs{{9ZqK0KDGKY0;?w9aq(z-fxfscC zqcZwSMKD)tD%Scwfy>csv~{9&n2~Kj+U7Jl&2vg;I#q3pyg!PSf;r!usn?u@Z@ST` zse>9^8sM+T>;EY6`Y|ibz#Qep#jw+c_jbMnLp(I5p0xapGQR`OvbTQp{I0@KO5RuZ z0q46!c$U(0`5=$Qj}7-!$*|3b(cknz`wk>2o(|0iD<@<*Sjcqlyqq3PqQxqP_*f%T z=%HN1tA?>C^z2c5z78i%6)aKar{I-Pp<`xhYlGg{2s^W>;g1h+d?og&Z{P3%rvDO$ z=kH>FzS{%FAbP=MMz8P2UM4Zf_QW;q?j_i&^>OJ;hj>Slo}oiYTzZMbj)gi-3mggC zOEemvz9SKST1ri~lP#(PiPTY~3dDR{2$T~;LWaUOahz}nG*9u}k3wHJTAG1Mv@l08 zMaG3sT_)s2>kY}>oCv?MssCri32!i2#faM`@-{I5!lCd*Y-FfacAmJGG zFgx{5p0Ik`yCyF}dEUh!$0piM+F{i?f%q_koP0 zT+t-8Z0y-|@J_==StO#Rui$+;kkRB#RiABqDooY6&!=8}^Ce}ZqfrOI#vocDA~x(K zgeheU%{#f!<7zZv3P^^Qsn@d!Bh6dmZ*I@Wm^JIB7O#h(stPA85s&$kD!p7E|5Yn` zXFhV6YQm(2O#h=?g9#>q8?xlKz~P?5p-V_2Lk{Bk@-qLGV)4U2UP}kgLgj8MQohny zHs7v0Bo2bh?gZgdNf+JlyY6zSx2S}%IALed#-AId&Mpnyc48@&EXAd5T?&R?+>B2KmY|(qU!H#-R;f`&Kjkrt z10oLU>t7{`#F>EH=adD`?af5_^6OK&Bm6su8)?eVR$1#IZu6YEj`Q39UA~_I58=m}B)*xXoHB z%z_8MEPuB;&dxqy5E`BZexEQ#J*#{nhAV?@XkP!WXWsJ|uavbM6VOw*dC?j!RH2v$ zomZaDbVhf(1pMD%P4<92aPPEQWru5uimleS85)6SiJGhTJet(kQpqslr$cFZFadf$ zPZK*#KbFaiYIOuVHrM_OmTGfwoT=ul?MsoitHQA;Q@bwqRuuUw09G9HM@_q02MScQ z%mi$w=6LP29D^T%*AO3s0C}J=Uh#KG!wZ!Xi%~5r39njGcVAF&O zJDp}Cp4vJ$WkzHuA;Ys7+{H~5UEbO=7QtL3D70v^j`@1TtUo!F*nNTyWn!)Yq?Sex zRV3n=$Z9TRANX|FiJ+vXN4}p*kO$A}HlXWRN%GODxP(omOVzLO2f^(sjtGGCP zC8BTiVj!yM3ZY?p4hwqVR}452mgE7lgGHOj<8`S8@v{8!_sO}z2Ifd{r|6u1r~<6`H8LxKr)FKXxmG>D9~VX3ZJ|mfk5HCK21c3Ll{Fx z-yrlr)xPpBh-l>b@YRw))ZRhj@Or5#_fTF1&hbP2m#SsbJ29s!4C~9fHR-c}}8({4F}a{#iyqYNfkP;qsxS)s|>~`(ITk zk8E-I@qDVsDhmo;jQ;Kt$ERooBwfK};@I4J5Y8#+N-!ek0aLhx0Q*Q99)*_lM zVlG9Ipt4%Sx+WiNeLGGw-@zKj@(C+b-Xru*VwSC!dCkrmZ@URJRYmz~sbQbJ;1QJnb0HgJJX@QkZA#bw1{&|IOS_|jx7l?zo zo(DxbqJc9vePgq_Dys!o?uKqKdD^0*4+WZ7d-bgoZnU2GVM4DpT9z&pHCM9e))lHsPKLjr0U8+~MH1o(Xvy6XQGkI=`ZcVVg$V68Xw)6)KTv(KG;clMq41RS zDgV#_RyLzvB6ik1L66LUq{HhLs%`%ymrD!lAlK$`=^fpW$WhjBNpW)$m6VXiZ<{oO z#)dDRbtWBGN6v*h9QNVP(x-Hn3&-^$%+{N8ue^=&iS>VypxG)TLY#t@G0I^&Ju+Zj zr~d(WhOjc0z5Ku`$CgC8gcSTKH*UGE!BUCIRKSACPDuXt*HOVAD{eyO2?nA|t_9g? zkHizJfqm(L6Yje&qqp#mbFn3^9uBsbS`sIEKi*Wm)A<&Tmos)lesYXWQ|TQCT*jI5 z0|&uDjRC`o=0O<0+CB!dS+tEkV`VtRt`i@#%Y?%Ae}6vQDm;?TI(4>B2kOHdGUHrR z`KN8pvOx;H#4G^zTvqyWxy<$2$jXm*KrhjswYe$_G$(U zli68L_GC$-Ur}`oLSJ~29KrZutB)KXeR`Ad^s~a`w8(6!3p!urt|}ybTax*O4gVe^ zi1;bj+qL}57t~Dh^cxa1#GW&#w;ByCNL%zxY>AEl6)jrsVM)0N?{ZNFJ5bWWRNo6N zTW%;{N{dh`nA(!I5z{-nlSV=<>V1ZFFx2Jcn{*o zE%^pe10fw067+h12>WF3ZSx5N79lV)YQsrlVFkC{&Wp30==X|;|1J~rCAaPa$7-GI zknRVIpx8uG*eT(ST?bujEhjiNEtg8TpHr?zF+Gk((v3ei`MSp4uZI@>+PYLuNc3ow z9dGUCT<$c13o_t=asps483i;8frK|q=2@gMja+JQD^^={ZJTgVt1(2Y%p&AEYKJR~ z=K`b*9{@vc97DXKyteAN((LI?izeH_ozTZ&K`g5!_z~%dXVc>pn?2`)1W(oGq}E{v z?>mvJPFN@#`;!V~6Z0uBoOP?LrPaeBIn^e>PYoAj;sbM4wR>QkiGC_u2(;lXFK zOf7DAxx76Lj4-hoS`j|JoFl-5CZgnR>i3>Peq-4#A1pW$}(()x7utHJeM!J&{(*H7z3R}F~_k~2K5uG^e) zEW6bJ}8I}|h%!p*8AdTBExnvr_EIST*<%j4vuh6xE zunj;20x%d+{)MuEh3Y{_J;@k=uo>h+84wa>tA4qkq#HfpB4ZU`XT<l4OGlhW0#tY6>zPmN%rS8#VNJvYOdC!Alwms;a8NO0OG$&DzafLTZqM z@hvID6m5Qh1G0<0>-05l=nVvOm$%F&bnh$2)IiC;&OMC!U~f6TEQH+O5b~U&xCvC9 z68fX=EXvneo9SAi7}R%z1;YY!Gyo?mT$ur@?jUB*Po1}!g2Dv9bt`jO>)iLD)`A$! zS#q2G>!AJSN)n%@s8aoVbT2IOq8#$Kz6fuAu|lTkOR9-p%B$}%PA78(AY~+;EqIzJ zY-vPkC;JFm=o!WV_VFh&ma4mBgYV4|G5l?G*usZMLSL|>r=v`e4=Q4% zt7Q$eRy4>lY{KXWpVeU62hl5I>++X)!v%d<;X3=K1QDfoK^NG-TB1uR%HvJ>%E0n|W9hwbH?&Iv@az zRP%6ma9}4XceiRo$-P|8IEX&R)6mhog~lSKtg z)feG{;Kjq|qwo^p%6O-I>-cMqD0!NjDl+DNmC3%J)z)fw`l6QMEyml#BSzACw-k5# zz8)`J_!;=J$~DR15(wjYw_;ZQdsJYcch{k|Ow1*7&N4||htl5CzR72b8E1S{W)AT7 z;L|w4BY4?H3?^$#Sf870qWp^ouOFRv{N3iySz}!-TQYa1TOp*$I2w%EzJoLrmxpyE zkn=1Bxqsyj$wsOQ_yhgsJ|K!Vr)wb&?NdwH@e&l*goyFlhcT~h+fe2wLoASd1$?{b zI~xXub_0xeTqROfZE(Y)>3WHu#u407^mo@de=;fR;rSo__s9M{t5L+J8IMxO- za_taoAY~B?MnB?^x?GC=-=8f37o#I{VSsx|rCl0hcQbLTfaLkG>Czdqc}EEj)7G!b zeh=IJgKXHMd?ObyP?t(`{T>3z7;#cT=sh%eOKTIpmqEN~&7BhH5{gdN;QN&K;;A=R z(CItlz9qXtI73>6CevKaSx1CQx-^}0TtqhJlcNZ$l3Js18id_Zi&ettp)MLeCk|c1 z_)%Pd;pZu5jsV(d=k*L1vB&qUCF>1@5lZD~BK01SB42Vc(gj<-NP~*HkxDivNE?_x z$mwn*mvJqd|5VJ;l;{~%F-z!!?XBihc@+~fK$Ro^D;@GilGKp-OHKaojh7mAi5WPB z<|Dsk5_=_fVCPYNkh9_en6!D>P0wi$;G>+S(+&`p1dsFUzOsn_t|Jj;q-G~y_q;dQ zsYj9paW@>My#MX{_jOt_+b)i!JidIn$fdxp`m(p*LR0SLTK8lQWe(9uyWqANVvOvF z$;_%)Os)Zx&EkoeFWX<+A9~(YT)W@5bC129)dk9zo!R3|75zxH&=RTB5>r0L|3Lg7CCm#-s~|hNK~rnT2aABSbgQQ-1L5#L z3Q-WCTM!M+LDjqMJbn>f z+q9Igx{Ymwtux0?Sy(Vz$$h;1Pkkgs<_JBehJJH_Htw?v0~qsl=s|r9m1i`NC@Gku z0hDMKQy?__^pSiFarN<>@4wqL@E!HOLdH(}_-3g>(USZw=PhfN8xY89*XL&4cb*dv zdKTw(sG^PH;c(e>g8Cv(N9#!}w!qWi-a3AdaA}jmn$Hao_}gRsLSsLLRCg_W?goZ9 zstGj|VQF^H$+{^fF%X|(`f$vHa(}M8IB4@Is2ReRYHaDDE>!JQ>-u5Qt(X1|URw@7 zKt5zz5eiRuxpaxuzNO(J0#|v)QhsNjCiTF<37(tKIW+IwBN`_tD0_2+ynQGipvNT8 zT_3OaWWaSb_#hL=YZvR`sZ8LK|G;PS{%-WbNn0DSAC=_kAKtDVPmH~U#kKhb+R>BD zId!E5!;|Cd=SnT+l%U=P971&>*@GItpgM5B?(tVdgw|jIclk2d$pWEQ z%00X&Y-vg_m&mKQKx|1`y5Kgq;0Tw3sbGD`b*FdpN3peK_cR*`SUR`#r?o546&KX2 zdnVbmG}e2RQzs2K0oRwW<-$=e2QR@UW<3I^$#mES+WkH|4CsyI=9_{lB1FsN?0pna zw@5xgnf?ERs8i&V)}W2wUh2c;Kf7w#n%xmf$h;qhQ8~w8^#obWrVTW%3BcmJ)77eP z$RZpK>iVZM>NL>|7Kb#`n^XQWvTP!8%7=IKZ>MgBLV0IBN_1_xPh}C1lLh`$BaJX#EypdjS9_t$``U+F%7jCd^?|SLfXDwm1Ci6#DIr?32DGL*zbDOJc_|xLz z*-<~OH@MaMfZ;zG`G{o;RZ1LF09yr#J4yChR`C+re3;iUT3wPCJn{M5XzQ(QEA(pZ z)nV3E7I?O(74as#27VN&fb}wLUsnL^cQmln=A(^4*`pUYHUoxx zP{6N~l(x@HO+&@W9($Ds#bc!X$B_mFk?t<(I&`-vaOiG0B5~;M{PqFw{oVWSH{Sn`u?J(b z*?X?J=9;zEnrlu?{q+sxs=QwtSGcBv?_yfJWH=k^t}f-ByLTgVy8Yy(dUewUKDZ4} z8>X1jFQvc9ev=UK`^FO&ixnxy|CByI(Qqikdt#csVI7F1zzX*4_jrn`HZ{}oz$e2s zveXd}zQreL*(vFty!?`SH&IX?alfEdWzMhKA3mn2e7$!$9vXFDU3*EFXgM(s7c6c+ zl`|M_{~q}gIFGGLTF~KJf@Gj6%{@qD~xNY>`Q)NpPn}0yt=(^LoI<42g$eld2C%sM)F+ z)*#^be&2Uj)Fw#bxNvOzoMlLecOPT@a;GymRz216 z9n%YEn@pP91uGJHswK4dsG_eNh7hN#&mm-Vk>|;znOxu0#>LG^eS&Sh5iV5(kOb#9 zr%77*G+)Uv_JhU3@u=>#)V%DUJCo$Q3Xtb#HuV>_>aWC>yYquTD+H7M1Zn#sBc05z z#*PkkaRNuwW8_FyXnd;fZBl-6Wo71BqtcFOFFdjBD{|ymn!*z%3>E3F@LKmePw-#Q zyIP)kMD&HA7c@A(VtZpvIGDYoh(0x+tud`mo-*-X`}6~u>{2@#nq*zCOE>|VKX{tP z@WmmGGH_TqxP$`d)H$5b&_VR3-yw%KUS5uoiVFE zvYLw76tf(f4-Z3Bo1KgKbk^VEJHhWA<#+#a)dp$9_Esa8J&p6V*wSUD5=0)H{r<|L zR0YHW0P#1{BFgwb_rQ5{xr{Vk6czg8$$~8!2R#XqY8cNx^%*lgZm zO?q^9#LUIdzL^j`{N_OLDm3SHgr-RQk+4OQK09#>>&eg-e3j)Qh!Cj}X4}hwTr(Xz zJImS2UI_M|ot}lg1TRDjPiTiCj#L5z`9TyI@A+X(3}l5B5hkv8;0Fk%1!*DYuTD8y zcoxaMmJaX{5eiU9d+2A%e&Kdqb>yp|t)y%smUPw1-%Ny=4hlHl^P8=r_{PzZpNpRr ziOB;&CvRip81F-F#d3TCnq@zrgVV9lV|s%;P|UE>z(A>)kbneWVGvsUHKkN##%R#v z+*S~wz=3TVtjq?gLKOT=ocD-LXe5HLG}e5xrl^2yxqNvFmD%c-seFogrdt)vCie5N zKs%UEmd`JLjS+4_WSl+}pQSonq)k?tZ?oAWb-EQoDmF@+Y0CJjy(JX&HGHFyDb*0T zu^ZM3rx+{sDXs%_LyzCBWQwG9)>Y>Dt2^yty%Fw6fIQskS>cCjNB^PFZxN~`_fMlE zAt)g{Gui?)0G3Mk73O6*wRcwpDvaeK-)5$~qEHnJZxXhSruxqEHGvc+cpzSXan5NC zf~K<N8bM!(!Fq=DC)>EFu| zmds~=x~fWEU47RBazaW#R_py|Otx{0I^=Kl$q4}n99+`(WBm?IQw`YRX1wkq=U*z} z(tXU*P<7tPe|PZM2e}JkW|_Ai_k}um4+vxSsQ_&a?TJ}Wp1oJA=X=yzI@8hNJc^g^ z1sMamYdDpUItn)BQuQ21yon6ErjE}XFyBf#X!(TUHvaO+Yn7j)rI{F9JtO>E4!ac# zoHfY+$k?(BLRDyKk?V27aeD0$lWrt>?=-G%lNXh_C6DeQ$#UAPofo)&r|(I6zZHZ` zxC+DrMn0TAdYAeuwQ;9O=4@ZszXZSx+iv>KC0?D56tf-x1MRb&7)ch zKom~&^YuuZjMBu%&4x|jf(5D%@xAO>uRb3lsZs8MrVRyese;h5!zOw`Iq|v{EKfsk zEPXJ2nqnv?w%vkePy?FDFGR`{^JRf5d%0xt^wrM+2Y&9f;*WKVs;VInWik%`5P!)E zr?)Wnful7sWV-kMmFva@xqk1^XypbNYk!=MK<^%air?qXfzgtcEwAJHj6Y9BBNC4Q z7l9YTL=mUIMB3tT0yHF5I;M-Mo=PC`J=~Zh3HzG5_fKq8fxJ{PpMea{8NL{X^UKr> zyh$uDGWH(yk*{wOXq;1%V3pVJa=x>C(l^Z0(1UZque9WZkk`7m?(*dz!2mZU)s6lK z#m#{X_NQTcK(v#?$!jwsXHI#biW${&q-u1oxxC1?8Ebd~ivsuKKrrXk$4n+Mhwt=f zb+#yNE6b&3VpM!FUx7aVMohQ8kBHWvpcb@;5mwmNziq-8bH#u7c6wd2_9Kd0%S)&? zEL2>(gYRKP+h+gft=q8+v?}y{XK)^e!Grs8D6qO2MuzHuzs+$k*s#CeprYDed4J;_ zMSWgqmeBz3sL>#!-YfjE7OA8(OS83LYkB$?14@|-=(x}QVWDQ{ywJfD^VUZQ`cXYX zRnox^K@+36spS-r3>X&`lfc7MswMod3C8|>O-G!AsCb9xKY3e7B>N>f+nA1u%XvfZ zm+~vUVtRmKWv+p|6WqZm04zyPVCt z#y0+t!Wt=WHG}JB^iUqdbf}Z2T<|w+5RZ{lfmc8U_GtiJi$tPyS;ak1t4!WA=w$Vq z8N*RbeT*{BCCFJef636A4!?iH4+mnb8-B=F! z7UDvz?~|J-zf=7{pWg%*Zb-bb->dtK-+Wf85!yMPe>y6n{?P&xG~lq#yY|t8P8jLJ zqpMlKQ2x_kOXT&(y}JNr3HN{vvR=5xz6^q-4$^9C5&Znd1)p0Xn^Shr$HS@x1CJ@o7h!lY2 zgsn6hulRZ^9h1-Z#Z+2^-(3dWq+9$T`|Aiv+0d*Q2Q7Q|W)TH2-=bcvWf5?V(wrMN zE)(^Ii+n|k?3#Xkse?bfHh;3hY+iuLmKN(r*d3YQ^Tk_7IHB)}T%O%3Ep+@e@fxIo z8sJbkaP|e+TkJ^vQe}|+)epEM86$AWpX*#(8AzvkgwAAl<8~o9*b9}KMkLtgQ`);= z#2+B4Gt`EQ=iJd-L08^j)El@6uhR%oqs_>%7F>2C1QV!DG48d$u2`^Dd`x@;{m&GHp1Qey0K0|rHM+=7H@JgZLR;eG+BUL} zh0iBEwuEe3Jm@qk%$u+5Z#@76^bZ68^tkcQjns%CC3t}vrdcE3Z58ubSBlo>_SubN zeFnUzrTd}qwj3?KGO`(kVDX8itJcFH{V6Ppo3fQR6v{$KYF;-q@+PotfD@ZLyATJlA|L`Y;QIbLUh{alnjWUi6lIpl%cl#W#$xK3+eK5bF3d1NfFFgId| z@F8UCLh5EUVt@>*McxHC*x#zWJzeK&6Mr*(xS18w4B#VMJ+u^^+L?g=Bc9FgF7Kf1 z*ajbMbYDU=zu}=rIF42FbI~bP$uY-a+I8jdv{p@$I;YoBIPTsMc-)Y07hu2>BR@i8sip+CESsvF*-%wHn8B30ePw4wnz2V%9z$9(5;RY>H zcToIrR!&scTCX7|Pkw&p>!HPn0N;(eCVlaEYx*UZ=u)q{P{3cTiXjgS44!Cn#!$nH(+_}s{Y z_dl^uOP>Piad(d7TtA$1Kk5`Jj~Xq_+JI?2ljf!%FddaW14TES9(9a~eRTEkJ{{BH zBet~3y6;ym{*UM+asNBy+;0V5+=`|E)`9u3GCoh>ZRF35(2+<}@%fZgM4K0r(?uu7 zbQWKqd=mI?08ln8*>I{74m!;?+zZRwzo=_tJLxJYTIG12&cr15rGk{68`u0Pe^C6v zkw$nGiR;u%>Wv1v73|yJX|)7An+mIg#e9|Kq?P-uti@pmJ$G$uPx(UB7&<~yBYISs zMMrGk-Ydm~y^oW*iNX9!Q~c2`UX)0lrz6&(A0ho5uWeQ$Sc){?G?J#*w-o)M-7N)g ze)sx`M9E|%;tHoc`y;}CfFSfp1fbcI42Xhe5qkdLmkAkQc7hi=8gH2#!F7LK5kY{y zqN&&0VEHe^1wfHlkqK|zSm%FX6J~($GA7aVo&O7wL;~1}S~#)&Q$pw;xOf9R`HJVC zJo!JM13W;yHfcww>))F6@2=4h1K3MCwN7aN1J-Q^z1tDyjK^cl;p@rUKX%8FahP@-Luae5i8#Z$BOs z$Zbw#_Yv>@|9lLhLPawBB-%1h_b*PlG36r&IXHwcAY8hC@+@#qh3Z|AdUf8vZ5)3GfeBE%VemZNGw=L5= zd1E}fY_~{lG5$@r0l3wOcdRbw?%Gt@GS8DU6=eqE7)C>1-hOxwn1vkmMv`yZ8-O|z&N!nl zrf1`tK`H8bF2%FYBs33mw)@Wsi%j&SBeIUv1WDf||BH z#&gq~lWoR33uf=C^Bsf1rjF?ktlqJ1L(W0i*5z6ZabXBOD1kwaI4?i3J{~S1(craj#R7mWXaMYm34$UbvsMXrzk;5*ySKKE=e+VE;R<=PV=lc#nA8f z@H%tz#-X@vnw@pr%iVr<9B!c3hXq#hgs#0FPY0ULT7`(6mPYM*JGqn&j@;C>w4O}E zKE8^vERK*aZq#5|xvFhA{p~jSd04ncMJN8+=K_2x#B0P7S#RNX3ZAt49Yt2KQ)7}{ zrdxlGqZ{#g9S=HTp3^OxfpS^WppEg2Bl7XO6kjJUw>G^#KUtU>yn&(Pb;bIWAVc$+ z_EVUvK#w5F774ieJo=!Qm<~8|K&#&o6#ia2f6-n1 z;3ml>E2#EPNEDEBP2gBuYV}AZBC(l!!#*>;pL^r@w`*!=ST`glk#(IxQ?=>2^sa#8 z!)EgltrY9IW0#X-Ibdj($_{u1s;3g+V+O=ArSf~r4Rq()v(g9XJQ>w zkU6FRvC9!Slq#*je(&&W+{zFtG;AkN%%7Iyur?$ij!Q|iDTq3c@>7CFza`6VDeO74 zmPcYphLSPg>!nz82`{wEdlTbLoi3xY{aI&itQNDc?Df5HGbQVm6z#u120foip}UG*`Xpa(s8n*4=1_M+5+5}?C!?hG zgqsw}O(c2U?)8xe9otDhG-8-vx*hJojv;Kgh1=}Z_<1*hg1#f-m^0sGz?%MolMN`! z23yQw(>r-qYtv&x%-@i>!4|1-S)BAFV`|iJ<$M~;c6-E8u-f*q7CIdl%K94-%9UerIm%t-W|?!^Y{9}$Fyt93_} zaWDN;C<~FO?+77dvm}p#r^FMQuxC{FT=a1|R(uzQBr=G~1mhO7MsNo+Xht8zv=)^} z$H+G$_tH*Z+vm(aL19GswqDxHWDv;f{Zd<_^ZkB&czXK^`G!|rMd9lqe|TrmDz8%Q zq{%u?a>R}f*sRrv7(9G8Pvb@Zm)!CM*o*IBB&u?rxRc4%8oQCWpZ&s9Xi)d(Dh@>&1I7t0U*ywofT0T|2Bjn-BZU zsfPqQ&?|{|G^y|Bg4gMliYH=u2tl!9MMH+xgGx~MY0N5Z@vdg=2-<0Qq7~ZPqL8c6 zh9Kl=v!-q4I99ZWlY8X>?mbP6Us|`Fn=IHCu=5Hdu3}|ln@oB$X7_?T*o?kEs~s{) z%W}&RIQl^V)jSS3VAWAKzs^H>=~5Wg!_sIt;YCBQ$XsP}|weD!aA;T2jc)Onk^vgW6c5SuUZ@bpuIx+evblq>+2U3l> z=U@J6Vo@%%&NQyiowEZ{_SyRwc$BGL?fk}8FDa^a&^XqZUH_0d*VowdL+mHK%rJD9 zyLbZ*vy!EcbcVu7EPoJ#XBJusiyTA6KL%fT@>gkR6!lDdNTJKugV;8)k2zCV<~{f; zx0Cg(Bi&lhPgQHd$-JyG!>C_b&~zgzA>o5_ABA8+(%!qNo~EHMg3DCw4ftUj9CvDYRm!75)HdgPC)`xbm?VlPT)GRk^$GagIcyr=f8{6(?Ocy3B_@|WW9Yio?8Bcg{S%=&8k zu8Oj*2?nEe?Y*nj%IAH%ADhU_ed~hsYzp;DoL)lE=6-IU#?W!!TcvNy?5P^AlOWA| zzUN^@F>w)K>-d-zR>Rmq@ol<+eoknq%faN{uMg zG2QFh@6OaDT0kclv^K6j;8tUIOtq#iz1VcQKDyWrP!&9x&Lbv0vEN(N2t5vgUo|ml zZzmh^bM2u0z_q0dW6bF1(U&P|d-_Eh2o!XKVrmkR?8EmKj6~v`vJc3IlovNE%XUu` zpm{UTn1Vz&^~@;8p#?{!Su(Ybb!6PS!#${rbc7S8vZK6g_G=33aQ+{qYI*YY)BDEV z1rlXTny`54P3f>}9*B$i!eKq|2f=U~6RUmbgpRkfVKt(B1} zpwRQF(EH`OjBhzT7p;fw7qzcL;@2Zm-oHCwBSd8y? zzN-?}@GC>58pMgLM&+%>{L}yr4ZSm3yH_1@Lv!WYsaJH9hI{qmhi9833nZtPPF~Ev zxVdzCmhxFv*?yp=*k2t{pKq>QyO~>lLkTPlYv`pn0QNKX`Z@h+En7Oh0zo?? zhrGX!-Ee-qT-ox{GV>_c>F+)0U2c6>rg@~g42r=TQK{+ZVaGFJm<^`doM|p80Rv-v zKE28U_5_P_8SAnLH?ee>T>@$#gO$v`Gt()S>i-XL8)b#UFScC5k0>>;ReG|PS53aKdl)T2Qk z2V7lPZd_7u)YdrNyTPgcH7ImXb#*(jlm;>aW~ z=EBPmPfpst@Mcq9Z-_4M@GLf^PZCa+2znb3V2mwXCc`dn2n| z$w@o(nR(;2_oKy8wh9^JiTIj+US~h>dzNom4v(*L7hng-O z*c)%T8ywCkvZL`ehu{0eII2B4X3El^+}R)E$Q!u&<#1gS&5Wskp@^_}zLs!^TJ+J~ z8CzqE;p}tFnyvBb2#mq2q0-KMusSi`mg;bt{;<{=$aCtk>lF30R192>1-a_bgny2P z-6)~rz2PB}y5$F>vb9(X*l{yO|8q;*4fPecT#(d(y){d5j$N4wrFt}Jx#ysT7R;W0 z?@Z|UIN2nj^K{TP!xR2x_TKZKFx=m`HOVr_)j=VyP4>L{MBbom>@pT8J^l_9nT>5N zF4^jk;ACOVLdB{D9=qC%KD6Lde^RjGW*53FpvH8?CRtx3v1RzlrD@t)1u}Z&LX~=u zs(qyjzL4+M>JMeLerc_F8Ki5S2&taQx|#{tjjg>_Gd%M=vXGwbNP1C?TOh=y3$xfe zdvtv_fY~|kI6PXnaOfUPO3O;bXhvq0m>V|CY?m0?AWZ85Q~z3Qz19>@x;s<4Hu}Sz zo4s0vvbQHb!F749VxS1VjgG_Iw@yo-N0kKneEv)X30uJn`9azpw3}NhL$nj;Ps&y1 zlP~`D9C!sp(;~tXj(x27kJk~mB3;-o@Z}~UhHd`=7Jdh1UW{=@;@@p}Au})mpbYC{ za5Mfi`xi)WG32|S2=ihF$7Qsc+9j2mm())n1DQlW);?!bkGLg67Czt)i7+BU!i*12UO`x z>?Tc&IlEZv(7G7j5~76~zl7C?uk7nhLnXOr=mR@YW6W>(TVFX#x`lI|q8u5X?ya=v zT$QJOE9L&eAH){LL-hMAcayBDT1g@$Clx;EG7Z+{b^yaOgp}85wUQ*@z(yk$gpLv#t+5`#7xYMzqt_9lXP% z)tN%p7RLR|Qo|d=jG|q8h)(bA+CGv7tEBtl$G%a++*wyvRX@uT?5!o8BvVT9RS3$2 zW(==32OVqLR&Z!VhdM3sEgU3=6eQTPmzdq}4mK4u@qCcuxYWt!%(tPIZ0Jc|f z{{_$O#3pLlzU2V#k%zxVRz@~8s}QWx(o2L1YB9@{9dENTpq^ds82G{ILW8fe5EDr- za>jaZg=)emlDw74}6e+dY-wF z2fJ)1VQbkdYUp6P?vU^a^ZI$p5))|nDU+13;U+;mNtt~K97vcHG=9GxDBkn%Dt0H3 zb2V)CBugeh#klLJqX5~>B1SD>W~e?~lvs)Gh#W$19wHNYQqtm8@PtF#bVJeKhWtyMgP4ClLj9y#l1e#BQXL zff*3RtoqnsgeclXq77Mh=wnoibf}5HOyN7hm!iQR84h2RKB&laJrWEs#sawY#*(*dXQjzsu^b#N#AS1IR{)9KM3q0OI{N+j@?q{&16tx% zq#G?UpYAH;(&Zqh;Ma4_d(L29vtwwd)hHj&q2j~Ed<>(BB+p|C0sE6XTbOSKv-mv% zj6JGJPx-bV2Fd!#MOdp;`e$nUmpEp1k%^giC@ku&gip#59bF&U3OKsl8(Xo%7?_*& z9_waQh6Lenw_j;T8{wk&f0IStx3P=1{yE%y>bPN2QhaZ@sHHYnu(0bAO-fdPm)T@i zPM?|UxiYQpEAvpwOLC$^L#-vNJT$^vC8TGHDRzxl#-Y)ps)W!jE#I+oYrE1jKhz)W zi1!?G)Hv7}4Y!uUY|=r7S}Clr4tQmzED1_*5zq3CNfft8Z0PLDM6jF129MM}y&&wj%PwcOCg3UG;Tlh?8` zGn?!Q_0=S=H|~7d24fugPyUYn)$k>LF5xc;QYhPGVA9#}?v)uvn_x-Xj+{_5{{iNV zHd2Gm`etdgD3PkCYZP+|;)h1-%?__rp*sE+Sb_B;fIq`URj*Nino%-R;Sgc<-e z#{13Y?eUL&?=>ByBX7PP_6U4Ess@_6_+SJ%Z0yk-^oTcFaU@6;wB75~i6@E4ZT$3m zSKJ3Hn~L5(`pL%`^h*ziq&e<&-OTh9-!r5oGmIIY&~{}uTfXwgp&J_ff_6+O(1@Oh z`fEl1vw*Gv%tD#&Dzw4f-8=<+fv3o!O$X0Ia|Ub#N+q45vW;o#J*#m+Y_-|rW{7emuUze^rCRVWdhY$ecC^bS2ZXMll8s(@-@`C0oF z3Ad{1rr45HtzR|9cGGAkimX1Ew@Q^qK~=?YF5`Kix}!EnNu}NADn%TFMrW3WvQf^v z?xa|)n{{kTM4Vp)p1#58CALBLH%vJ6!mHM%)l}3L*r(C9Gr~jm6=ZWf{TgK=x*C%F ztP~Et?9==eLG54H_u1zcnZ<0ivT86MZFgwleo1weQgtsYj#=d=@ytbn7Q82Kv|RN~`)KjUA8j_BhYNOe7RGlhDY<_;^xr`@ zs5I$(yKhXHoFYP@c8B|qyO77QvoLq9JnU2S4Iz%+Z=UGI-kJD~qE&m3XPVbv9MmNC za-FO%$Y8KQ3oZT8o$lmK6hmSpwWr@-HS^-~PE%WdjJYt)1FZtdW?kMzF{EPBnB))@ zF$=zkrRZ)!nSP9VeaFHm>jw6N`@g=}e088ne04)19Hw89;o7M|Qt*7?y8`CedNWUt z?@il>Cq|Q{3ZK(uZ4o5rNZ+|!@Yz-;?eYC%hBJu(5A#6#2lkEh+-i^;PqW1h+orVe zbi_dY_RZTtKscab2Y|~K*%bKh)&J>x#>pe0f8Wo8mi@=YTt$0~N$*Zn>hpgBdyxtN zgDXcG{>NV(!T~(SGnNl%e<=I+mwG|~u>Ksn?i&jK4=)k#|66BX;HSvT#vlacf9@)9 dE6KX!Y3B5NwE>Ak;|}m6`9@9*@>ZD?Qk4)H52Uj5^LOv}^K1TdpDgdQgUa^#Bv7@fiGZj1*Qt}ETs zE8LCq+|cuoAo5})aqXZv>sJ%FTW3)Zxrjo`z(C$_fyi^B5a%xU#2z4B>mk4sM0oi1 zo@;1~ug^#V>+do0O`ZH-$j&?6&FNMktB!GTPzA%E1mp(Z^}xV;-fwE)N>%txM2;(% zNUzoQ32kbFOOaQRcg^hl4Ll(PE^im-H+o3QTw=MvM?rG@puQ|XbPwv} zrS!g@*83Q1qI#@gOi83ZhhQ(g_1oAnrq+R#IN(2_x>NnBMb65S!Mw>HIb!0ks{s(TJls zSWo6U)df73>EN^hY`x!X5Zr7GB_-$fK7g&~9{F-Krt-lV9xyfaDhB z0-`k3b>BJiR`dH?*Nx%2@h7I86!k6$%UjX@Uww?g^TCtu-NxjN4Wy_3g!jG-%oP8x zzvbTVfvd>JO9I>$#6b;UTDTR_7ZT0+sZ%F{VDvGOqHq=dk+Pt1ZJ`-9+*tdC6;8SopyJev|1%FUpapuZf@bl}{& zk(w?xBvV$Sg9O?&?VYYAULM}2;wodalJwr~+Y@|Pi0rF4ps#uH8XJh614Qx$JJ$IP ze`yVVKN+${-(_e?S69;#2lcDC_C9!cFl_g>6B*@uACw$?te}Gf5qc)nWOPJSF(Wnf z-XcszuH{%#0ckcqC6J3=jLU5zz!DU+Llp1&)C?+sEYwd1hwhIv3E|m=r2(S734usd zcLolIG9R}|#fca%h{Fk2+byO59WI3340Gm(GYLZAKd^}ngbdtdL&a6zL`#8^Hb4j< z!V?X`B4Q7NLJj&&tSO1T40kA06w9^?(IULAfQ*iaAB>vA3dD0pX$4CXY0BjWqTHY# z1?mZ%=V$_X>cayRRj9<#F{Pl%gO$WBb7d47OJ1q$XNgrPlo}8HJMW8m>mP0p;n{W5kXQR^GyEhOP`cj%)A%jp=o|fW?V06THqN zk9}YjUCCXW3Z1M0Dgz^c%+74B2(MtTh zxSB}F8vdx(pkqNBtEs0H3*$N%`I=Okp zsQJ^mj=8YK+#)VfWvNG>=a5L^s6=PUxY2sU%)|D6t8HUlQnG~yy zmlTIcOg?7bLjHY@d;y@yzA)Rs5BmYz8EX|A6-x?h8M_FpCbB0|Jklr9GLi$k4f`M! zn7W&Kl8VT($HZrXV;+(koD!7+o-&hq&WOZRRqIe=QR7})P_0sXQ%6_lSZ!77SzA=A zR)bhuS$kP~So2tCT0K`CRhz%mw^*@Ax&)|cs+;)A=veZ#*{Q&Bbqi}t^lSK6EhmC4 zy8V-Fo!#wy=1thG)~>l>wt=zHilNhi%)YfjzLDOpy5xam%826c2c!VBDhzuNdj@;l zb6!HqY+`-Rjn5r+9SyoYZ6~gM=Vi-m!}6Qz`}f-u+xA=8n42h!Xq+?(#Ec}8#3gh< zQg^*-5eH34jX@z`fkM<|Bx8AXoI|}s@C7bPURcN=aU4k;sN&C(+LBHSe~T(CDH)+r z2|D?o5_sg5RA;n$MC(nsb=szGlV^Ery{IavTBscgU^DIwfHwxui`c zT_j)Q)nXB28RC5CMF7ThXaHrvwXT}Xye_-Et6(%$F~CixQtd4LSM#sp7xKqsSa?`7 zSjx{@=-UoNkwqF)8ndKVq!*~b z)zYsoY#3U;Z%B4gaWQgEb1~EfUsGALX^-$UermgEzoor7KtP9&#$v@af|rG7 zfe(#pj$Dlzj@pk(iZqT~ktva}kzq*t#qpViJbgYrcQkU0d2BreE)6PmK?4MiG(sjq zQQB}sXr z=>BI+hf-ut69wp-*ul9b6s%-AHW>+ zPQ)Cf?a%8WH#^+*Jc(sT1bQrcs6J*qQa_GB5x-<56ono}1@-^#A1eYD_3g**w@9qVP-r1G{<3qi!MCWhI=17m z^u5w}|7snoCNYC{ONk(_nmzAA<2KK${7r4GaMIWP+v^GE zneBADd*8(BX(_a#4vg5GMNBit_S(+i5-=x_Ky1uVPE!$>eON!>p){l zd#XuwCwvEK&NgKuO)1f3DGIqpVLJuTLaO|FeJ<5P zt=lz%yw)o{^(geQ6)k(`i`H<@vnO)|D};eaVl+@?a2A1U=LoMKfC`^w#+BhE%|$5g z4A-B|p8GF$8^1=b-WmeCk_3&M5*!w;s_%G#dxx@DHJ58U1II{@F;J|KdWcfccQ7@) z1v2{xeW5-GxUlw-R1u@Rc(|XDPn~}QOFSc$er_`*aaQb5Tq43`eK(|(BQwpU`_hp< zow1z6?tFB5c*v4H)0wS1s=?iq(vs4U7RW!5*!CRIEq4FQ_^|~tDLD-`TbH1jS&3DT zSG!gLR^ONgjdr2qObNw<^mspWxTDOz>F{iDt)T_e^WDb9L)QB=zjgj;9|{TQOyH*# zojr{mgL~py{IX~LSk>BojS-afz;ME4F<)j(c7rRQ=lZ1XT*3Ar~1RXCshM$bv&c;S z?al7!cg@nJp%~Nbfc@Ql3;}iTukTX#w{O4pUPA39I_tjqzL0toTg27x2LGPgwR#TO z-QT}^x)y0t@TsQb4eW^e-pjo`dx?tP17SFOBYDJ6Vwye=A{3ITtNugzCih*j) zyux?KBqzndb>_|c^_TiXnrr*MK6(NAHi{ZD6ROMS2-JyHZ|yv3M{ykbUdlF!aZP?1 zV%ZweQn|;LVXwJhJ`I=Xc4i#2G*GR>>K;%8(j>+-QHu+vL-_C3Q!N8@;CP{j`%(GL1d%o-TXm_3m$!Dd< z@yFYz1*k-*eaJ$nfzVi_mRL3s58;KLPrdfyWMZxO^AYjU7@R#WiS!}VKkl&Z7L`_? zcQP-V&}z|0vRrRxcV>*uI|=|Bc+5=MGn?NivMgqgVIUS5u?mzlr4UY?QADiR=-L-{S_ zlx2}d(I*I=fzLz6;|ETg$+1<$*Tfrx8>Bv>^r8$;4A^utbQ`aw_t#6|*TV;M>n>$c zai_PKP`0r!H>i6s##_$yoUKr3QAs`H+m+kiR0ULO(3y$BDsiILR+>0gY2?f5&SN)w z@TrkZGV7!BbVon)Y-DVXPgw9?Goo~oR{MB8esO)%Y@2>efFObRBJA1ot4BbrRTQpI zxDUbY`b!R(8F^@7mgK^SPV!h%M1q^Dg5qzQJ1SFJp0euFUrO`}*Ndd((W-QMh|=yJgujYBA`3@iG zbaOypem0UZ5jb@ubK0 z>y666skw+Q?phw-v**NpRC_EwC*OnV;=bt`^v3atVrkRtISJ*ASss77ufA5zvetX% z##SS&8j7C`*+jiS)T|~5vsEnbH7E!j1PI-Cki{~P*&!)E<`Rh5_wSweP(fr_?qmlh zpLhk)2T4kFLUb^2}%+q ze=Njk1wR_HaN;lohEBQH({Ll}M*0Nj4x|uaYtmx$SQ+h9({sX)W4U46qfUe(buB1v zQsGezQ>nq21jnekT zDUeUX(>NgJYd=DtIeA)^26t*wElvX4?I2@Hh-$( z=m>;7Bv0BKOPZZqZt%Z_!tLu6W>0FIK(ct|fb#4D4f3O#1TpJ^gb2`x1(g!S2ZAaa zfE$Gr5E&1`N%@86NKAU6ot^8xX{D8ATZd zALUj9Pxtc<&+p^fs@(3_X}CyzKti`dPepfv@HQD+pvI>@Cr)Kdwel+w{v{*(#XH3_ zt^a#bx>|N)svSTVtsT*uG7zDcDn7N6xsWA-Nx+!v_wcCJm|5SQKVNv@pkhzy5Z6#r zx9zCN7|XC%^7$x(zK<=an!kJiQt&ve*LRBu*zl={9XDgLi}+ckn`vie_teiz*e!;S za={YeF^Sws`sLr$^Vdl$>>3u-No5j#?&Y7B80VDdQC3yi7#O%uQq4bve+fPO#T6Qy zRVd(CY`T=uG}bs$+j%m%`!J?ysGl)%4ZaH1tK+r$u-497%XjYO;wAs|^O5O!1ak3{ zID{^|9pa;;8O$6~1av0`lVb||Fdg^n;0RXkVu~@^Ks%ZR8c#w`d<`|_%w%!=ghN4- z*>7`hv*Qf)wwxcuYB@?&o{+jX&189rJIlR}O|V6{`AMfuo@aFH$)#Lc_dorM6q{04 zm6q98a$j%cl4aa{&Mn@1x|(tilhH_7>@n7b&!N#GmBD60>bRVw^p$-R8K8bTSCLmX z+0ngdE$R89*FGPDaKwUm`G39uV}$iiSJ+?Ef#e@Z9|zt zX=}-v(g5*!S(H{=J&Q}~Ee%3O6fp8x29O4&y{tL19J$D~;>Lx;-M)CRcXcfoKr0IS z757HrYV^M7g6mA;0)WU0_lcNoca!n#Vt(*~Duz_wNP0Uj8mX^}GEZ(Z!9sqRJhTYD zU@0?=FX5XS^Ny;SW24r1)9O_O)D;Zm0&Eh40Npf|T}Ro+bY`0T3Wi?!O-h|*XN`lI z*Y_9q#$^lE%16NjH~f+IN;X-xL-uvOl6#>w3+ zsG=pfANI=X3o!)986F5Oc6z$CSb92cpJeUjC%=8a%y-wua~v&@s|~wcxti`~-}iwl z{b!L;cP&BEppW~5OiL9FXAM~yZex2}2165jBU1)M8HTl0}<$SM$t*gKjMu`#eRFp}`Y5)l#cI+~bqD~XE# ztNX`4J`xLOX9sQoz|GB#!HtE%-q9Su#Kpx0U}Od`Gt+;xpm*}Hb2fCRw{s%>e}nvc z98pszV@FE|XG?oKqQAy9G_rSb<|85bYofn@|8Je9?w0?V$b0f9?NK^Pd_2tEc9Fda`hF{&&y+s`*z> zUcg@h{FgxgPh0<2`k^j?Cee>yXykEV3*?;c#I(vR*2#kZv^#g(a<7v+9dfY~Bn&@7j z+&nmj`~4&c0`cb~NC`r$LFw;?3opH> ztRZv|YnD^rSOKdqDnp8;2r(BrzveO^BIX19+>FuMPhZx$w|+aEV9-r1CW2@R z###PDPkw&5Fs!@eQ$MOYEw};^2&c4Jv)(TD^I4hebN5^@GcIW-a@;t+$p-h^u^x_F z^Xfr|aMi=Tq2x&ap)zC#a!})(A~5z()1$#nGqO#(VrWkfBgD8s5Wc6?GmPG6)h5op zL^NSs{22qXK&Dmd*HkwpTgDV$=V2z+Zn^F-xj*+OX9wnCZeVIO-B-#O7sujF-mc)4 z*1o{kNB4_vhi_=S4_~l2l-kq~Krgpge^CljIOirNAMU(4;;iKxMCQgD;nU7h@2qvt z+RY+gLMgu&Dc{q;eJRPC`AzabW!_-8iPr7P=R{P-%(LeMT|`UwV@>7PyE^%)ebPcy zsj4mY>59&lzJs0N0nJJ{~V#hxOot5|D>zku~d`G^1KdLm(=fE3ly_j~$Gy|_MxdeY6Aa@*4 z5&F4gHaV;9%+4Z7-dZdv_wG7k?V682rA@}lw|Ve&J~M1wnb2hf zQ!Ij^XMp`_4fbRP@bJk+Ly&LSi9Vi4wr{M-gMgIE$AuN=(=L#*_7lF=$0@gx7hh}Y z!-M-N`CjY8ncCiZ0Da7-GwD6@P$^K)_e)8!hA=&zba8u|kpg|ioD+`Cc!3$(`^elhW55)>?^DJL+;vIo8FG93TFgu2 zSLYe~A9Z(4{yv=EJWXtqtz!~EfbN{`WjPf|=UMVh&+~cFkWhPxz5YDvie?T>BN}>{ z{~w0wSB!Q6;2i)f@efM9ne5PA%+MHXmIo9L-xBhLpdRCvbPL3kN#!qPbLy!*T;;m~ z6G(MSe7dMgQ(rIZ8g91S4hAQY#nfd^S4@_~J9RwIH)3Xd^_NK5nq(Q$yh~1|5(0wN zma)Y10XRfeCYkiib68$A+?c!ud3bfvkgLl#uS$M)2E=a_0vlp(>%S5qHw~8hJ3~q) zK6mD3N1uxMkcS>tbn;30`aGF>rb52RKz$1(hDf&qMf}t5y4s+z5KL7@dUC0fPC}oL z&fmyiEncJYsrY{~sVdf(G2=VmC8EcF2e3}+Y~tgYj=PC zOSnX>4LDS&>1yiy@6m;W{5u$ptihE?NcnF+Wf`C)^NTTb2iE)9q%O=VzCuHVoeA83 zg4KFwof`L@0-w zJzMJZ;`aa`Pw4qQCTj^t)P)a_`|Qi~odYEC0-u*r&N4Io*8Gr^wfjfOCf+~%38VjH z2?08pOw<^hwcGRWi2|s%M|NEfsmHm=DV${ zIb}6JRNYL5LF2d&lshA&i$G;O$7j8-yiZX|NK=!XNj;6AKIeEXg`GL40Zcz7!Ag@x}v0%XbFD}_cfannxhF&&USf?(Kbfnk6LHyqaAYh0nB~|u0 zSHn_e73HM)-J1&9Dvh!qi?tOm0sXTr(IzQ`0pcks2kOW_&=GR=|6ea6i08qm-^~AT zD(NsPwS>-MV!wR~8o!`8C^Qua6_(!#kG7R_>u%{LQtKqWKDo@BPN>#+WTG zj>h7(4lvzD-5{rl#|`?&KKhRf3PN{(`SRsvPF-Ko+q+$Y-eVvd-}z!wsI0=4`oMO% z{tN86`9xOX!-G2xr$Y{Uh`{TO^3!gDyiP4*lT|?QB+uMtW2R$8K zq~{HgST^44{!A7H1w|>Rah8FDBUwsHii(>%Ej}S(Z_#)lntUph%F@!(d4Cl9Xr>tH z&?y0PR1Xo4Q*foxmL;?0?dh1Bh9+Fk_tk~_>o$b<^%w`)K9k3li88)9Ub8Cn`05J* zK0GX`0LHiD(P@3`SMUNg_YrMti_UkfR^K6XSeJgQDX2e<&Y2mEjg9SQ!uKr!77k99 zmXO&5dd=e?)A?+b%k!wTSmxk1P}{C0I0O;js$ngS&01!E#4{bx@euHKQ!FsGpzkY- z%V9SwKjJ1G@K|F!kfx*E?#Z_1vY_j{V%wAou|+Bo{Sb?saY_DqF3G6twjjEEF?~nj z2ljFWEME{PTI&M`hL|GzrFV5h{lh@~y2dkk3PE`7*Ll;poG9Ag-=4|ptA({`M~lTY zYfWIH@VMgjJP*may`NYiAs`g2PXhu2P1_!?4`v`Sbp+p^7CI$UOyGpHE!;k~b!FpC z?T$3ahB%A^AiP2gajO5ajX4JRBJ{9io?WP1EZ5LHzm8@%1%XQ)x~}Jzv5m1RE7B`J z?a5pnu0!9~38y%|KS4EsC7ir|kA_o{X@4#}MewIX;06cHpIS6Y6;7uKzv%qz>p3($ zoL5#xLtTwXz?%|eKr53Lj7;}rQIQE0fo@Hpkc~eLVx$E#NJ>tY_E%3~gqyKa6pz9o z2iG4$G6U$gxh4hFArHF9)&M0SCQ$e1g>&Y|%E!_go+s&r9me7asn%jnvjx24-WYy# zg|1|I2U;AGFwb5N_<&Pw>Co8voD<1*8tNp76%`e+Kia$z6BBp8kVgM`l5=KICLyJ~ zw|XYj63Z)F!-Gauw6vr=JUq}bF(pC~@#7bUhldN09L&s$UjADA+E#C$8G8j--6 zddVuf)U^q*%t8QW>v`7+my21(S&i0i+Q!)&N{%( zx~Ha;>7T$R6eg@o-S=a23$PtUL;__}Sq*z2mYeJeVF*`KQm~dgyxUk=e1N&37B|4D zX}YJ4_csog!)YUytxyP1^UP^4tAi!#;n0uZ_&OLM3`)SXr_=pqJ`*(SFjtn5exZ|WZR$h_^~9BpDs z75y=o__pyJ+|cZTIU5{rlZRW@>DQr*-EWs*f4b!?89`Co`H1Z7?3?@cliCrBkstI$ zlH&r;OA{sPReD$Q&?CZiw0YuiPG-b(aJgzJOxjkIB(YfE`tRwMgX03kXxAg-xTLDOA~`YUPkoa;-7Ahi`Cth+x%_Y z({UnsDvqO1B`pFfEWh)(QeYwcd7^n^{E!*S%gYOk)F3)g#i091%B~w0^k`*cKAbqM zw}0@D<_~vT8(rvn6%d7KXhdJ%^Q19f26x(UA;9l(Rlu5yVKz+?oPa#6t4IXV7C`g# zk7OD^L=+%jq0>_Py-3Z%x2ky?7Lek-QUp!hY`Z!M`{#nbh4@YOhqH>u#l=m>ESO`M z84{me!Zb3X{4*~>c0lq60xj<0PeK%N`BK*yIYa(2IWmZUG#LnSGpVI9-Jc!^1hfhD zg8@)`!NU7<6@odWA9?n+S42lgIPi%Nu^}fQ{z1@9-P`_!H6)!9at$8(tmLffQex z%YtC2N4ngIt_!uk&$g-9|HL#up;6syEN7d<)_PsXk+Q7TU6`>JL_0ZjgT4m$cSRm7 zW60s}&ZIoh=26I{{*R3A2>z7gHbwTd*FVSaAA5|Qh1Tt~rj1+sxDPsflHS_wFh9@X zQW*iMu0Eip1>5!g&yOtsJ>2mT2V2BqJAPvt07aN9LExJ`XprL~eO{Po`d*}e$1gd- zdnv%JL{qR0a>R8@@w9U~Lp8V^mgRz)W-!r8n1TS!DZ06H{cJhfT@1`hu-2;Q(aVxW zDy+2sNJ5`nlF&r4LK8Kjii1 zKDje59wp*&nYqkh^H~4cT8~4jL7J8P7oK%BG{f=aTfP zL2p>^>H|#Z?kwLJMiJy{DZv!sW3d#`2tkcJ-t@XaNQ3n}o^jvw9?QPB-ujs9f&ZZ= z-39y$`NX=)#Sv^GK0mtTS{qR|a?;Der?LCv9ChRsuM0@csR%vmKOvDnKBJV&4JxQh zhlEImpW}-lJPzITAGf(D3euQ=Us4$9sm={hLNr$r6QiH2Cj4sh4FFp;l7Fu6BmMMw zVRo5)rjfH0kspS*Indvj6WIX+RJWv^wSba73WuU?Z9Jfwvy<(6@Rp7(Wo3rbZ*N@j zgskTqu_?}n+Ux9e!Oi7IFr%Dfx(m4pGCdSBCn>4f8v;o zJ|HYsEe@UDM#;U|q4mvcJ9{Oqkc-Im_d#%e+C!r1Jaiy<95UG5j$?6hP%14gO^l7* zG@w<10zD_MZU<87I$q&xp|59SxDm?3&CDkthlM`h<|MsKQ9W}|a50E^2^_D#>vOm+z30CAv>)RAFD%Mo zLG`0(W?6UnxkL$Fp!yj#({LrBx8>%bK*v^g>*{q{Gw|aUqrftgl%%bEr>Of?X5oKK z90V*g7=*Pub5G7VlV8Fha}Y%*7g7Y+D_r@2Mm{QeT$xfH-LSylJtfP(1O6ZH|8cyW z)xahp+~#>=T)YH8joB1SuX+}p=WNo_7h?$Io@2pi+7z5in7`ZA4uZ&)=p9HvexCbL z#ZelG^gjV=GaVua=5FjcT=kI?l#dT}uhjb|wYMtti?#8<%(T!9T~!AzT(^#^`BBl# zkMb?L|5?HVGpI#?_v4w7Ex$BTe%h=52&*!1Q!v5I%}5Ws z|0oVdfONyV_m1Zhha$K{^uA+9(QL<43FQAkHh#&E{2FOpg>L8`29H5NBC#Bv3uC|% zw8nj^2$HQ`UT=mTMS<4cTeOHfS#{PdR%Z}J!@tW#;=g%XsrpOo} zp{}J5GJ+?a>Ioba6O9T*%Rn(rEJTe`+XF!9T$TW2uah?DVY} zV6XUdZu8Tjoh|9n8!!5v?mEqLd)T;lGl6dHUj(!tS#?+Ytk@28sklS|0++C-^d%I= z4q)^zbKtB3xl3J?YBT3qJd*uHY&Hgh5W$M%VtSXft>@c6)DoiOzpfiiqCxu!rb7?j zNJfi??8iMpSm~P~EE3zGxwHO@xb~NmGqC>RNlCfMHSz)K>&{UNQ((G`JY8<QL-F#>DesWIZwI`4TCVVl6y(%TH*5o_=i_mM?q=R;+15G6uni`xWXv2|# z|66wnFp0C)b{f1x6-ft@&8A`yLGD+HJbkN`&p_+EjBn-Ee2zdezROo3&zn3LjN_Mw z@%O2b75SP)Y4on;Imk}$Foxp=jk!gKuqHhnXZ7Ot5t!Gr=2WcpWZuW>wCbDk0So7~ms z7j)BO4rA`{j_5#L)@*yCDC5o+;o>?te0l8e%*epw)?b}9s%dP@cscS?c1xJ&ihpvp z%$fDJ9NS-cV7k2>IMnEcXg#0ikD^S+7eZTwhvpCX;QcwZbZ0-zv^`%X%b@J*C~N?QS-q-I&^s#NEt3G=CjbhkB9=YgBEZ>#}tIuBH@-o19iZ+om|Tl}Hx*XCNh3@JDi zY?DwcYSVAK%&pDYdmY!7?0pvPxTOK>zPm9u^bNC!P@^xO`P^n4W_aQ0&~V{jEy51r zYwni2S`BFntpxa*^0S`Szw+HSDv4!$kc+T*I$)skq(mgF6myP#;lCug8mPVL)7>@F z+%|ITe#q_o7uBpmuAo(%0l*Btr;N)})F@ap*6lWHAk=YS1{L@rjC*6Dq4pFVWkn6* zq-reV2Fw4%x^_!It9FBm@ta~?_PIP)FpWhFx~uFaGA6gxZzoZQE6^%3m(?W&RSW^4 z!3M7)yrE+(SPeXQIttJs%wc7#<;7B4zihxM6Z(+71uu(>H+cQCak}K5o)KTd@#(qwP- z^TYbHT}x`{&KYbW_vJLG3pr6Mv+}xDOAFtKg&DnX8SJkql5TH1C~2!K0plI60(>?3 zr66=TpK8Wk^sU%*`>$+4!mSLGA z+q1?}pi)}lp(Duha|4F^4Co*t1T_y;eOMigGD_ukgy_AWmn_S09BFH!+vdQsH`OjT zNub>^{4+@e^l?c<($%JPA2n_GJ?5RuND)V!;9Y;aV}bu$o~{-Mk{1!+bZl1b%+B7@ z_$gjHm&|5V1Shnr6N~w^o^Ze+hq6R!HWU8B+`N*vDBW#1Okoe2HgFGLbrxAix^Rpe z-bj<STRWO7r(E-EB_m%+H)7=0a~+%4vFgQL~)_Lz0D}fPo!S33#9v( zjQ*Nf%ToAHWaUCk8VIDH)`CV zTmj#d{mbb{gg$2~mZ)v1Ba4m`VQMd7kt zB8a`=Jh%9}Yf$Z@G!dWcT;I)sCfezlk-jC4tY^se&>rMukyllI%XhP{7pG@^m*Ggo z-jcNAgax}O(y~Z0171RKe|t_hWX2N@6-4Wd7f$bQJ_7VI+Vax*Aye1ir7gUs6kEym zNVU-CBi6BfLV1c74hL2D)dWb@xMI7NC}A*K*)z`S1vg%T$#%4OLlg3~)W=lS>1q;(auXA*&eXL!z18noij&8KoBTU=e7rnrS;7#X2#b|jvvalk24mA31)*i^2qPE~k4N;7q7^chs5O%->tEO^H=7ewi%@7*>?<;{W zF$fAbd?GKsz8{t>6R(0CFAtDYH3nMdbjHMsVu4SRO-8+&?vTak+k?yFk(KlT1>v2( zy6kg?O1X3($70aEveNWP&#Fp=j1WN7K`@7z@<#rK^`?sXA64o1mMtN#=EM*y6R>PT zT@?`g02vBwSb%=2CuiBvDcWf1ScBjvO4tY<6MEXZ4(6!3RwC>oB?_DmnEn5hJV8mM zlV+lc|0N^}FjK<0?#EHhimoQYVe=4OqAy467idtji_q49A`vaQ>f_ATCT(WTI7`MoxfyFamok0|)DVHPa8E>6}IQ!1SP zhmS}rlA_m>xc1-syMV^2nL5KMX$XVAFR)$DbriD^z&T+ZRm*JsGc11}BFJ=ge%MJ= z;FIz)Wbhck3wR)SB;Z*RI$rTQBkT;kV-ZCn+H?eChRu&-$up(aP8{fmcYI7S@}r!v z$sr%mBPE{X*2>z(msdn}ghIAG2qi0aebPTe^gsms9ntiJa}>}ppPY-6A`X-`hT6kC zu`D0iW(FU8JFnu!n%&Zh(33;kCR<--&NAV}lRt3g4&euA1_eb4R>YuZzWLQ{I}35I zP&7*`Z?@QAW!UqpkuK9Usc2bMzlto#Y%2OCfXYSS+ear-LhO#I9d`Y18k#pl z@C#qMh`ObRyKw8Ix3a|ane~C<<@s45CBrYlF@BZ8kw*sHp3H_7YGoaYa-BA3b_K0| z&GLwAd=U^~hXIFvDxygSu#MybX<3@2+95@U9C*LUB1G_BmAX&l@J;if7vNH5svW)N z<@ndc9jCLQwa4YxTmrgDKcw1Y8@dG^h9u)Fq9UyS)<>4mrUv8zID`6jPnZJv@9$Wo zD4-;g#cCufzc<@kY|bK#;=01WsezeTFxaMoCh837D6UFx;lOJc(Mok6DOSN+vft7t$9SkVC(}0+%RUE^>GNVn*Hfu}~oJNMcpXEe)M!-z%NacJQq1 z^;*=`Z82-ZSh7~ALb;OhzZ+n2y(Oxcb#m1+@SZPLjpiiF;!;bre%*!6E46yTZS*lW zcGmg(w$_2eAAIjcJ4cYO?Ld1-2BlOp6~D4^!)kQx*m3Sb_-oC}s+=dkb-HC(!;*x? zb9vCFKJ*BuVsvdhNMFUrp>6vt@CgcZ1)@Mej$OddDpIMOo0EImm6URJaYTQ&J}_U= z-Wbgaai6H?c|~~_Jb_ebW{LT*^JP&CO?FI81}i_>>Ew8178&aAgF^6SvsRBr-NA~% z5wdDx18_doo~oa2lw-r$j|;;_=SY52_!(1P^`4OCZn9HHsLVljfAdjAx0LVzjDRwrHs94H4U9`o~YMtLhT;g1^_?%}|Fro0}|1u|*fiR*?WPh;eX_%TBFQ_~MLU(ofuayW7i1i9d|$_U+P|P9I$BofuqnW}F-PS$NM&xmU$xEgoM*aEp)Oz>$d4JK*c)zJ2K2k26I zI~Tl3WQzJau6bu!qd2-kUx(2}tCnig`Hf;m+`bw9U;ILcydboEZ**l|%ml5kQjFHu zQVN>i&N95HFm&D_In$sAqR7>ioBqBz-hw)l+losi=8PXj!1V6uZ z;SIn4;KG|rm9`79Wue4ImzMtIn{ReE^DSt^%fd`~>2VCe_DJ{OelqT!kh=YE%BMcF zE5yhYyMfV`k1zgCXobhoam2fj7odF@cf-`S;@U7>{c*1XogS<@=k^^)1si;ytv$UOQQ!t9Gv-; z-{$I;WzBJ!k-FATfwdV(d8+8)>e_|mkD#PBaC;Mat-4bE-Epj&9iom4F1#%t5Ks^9BdNV=xY`o-eF!#od{o#th7 zoH;)}hry4=0}t>cxsd((|M7H9fpM+vI+!%JlZI`~#`Z8o;8rm=0?_L=Uz z|8qYV^L?}C#iP~6<;v+z8^%ki4{sqsD1Q45er5#iJfhaetTE*f{ywvGz!F^g_+4$|7idLn@WsgU-4IM`@X-@fD@wvDu zduHY5uMiaf#T)}Fjzk2Y#hqsI4)SCGGw=|jk1Rfx`I*xISA;#{Bw2I?s$lsp(H_! z@at|*e&WX|5ebJ-5l;h0mL7>hVN#vB3mH|8rF`CTg#zHsVni$h5FEax*4eOWNX8r# zGOqG#O9~9hKT90d%g;9pPhhd7)YX<5esmeKjF=j6N5o#@DVYq1 zXJ9(%tOTpn-Y8m`%F>+}U&JA9wszksI1BY@_q%S266cLI>X61BS|m_ICaO?cIvoPK z+lY4cMzXO!%9gY~zyGaA8u9zOQbp~T=>Gu1o>%Bp>h6i(Q$ey_lNsHn#!~P&3!=0a zX|f5LPJ$;$VCW|wEbZ zn7bk(@d~A!_Iy_&=$qY%P9C8z*sq&(_2ZJj4PA>-w}tPj%xXjQfCOm zH$k6$x*{T7=txg~<1RpaX*=7MU2U|et+iUZ{q>fRaf)z9y52NUp9%}cs{W;fikV#d zU8ZgbP2e~&Lg_lmR@)PE)qphXP8J?8T2I&gY9_0h@g?tFnhA6M2OxJ|-#h?!ok z?iz8@DgLZ_4xVxSHXw%H@Ue_}ypEQ{IoiP!&EJy(bw(|%deAle^(p)tMYR@JO7vQ- zZ)pD!?Dpk$*5=epg6UlRYY>O^vB6&f*zS4Pv1#Hz4Rg!NAp$N(*Lor&+D%q=(lRe{PnmjJ&5JU{}fBN_{q=uYX zXv-J8yB*quG>ye7R1O^vX8;)$^Iz$M7Sv&Q?M|w^VLQu*e5dJl*7yHj4}sSs-bwyCNc`&oo;N$Xo|Z=QM&c@d{P!E@P6)k*V{LEIyjk)Y|Q1w)pympNnXC8<<_6b33z z={zTpgdn6fS&ENn(%O3hi4YXKHYDnXR@2+}Uz;7zOr%wa<2cJ%%pJ@|RjbTKnYmJe z0Gbwe82Ag`_^J#{K~VM~@}$H~ziD1Ri=b}TQ*@156V}I2Ft9Gcp1ZA4Wr1DzHf$!k zYl2zQQb!?TvPnAW^*TV z_No6jR$U2zy*A8=DgE*^&p6yhzh+FH#ICBS2_HF9_kL(95=%H>+E6+mpG;$phVCV! zQWL`@@n!9^POQq0HrY;6n0nG`o*p};ll0kAJaHM#B>IU^Df$8tQje04F3A}Jw626e zv1voG$dQk0yCbf1HH1p%FHY&Q9>_a&3xzZ1s0cA(!xuTGC+X*-WtI~a=pq{{QxdWD z(8YAQzdNmDZDwH5r1x>>`}$rY#U6+mEXtX2x-CwvO`2Fdf}Xh+oOgt075IK>>(KQ{ zq46HI@&5It#_etnMxyJJtifrRt2Bva($*BGqdAo>L&yOn}BP*?qPFojKBSevxij$Lv$`_O5baUZdJCN+yy z>8UKeM9KB<4-H@bDu+qm&XXYX)4IkB_g$Fl&4z}1)${ZsJ#y<-C=gquerk9VZ7q|o1ZjZ0`ktnP<-#xSd=66gumO>d3p}q)Vwn5D32(j zXIB92OW>@49U--RlMc6Nm*jX0n?z;qL_<9_W(?-P4_FRKO&*(cC5@oDGz~;g=8_ZP zJyuzukv{F}ih~?fPeOFHd7mVV`I$mtN;TsX_C_#@B^5WyBLz)||GXMR)_2vKJIJbo zK?e{<`Q%7!>PCkRgeDq#Ez9Y8gw7*WMGA9XH^RGTTGs_P!-L>B17 zskN^kewp3N*%WD>=#DLx=?b)IWr%x=l%FFT5c{ISg#7}a%ipWq@Z1?-Vm+rXAONQR zVkwVI#@8bVEjC-I;xDF+&Kl_AJ=jc{(si<3!ozmxBt`#<1?!@Tq2aROfi0SAMM#0Q zdc>11T`#om;mx77pw;RI3IDd3QGtC>9cRWn;Y^Xi^lCkkN z7NxkdHLvm}5|ojI=F5J+GjzCIDMrPBDxAKem0ZFw<`Z$+NZl=2X&;Az8tk|`?tm+- z;c9Lp!&%S7N`-6P@qV)n^ReD%a!{u>wYOT|CBA)jgE83`LoSw3$mTdzJ{zQGXvbPM zE?$7xoS7`i=HrN2^ospUR|;w$Z&OtFD+QrpKeqeBGpm_p4`G{LAIQjhf|S$x4!Iw< z-|5pPf6PzSsrZZ`dA4n9Qmp^Q< zhOB!g9>=3aTGQjBFDNcxr6KXfXgk%GY4_b4p&rw?Og3wREQGW&1-@pe2``Rg3#Ux% ztScge7*)ivd?rFT)H8TB;5=4x4~|B$6U{KS zg;6x#V&{q?9npi$Fda7>+>UZ~yHf&gp`mDwi=b=m4O?GYhzJLDT# z_?ptTlR5e`V5dP46}@fn7r1SwJO@l;OkDMJb(cG5GC;Rol4T_%Wc`S1jOqi6 zeD`p4P$%2mDXEPU*Ms4-Vk_u@ zo2SzbZN~BD{5V?}P!(V@E7+YTt(+M$*DYD|_9>%;<-G27m#V_Jy^}MWW1^y{?!*ld z5@g{dVBRbEXvXZiYk5I)H`H1j1H_A~$q1zr?zH<=JVif{STl;pWy#WV7K6%ZtEv+_ zM}I}L{0=juLX;1LPGQ=`sm7ruD1397CwtWuPA_yaE;)X`g)~cQqWYM{s#L7EDGWn@ z6;UkXeHDDRsewnPm)^6)FcyzF{!Sc_|L;2Ygb#`>uSR`pm8X?iIp^Ojgjld2;ClP) zO8>rn>ctRRWJV6C4SHpDAcV(=w@m7h0AjSayARWwziX7|p=-C#ML*Nn1Bwso!fr|x zG!i_+tHPS`2J9`j<%@(5@TtFyqM_1Lg%Ne6;s2@_9g}%I+s>YG%x5s6)#_s_Dj>`VPL5ij!0=@BN}gus0`(;02=AfC zqjC6mCee-cFfmNjpU|GcihLLl4Pt6&@a;T8))$NR;mT1J${$UPeI&K z@}maWawhh17M+~*abMPhjHQdkS^K{pZ+r{AK`@!wy4@wZ2QH~W9;1uiUNMFpevoAojXV}~d)7#L zMGhNf~^8LWqwHk~cK2?ILNg2tP?I>mgpxRQ={+j@VV zYgFCAMk{+YmVaY%4h8wl2>>c%rjP;IZ0%tLJd|ENq0<4J24We8R+UO00vAXvff(v^ z#2ikR4ypL1QUS7AkL!@-b0e^`l(cPWZ4;1}|Ncl_{Cv$llqPOUi3@Lj%JBD;Qu+6p zy$6TF#UXFTi=$ZI`dl8{p26q#aY$l63{SA0-9$DwMygxJmMqFh` z^7;OJAW=*Ccc?hZ1Oy7Fh6WLhV()JljuNPmkrW6aRu@e--=yI#ho1%;XnYPW8+krW zkn*z-5b!D*Ya+q3H_pr*x&9Oaex%ypKtrUfI-<0v+I!gRDD527)_9g=#Jr7S(s~XvGa^X_{%jM9bSxoGjQK(KvmO4^y$wU|&}jW`Ee{OSVtw5&!7n5XR0` zGAzmIZ?^=3{m%b*lkPkhSnm1QE1VS%rLd=o;pIdPW)$&;JqNjJ;1cKKQ0Z|c_sDec zx%&+GsOv45w9aTFn8dXdJn6TB;Sz~4R0nwY(?Cyvev!z3Ra#q&0Ou5PA~r%@h{1Hm zfgidG*Bxm4Kpy;Vpr^djzP0hyH6J^v!Ew%))XC&tbyubMfQjAqgWrN(c)&1k--yUd zoGOiaY&9*qkuRCo=-C7gF>gF210Zt)xzJj=$(}CT=#6qoO9JFhid?7NOvwnls4vG? zN(GK`6h5-)#c(0t;UrFRFR$p!$5XlWn@_9d^Z(| zT7irdvAw_eKJm{%4U>gh#9c_7f!U{)+5fTt1Y1*36j+NgtwG0(V7yeIdt)Xo%x%{5 zA9LW#%DfVv#WbjB>Dka&cN)z7v23L&iifpMi_PIwuiq!RWdu>>=k)VrOgYem_5BErqCOMCnq#fhCy`R18$nv2*h4%t%GG25xPC z74&JBvM^BBL71#{5a~3fyRsm4kEYmkvfZZmw-~{QID&!EK&Lt@A(rRLjGD;Sae|19 zZfLm_H_i!F;Wol^hm_tahOI`yySm4=Puz%8}ai0JrG z{dnrNV2+m1+HvXHB8mnmoZ>VA&aX^kzuyX$Bk_uMfj4-;wt!WdfE?RP!bEd5;>O4& z0&A5`+neExXL=v1{ORvG_Em#NFUUt8-UqLK`0a#UW2*s`IqU2biyHIqfbN;%o1J|1 zs&&84)6egtXBvkbS41HL-J6s4d_JNV`87rWEpCvQLrJ~1(ReKstI@pk#^{wMy$gw_ zMGHYyEyip;nSEvU0LYI00tz>d;~Egfpau?#ry@$Cno3|pPM8pl4&)4#oKu0f84{NV z3bjN^D4nM`Nku~s%VNDO-^eZTYUw&#Wk@HDDkY45uG<26AFJKZGf}jn`csQ8DVo^# zn|{6coy&3ww^?!Ozg2>-9eVE<*OlU0E;NQi%=M)JTD4UEV!FA%OHwfko~pmC$GGRv zki`$$%&o5Pz;$m~?kqSGnz)7{#V7MKj+Xs7nyZ+)sPcV2G47-uRrek(S>_ByyHNm$ zty0<9K?7DK^t?VmH=2|XQDyHTFf4D2wMEn1dm$YlBvGri8ehApf5^NiNXqfEdVj)y zati$Gro3_on>iQ$qh#=HF-$%P55fVcc_x<05%RP0Q&JOfzbf-^Avj#?q3#fI{>jB! z!xAP?9)SQfP+A^EF?0gy>|(ozp1_(jN#a<~IY#DSc~Ys^ z3ff|Q&`PU!%+uZ0I`Sy3e;rK)be_+v7Ql_p zDH0BIGRs@{8ua~%nG;IQ{JNbj<7%W2@z`RErv!pO(!u4gUx!Dc8N-qfwz(SU3A#%r zzjAN@f#qywsfms0x8VocWlO`RrFK5Va!8`3ejBLb-7G-hbNy2vnomx)Dd>~$O|!c5 zaH8|`d`o!n9rVsbr_JIt@Qw+{4@26L@y+d|0z39uBg4s1G%n*cRy=Pi27SKti9-KJ z9o#~vGNlSrI>ip348bd6QY06uFshg<3zrp!bieDE`?(c7yUY%88mqW(RY#e44@Emu zOXp28Fy`eQxR}?Sn~|MY(2Wl@^4!GV?!{37pOT@2ADRtB*9QVX7C{>dQ`-drjLyi5 z0-Q`r!aHkVbF#^+tf9xsVvhnjFJm)|rr@Gz>_<28x?}0Hlk6K;qa_$@@EO(UV2VA2 zk8WSO-nH*xeunBWR0i~jspUrbsb*?ICC_8PoqT-M)qjn88X$!;##KZyU+KNZe5{$T zUSOU~X+F5{Zcq8Gn3Kq$F~|N8dqKO%-@;NuFoD9csaV?tv%Y`-a4jg(==(V(g2e%` zqFFOd1u7yOQ48v(Cv~S6AaKD2Xy`FBKTw^K3YlTs*%^`-;+(_$bi$0f6Qil=vZ&o% z8gLZv-WtCA`*`eXwG!JTGb*GDMH#PFUsS*!gxHKZT&7=yKe@Q<4vz~B^GhqRrc!SG zYa{7lgx-1SnZwqiFo=Q2kb!3RjU_~V}n|n$uj_s8tk9? zm*@-UOMWv715WS}MGq7;R8ldg2|c1K6sw4+&hAx#-^p5gr|?s^IytePKhKHeY|TRq z&j2)J0>Q(E?f*>DWONg^EHT^CUKf1KX|4do{*gQk+op8IJ(3*8yt2QuKEkdl(1y%<XWc-qD~)-+8)jr!3krjjJ! z=EYNg*?|Ie2WtejHs;gBMiUusU5%k|_{xek2Q~aCJMMB~$n*qJpytZk*^6jC$rj`jtr{LWnZ^ z`veaeU9zWj4t;Y);xA=N;ybMj6~P!nH>PBXVcM+(53n0RX9?+}N-w@5%vTSS>Pb@q zV@Aaz;{;J;_^{;qcH?psu`K#|mG)SEs@C98(VHbzU?yMO@Hb8CfQSuYpfbW`oo;Ml z7$Br%+UcobC&k+53~6m(=>ma0ufbK*dejQPx?Y;Vfz-7< z{@H}t_+cxN?!gQjUL<=M=BGOQ;s+~k5xDCr>duqlOIKCAHyA;lM^+NCWnVThmMCo- z`d`*k3^DL@CZ$(v+`Y^46}9O66<=zj9%?Mo4*-g0Uec1^Q1eRv&dX|B;Rkh<)C6-eR+)Jz9T}ESDED!!B*?&D3WE<#X zrTtJwi%jVW;X)5hOfCin`O#Mp#kOilrDY`75Hw>s@>;Td#gs?I(7n&m=hW@?&V7!O zXmQRvtzGqI>Pu$%oxOKf6A0z~;qa-2jOoFP)a1!-0T8vQM_Jo0f5bLMQHQ2Bh*)Fk zVROpUQi~3rf^8d~(|`UW(BKiz)p*~FM$qTQM5GukP)EWa%cF?|9*9c|0c7%5qu zlIAX1w8tU!h!{rW&=wBT;gEpNPPyNHWJA~*+^@BLSYFl6INe{Qaya5W+grlZN|p}5 zXc?#HZXgQOL+t#KsxpK`j_l20GMIcIqm)#NFah|*Bxd#H{d}j@gnMghAK>pKU2_Q4 ztKOJ-5IUN=Mc&3^L0b71`KgU}DWLg?sdmqta`1@ZZ=k+I24)OBUc2 z8q!e2^1oJz4Tx63MBcx?{6gza$t9b*yf2_;460_el@7)LOf_rc>nyNLcEm*O#XE1@ zh%cTp{*n*&vAe1ED6QOAGG_8a!8}COyKxb^-l`K;e?pooX2cM?F2AI>!^#W2W#wB27RpbfjShR+*fbaat0Z(nQ?PorM{I#W`KFq~Z&H@h zE=M~87Rjmv71zlNj+wo5NuEpv_88#O>bL1m3wpL8Lj-6~;T3sA0-O0BdRRx??L;8+ zLmL~7N)Vb3D(Ny97poA^T|gU-^lttEicPbItp8-ioH2A;|FP{1GjJ;IZgT1fl3i1F=%mJ0XuLWG#B9OhLfc)X2igSv)?BA3ywgkQ=7+*0JE!|A^ z!~(CdOUZ>wC8RtMjzZld+I>p_2K{`yYeYRkd_+1@$ z6{^4VwZTjx(pxOlSkR6GQ0w-(Mlk-|4v)-nPg3ZtXyHKL$vT$7@Qz| zg$i5ijzS^tE7bk2wjS*&LQ%_iw%^;~U$JT4()Sj{9`73)lN%^mLN+jD9HYG)mpv^P z=`f})ay}Dpad3;o7yV;cnT`cMa5?j+p;RVz=Mb$a@@;7;@b_@nlZ|uX|L$TyKUNxI zSF*GRcSGnVZ$cj=hnKT^S>cWi<-Jl2ANW{_kkx1jLe!gG|63`ZfQgJl53MEuyS9GayL0enV%2-LEeJ((Y&pPJ_ z1BoZd#{&WcTg^UB%F6PXiMf*!Ms;ZZBwaz`{C+OjN5sMxJcIKYoX!@;DLgRNp1_XA zA4aqx0@?KGyn99=wSCkr_Yd%_<4zm9{oZAl>QW>8P8cw}S?ZRVEBi#0{<16D4y}T~L7fz+u$pcb&rY{w`E(vbj zwx|;=lp(P?yudqCrETE22??v9;a3%XNPI87tIVk3ka&HC_;B@XYJjZ#Cc%ef{i3&+ zWX|fUkRbJV@%UTHSU~oh_W<0)X}Q1ZxQ7#WrT%#%`Q?R4y*cfo_ENN2LL8;NiHPv* zmQ9`Ee^NqM04TQaYyfj_RbXL>L_Vmzed>6nSwK;c5_*O}N!j7+yCO9&LD4JDAv1>Y zc~$?-`Z68%yw$pLS(w<*&dhc0ms=!YZ{^+-(ZjhQA`DPQdy$`6wwIVRVO(9)>8X81Z@>)KpwU z5K_b6kr-9NZaQq_I^&Wa=)L?H+C2_3TacWVRkp6@hHb{+mhqFn z@D7w6Xl)cW;}FAsNYq~ml8WL9|F`6bfic(__`P&xU;6O?&shvSr<(T{d-HKRwRC}u z{V(`m@yvf(G(V)ud{-b5`KblxUDZa+r(y#C`TM947ZW4z^++75m&e_(mUM@6z zMdepG=ZibmVbFNC=pwpr-Qqc3j+SO3azv5~LJZ|zWa6feo(?V1X;Ik^siFEGYS24r zu6c1yJ1%^&#Mx$T{whZk64FeyvX{?uU6%9JT97XX=|5dHR|H?y%F|uppD8?0Gv_%w zW~oAV6OCI~jP9XAio2gh5Z*?0#xylYQmHnhShRvkdgm|^^z7z{Ec&W)*DE@IXCq0Z z^Uh@7xvj9}({?X|5NAHwrl0Abvl%LN%`1HO@Ejnl$y5=?kYI`+1m>t!Tm1lKJII<>Ep}7a3wdr*GT{;!;7@&25 z?X;SK*lKY^ELE4UAFwa8(^2E)oNwwIj3JtNe$B{o6Hyy1mSdQJOkto5#`t(PD{{2T za^@7cxgVF|tscFx1~u#iWEoW$I}|%me|2c}aUAu9m&Vx*7=;RjJZY<&WpX6kM8uFA z6m)Vise%)k-{1%KJ|*eX(G22@0~uR`2XEmcq@;oxa$g;8yTl7DUVoi^$Qp9gFZVTU z9Z$%FQso2}tT8-MynL)8wRT>G=@)n`g9ct6vJdblCg~ASZ9zv#5AlNBe?eR?_7=gmKtkk?KlI>zRTUzzwA9 zs~{Gx#o-;GVB6na9n_zYZXKSm&()v(fo)WF{ql_yLk)v^IZ{0_#D}X8$K)3X-Xi?MSi<}=MzsJ| zSKc8U!-QMt!$wo8RPj2E#fl3tZEDb#T@eKBFC`QGhejcqe`NrwNJtnateMMFu(Pab zZIhJK6;AHL852&$0n75~^NP)2tyYrWy|#xpmp^bpYsb>U4ol!-2dhMy*H%(QCU$!? zi64T=nI+Wocp7#aHNwQEiPGnlIokvjCxMwDON^JWCiC(DCf$j_pX-&;sh)#QLW)#U zi-xbP>%C0cJ^Ud^p$8#3)7l8#&XngIdW)Ca5V?*PM=6D=R5H(Oq%-? z3l-qrKkO^GY$qe6ZuMjbRZaq5uYXG?zWi)tq-st||4&^6q+sq^P>d>AiWWK3QpiHG zXJ=n7BcoF@+J7T~5*H!b^0K%b5N9tQ?6Z{sz1Rw1XF#HcrVTx9aWTf&m-^#M?%(_00&ts9QY1>QN4M9FgAs#MTKY|f0=kK}5Y^gAe5kVbtVwO~#Uxbi?4^4iS zWk5?Hd7JHlc(jS)VIpIp|Mp(h61o$?=2m#JcL@A5)McK-qW0lB%_@x2DobU&-2=#% zir3H&Zrinvrgo9qowLKhf`|QGEYn;Zg!bb^g*3x#NKF)8$ANDoxm! zvI6^iMUTKFvmtjcUGHvXKq#Ckyfqx>_08?e$M_jWVG`Y~3{sY!qB*Usb|Fhsqhw`Q zm}%}E>w$*ApNX$dG43q4rkc;8n&)a-d(v>nIb$IU`AVy2;OJc0Ky)sqTR1hh97-%F z#$n`cUS-AK<7F&QIN?={|7nH?T$`EX?i9pt%mTeo>ghbZ?xx)9-2G}eS8b&`gi2G~ z@W+qL%kHLvtOj1~Xz6#2^x1M)TJQP5tpfCP?9_-5!N-4zt_S2Ljd&ULc`LAr=VXNA z1-HsSQl!(n!1C{^gK0Kd>Cv^Hn8-{WJusx^%x?kF{HLoXalDcj%b628iZQ5kgtMMe z)7*H7giGPvckf-95a992Jpi*JM%bM_yI^Jxdv?6D39X78#T3gZrGAc$=tPYg3!3*d z!#nXQ7R((pli{TK)K%j{eM!4%-PCT?+IU|NG}|rxR<{lx!?V&xrS@_Q`K4{y+59RB zKZVG7_dXM-#=Jp+o>?vkIi&aR8Kj{S7p^a1DnqeQfj;msrp=k&>8!Xl#QDWmFT4R9ODv7Z}Ls{$1N~~cUXz4NqWN@)7TO9!-R?mLR z{VU>H%-w?ZJ1^T(oB_fo*ZdmL=_iTM1*rdvAzIT06t+s1K1}jPL*SrQlMv1zK<5yq z_oQpp=Jd9L32&?&2%C9SqNmYHdK@j^AYyt+p?LQOC|P_zkr(X@Slc}%2?!K+h^3`% zMB2eq6X2;~O^;AqO zR&Hnu!lPGm8uK^#S*ClJpp`b3{EFvi2=iQjfpNqx_SEzUXftHo!vRO9Yrdn6d4LL> zk((T4 z`$A))ReH_qU~vB|-~K)FG{uIxDRhu8Qeb%fn_|ikwA@P5~m%C z`p4Ors|+UIIC#^xgw@Gp01{`S&-ATJwEUc7Rycu>E4;FK$HMHn22eu7OPd;C6h3ZF z+s?RZf4T09iW0xYv37jhBeLk^!yq!8UEQ= zf6Ns-RkG03PBjkkt%{N!!#LYvk%^x6AMTEi5sV@Bz%8le;V^(b8W0}lTeE@E>J>!D zB|orarx0f{&pmJJ8+9&@_p4P@`WP0zE5Rm8)Ol6`s!FCPMR!T;l@aFGOwUT(i@^w6 zip;5sw?HX=D?kbc@Rcl9McGV3)0d8Y=-JI8B9G|nUL?43Sl!1Q_AHa}-J!ro+o%eu zj1POiKV8b8iB zG{}rqT6U_|=I*F!aW$EL*OUfC#*nc<04qYdBJZ~$t&$l>ZbsiIQb~V7o-_%PNjoG? z<3HA=&82Kp6Zez`}8H-BbLnU;DBnw4}#_ zyx)&93u(U}8$8}3(CztUP@zq9fg4^qlhE*XR!CWI@0EL%7@!KLxzBt=s&=zJuuuPp zsxJubfbcehb`FxS%eqe|XYrKzQ08(JYlrZ#enj^}B2s0}!<}CfzBKUm*2sv0L&tM@ z)!&JVu;7)_8`4yKNSP=DiHLLij3^n?Q8!AEs`~I2c^24lp~y9KJ-< z8H#3mb|26DYs)B2;EJ4cDiyKkS~jON7&;}4yQ*v03-1xD`sIHaSxL1s$zmswjG zJ&Q#^K2Q<*xRj@O_kcVBpE)XE%LmTwH7upUU_x*F@nVt>LDJwoyw5~PT;_(UbFVH^ za&T!SFN0>^!}@QWm>&?UukZ(_4R1jmMo04H%=F&%efM`$Hb^Ni}=khdq9bx9IWC#PRJp8It zbdWK#H?WEWHSnquh5grM^*WX1QYAQKlLw7a zkdpn9%(I*=4qlxpsfarYuZ&S_A($}&XCq?YW;;l&W;eb(o*Q4dWs>;k!u%C$m?x^UbFvx(66AAHD z9!fFNGC7{!Hj^4f>JC&IdtiFO!j}x|HTd$p(Kf_uaj+Y|%LJ?MCyP&r=Bd6wIfj>g#bQHT z%Bmc05fD&FU_OlU%!0&6Je6v?9-zVwDK|xl0DXjX+Kc_On*0n_s=rJ;UA&yJ>v2UB z^=bJH_g{z#7`I=j{FBLm=GRQD%?7Sj?d~wUi_mk$V&Inba}k2mhc^4G(;0Olw*G)OTUm1(RliVhrt<6DBdQ|>n;On;E<^!2E@!O@V`cFVqUa)+^7>&TEm&H zR4=1T#20D|F%4a4eEaQ$;D_N?+eT(5vlUQsw5qz18`*n}cKvzg1?{PPpY%Bl<`+o) z{W}qr-}l(Sn#&f#2pl1ZxsjS*l?y6pD>A2%V@n^BoRx#eY`dXotLEC_NO{C9wclu= z0L$NF0rm0l@v%u~-}ihbE?q8n_xCG@S8q1hb7VI2FlqWUxO4hWnuI3;{ykTU9K+C`wpz-khfP zy!h}g>u6vJZ}haTVkzXwd*y1u(%N&flXL~w6c@VK7mYuocrBqtp|0NL zx^|iyD&Qs6nBkQR`Wwaopy{1--O+zFmtM%nfK)Y@a8SXZ?J(qtvlpZVwn>C<7eU)| zkD#W20a;~z1FW5QA|xY#sU)|e0nl}E6#@}M<8LHWZgJ|-cfShd*MF}LGWgV9{M9Zl zFSaLa$~v&YL8-j*N#=S8OJ6kwRAIaNLRv>ibtrgfkq(({HSi)Ve6K+ZR5}$L|5f{% zXGtps9JkN32evk1&Y;5>fCVZ7asd|^aIRkj7IxR6uw?Ru)E})};e zz?5#|pbXKF@+7F~K&I442&xisF?F&<8DG(6W0ZB_`rKX){6W4^R85_lMmjZaU%NUY zr5+GKX5=66<)&Do7?>lbRBWuij|>t?Dhik__P<8@))U9hdIE(TPBso+M0^h*Y@B<^ z`4vv-F4&;$#C(MkcEToEzE4=~P2*A6e0(HB19L3JJO0dtYDqI-naNnzxe7dI5>RG| zTXE)+TY2fdNcbyX%oz)M$*8g3Itq>bzIcLn5cS<3##cGox>0LDnJIir8i8CJ`3bFI z>b8iEOxX9!^xI+k&XjxP@TLb6ADyDW-LFVQdN3J(-_Tm6mwan_Ab})crw`$2FRA^wPMSD^_7dHs>Jc(M?|} zL=KBBIbZ&fd}H_$SG#?W^D4t{K~LPqSDQ$q-8Wp_@&e{~>R6y;e7*veatfXvtS-*} z$SqQsU`mMegjb&90&XB_8<8TOhM0<+!d7O zmRG`TU{eFZg-%ozRQ!%v*XhN%1{WeDmec&f?CI4L>JIC^e#)kpy9SbxG zDq`jv31aN=*_YkxJLh7}r|axzs93N!yNL|DUsYhaN-~8DGLp>QV6?+fcVvK$X`Hof zeTI)xUih%rlWZuajl{93kO}ABlQiU5ZB;}{_61SZ5fDmrl9oQ)xw?xmw3roaq})^| zj6;4t!F){L^q@)lTDHs(9fjbvOb7}L9!W6_Qah>U+cojkhr$q&mP#ETPiaiYnYEEZ z*W9-BrWbWrPbs{!%_jH{Ls|5O!S7%n0OlP~Px)7he6(d&fx zcQAc@zzZXoK9EE5>c}j( z%<|qW=H72A6bWnZ82TgJ#g9GJ)zO(>%#pgVK{xPh#&&yW+z6}u*P6B+dbEy}e^oD5 z2p18cDw_>)e<)huevh*kcD1GL617Dl`H(q6ukcQ0ib-8gE_eA5PoxJ6Y;v!-pGd)% z`v;w!JR3%3(0nQMZ`y5#NDyF81J(gwUpY#RB8BS}Z_nsW1in4BkaF5Q)ffW^{NRX; zMUAbieU7IXG_oTmmtJzP#6|UbJ<|pwUT!yS86cgZoFK!C7ZTy<;UpI?R=`3#gG#GQ z{}q#3thP$RiQM=O3KN?fvHvT;UCloZfFpr$?*Hk1>$P+<&A;?KO?LG=XVdR)49DRF z+^ijrv-u+Ek`S$U!;O>c?ma{wKuMr$!fIaiXjbaqK`Wd3JP=T0He!7>mrx(;-G)@B z$tCyvPPv88rVPK%1_F)$T7&j8RxgaSri7A;@CHSAOfK_?u zjW_G6?`)AbA!NnF!sENf^W0U3;1eM|bCRn)yK|S@`#ao(SY27w5o_Nt1-3Tw#0{9v z`JdkSexUum2R3{)y*(QO?xd|z(XDr|V+*FKMFM|M#J?cfP5yy>d>+OkZ0njtY-qq+1AAlqNtDVY`RKNAZYZ2US)Qp?5*mbd`(xuBfs z`|Gy4ufBCvD|qf~^o7{hon6<&MdR>^Vggu@?vyXZG#qvT{pTg{)R-8} zPaA^Jv>ck$U$PxHAGd=DsskH&Kajb8P(Y2>pT{PdY7iB3-8LT`U z5M+5L9CSBFa{+}|2MLzA5xSNxj?JRlv>>L?ycn}(TiS?6j=@F~H;gbg7&Cg$3-Irp zW>yM=-Y_G4DE*8I_c*J9QgR{wp%R6Hh+fUr9z!f_0^cU)u^G6z!2<5hWRo8V88gpR zPkc7kjcy-AN)(gi)W7Go8VRTnSI-DCUW$KgpN&pXpwFkj40Tw`1Mwt_mu!edo)Om8 z)w+5JFPGq|VI2bJ9IC9>r29j-Dji&sE2*td0f#WaCL#RAgLv0$kxxtpCm)}o?LH<4 zcJkAcwxOv>Ke8H;e3~=%?g&S%S__-U4f)N&9y0m(OvU8-cGBVmKkWhV#vpy0rIrmOynBPLd>B|@HGXo1`x z!+peW1pYn|>QEV@yn^L+!8LHUmBYcGQJQ88oEyx@JMzn5p!Q+KgBV!R1FD;#R$Z}+ zG`cln)y~2iD+(h2$JSTIMfC=4!?JYk(%rqZq;z*9-Jt@~-QC?Gox;)}-3=-!rF4gM zO1|s=d7f|Yr}ORn&dfb=%{4Rk8C@<$H>$v;ot8qSzzc%ZFnySN1!P9b4QzboRo97z zG`8LQyx&K5YWRv!yeEo+zOCH3hAD}-)aErRwRV5Ej=na&JyZwN~5d1|M7aE11bCC z-va(m*ly2b*OINS@yfS*&lcf#3|NG;u;H-a;XwujqqR`yj3H7Gihj2DswRr&FWxVy zQ^b>Je|TDdIFKK1Vy)^JSJ-sz*|uv3rPK22LN*nRF?V>GaM@H8*35yQLW4%CV&|dk zljR0DyD=K@Jqf*9bb~%@n9&V-MDT=iW)ZJU*FJp`SpE1)81Ne~1~XvL*vl)VCViGR zdE-s4m4uaWAD&SX_DN7XxHWA# zPYy8=AJS4<9XX${ZN~K#z(QA6mqGATb z-K>3%Se8&HC_n4-a#ZuW`JH;+g!(*PV@&O=bU|+)JDd$_LpCnvHxjCFjy8GRC5~6R z9*LE2mTOq~*#AND&4&)V+e0O6`lWB40?K4UE21^7khmaJV2=aS0IG;V%hCBT&MMlv z5l6_dY}y7b-lK%3EIM!AdDEfgu-qmkecOt;=sK^o*j6@Fbd+%^t3sIVOp zS}-i}8Y&*{Er)-}H~gqepVjr_soOt}z4mjsWepEmgHnzD$K|g{8z~pk=-pd5@$9uB z^uHFdML?4p!u5ooa(TyV{kKMP5#Wc{#{}OFcAEj+z@RWB`ZgU`G6nUdC)dE5;+-P` z?Ves^cLLq4iw`wzl6EHbNE27)vVHGB(_ZqxmRRbt{q&BBX)m(Vs2~e{Qof}&Yx^ih zb}yE{4Pu*5OOuaEMc497*nj#xc_uCeQyrQ6?w^wd4maIzXL+61OepbiA3C@E{cTl~ zAUBmrJxdEHAB~DfCx*9b4sf22h?}u+&)_0JHQD{>BP~71lX_y1{z)(0HZ=e|Zy6x% zZ&4>&?LrAauwZZZqk4ys*;b12wzyN(MW~X{ll6Be^Q0$I@$iV&N z6bg4>6UbZ6Y63zbBQ}iGmlcMpR0q4!M>|rgv2#bl=F&-R#4dytOIo8`rpt);$K>r3LA^eRM;%*dH>~~uMmUxcLEck&41Yz+r zhWXeXq5u+l0lJ@*uJEmPnl#4WAR$eEGdDE@lAj=BAvk~iT>Q1Y!m#9mE~U{H_qfMb zTLRc?f6|+?U|3xkwmx>Pz2r%TppVJfaiMie9*0L^cL)GHkw&7yvJu}hpEkKYu-z{V zPjd#L8qM_X7jr{QjDL&pXViQ!cHi-5G5x4Gi4T%bENyGWrER9p7G}`Peb})`E~3#9 zQKi-{9AeQL78`pl)V)Ek`|2Z@r;3t{p443x#nH$NNP8pT4L1UmD@N2yPDB0jSl`C> zPm=4&-Sa^hJF}x+c_^Du07NfW4qMjRU5|@yl?eICu|mK~(e?}ys417NN)qcx&+4sl z8h##d4|hR(v087S$O9|7R%+**@n067I8#%?q7O!JDJNZu)c6FOM?><3lMy+5QPx4J z95?U#K*yb6lnivSKhI`pY>x$mhsj4fZmt1Q=4IQJ8mO1#ri-jgzV@qgN^A!r5t0s# zrDPgD!`O9shsI55?~&=L^>8?DYH@ld7Ero~xX`HxRxf8TNo2qFZ_ku`TBTb&8$ua- zH7E-OqgI}eNyVDIKf-A9bt5uUb!`(I!?qYzj_}<}2qhxE+!z|%tZhdFAV(o~5aAne zqew@@5GomogS=MRvL*TR#dXBkz|I3ZXe#V(@*PD}tBO#8Gj0HTwD`}yA`7E=gd@1I zn1x>snd7BOA>*MgC;;7GGkN-IK6I9OV4D=CRa8ZSPASsR3F`f@Zw_l%h{F7%i%g}M$_+ehK7ZuRBI9i;WfHBjV-^`~^evynR zae@`t!Q8)vw$4Pf;6$+I9Vqrk%m+)A?3rZk_X9)MMcE0Z{QH|M^mar*0S|AX1-iPihW-+fg?9bXTNU$svLAuEH7{~toPcul?Ynd9AiUOq-S23 zDlgfBM(JHt;0vd98(^fQ?ZM&c9x!U@qv~vI#eb%zN`ohJb^ieQo|u9$GddyoF6Hwx z#>0(MxclVyYxyH_nH;)d@*LvgciOp@Q0mXN$%^${EsoBV_)7I6l7eC32H77_rieqT zUEs)s4L*UryiXxS8`wUOs#Y+3vUksU%%kN8$ zV;VKPM7N8PB75;Ss<_%Ev9DnP43nj6+W{p?fv&?x!|&;u$m+X{!jvTkh36x=fNfWE zkPTCKUb}zCS}*|#`Ydhb4Fh&RqVFY+qb$Sy3V!a?m&JHrv7_cF%L_&$A@Ue784E7FYP2D+?sc_-d60cV(u2H78)I zVb9h2=z4)9<0AXS7S~o7KEAz83GSW$dQYQS+98`0)J^}l*e9zsE=UEm#IY4zhVS)q za6nC{=@>yyo!F4q<;VFx4$muf#VftGJ89Z=f( z?5*JR%v90E_sK{M2JGKIefyqDM?g;j$T0npzjbte4r_y!piFoDX!x+uk& z3^EeK+fkj%(&G?VJnjF?ZT0!l*x4=pw+4y$J$PYJH<>}fIeKyf1m_PD+{uzpbTd}v z)sj?x!&vcP&N8H~;L`Hq*E9d{kK0;3!m)`@1F@VWK`TZ^Vq88nmg!ox#$Nkr5SQQN zZb_-e!Fl7OrLZ!d(t7^5MS+P0n;ZACU=a;d@5nJMY=}=o=OFTZedcWWV55;BMg(t1 zO=WagTM}XzOM!}{m%oo(T&5H&Z23W=rI`%l;$ZKseT}HINSt_}WRS_JZ3i|A487+N z9CFPRjP(1Tn4+lvsn~F#+IyEqAKtk+Jdj6#DLIsAJ4>tJ8Q=;5*`(k_94PVTLY9nH zJ{hZ)$`#)U#!P6LRjc$rf=j~Ib`4v+Wq19Taugv2 zwf-a-3B+4#@)rjuA$^+p*xXX-8l3|(>MN^T~~TF1X2XPG%bsx9OvfX zQ~hc_uJ#y~^wuOQo>PK0t6lUUfI&N2iU}JtQYwZav`7}jv-F{*yh}J71d5K$ogE+Y z*QLG+)QJ~eRgp1uFnElXjeLn5@|Xa{;l3N=UmyM(&J4zwn{f1yr5kzE#0iKK@0Ad* z!(sIiV}i5TwxEWe`vi2V4(x3&dhRaS3fE*>JuN86uyqoEmdI`@;V7`lbMJ=j4=b#i zzQ3e{ak!_EeK-^0^0P+Ntxt!uC`n~@#xh7dEliaLe}oNUT34KiMO@IX^#&SAH$%zh zCV=kx!mn(~iKSTYI7=SCWtlN;uXo494z{LIRH~@*;)ZZ}wxfbSb?>Nv*w@c|zc(X6 z^HCy6VtkqlDQMNgYw$;REHrALYN|gA6d8J(lan#)=kw6#2F>5D$>SMDX`B?_m(X`xf&_vz3$;z0~4-y*1b2i0v<1dT>AxDeED9tW>io+Mt7EyxB^j)K7Cl~UH5UDuP@Vj_qg zCePvbp*EV?UiWi@?{5_KF!7-|4StOeEVvYsQXM#d^e_Nuwt?g+e5&MzZnruq!dz*?;7}$vgsE6uN^?TDOPf#-x(J?J> zfw5f8lD7EakiK~0P?m0JVCaVK8>5*~0ytZdU1@L^Xo{y~i;yfjKE~@819oBrW<(bA zdIf2tHR2Jtdy@2a#jAw6{ zyFVW%p`5pIh$mo^75U2y0Wc{-l@6+Uv8Bq{JqM&rPt!kaKTDXdZDqeWlVl#$$Ql)g z@5`jec?R%M8`!6`!VY&@4Kxh}uD4y1lL_7<`d$8Atbe^KxxYjFBj`nd_SG#RfLrwR zJtgS=Dt)`Y*Cl|-B@wc&7VbTQ*%%z>jwgEQq258}d+ zfykOuXSB!;@>!C%=Umz=S7)d7yQ0 z7f4Lahy$Q5SzQPwMC_G33!9te?p9Q3{ld)VS)Pnjo#a?6#=ITv9urlUVs0R6soO}e zM=9+W2X?Xr`mprBt1uh%E$E*?Uv|3v#E>iO3;)`#5RP$8(9IeC_Hv z8I!oDI8R^0{tl|arhcTrJxunIa4Ww!LX&y5#pN{qPw z*BubRgMT_ZVSl&We%UuIPb9{U$dIT+5QwY!K|I8$^dta2&{YA*V}rywF2Y>(i;Hi8 zh?BV+c6U&mcl%ipX{uZNnt)@*URnb=m{8eSiinhTr6LZ0&owTHl!nTsqGPjd)qD)r zorultf`+DSNcLzyjiuP4yRd5VF!3-Peb%LZ7GVEECfz!RU{e2Zl3H)c!9vW0{(kh> zoejTTU{t~S{mEJhlBGL8A%TK5y;GEiD9TJ$a3T?9vm~UU^gGBNj!uYz;4m4lxQXk1 zl7)?@iW5l|nDEJAmn-gWxRRB*Egu>VHUoB0!&H@mU%RXX>iXDO*5> zsUd%l9A{ilf0k=ibD8<6H_J+d&*m1O?O*#FpWgm!{iC9wm9dQHK8~L#;hKy4sm^pA zIu^U}%1P2gxG>Jb4>EL8_RTjvwcv$6Vo1L;EUN1>qGT5Ryv_bf55`0<(EE4}`-)pf z)mAPVMqvNKhLMOmv=!MI<=8${0>mwmN{&1*bDQ$_B@jVz%HP^XY=@kM4;fR&#DVRw zOv55&i&GSJ%yJiNv$=FI{QgXD)}L;2CFz^mtqbE@#)?Qy&v$@z9DbvtQhyWw z!OVKE9HY>G8$wCPNL&-E28%sGt~4+xjFrua4@meEImO?LkZBRoZ#ElUgz)w)z-({O z)x7!sG&gk9L?1gC{fBNcG1!r>tgG*xpQgb+QrUGC?CPB;B9j_u>%zu2dP~k7esdv7 zr12kZ>fH;#GWAx>d1js1NH}RMGtUEujdMa-aTz##ypKp@PzSS25&YDEo_Cfmhkx%D z^ksZYxi-UI=c|pp_*{DW!LvKYrNJ{S5;|-1yQA<1GZsqD=|g);Q?GBIXwgaKGd^^L z={cF^@)3j}Z+F6%wKrjQ_>~bR3KZ^dC!4354gR@0SUkc$G|dej)xz!2_Z=>VZlOIl z9ct>&XvL39!JD+X)NxCf*a^gjf(x(ZZ@v^3(}iWJCYG)mHjJr=H5A=HRo#GwY=2qV zTnnuk1pqg`XiG0n=CS8Hny&IApN{#NGSyP1ZG~c6&l|6<<}b>wA$Iz!d-Ks0`wRDO zv|t@vss#@ckqTRGdn`Tv#JTt_qOZmJs$2g(<^omLc1!CYjHv#HaYZI#3;PEV9)AON z%jgZBdjMP~Uw}>Sdg!=|t0FQ$h$1Vt)k9y=G>upPz-Ef|rswb@(D_?sDdop%TsPr2 zq^7DW2bzR%eH&{|Ikf82GbPSWEb<4L;aOh@B?kjucDCu5oi(ra1=8{Lb2J3Vmh( zX(qWoz)u+TSIw~tb&NZYY!7=m^`bp+gplCHo?rX5m(qqnnBUqsg+M5-Mw`uEM9AVH zY6PH)G4o{%wTK?Q3HkFo?C}(&Fp>yb^%ZqUkmYJzapyBra1kGDawOo zDnKyCr!3!#b9Dh<11#y%^Z~ZeC*nQQ`1d2)$Sk!vwT!4JuX zYrUrxJAAlJIL14Va7sy7M{xF>fyYSMh*f+i74K^h8tp1|{^<+T#3H9U+258O#`zMy z)r?|@NVqVnhNU~(r{pac{?Wk5UXA9Sjk71fvvpOvJo+2|-;dpoKSM2G|8B9|xLYr3 z)kpT>QoflXohUoToT&lqqI(;B0T$sAFvu@*u4P$-nNtzR!ks2!Ybjnuzyj<8=|5Is zNjfHwDSSxiG$_01*Kzk>URysiZ!z!v^-sxo-bFnjPtzs3^pC99ONBG_ zyT5VdDYbyT;t*iX&sSjtU3Hrq0^R3%0dmWm3qhEU4-f!~wHrqS)(k6=Rri|v;^H7I zbKSKGTRd_DG0#SAoycuhd|Ks~yFcFd-?~l}IraBuXSmS{4jXP|F+$@#Rq*{V9xlrb zEPr1AS;yl=&j0*O-&gVS<@-+X=#+^@RR-GdD=5{CGJKZK9$ONm$YrR?W-z>}j`gz` z5GAR&l1Tf$fA%C>-P8!<@QtFG1}D3E5%nhBAkBRXxhgrU7_m#=F#adH+JB+rIi%sW%;|KMztHSsl38j6yt~K!V8|$e zL?h3}O$C>WDVwzTPEUy!Cyqh?kI`scgHX>*tHCrpn;+?nz5yxFrh1xf+o#GI$mXNa zxlCF#q@`{8bpm9AS^NDiK zeA*Mo`dMO*K%DOcZ-r9*a0Tq|xfIIibuxBV}mr6LcXMOK(~3^DX0vs2pn@O;=R*+j$?mmw-nu{W~Xp zm~NeDfIHlII+4Gi&WP`lBYwU8qVsaZiL_niSNEmD`?we-Dl-!Yy9=zKDCuk=bJ;m{ zrJQiD*lFZlF@LWVo@epZ)F@1lb5w^R>DN3i9-uHqwjZhX_H%U~WneHGwf9Ehd@GFJasnGC zGbhZNpw<7%T$B?HgD>WX)7k0-yb?S)fquR&Hcfv$Rro4j&{7$&3FE(^%gTR?s z9E9>qD0|);{ytI~r{MJA8?Zo?OiX%2gE_8cWFPr{lKn*|DZ^Yf3WQrSZ3&-R6Affw zH}}4O#Il0392t|a;9L6N&a)_Ye=6@V*n9g>^oWCV2^3-w5lhWh!uIhYoV;&ni3*@4 z>K9T|d5fwhKa&v4{KL88+CU1VA&;FT2nH za=I$rb*j^#E_eW%g%qQr=)VNf)y?=A$=}Fn^`eG7wtKeg!PzCfVi2raf7OP;{BwY4 zY|W<2%3_0-vTdna(tM5EPYwwrIiax&#E(w35_a`9Te+3QA8?ZcK#LMi4%#sPpsyvVSeS ztQta?q9m_5Jf)p0Q@So)!&NK#YPJqR~-8RK#*xo7$7UyIy=Xft<#D#J8JW5S-32uTPqx3<^Dng3CMFE^>ib96Gsg( zPEhiF|6FOfV!m!)CQgT?!-<-Ln~|EPII0q6R5b2RRBNVcV(}`Acs(5xzAD?xgR~lk z`uUdqbGtOU%a6B$-MNVv+x}o)cD=S4Lv^seUHtC1mO>Wx3D~d~J4{M?ujvDx`x!B| zaK`!GvmcRZMf_YcDPkBm?izSrdL+O`=2xp|o;$JZs-euyWqM~?xUQK?_4PHj1!dQw zMp~1;EaVicAYHe?gIH-EQhcM+p zdwT9l&nxl$wc&Y*GdEO1%1W;mbuI5tH>~+teRW^Puihu=9~%O5L-=3{Mb4ZvDq$M3 z;-9?-(q123P~5zA1{?hRDjVB)00R?2f-(xTIAe9HF?#Xj?79ngyBSrHo%YdwGO2_h z-9p_swF8fQ21YytzB#el-_9=sFkSJa84_;ow42t|=iwUT= zU04*K!rvonq=8h=e#3QSp8nNW6W@g%;l(wW!^Z%dyI8^rRtOk07cy$H#x-iq1F0|H z&HsEechi=Wps^CH04~K7JpCwvYm*Y8sB|uC)aCsjBFz}PN(2G^p8T{aC$g1^%d>pN|Lp#TU zo!7saLvd^+C&mr~<4S1oB{Y4j4g=q;k=n~^sPD-uj?2PXADHmd+@T+8Y~lA!4&oeU zP@)ot4ql(-!2d|z{nfXKc=dXsG<+*yf}>h)3}0!xwtQch66R>|$21#f@l*DcMtzc% zn@0T$h>Me7QE&Zeh9$`(1qiQ`jb`3>+>Oznn_Ox+RKm@%!Pzk_9a_<&cZ##@pAY%(-%gr zymPb-j=WD!at5$V0m{rR@(GXqN)_m4i$f<)ZsREq6qb!}?};m^&Lm8;#3XTSt46)Q z!44@d=neJs4;-WG9~Jc%d8wRKf~cAZN1Nvr1^uIuJwbP3OX#L}&cgL?Rt4Qf_D{OtYGKkOGAYR@Z}q`U&es%@2P9c zo=iL}BSRBbHHC7@}yEx%&?3 z!n^%mOTdk2AF&j!WWWER%o9Eg6Z~m=nNi7nc@%m6h{q7z>Oil_s#4Z9_@5w{AlQ_~ z;^dzYHaVpYA3H>zX8wWGJjSAasUGnTVS|nDlOkz8NyCVs!ux#t`wF&K&s|{c`Gnr; zt}DZ_@u~{VK)RDcy%YhxJ-9j&HF9aLw`qj_o+-N3Z%;g%dV$PNtdF0+A3g9zhouieL<8PrEnfJzu64C$ocGCRMCQa+0%XO_ti!K-kN%Hx zWCU#4|3b{0j**VZ2O+DmDqUlKLqLDHP<`?BzKR|QQYvw>9bqetGJotmu%y|(zh4gus7cf>dZ0xkLDPqlCX@Ao_R~-{*D+ z-s$1Yv2WBm?||e4+d26YqQ{z{jmeZ~Z9k9vePixI28O3mb^@e5>-Hceu}xq*8Ib`3 zu4D|^4Kw&XTwSDB+botrWZj6x{x6IC^9E4>rHN1|^ymGC!zCdh7juTqpP^1$^-R`- zts%XeJT^Fbmw*h55RejSJ0iR@T$}i<>KH#<)AUk9GuS1d39TcayZWhWaSA&YIVzpn zW$I5V8Ept@DaK>SrMz;pLE8Ty3;}uoiIUR*!reetzYBh{&)ZK&BAhc#2r(?X4dz<= zo1PV92%ZzlK&BT@-)-w4+|W&I!^r{D+X5g{3S_tG!!W7Q#eA*F5I9_s-b>D;QhrSNUI7LqG-JHi)he3Ad`S v2ifMTK-q?gp5K>TZ}@3a_AzPil${I zmA}iol`0{waBB{t?25mmHbHcVOS4b2$Fik;5z|K6eegK1S7&fFW2B*{KPkmJhpJ`V zRkr`vjSxU>Wk5*$BO+2zK{@SO5~p?J9_iV&Bk*?7Lf9#~Yxk<6qOYaQm5oBZRE<*{ zuESVk1=Mpc9B?~P^R~DYx>keGHTpkxG))L>(Iz9z6q^NOpGgU(T4bZXZ{<;lHNs!my&S*HjGL18qv!2|%Viv0}YlJw<)Zv-gesECd76)u8#cq{w zHrqtl-s`cr@LQighz#97@V{4HkZqbwXU@jsktT1WVU- zxaNNoBLUUbgpZ_qKmNGq`BB5-^%55Rg&1@Pt=1u#fa~|y5ne-d_;gV0(o-*UvU5$w zke>mR^7$(T8bVMow*&sABIl?}$$O@{g0uW}9oOQ56Bwjdabe*X`{hEvv_fR&Uq3Aw z+IO!S*OWC+w^RKnH9fvG z*g^;1|1RG)g22A&nixLN@jHGs6T+e%>gk*O+bVJ4)`kY&!4LhLE1ENZ2EvGZdDqW= z!{A>?DUNdWxFsPHys~UK=I$!MO3DMC*?v&q;;jv84=v00Q(7M?qMCk1G`@$tf2}?k zqhJa$qi^e7sT94|+D0>G(PDb~^sprQGeYs)%q@~ZgP2}!1({^=jdz6DKM=nRQcr`| zoytD?RDV|{)?T_%=*F|%^qDBZABl4_2{9;!N8yEkGgC}4B36wpBDO!1D^0GkbkN#H zzqRmf1*^eALT@rjC2&f-SjqR>N+I10-7tZnVzu>IkBcwtYx+<=()KCV^O3N3yuun= zrD4Mg+Cpm%al}Uusq&Ubyxx1GYq`Ne7iOSz2T>+T@(D7n1)KeP+zbJ56P)o8nz~G^ zDO=*p5xa~Mwf>yMen7N>CU?|0e`?>CKZpW7Ui3txzV8ms7 z)3d=`bKC1e@UdQHc*9Z)yCK()dTDXABmJ{Jsrdx8#`}=rVHk7OX9GKyB^!ru`?oX< zE{k05!6~F(Fyar&!!yqz39^6%#|A}zBh-IZO9W5vfQ=VEF;a7Ld=#BzJNVUAyofF# zMM;JGeR`Mu?3I=9x&H+j_NVb@^*K6w#||_@J5C|M#FMz+hhe0sXWgr5k&c3I*(r)# zA>alV^IzaWZ$1~%NNGWUmqY45J|PU|QL-Dq4T2qpp7qAilyLWXGj5I3p-e#o13|5H+0Cv^Cj-@TL6tlOxnDfgn>+l7{ zi|Eo&mH7Y*SkK`XeD}et3e1NlINv{vMt6_FXRV7GjUb*m51a>maBa;-?2=E^rvEES zCJ|$;!`Kdjy&hJ9#=wpT$svv;QN|`HpJU0sFwK>Mfv}QtClD?vl#W=%D>fPPYSFb^ z;KPS@-G4}f5;%>IXO+S8#YPr0QHBGs15YRXQFQ}ABl>D1F_S#2ze1PuLCN&&Gz9j? zETgylO!|MKF_6t&BtCBc?OJ>HsRXL67DkBfne%B=o8BNq0f40Eo@}Ty@jsPg8y}z= zNZQuB%WoAR`!)|Y4gJh4dY`@GHdZY?GKL;Hba2a_XJ?k?mj$z}6~tic9>oAhPhuyI z5rx>5c}0kqTN3ZDT~5vcKTXLSx~2lGxq8?JFbY3kweBZHoT4NQk{BCo>MUaSE9fFeO~8m4JBG3N0&E?s#6;q!|=jgZ#ryeMryHneE_y$-|>!{ofAl<4I5e@Wa37)+%dq6FUqm4VPIVFO{; zyrTB_CUGjrcQUTrUm_XhxXIL`-L>^CAlsI4PhP;DIoxF80_;v^$|up6ih5k4{SWci z#UoOfGFvQ-MwGUdL6w;;63cD#g-Q;L1iP{0N`2@QapcRf8TUB<$-F`e&-UY|kGR!| z7j{o2M({9fmHU*)01Npg0Iox&z0054dx9>D%SQNnjtJP|t!?4vgLu?H($~oaRX3G^ zt`;nU2DC(fc}cLT{N}rOaH$;J__p2;eCU>6M1Y+yO$Gk;s`xr= zP2FJEC6|}GDn$M8bGY|YoJ2tBq&n{2_GII}|Rl5m!8r|0HT;qBO{<33CFY?qg zzOd*ECY^mCZXrcr0h6n&bTr$)BH~|K%SsHIXX`hf#-N|i2Q{6G5>dv_^M?=2gm{RD zp0Qn^g)7WS?%o|&m`xQ)aC*9vgl&AKGd5Whzad*`#GLesk1VLp!XhfN)?Ho;3&Yjz z+Gx+cN^&-!pF9uYm228;5Bq)ci%=?lrZj$~EX>T58sSQTZ&N85-%Kj$!udb7U&F>! z5K`IQ^bLX+*98%g5L*T)TMNQr))bMN3h6(^==z~Ko=s8H#c^|+i+zg(pS-0;J*vo2 z1Gu`+@|{chyBL)wriS>ok*Dc9;2nm(!#{%|_VurMsnh9=C=QG?s=KvbzB>K;j{Yc& z$J=JR29J=wVp1#cJu8S?oTV=`wWNr?6OXF(v&wJtUfOtoa%zi~fCP(X^?o&+(4Ty# z!MLC~+CuKFy^H}n8`X8gPWL2MCmMdvcFB^`I3gLMx8>3R8xv&Nx=ZzcpiLS!kuUdIHpvfACyw( zBUTuYBK}XAaa3mR^m_K+N}A9mi!G_ko@uNy1JDT_83y&p_m304A#Z;65c4|E2On8) z@S=3j-1-A_JCO!M{%thpB7B@A#KZe|YyXv2(;~?_9hg3`Ldi}5siAm%kptU3J{-E% zzn_ryUV|^dL#Jcy`-*Zv-TczBjlS{$t~yT$pRuKhe45rF1?IlFRyxtqjY8A)3 zbPuCXW&1)3pquDn0u*5~0yH7#eu~@6TXxgsPQMb!uKtqQl3WclQl1#R~;~z75!4?@lx| zgHk#9Y{IkhGxn%*y$6i z?;OH&QHK^C&Dsb!HOP{kg&>VV?!uCyqgShr`vm2ehJ4M3w{iPBFM&@tG)RL>`qDn7 zFfeX{LkYF_4mNV%Ol-m2~i^8Ri&~Cdw3}-aRW>D2D`7KOVqg9Tv4HKsF zg94QMZ(h1gsXId5#9>rPM?b@ zpQ&NSy@UWTn@O=cpZtaJdLic6NBXFwUTKVShkX}z0jWd82bY?H)aBfE7O12Hd#gU6iGZZ}N+bCMP8Ug7VMsD(xQZ0{WCi*^8! zv@PZj@b5oK_U=LB>Ni`)KY}xO66*x=addGn;nGizJ4^Ift8Eh)GGq8Fx@5$(41?!C)z$nNU;TK;RtsiEkNxtl}EV0XxpMD+p-0`=>JGgwo&WjnBy@$Y0;8e zK%f^Z;UhgD>O~2f`A9voRR5K6GNz5Qkw;iJ08ohJs!*K5GO_ffq7ypBBwyxTGU zvUhe7naVrdFzcJXT)r88Axo< z-gX3L9S5vDvmE=Egv+Pwz;h>fxcV23PpBo~RJTb^6-14`M^H{Nz|638#Mt)$->)Hu{e z{llCn!9|*=r}2-B4eY5X07~+)F~zR-r#E>ex6i-4v_=pT?@&V!LofcuKcQ|d+mw

    &^?$v)Xa00reVD$|7igcq-cv6_l`5{_zsd%n-$grRV zMtmm6%y#Cq+OMdj^exvn7C*1wT_mzJ13oAsdc9oZF$~Ni<4{_{{Imq+3xQ@yI7Tj= zb&6glwVLWrUTIOqzc88=hW-XiXKf07pp+&gxpYT(-=~ml8Fy)$2-t!Y?_>EO&B01k zu+7V$&4c*RMndCad+J}Y82vq$?T)b?E8VIt&Yno^yD5oU+-~FhoQxmZAS=kQ;NavA z8&6+oA1t%~nuD6m$DCmhXOc7CAO*)j>%C}^D@<*9&{KvzF}FK1{{E%)3FZ3_mB@>I zlVJX73ya!gs`TGRJ_?qI0imw&(ALm@Y$U8HWJP^aL)Le1ketIdNO)D{6Kux0Q1$1e zs%h(YSC{d7vER+?trNAsDqwr;W&u>?e3BY9iw`8AiP3j9&hysc3%dCU8mDF`nTN4_ zrrakE_FpJUM(2fZ$VhGR7@)Zrspur6^PEgjZCic%0eaJC@&pR=<>CLjcEmqCRiR+^o=lcAn zxJGpfaj1k4yB_HM{5zE0ETg1F{L{vItpyHMx^Sz%<@+@xkm}zt>Y@{+IMv_KY7BKA zA9Itbx9nsBvTx0yf6y0~NEH*zpg+gbiDde;eE9lQ{stBqXPk!LK8Zi(^4`gKM6(d< z+U%b_IRJ7#1p+Qe&3oE?jjaJ z^cjBb`@ADDsPe5X_|#DfzDHI3O1hA0&gX<-NxA}gDkEa1(G)|_!+%eufAfE$)BQLmJ=-%it@OTC?31VloS`B&&;H3* zTD|m!262vMwFa~3b?n6thbIG_>4erKeyEthQm`$nT?Q)5k(hPz6@JLc@oR#fvX7hUttfy-C( zqAm(}ow8rwkT^;+A$M`ub>ADU#54@LXh~)`Tp%?1`A$*A@6?ac6cLF@;8Hrd131*0 zSylFA=Oir~SG?DhimlZ!4PNI4J8qZQF{cdC?M=5N!dg1sYEaKtQ#1ORnm0)YA~cuI zwD`7kW!2IBo}JqXjRw-Hg_vZ21R42ZWBN(x(n@%})T z-4y(f3>gq`9XJD5^q@fe5k+)mE`}0k{Erz)36APE81NjtLTqX66=-z7U7@vd-B;ni zD9JWe_M@aHn);uz3^19rQdBn#H!2GgP9ZCp2Ci9S8tKr}r~TNWZZ3F#8O#yd;dz5% z4=G=3o~wcbGM#`+wIsZf3IWx|oJ`dSZ4nfRsSHa7UcENZ3q_R_R&Hem-HyX4sBSTn zxdX~|aXxx)8i}*>X8)MkVu+{K2D(klPRZ>)OG>f#^a}A5B`Hye2CL}H`JXMza_=8WY*U#K!I#GrbFTq1Ec@G-JYsLMCGuvB+2gr4DG{6VPCK(iTw5kkd2mSBIFh zp7R(g2Ws}}Lv=xLG<% z3#vN|Y!|U_yiz#@)B0N}OU?HjYvR!Ue?2?8Hz8-jAU`K+YM z8%WoRJM$!~$)x7^c2b=Vs1YI!X`KP}>Ie_+-zC|xvdi{@tOGco%w7;j9l zgsTDB)Y7FZ@lJp?)$#eA_S0MUd0w4Yr* zSi1fGPt&gASOBl14Xe`d?L~HvpkWJN_h5bY&?u>QepbCyDdY38L^RqBU+ffFCG4-{6qe0l*0w44NvQAb^9bhUWLprdU(tCD>S6 z?SZfC%^1~&BC`u&QIgQB5Dpyz?kx?g?i*_KPVLZgbeJk0Y9Xy;abOpOgmo97FB~4NYcOq|86NXB z)(aZ2B<1hyc@B7xy|Q71Pngdt76bTm~l;K&Nzq0YmnA>tCvLc>U+~sUBUX0zlmZR}MWFQm$`Wa$(9bdC zw%U&*C|s|eukQ9z-@azlVs?&zHtcieh*4 z%xj?o=ry~v7kNE&X3@a2r1T*{--(OnuY`E7U(*T-Qf&EP#_nO5?~TVFQT?mgO+F}S z_SdnER-E|mKzSn(R_PQ*=Rvq_6S`IQC1>7U9Lm?pE;emrc;+lvq5@gN_19 z61tG>@`PoJgb>R{F}wwLUW?wl%6?r?#H>TK1)C0I^;gLN$}iL4pqudr<%VR8Z&Z|r zs<9Sdp{oR)VMAK~h+bHz1^GARD!D?NfN@Dv%9sOS>a8=#X%s&H87mIGaKuit9N(*J z0SdHwRb=qkY}n%aMcY1k3y_$D+AB0{Gq3PJYu(SruFt0y>K8x=guW}vy zkaJl0)X=vwj!-TKdJe>OzrMRL_->y0E7Q!{rXGTA+g?`U%Yc$wxRB>Jd4TThXd=j>@6T&`@p{()AlUT^ zF}#rv|MgR#3F=X)MHxdEy8&XpsGiEsQg-jmD;Y-F{#j1K6WFF#sUohYzr6Mr^isYE zk6v6QTl`m6QpNW@<=xpT$qgv961djI?=usk_m|=jpy+;R*`n9N3kmS;B962}(aPr0 zUe?1bE|jh#s;X5ZfWMgF$-8Z#XNMMBMTM{2GC6h@P9@zuGk2?~&fW4>%)(R+FByw7 zueVr6L3u_`7)CH!T`D&+A_{X<^@5ZS*e`g7VCCl2rr%q!jqE5%VERh3)>58BTp9zo zWEDx~u(bVSb;SYigaZVkNpUdf&@NDki=u?%=iQz`Gmj!DN7R*(%`#4Uyd=TAIB*Hk z>1xgP0BqyN*Y%ym^f+qg$F9u66;cXmOhBch0lHoL#W_qvt>Rdn$O>u&oeVhEgO*aQ zOpbe0$g!d_MudW7iGg~KncXBJpozWK+cvUqBeSW7kqwro&X91FR)?~({)xa??~cFE`+aUP5m{m|ekvS`d6gPQH^|=7}J;trG}x z7<6DK7lCTTb^|$J9IX;xp%x`(lua01A-#9{tnL2+j~z|~$;9)hn6jk*T*y$J+;d8fx`2!*{xW@UifI*|SF94J zC|9ESr#OoK)Y?7(t#T|rst8-@j0}sA(GI1d=ALr5EMfmBBq6M5$t-(QAIP+ZRVizi z8k?ZdC)HjyisjeyN96&V_Jp}?SVT0{!Y8L=p?1M9VF4iNiVhVr_Reo3 zKxf8{$_|RgD%Bi47Ig-mc+HRMZGACPMk!~6cwnpzO^s4R0_%cL-E~@UZ0mh1Z;eCf z?9=?_J}nub09vLrjn*}lS#Gbr#YzanD3o@m8ABi>kp#1N^HYy4w+(~s!J!co@6C6) zCkkQw9b6uI6dr6uv3}Y5(KA%~EmdkzjRBiVp(o?ddKQa}pDDViZBMYnW za$4pFx{3FKz(=0r_ewR?kqk21gZhUa&P&Q=Wu->22l>hd$oP3 zD+FGUmJa)3dAKqoPrWg-$r>DDHJuQ{05W+Hz8&Ber~a_0Mi~rmgBpTmK`d0dW~!%m zu9^1A2cP)`+Co~R&2=5_IDau_)8e8)wsLyB)t>)O1j|YtrZP{4_R#lwRPgg}z$q(S z2nVYpVR{;ZqP1&Wo(xZJwv-}b4+0p z`OHo&l{|F(^0C^4!6&Y?4OiGO=`uqS9l}@J1v!}5zn)qmt?o|A`JP`N@d4=!tO(;H z>mme9OZctWyN0G5+8)lou6ev*p8*X&XmsXobv z`lFzrdr6^_lD2YjOgT^Oj7usP`DR>I$s@%x^@@hkMwcrRVT{~T*XNrbgQTL0O+}Z2 zPCr#m^j$4O@!8>sphf7>QqJ2BOxYc(RX?j589-&~^ueeNlI z3BH&5>j}h80-@hw{lmaIQ8V!?F=CGpE}(iQI6eqk@U3Ed0a9E=#SA7D_8eDhnW~~J z2IXOb0!D$9)m*55u=&}LS#;#WkI)KK_)+-K-LIh`5zAlZ;C{Ox9?2^KP<%2;oV!V#`PAR7u}vF=IX+K1bu#8<_u$#XL6=v z`d(X;mYVXY>b<>OpPb>BRanN$-sM#>ty(v+Dl)bmSJy@`xT6xHwVUo;1D0*q9&k9s zT|XlQTLEJ2kBQ+IDliR1y;Gy=QolrdzIU{dt(^=v*;OLR=&G0)u)k&mg0gUYeI#kx zna6qut>f6UF=v;sAlw)Dt^8lu*jJkfg#zEV)=1wXhdNLGjW##2Hjn=KVKxBuS7g!W zs`-A?dI(U(QD3{Z_>CH!zS10C4KlCEhb(Htx4e4@#AH#@-Wh#guq}SehHMyI z9(KSt8N<-Db29!uqyZMhTUa;I9wQuqU|sv^i%d2&%fZ4|rgK8Bx8 zafMl{1y7j^7x9iKGxW1hHk(#WDl{EZz>%(XaQ3Mvv5>nq~ zP4zcV6>y%>Ci2)TkXOctj{7>8$QdBe(>{Q2fJwYdCcLZ1)XM(UpV0Tf$gL-eDpB8R zbkOg2f^rX_GJ9*>*wb{(5~p{UCA>Rr)^ik|A29w>p(&Qqv=h6&u0eJ$v?;>GNJSVR z%0_%Uu4XidyqK4faseWV$Jrs@jPBkDW`@m+Zf zH`Q32G2n%iFWPe?ij67RO&he7MD#CRi?rD@*gp+zbWqx92TKt37fM9run3u9#|F*BC*fFWSjy`ztf>GBOJ5l6~ix; z#){afOi~mI^vtr_y4pXc_}53%Q5YO_R17zMWbKummQHsBtH&&yLykOwD`(%^8~A27rpzUpGE>qZ zyMJM6#!!WAA*$#*9L~;kp~~;x5l+T2U5yv+6=oik-Puc2` z*C%8Y?r4x#ZDa2{&`@?+Xw1;xazyyCJCXmeT!^eCrmOA7y^x+6nrYP=zb68)|{ zRs%b_0Q1TL!y#?)g<)t?=|Y=y-3)C7vFzYH+(32n(5I4y1a9?F5=~V$*_3QG<2Xewclv7z-uM_JwQQ` z8RWY-<&;p1Ts=1&JKqW!g^veB1cgYliM|Q0&v&#QVu1?SjmwpdPJ-)UDo&6F5`)Wk zAQ29%bylu-bwKE{QtP)YIb1>Tqq|Ywx>p6q#kwzikbWb`2E@nhr(U^4lwxseQFC}) z&*X+H&nj*dGMK!F3|SC;B`#iwk2y_y&YbR$i}=GDe?-0M>*F+d-r6d zp84X~0s&xy13_O3<$ZO7mV%1A&1h4v_X^b=Bm{CK0{WujnJ|1XOXdwtk&-5M3n1Z_PY7Rk-xJ1zhzC|`a?}%eJr(cj+X-di@sdl4175)2mY29vFF+*?{?Xa* z_0iYQx*Z`z9hT0<>{j@M3J*-J<$|;|Oe*S5@UqcQ^Q1p1PuEHUvRl1j+(|f+Ky!DFuJILP%0JQ$uq`Yztvr2t zVV6;z7YPAqUBWnGdu8KVVZxXcOF4EQuN3HcwKs` z{{D^B}*?6K~SBiVrC~Va~A7ZtKEf=ly`g87Q9lEx#PutGOOw zJmRtrw{OydyZWz_$$$Ino=%8IF%T~7py(VRKHQa zPJkJ6Su+R46|F$tv+y<5yBa$Mc7sBPZr%s$iEaEmkejD7bl4&Yj`p6q2%_={jL|CJ}%E#8+OfLI@g=bZD4 z?dst4<+r`&x6bMlz>z^=M6}Ly^HcXk{u>Y1nzYt$az~g@u;z~%i=R0c(P@VgyuCyb zzk710@MTVfZ3Hrq2j{xz6{?cHAb*Lfjc}H0KQS*G44QJ3&Uc2m$NAn_t}$_ir)=XU zeR_2QC^HnY%XhuG`q}^SYhc=hMXjerWVAy$p(?ybY5a5a!w5&?xcRQHg` zUDpKkqi!E6rbp<8NszYdEqUaW^ov1skVCD`HRi*|56)13bf zWgM8-+qrPOEmYZ5sIGdO7?iyy1H#Hi#bA#JY|3d)l=d ztS_)ou2e#l@g1*8L%Wt5!e%?Hy_s&;-?7*F`AU<(ZSACh_uwfP#^e5=P7->Ad;e;~Ri)mbg_Km-q;K>kp1!raO}J5jf%Lu>JHp z!j1;GtBFIk_)=fvxM#bg^6llr%|**1lfo+;Ai&y+L4dhCUjDLqMUM)wA-CqPH&7zS(mAdT&E?qRq|Jw^dFEF-wLfvgR`<4C_ z7!I;Nk<3F)9d)5qZ$XUDp6{7go?N>h`hBx&##7=orV4Q5$nUBlygwWnZQ60=P8znp zU)*NYlYSWoF@pamFW5PReucYu4S;Rd&s*v}ALF}K?SBBmIKlP3iZx}xEY>~8W|znG zO^hyU8zkE7RZI4tr#H=wqV#D33CrAt696M@k*T& z9$#?*+cuQb_w7yiv)9y_WdAJV!k54N8cJi#XMA6slup~PGL-oCjuMa7>T|flR%w#{ zLGh^G!l&@qcjJCsG#>;8m5?9~_f;L;#CA8lYQcLP3{LM0@~i3XC4v}U0S0a2{J>l= z7K)9ypj`-$A6>M>e<|fjHM%cQfzeOzc8iJHHLW`ngvUhMfJG->g}SQE@HY|RjoyJ{Em5ySP7M*i%noAwJIx>0 z{<;QlYOSc+QYf$t#!D!r(pca#pLZo6WL2X}N(eQ42g#YuE>d`$QyzG&#)Ws@ZmdKB zHYG_2Ad18#1k{a~oV;8iCT42A;Uv`6Q;FgI4MebpUs(tL9N0y&(1{u*I0 zsXgSXs)N*TW#<&(bh2tatNiZWhmDy_^`#V)za;sKyqjA7a@cm<0R7z}#2o~ul}NVb zPWMI$5!ptZ0sR_)*BOczr`(%JfRM{n{}_M!^$lC)a6ykXk?Yt;Ce1ncLi0Hb3ot?B zm0eJ|T@#0v9ismd6Mv=JPN7dXb$$kUcCDvEyxhXNxz8QLQ>v`WGNO>IF2(-Oyd|#| zW;}JvpAPpdw^6T%{@WFVa}T8-*T-+1;x4ekZMNeH+66pQ1BabQ^8T*iFS1q?Aewpc z<_a!URbLq&)b;NuCzRkY?F33%gS7v7^iS)Bu#bR^%msk%-wiGaVE($%MfAo=|Axoy zMdzS!TlilM+!COZQB(>0ljN(U`7ii*P(dOHoyY2bH4u=2WKK>|__vn>>yP;R)6F*n zjuGxCl%Ve44dBsex+n*7Q^3K0Sn@yJ_+>zc=(TY4{J$G~MBgyNUH#nCH~uSF{-Wjw z1q7m>dv=fIzZ=?eKx4TpIxeP>|}8 zv19v39{rvBWjp_0vi$#%y%Fk<&=@S`kc?fcP=Gt-R7V3fC04W*v3iW?ORaul4=x?Szg!ohCGYS zYC6{KoV(_F`OC`?1lUlYO!kh20`6Z9(*NSZ6&{Etidpx)II^t&cLTpKJ%~S?!MlL} ziwk%_T+m!VuZH%&xB#R#+Fx}!mlm7>VN$6Sh601(K|22)# z%QPi|YIy&iMizVYu{dQC+dh;5UUB;v*rVa3KjS+=*>D&gaY2cQhLRlpfXzEw4eS0k zy&F@Awi_5&eL9)Z=I!AC@h*O1b(vMgODU!K?Ry5K?#Weut)$r{>H_mFyCR`bi!;tk)q~gL(VDhk?L5|xOUK>pV`+LXkBR^ z+-~BTL9EBTLx-ZlwIRKi^tf1zp7lOJyRapR!J7`6w6S=<#9PvNA%}VhZz@99;&uHg zjo>4U+N$L~m+~I%Vz#HwsU4d|RL6bTDgwSm`}ESC>B^db%29;o;1P{FUZ0mV;W|>UzVkXfn3?U}=tRWzF~S z#$^B6&RxAI8aNA8a8<2Tcg`tDaclOagG3w9Sx@pXG&NiA_?~qOuVdtPb2QwEMy^@| zpJLN(w`B1kC1`tS_t;((OP;-(%Th)|Eph*+W2In2=>Yd0usNN}?9knr*O`={=5T6@ zOU(0pu1(JE$&n5G3aU{f64Ak{D0;0qjo9QfcSu1JlBi`n?*myKB4r=&E372y`6yb+ zH_WJk`>Io=K#>*`_NM*!5BIRavdfc9=Ud^+pqihMR@3jf=o7Yn=FMyR>GWHX*!4Tm zdgxMb4daDRMpfeO$yoN}e`3LDb|52oza2cmp<(+|EtiJgm~$Z6&UnGuYUtE*)+ryN z6N|2==#2p7Y!#*kecca>>(3jS)%w$6c`5kL@7>v~MPey~YWDD27Hi*m&*=oJEA-fD z^=e0d`a(X5o47ARx0t&Az4CVUON34(jEwZ#gmWVI#)Itt4<%0@zs5m}^}4st%rVz3 z!rAr@In83&Inj}&Qhcc53v?|M5zNfqqr5-7o5D~WNiU1Sk1oLz&7YcMlYGNF=LA)U zX1rFIw{BB#1Gcp6UOM2kp*_!L+K41N5c|=aV|q9mnCJidM%he8i(JQgIU)MAjL-S# zaKW^@U50I4%!pl?y8&+faiHSsVjW;jo?=|1#_e?L3hB}F=x0r|*$g86Wv;Sli=E{U zrz3}{7H7w!pO)pU%X#W6;WIF0FdoOeppCj%K)*mt(OpC{n!0{^(S(Tjo(Vk~5pu8S zXr*Hep(23K=j+k?utD)+#)0LCZ1g;z?vPYYq&Q)QawP+hGD=mFr2oLuh|vyn`L(<- zUqcBdhYb80Ib2A1LskmsdlXzCA)w)gyt>f7!b2GE^B^t~&!EZ2a4Cm4h5}l*!GY9s zB}UPq_RepiIG^m_b*Iu%w%Q}~NaLVBte_1NBn7AyV-soGqxVRv|6qDQW`ZQ(R*ePI z*XfFejjE%`Ovu0a%|+fi$gaV$_r4P5>641?>8Px*j9N&|Kr-P#9%gJULvxwP_!=3` zcbw?oYI51I05!b#k<`1KA6SOpU6#y9I69Qp-ewHr??U8PamE=YjHH{3;d7y0%Mkb% z8sx~iwY;@O_|O?K8kJ?4i>E-RHIi|9Oqfm3yWbwi%kIlIsRrUoFSIzG;xy zbedUhSXa!i(QNQPY-`sWSlwy0#neDN$LN2qsNN2;pS4AW`%W6f6pjF1<*k8{O6XS{ zG>_!N3Mn~5+F?f3_1&BII6(*f%KHX??+Bfzx`ykdQjnq2z$njQ-+H<&yU`cvLs?xZ zE9D!R)~0G9Yn%Qe?2`u79Nq1iSveadEXj}Ey*XLvN@tt1#a|!sYwkoIs`?1O&Z$-- z7dWUJhUzWLQFz9ZqM;lb^ zls3cVWqHX;)iu?kg6juI2G_1c`qygVjvIq2MO`h)kLj49%^`Jaz)nppqv7AH*5C!^)Z+?G40%y)RbU_Yxsf__KZqe zG4Dk-scL()M_VrM>vNR&y1C4+@wTJo@o)QU1PZpL)9mh-4oC1453^X8<%rhG%eCLL zqn_k_e^c$b*QnN=lJ1K(O2RG{9j4HDTE~N}8xCp9WBz(gFq&Ko5H&#j`ql~<%B60iF&L6r2V;;|iZO0^@;)fEszA3d0HaMc zqP3|C;df8DzToMT(|oyfa*%843U*?|;3`sYqimR`e#!|6)wtH6e6Y>EN8cx&Q@&>K zb~h5~!#TeLKdz8e03vdaNu^c-aATz=d0KyfKW5+YnTDu3kjir=^h+$lg)4HklOA{~0 zWv7<$>}NcR7Dj%+z)uqcm}|-nOz+Ev z)316!*uj1%SZG;IqfGs5Mq{B6{@6&QK{H;7FK4yvGv2JDR|R+^k&SR4CF2X=Am+$x zDDUG7-N!{j`53`Wf<}!c{2vYN$s0Dv4PL6egh+PqV~T*CTJ2!F73DZ@_iMmEj(jInJ@^ zc67QR=VXWHeK>!8D%hp8^w(n~t19(9|9!{i z;RuWvtT(SPwn#`LivNum-kQ^c3ed9lopy3uB2w~ zfRH4Y=5M%3t~j!^;PMcMYA4~`7>NHW)?B2q^gALupOQ)H7+NawJt?1|M1#f5K3;(> zq!}=b{1?MmCcqT)+9MpkxCyO zFZC0`VT3f<5|@i>t#$TK_9K`hrWax--b5e;?IzgWD=WA_Z|&5(cTL34?I-*+Td|?vs2?sjf{;5@L7j9ZTr-L z2nhDMXiF+D14HJ%g&+u=_t@EiT0^%Ew78 z#i_?d?>b%YJ;uW0u7{!l&xzmQN~yb|L4~z5Hr!6l29hVNc#iUzt1mSTh^L?QpyNtD zPlmdUy@Oz){_W@Y;q9u^w7&_*BRah~qH5Zlt!kd;iVv#GZm5ZCNT%|#&*HvQwo?aM z3I01$`niHSd;Hu4D+y8)7-{E{mtmXSDHk!Kc z4ON^wUFX>J-#*2dQMbp+<39vJ6Pp>rq}7e^?ad!*v}SNk=vQtpBHNNW5^3ushW3+u zx9!9Y6_HcV6>;=DLi~miE(Zv|0vs})hNyrPscKQvdqfp3^6ah}@R9Udb4ZqgtFkL+ z=hONeK=a<0~k@MTa8eZ}(A)G+j|KbHYS@JxvL^F<3qn1Hx)RM=43OF`B*!@@zuS zY$CfPg?#Fp5U!Z^@*aOE*+huBJ|$?nG;X=Tx|_9i5xL;ylo`IY%Q&Vg&&w!4&m*^u z)s7lDB#>VUrgwQeQl{zGbnO&+mYEoi&Sy$SUgJ1BaE>R$30yP_35~zg&+7{@Ic@vZ zuroWo#4ztM|Ey-VTc+k0ukW4NWHi5waw_{v6n*}R(6SVjrPV)ks;%8P{!s8xY>zK5ybU(;Lq-%{{&lG`9&O(Hxak;Fh8f@BVPOexk(~U&HN4 zr{?PLGSQ&os-;S`bshad&TQoY?er#F?+|`YS(HC|zHW7Z>F)8cbi0a!8*-#PE{L}- zvq3?0_|~ko=4+Gvcl)oxl+|-%Ym!r%O!DL^-2>{kc;gs2Q+s5CvySd1F=?WMd&l-Y z?|`>BD(2T|Gs)V;Q?ci6!@Ha#*9kK`JkE1-FS3|@zS{Db8#`4^ z=nJ>;jYeU&wmRVU3jTnuE1?kYV=wRQq_X-xN;i5WMu!ORc_Q(BmT>ovkZx=mhgYk^ z*0}`k;XCh+4$q47idmPZeu__q54T#T6dJfX*EyFQBY96non|DJ7VO^L)lUpBle)=w zl?Oq?7DnWo)Y)Ax|D1|7WH=%S^2R?0ry5)Y8`?F6Yyw~m@ zGmV=F|1wP>BkCX1gpZ$(l0D{g%`=7Spg7L=!~g+ho(@4sU3@%$0r>svtCIdE&))iM zfhHaD&q0??dq;^GOIIwAQsnXOMiKS@7+wJ1KtQq;qZkK+mg+MDr`mr4)Nt2}^NQ0} z3437ClS2?!n=J(;cR?#t8^ZU8pVI zCYXurnhIr1{YN&wRe4K<5>7r+qrvX-ZDjP_Z!wwobWRxBLh)*l>8xPlpRiY+_U|j@ zpxVY2Gw#8c=)OlMZ^P=%lfC6f~j1ksWs&CjD{wC7%Hk z@1rhTQ3o-qwvPW93_>;diwgylT--iGrWxVyUVG}xM7v1o!Yeq^NlM9m91Sg<*T2p=53 z0F>uL8v4~yq0C2Z8sE{e4Qzs`7N8faAFW(0brM%p_0Fr|mKn^{iH_-0wZmo}$3w0N zWcj0-e9S(OqF>^$gWSqae=#F`nI;k&kqF4F2T#8Ky8e@jAIkNrZ!h=@A-ch#I-4UuxO~W5-SLRx0`DY9T`K&lJk+nwM*^{F z$DeWDmxqG^p!M|DawZo-I?_tnWjAND9?QE!%yQm+nNp%;!)5H!4a}>AZ=CiFM`Jcv z?)h#b4An*#gi<*ez@>t4GIsartUE5U@a`8LSYs}=-!{y_gF^uxm?XGMj2wK$A$WzV zT4|bSr-3fonD=60d@puaYM2Mc!pC+CB%mxzTiKw8%&U=4n;K-@mZV7nzPPp-S0I9z zxo^j%0@O>}ekeBGIZMXweoYJ((10=R(qUTW{o=C5c9Xok5DivY){W~2Z1qHa-j79e zEi9|WW%(+TEtum=<^qZ&wId_=hDpQF=yd^Sm1+rI#Y{qKg@HAQ=O2nZ*fGgLb81** zIqG;e{`ItEmfU8NJz`+6Yp)jB#9~ z0ZWTRIvIoLIA59eeFSu|%8H6)PoF?z%SU9<@AJGFogcSWGIbssotoQHx6sm1Z)0k; zp17zMYj8gGuIeh&6_}3NNWHg+9k6Yh4M^>f=O`yqGeG@KKjR_v$4Yvl?K{oT*IXAu z`w~w9av3K0e7^AG%!fG@tgMo|7!MK^GvD0=s~=-P>wN5*Phm&p$Nu3ILx35$ISxNM zoepeh4O?xx5?8VIW3i|Hi0n<`#%#TMbF|1#pR@}R7sJVlPCT7dfEADPD?*nvQ2q2V ztBPGsR#$@%!kh|grVh9c7YTf@8yqAkE3<_L=|yI)8l}dJ@*+(o0+gS7_G}vz;R77g6BH1r%H8eDvL0@T z9&8$w6P7>3=L0$9w{-pyuQLb$uM#&;Up4;M2RowR8r#&>bjq7xvl*C?7qkU$GsU9B zP~h-6aR7GsAsOUHM)=^1hq!5c?$A@<*`haRNTiY9eFuMv-7GhS93!XkyBbhkVNV%k z@OTz4>9jmw?H_#lIc^qv)Ao^@*A~U8sRlDp8oZ*qfmv$aHSU}odXqkWN(OLBTqmNzte*%U^*Eqyg)CSKLv1p?GY(AI7UoN4Xy-$i6kd5fAZpu`@V0xx(2Pd z#7ZcK_#G=R@bi0vdqIBb#og8r%KDiF6U&&H_>5a6>Z7bA2;J~27*3i3A_H!H=fs?B z+7GwHAEskgQ>gpfLjRn8D>M-J^c)^}E3_1^ta1_UgORFc^UnL$gCeO!BG#E28aUjZ z-#2DZ9GABfOX33=?9*>Bds~I7w+YRztS2nlp)1vHdcf(T;b4-lzZu3{-&5P92H7kf zW@YK?04%$_c^LL2hN?LfD`#6DzYZ1#qYWaN4;CmsQ1`6esa!nYW!03K$of$st<6A0 zqHXJ0_;75!5SsPGLfJraMHS=`jhhKSaoxJoREu|SAFY@B%z`&q?mLYa9g7?pH!-H_ z7$!4T3nNSs?cvKc>rXfpAGpR zcSRcl)SMOPnO{c*-0LyEVhrKc$>a=j&np@@Qdy~52>b*tw_axGA|bB%s*O`~PA%z6 z_OdszGrVy_b@3D zdmPZeIE>-m;0S6yy?ng0qBcG0P912X`-OzMoa?9{F$1SG9?~Ed_%`0Yt)vo$Q1AjK zv%i(x&)k;5b`Ou6Y0?S#*Ll#H%}yJNNtu@5m=TpKR%aqMUAM^>s0jTw1QBA#y?_lGjV&*zK}b<1=C+6yY?825;$Dma=`njzla?jIsd z5l-%k^nPORoB0cJfjqhxMrIhUxFw8f>MSH@5b}#vDpH<*{NIC_xYz8VU9hueuBHR!CCJA1h*u>ubpybWaw#8bY)>FC4 z7X^>^qgiS4VBX}%ANw7~V-CP*Ge*E}=-cZ#U-`zx7tGzS`3IUB)E~*A;|sDT%oBD@ zx9e#ImXR+MHA5F$!39Ba9Q{6AZfZChQq`fk=DZK}X<9dE(wy0H|3?B{zqnUhIE_O8 zttC2dGUV~*b`om{=1}7K#Tf^f&ZhU!v6%heU<-@_3(X0K99xuxc=Ku R6%6zzE+Q>lCaCN8{{UMu0S5p8 literal 0 HcmV?d00001 diff --git a/docs/reference/search/aggregations/reducers/movavg-reducer.asciidoc b/docs/reference/search/aggregations/reducers/movavg-reducer.asciidoc new file mode 100644 index 00000000000..d9759629b75 --- /dev/null +++ b/docs/reference/search/aggregations/reducers/movavg-reducer.asciidoc @@ -0,0 +1,296 @@ +[[search-aggregations-reducers-movavg-reducer]] +=== Moving Average Aggregation + +Given an ordered series of data, the Moving Average aggregation will slide a window across the data and emit the average +value of that window. For example, given the data `[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]`, we can calculate a simple moving +average with windows size of `5` as follows: + +- (1 + 2 + 3 + 4 + 5) / 5 = 3 +- (2 + 3 + 4 + 5 + 6) / 5 = 4 +- (3 + 4 + 5 + 6 + 7) / 5 = 5 +- etc + +Moving averages are a simple method to smooth sequential data. Moving averages are typically applied to time-based data, +such as stock prices or server metrics. The smoothing can be used to eliminate high frequency fluctuations or random noise, +which allows the lower frequency trends to be more easily visualized, such as seasonality. + +==== Syntax + +A `moving_avg` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "movavg": { + "buckets_path": "the_sum", + "model": "double_exp", + "window": 5, + "gap_policy": "insert_zero", + "settings": { + "alpha": 0.8 + } + } +} +-------------------------------------------------- + +.`moving_avg` Parameters +|=== +|Parameter Name |Description |Required |Default + +|`buckets_path` |The path to the metric that we wish to calculate a moving average for |Required | +|`model` |The moving average weighting model that we wish to use |Optional |`simple` +|`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero` +|`window` |The size of window to "slide" across the histogram. |Optional |`5` +|`settings` |Model-specific settings, contents which differ depending on the model specified. |Optional | +|=== + + +`moving_avg` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation. They can be +embedded like any other metric aggregation: + +[source,js] +-------------------------------------------------- +{ + "my_date_histo":{ <1> + "date_histogram":{ + "field":"timestamp", + "interval":"day", + "min_doc_count": 0 <2> + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "lemmings" } <3> + }, + "the_movavg":{ + "moving_avg":{ "buckets_path": "the_sum" } <4> + } + } + } +} +-------------------------------------------------- +<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals +<2> We must specify "min_doc_count: 0" in our date histogram that all buckets are returned, even if they are empty. +<3> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc) +<4> Finally, we specify a `moving_avg` aggregation which uses "the_sum" metric as it's input. + +Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally +add normal metrics, such as a `sum`, inside of that histogram. Finally, the `moving_avg` is embedded inside the histogram. +The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram. + +A moving average can also be calculated on the document count of each bucket, instead of a metric: + +[source,js] +-------------------------------------------------- +{ + "my_date_histo":{ + "date_histogram":{ + "field":"timestamp", + "interval":"day", + "min_doc_count": 0 + }, + "aggs":{ + "the_movavg":{ + "moving_avg":{ "buckets_path": "_count" } <1> + } + } + } +} +-------------------------------------------------- +<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram + +==== Models + +The `moving_avg` aggregation includes four different moving average "models". The main difference is how the values in the +window are weighted. As data-points become "older" in the window, they may be weighted differently. This will +affect the final average for that window. + +Models are specified using the `model` parameter. Some models may have optional configurations which are specified inside +the `settings` parameter. + +===== Simple + +The `simple` model calculates the sum of all values in the window, then divides by the size of the window. It is effectively +a simple arithmetic mean of the window. The simple model does not perform any time-dependent weighting, which means +the values from a `simple` moving average tend to "lag" behind the real data. + +[source,js] +-------------------------------------------------- +{ + "the_movavg":{ + "moving_avg":{ + "buckets_path": "the_sum", + "model" : "simple" + } +} +-------------------------------------------------- + +A `simple` model has no special settings to configure + +The window size can change the behavior of the moving average. For example, a small window (`"window": 10`) will closely +track the data and only smooth out small scale fluctuations: + +[[movavg_10window]] +.Moving average with window of size 10 +image::images/movavg_10window.png[] + +In contrast, a `simple` moving average with larger window (`"window": 100`) will smooth out all higher-frequency fluctuations, +leaving only low-frequency, long term trends. It also tends to "lag" behind the actual data by a substantial amount: + +[[movavg_100window]] +.Moving average with window of size 100 +image::images/movavg_100window.png[] + + +==== Linear + +The `linear` model assigns a linear weighting to points in the series, such that "older" datapoints (e.g. those at +the beginning of the window) contribute a linearly less amount to the total average. The linear weighting helps reduce +the "lag" behind the data's mean, since older points have less influence. + +[source,js] +-------------------------------------------------- +{ + "the_movavg":{ + "moving_avg":{ + "buckets_path": "the_sum", + "model" : "linear" + } +} +-------------------------------------------------- + +A `linear` model has no special settings to configure + +Like the `simple` model, window size can change the behavior of the moving average. For example, a small window (`"window": 10`) +will closely track the data and only smooth out small scale fluctuations: + +[[linear_10window]] +.Linear moving average with window of size 10 +image::images/linear_10window.png[] + +In contrast, a `linear` moving average with larger window (`"window": 100`) will smooth out all higher-frequency fluctuations, +leaving only low-frequency, long term trends. It also tends to "lag" behind the actual data by a substantial amount, +although typically less than the `simple` model: + +[[linear_100window]] +.Linear moving average with window of size 100 +image::images/linear_100window.png[] + +==== Single Exponential + +The `single_exp` model is similar to the `linear` model, except older data-points become exponentially less important, +rather than linearly less important. The speed at which the importance decays can be controlled with an `alpha` +setting. Small values make the weight decay slowly, which provides greater smoothing and takes into account a larger +portion of the window. Larger valuers make the weight decay quickly, which reduces the impact of older values on the +moving average. This tends to make the moving average track the data more closely but with less smoothing. + +The default value of `alpha` is `0.5`, and the setting accepts any float from 0-1 inclusive. + +[source,js] +-------------------------------------------------- +{ + "the_movavg":{ + "moving_avg":{ + "buckets_path": "the_sum", + "model" : "single_exp", + "settings" : { + "alpha" : 0.5 + } + } +} +-------------------------------------------------- + + + +[[single_0.2alpha]] +.Single Exponential moving average with window of size 10, alpha = 0.2 +image::images/single_0.2alpha.png[] + +[[single_0.7alpha]] +.Single Exponential moving average with window of size 10, alpha = 0.7 +image::images/single_0.7alpha.png[] + +==== Double Exponential + +The `double_exp` model, sometimes called "Holt's Linear Trend" model, incorporates a second exponential term which +tracks the data's trend. Single exponential does not perform well when the data has an underlying linear trend. The +double exponential model calculates two values internally: a "level" and a "trend". + +The level calculation is similar to `single_exp`, and is an exponentially weighted view of the data. The difference is +that the previously smoothed value is used instead of the raw value, which allows it to stay close to the original series. +The trend calculation looks at the difference between the current and last value (e.g. the slope, or trend, of the +smoothed data). The trend value is also exponentially weighted. + +Values are produced by multiplying the level and trend components. + +The default value of `alpha` and `beta` is `0.5`, and the settings accept any float from 0-1 inclusive. + +[source,js] +-------------------------------------------------- +{ + "the_movavg":{ + "moving_avg":{ + "buckets_path": "the_sum", + "model" : "double_exp", + "settings" : { + "alpha" : 0.5, + "beta" : 0.5 + } + } +} +-------------------------------------------------- + +In practice, the `alpha` value behaves very similarly in `double_exp` as `single_exp`: small values produce more smoothing +and more lag, while larger values produce closer tracking and less lag. The value of `beta` is often difficult +to see. Small values emphasize long-term trends (such as a constant linear trend in the whole series), while larger +values emphasize short-term trends. This will become more apparently when you are predicting values. + +[[double_0.2beta]] +.Double Exponential moving average with window of size 100, alpha = 0.5, beta = 0.2 +image::images/double_0.2beta.png[] + +[[double_0.7beta]] +.Double Exponential moving average with window of size 100, alpha = 0.5, beta = 0.7 +image::images/double_0.7beta.png[] + +=== Prediction + +All the moving average model support a "prediction" mode, which will attempt to extrapolate into the future given the +current smoothed, moving average. Depending on the model and parameter, these predictions may or may not be accurate. + +Predictions are enabled by adding a `predict` parameter to any moving average aggregation, specifying the nubmer of +predictions you would like appended to the end of the series. These predictions will be spaced out at the same interval +as your buckets: + +[source,js] +-------------------------------------------------- +{ + "the_movavg":{ + "moving_avg":{ + "buckets_path": "the_sum", + "model" : "simple", + "predict" 10 + } +} +-------------------------------------------------- + +The `simple`, `linear` and `single_exp` models all produce "flat" predictions: they essentially converge on the mean +of the last value in the series, producing a flat: + +[[simple_prediction]] +.Simple moving average with window of size 10, predict = 50 +image::images/simple_prediction.png[] + +In contrast, the `double_exp` model can extrapolate based on local or global constant trends. If we set a high `beta` +value, we can extrapolate based on local constant trends (in this case the predictions head down, because the data at the end +of the series was heading in a downward direction): + +[[double_prediction_local]] +.Double Exponential moving average with window of size 100, predict = 20, alpha = 0.5, beta = 0.8 +image::images/double_prediction_local.png[] + +In contrast, if we choose a small `beta`, the predictions are based on the global constant trend. In this series, the +global trend is slightly positive, so the prediction makes a sharp u-turn and begins a positive slope: + +[[double_prediction_global]] +.Double Exponential moving average with window of size 100, predict = 20, alpha = 0.5, beta = 0.1 +image::images/double_prediction_global.png[] \ No newline at end of file From 2a74f2ce0f8f963e0111be1c3c33b9d2201dc5c3 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 22 Apr 2015 18:40:34 -0400 Subject: [PATCH 070/236] [TESTS] randomize metric type, better naming, fix gap handling - Randomizes the metric type between min/max/avg. Should have identical behavior, but good to test - Fixes improper handling of gaps due to a bug in the production of the "expected" dataset. Due to this fix, randomization of gap policy was re-enabled - Bunch of renaming to be more descriptive and less verbose --- .../reducers/moving/avg/MovAvgTests.java | 464 ++++++++++-------- 1 file changed, 263 insertions(+), 201 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index 9c3a6f23419..d6fd7750346 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.reducers.moving.avg; import com.google.common.collect.EvictingQueue; - import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; @@ -30,22 +29,21 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; +import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder; import org.elasticsearch.search.aggregations.reducers.BucketHelpers; import org.elasticsearch.search.aggregations.reducers.SimpleValue; import org.elasticsearch.search.aggregations.reducers.movavg.models.*; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.hamcrest.Matchers; import org.junit.Test; import java.util.ArrayList; import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.smooth; +import static org.elasticsearch.search.aggregations.AggregationBuilders.*; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.movingAvg; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.*; import static org.hamcrest.core.IsNull.notNullValue; @ElasticsearchIntegrationTest.SuiteScopeTest @@ -62,16 +60,16 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { static BucketHelpers.GapPolicy gapPolicy; static long[] docCounts; - static long[] valueCounts; - static Double[] simpleMovAvgCounts; - static Double[] linearMovAvgCounts; - static Double[] singleExpMovAvgCounts; - static Double[] doubleExpMovAvgCounts; + static long[] docValues; + static Double[] simpleDocCounts; + static Double[] linearDocCounts; + static Double[] singleDocCounts; + static Double[] doubleDocCounts; - static Double[] simpleMovAvgValueCounts; - static Double[] linearMovAvgValueCounts; - static Double[] singleExpMovAvgValueCounts; - static Double[] doubleExpMovAvgValueCounts; + static Double[] simpleDocValues; + static Double[] linearDocValues; + static Double[] singleDocValues; + static Double[] doubleDocValues; @Override public void setupSuiteScopeCluster() throws Exception { @@ -83,13 +81,14 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { numValueBuckets = randomIntBetween(6, 80); numFilledValueBuckets = numValueBuckets; windowSize = randomIntBetween(3,10); - gapPolicy = BucketHelpers.GapPolicy.INSERT_ZEROS; // TODO randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; - + gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; + + docCounts = new long[numValueBuckets]; - valueCounts = new long[numValueBuckets]; + docValues = new long[numValueBuckets]; for (int i = 0; i < numValueBuckets; i++) { docCounts[i] = randomIntBetween(0, 20); - valueCounts[i] = randomIntBetween(1,20); //this will be used as a constant for all values within a bucket + docValues[i] = randomIntBetween(1,20); //this will be used as a constant for all values within a bucket } // Used for the gap tests @@ -104,14 +103,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { this.setupLinear(); this.setupSingle(); this.setupDouble(); - - - + for (int i = 0; i < numValueBuckets; i++) { for (int docs = 0; docs < docCounts[i]; docs++) { builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() .field(SINGLE_VALUED_FIELD_NAME, i * interval) - .field(SINGLE_VALUED_VALUE_FIELD_NAME, 1).endObject())); + .field(SINGLE_VALUED_VALUE_FIELD_NAME, docValues[i]).endObject())); } } @@ -120,24 +117,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } private void setupSimple() { - simpleMovAvgCounts = new Double[numValueBuckets]; + simpleDocCounts = new Double[numValueBuckets]; EvictingQueue window = EvictingQueue.create(windowSize); for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - window.offer(thisValue); - - double movAvg = 0; - for (double value : window) { - movAvg += value; + if (docCounts[i] == 0 && gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + continue; } - movAvg /= window.size(); - - simpleMovAvgCounts[i] = movAvg; - } - - window.clear(); - simpleMovAvgValueCounts = new Double[numValueBuckets]; - for (int i = 0; i < numValueBuckets; i++) { window.offer((double)docCounts[i]); double movAvg = 0; @@ -146,7 +131,34 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } movAvg /= window.size(); - simpleMovAvgValueCounts[i] = movAvg; + simpleDocCounts[i] = movAvg; + } + + window.clear(); + simpleDocValues = new Double[numValueBuckets]; + for (int i = 0; i < numValueBuckets; i++) { + if (docCounts[i] == 0) { + // If there was a gap in doc counts and we are ignoring, just skip this bucket + if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + continue; + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // otherwise insert a zero instead of the true value + window.offer(0.0); + } else { + window.offer((double) docValues[i]); + } + } else { + //if there are docs in this bucket, insert the regular value + window.offer((double) docValues[i]); + } + + double movAvg = 0; + for (double value : window) { + movAvg += value; + } + movAvg /= window.size(); + + simpleDocValues[i] = movAvg; } @@ -154,14 +166,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { private void setupLinear() { EvictingQueue window = EvictingQueue.create(windowSize); - linearMovAvgCounts = new Double[numValueBuckets]; + linearDocCounts = new Double[numValueBuckets]; window.clear(); for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - if (thisValue == -1) { - thisValue = 0; + if (docCounts[i] == 0 && gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + continue; } - window.offer(thisValue); + window.offer((double)docCounts[i]); double avg = 0; long totalWeight = 1; @@ -172,15 +183,27 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { totalWeight += current; current += 1; } - linearMovAvgCounts[i] = avg / totalWeight; + linearDocCounts[i] = avg / totalWeight; } window.clear(); - linearMovAvgValueCounts = new Double[numValueBuckets]; + linearDocValues = new Double[numValueBuckets]; for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - window.offer(thisValue); + if (docCounts[i] == 0) { + // If there was a gap in doc counts and we are ignoring, just skip this bucket + if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + continue; + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // otherwise insert a zero instead of the true value + window.offer(0.0); + } else { + window.offer((double) docValues[i]); + } + } else { + //if there are docs in this bucket, insert the regular value + window.offer((double) docValues[i]); + } double avg = 0; long totalWeight = 1; @@ -191,39 +214,17 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { totalWeight += current; current += 1; } - linearMovAvgValueCounts[i] = avg / totalWeight; + linearDocValues[i] = avg / totalWeight; } } private void setupSingle() { EvictingQueue window = EvictingQueue.create(windowSize); - singleExpMovAvgCounts = new Double[numValueBuckets]; + singleDocCounts = new Double[numValueBuckets]; for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - if (thisValue == -1) { - thisValue = 0; + if (docCounts[i] == 0 && gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + continue; } - window.offer(thisValue); - - double avg = 0; - double alpha = 0.5; - boolean first = true; - - for (double value : window) { - if (first) { - avg = value; - first = false; - } else { - avg = (value * alpha) + (avg * (1 - alpha)); - } - } - singleExpMovAvgCounts[i] = avg ; - } - - singleExpMovAvgValueCounts = new Double[numValueBuckets]; - window.clear(); - - for (int i = 0; i < numValueBuckets; i++) { window.offer((double)docCounts[i]); double avg = 0; @@ -238,56 +239,53 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { avg = (value * alpha) + (avg * (1 - alpha)); } } - singleExpMovAvgCounts[i] = avg ; + singleDocCounts[i] = avg ; + } + + singleDocValues = new Double[numValueBuckets]; + window.clear(); + + for (int i = 0; i < numValueBuckets; i++) { + if (docCounts[i] == 0) { + // If there was a gap in doc counts and we are ignoring, just skip this bucket + if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + continue; + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // otherwise insert a zero instead of the true value + window.offer(0.0); + } else { + window.offer((double) docValues[i]); + } + } else { + //if there are docs in this bucket, insert the regular value + window.offer((double) docValues[i]); + } + + double avg = 0; + double alpha = 0.5; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + singleDocValues[i] = avg ; } } private void setupDouble() { EvictingQueue window = EvictingQueue.create(windowSize); - doubleExpMovAvgCounts = new Double[numValueBuckets]; + doubleDocCounts = new Double[numValueBuckets]; for (int i = 0; i < numValueBuckets; i++) { - double thisValue = docCounts[i]; - if (thisValue == -1) { - thisValue = 0; + if (docCounts[i] == 0 && gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + continue; } - window.offer(thisValue); - - double s = 0; - double last_s = 0; - - // Trend value - double b = 0; - double last_b = 0; - - double alpha = 0.5; - double beta = 0.5; - int counter = 0; - - double last; - for (double value : window) { - last = value; - if (counter == 1) { - s = value; - b = value - last; - } else { - s = alpha * value + (1.0d - alpha) * (last_s + last_b); - b = beta * (s - last_s) + (1 - beta) * last_b; - } - - counter += 1; - last_s = s; - last_b = b; - } - - doubleExpMovAvgCounts[i] = s + (0 * b) ; - } - - doubleExpMovAvgValueCounts = new Double[numValueBuckets]; - window.clear(); - - for (int i = 0; i < numValueBuckets; i++) { window.offer((double)docCounts[i]); double s = 0; @@ -317,7 +315,56 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { last_b = b; } - doubleExpMovAvgValueCounts[i] = s + (0 * b) ; + doubleDocCounts[i] = s + (0 * b) ; + } + + doubleDocValues = new Double[numValueBuckets]; + window.clear(); + + for (int i = 0; i < numValueBuckets; i++) { + if (docCounts[i] == 0) { + // If there was a gap in doc counts and we are ignoring, just skip this bucket + if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + continue; + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // otherwise insert a zero instead of the true value + window.offer(0.0); + } else { + window.offer((double) docValues[i]); + } + } else { + //if there are docs in this bucket, insert the regular value + window.offer((double) docValues[i]); + } + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + + double alpha = 0.5; + double beta = 0.5; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + doubleDocValues[i] = s + (0 * b) ; } } @@ -332,8 +379,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(new SimpleModel.SimpleModelBuilder()) .gapPolicy(gapPolicy) @@ -342,7 +389,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .window(windowSize) .modelBuilder(new SimpleModel.SimpleModelBuilder()) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); assertSearchResponse(response); @@ -356,13 +403,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("movingAvg"); + SimpleValue docCountMovAvg = bucket.getAggregations().get("movavg_counts"); assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(simpleMovAvgCounts[i])); + assertThat(docCountMovAvg.value(), equalTo(simpleDocCounts[i])); SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(simpleMovAvgCounts[i])); + assertThat(valuesMovAvg.value(), equalTo(simpleDocValues[i])); } } @@ -377,8 +424,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(new LinearModel.LinearModelBuilder()) .gapPolicy(gapPolicy) @@ -387,7 +434,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .window(windowSize) .modelBuilder(new LinearModel.LinearModelBuilder()) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); assertSearchResponse(response); @@ -401,13 +448,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("movingAvg"); + SimpleValue docCountMovAvg = bucket.getAggregations().get("movavg_counts"); assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(linearMovAvgCounts[i])); + assertThat(docCountMovAvg.value(), equalTo(linearDocCounts[i])); SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(linearMovAvgCounts[i])); + assertThat(valuesMovAvg.value(), equalTo(linearDocValues[i])); } } @@ -422,8 +469,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) .gapPolicy(gapPolicy) @@ -432,7 +479,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .window(windowSize) .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); assertSearchResponse(response); @@ -446,13 +493,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("movingAvg"); + SimpleValue docCountMovAvg = bucket.getAggregations().get("movavg_counts"); assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(singleExpMovAvgCounts[i])); + assertThat(docCountMovAvg.value(), equalTo(singleDocCounts[i])); SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(singleExpMovAvgCounts[i])); + assertThat(valuesMovAvg.value(), equalTo(singleDocValues[i])); } } @@ -467,8 +514,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) .gapPolicy(gapPolicy) @@ -477,7 +524,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .window(windowSize) .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); assertSearchResponse(response); @@ -491,13 +538,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numValueBuckets; ++i) { Histogram.Bucket bucket = buckets.get(i); checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("movingAvg"); + SimpleValue docCountMovAvg = bucket.getAggregations().get("movavg_counts"); assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(doubleExpMovAvgCounts[i])); + assertThat(docCountMovAvg.value(), equalTo(doubleDocCounts[i])); SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(doubleExpMovAvgCounts[i])); + assertThat(valuesMovAvg.value(), equalTo(doubleDocValues[i])); } } @@ -509,12 +556,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(0) .modelBuilder(new SimpleModel.SimpleModelBuilder()) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); fail("MovingAvg should not accept a window that is zero"); @@ -531,13 +578,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { client() .prepareSearch("idx") .addAggregation( - range("histo").field(SINGLE_VALUED_FIELD_NAME).addRange(0,10) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + range("histo").field(SINGLE_VALUED_FIELD_NAME).addRange(0, 10) + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(0) .modelBuilder(new SimpleModel.SimpleModelBuilder()) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); fail("MovingAvg should not accept non-histogram as parent"); @@ -554,8 +601,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(-10) .modelBuilder(new SimpleModel.SimpleModelBuilder()) .gapPolicy(gapPolicy) @@ -578,12 +625,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field("test").interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(new SimpleModel.SimpleModelBuilder()) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); assertSearchResponse(response); @@ -603,13 +650,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) .predict(0) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); fail("MovingAvg should not accept a prediction size that is zero"); @@ -626,13 +673,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(SINGLE_VALUED_VALUE_FIELD_NAME)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) .predict(-10) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); fail("MovingAvg should not accept a prediction size that is negative"); @@ -655,12 +702,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .addAggregation( histogram("histo").field("gap_test").interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(GAP_FIELD)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); assertSearchResponse(response); @@ -671,12 +718,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(numValueBuckets)); - double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movingAvg"))).value(); + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_counts"))).value(); assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0)); double currentValue; for (int i = 1; i < numValueBuckets - 2; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); // Since there are only two values in this test, at the beginning and end, the moving average should // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing @@ -687,7 +734,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } // The last bucket has a real value, so this should always increase the moving avg - currentValue = ((SimpleValue)(buckets.get(numValueBuckets - 1).getAggregations().get("movingAvg"))).value(); + currentValue = ((SimpleValue)(buckets.get(numValueBuckets - 1).getAggregations().get("movavg_counts"))).value(); assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); } @@ -698,19 +745,19 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { public void testGiantGapWithPredict() { MovAvgModelBuilder model = randomModelBuilder(); - int numPredictions = randomIntBetween(0, 10); + int numPredictions = randomIntBetween(1, 10); SearchResponse response = client() .prepareSearch("idx") .addAggregation( histogram("histo").field("gap_test").interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(GAP_FIELD)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(model) .gapPolicy(gapPolicy) .predict(numPredictions) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ).execute().actionGet(); assertSearchResponse(response); @@ -721,12 +768,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(numValueBuckets + numPredictions)); - double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movingAvg"))).value(); + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_counts"))).value(); assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0)); double currentValue; for (int i = 1; i < numValueBuckets - 2; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); // Since there are only two values in this test, at the beginning and end, the moving average should // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing @@ -737,15 +784,15 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } // The last bucket has a real value, so this should always increase the moving avg - currentValue = ((SimpleValue)(buckets.get(numValueBuckets - 1).getAggregations().get("movingAvg"))).value(); + currentValue = ((SimpleValue)(buckets.get(numValueBuckets - 1).getAggregations().get("movavg_counts"))).value(); assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); // Now check predictions for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { // Unclear at this point which direction the predictions will go, just verify they are - // not null, and that we don't have the_sum anymore - assertThat((buckets.get(i).getAggregations().get("movingAvg")), notNullValue()); - assertThat((buckets.get(i).getAggregations().get("the_sum")), nullValue()); + // not null, and that we don't have the_metric anymore + assertThat((buckets.get(i).getAggregations().get("movavg_counts")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); } } @@ -763,12 +810,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { filter("filtered").filter(new RangeFilterBuilder("gap_test").from(1)).subAggregation( histogram("histo").field("gap_test").interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(GAP_FIELD)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ) ).execute().actionGet(); @@ -789,7 +836,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { double currentValue; double lastValue = 0.0; for (int i = 0; i < numValueBuckets - 1; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); lastValue = currentValue; @@ -808,13 +855,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { filter("filtered").filter(new RangeFilterBuilder("gap_test").from(1)).subAggregation( histogram("histo").field("gap_test").interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(GAP_FIELD)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) .predict(numPredictions) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ) ).execute().actionGet(); @@ -835,7 +882,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { double currentValue; double lastValue = 0.0; for (int i = 0; i < numValueBuckets - 1; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); lastValue = currentValue; @@ -844,9 +891,9 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { // Now check predictions for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { // Unclear at this point which direction the predictions will go, just verify they are - // not null, and that we don't have the_sum anymore - assertThat((buckets.get(i).getAggregations().get("movingAvg")), notNullValue()); - assertThat((buckets.get(i).getAggregations().get("the_sum")), nullValue()); + // not null, and that we don't have the_metric anymore + assertThat((buckets.get(i).getAggregations().get("movavg_counts")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); } } @@ -864,12 +911,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { filter("filtered").filter(new RangeFilterBuilder("gap_test").to((interval * (numValueBuckets - 1) - interval))).subAggregation( histogram("histo").field("gap_test").interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(GAP_FIELD)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ) ).execute().actionGet(); @@ -888,9 +935,9 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(buckets.size(), equalTo(numValueBuckets)); double currentValue; - double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movingAvg"))).value(); + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_counts"))).value(); for (int i = 1; i < numValueBuckets - 1; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); lastValue = currentValue; @@ -909,13 +956,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { filter("filtered").filter(new RangeFilterBuilder("gap_test").to((interval * (numValueBuckets - 1) - interval))).subAggregation( histogram("histo").field("gap_test").interval(interval).minDocCount(0) .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(sum("the_sum").field(GAP_FIELD)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) .predict(numPredictions) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ) ).execute().actionGet(); @@ -934,9 +981,9 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(buckets.size(), equalTo(numValueBuckets + numPredictions)); double currentValue; - double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movingAvg"))).value(); + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_counts"))).value(); for (int i = 1; i < numValueBuckets - 1; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movingAvg"))).value(); + currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); lastValue = currentValue; @@ -945,9 +992,9 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { // Now check predictions for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { // Unclear at this point which direction the predictions will go, just verify they are - // not null, and that we don't have the_sum anymore - assertThat((buckets.get(i).getAggregations().get("movingAvg")), notNullValue()); - assertThat((buckets.get(i).getAggregations().get("the_sum")), nullValue()); + // not null, and that we don't have the_metric anymore + assertThat((buckets.get(i).getAggregations().get("movavg_counts")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); } } @@ -962,13 +1009,13 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { // Filter so we are above all values filter("filtered").filter(new RangeFilterBuilder("gap_test").from((interval * (numValueBuckets - 1) + interval))).subAggregation( histogram("histo").field("gap_test").interval(interval).minDocCount(0) - .subAggregation(sum("the_sum").field(GAP_FIELD)) - .subAggregation(movingAvg("movingAvg") + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) .predict(numPredictions) - .setBucketsPaths("the_sum")) + .setBucketsPaths("the_metric")) ) ).execute().actionGet(); @@ -1014,5 +1061,20 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { return new SimpleModel.SimpleModelBuilder(); } } + + private ValuesSourceMetricsAggregationBuilder randomMetric(String name, String field) { + int rand = randomIntBetween(0,3); + + switch (rand) { + case 0: + return min(name).field(field); + case 2: + return max(name).field(field); + case 3: + return avg(name).field(field); + default: + return avg(name).field(field); + } + } } From e08e45cee8eeda7e4f8f865048ef25429988521d Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 22 Apr 2015 18:42:47 -0400 Subject: [PATCH 071/236] [DOCS] Add link to movavg page --- docs/reference/search/aggregations/reducer.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/search/aggregations/reducer.asciidoc b/docs/reference/search/aggregations/reducer.asciidoc index 5b3bff11c18..75ac8b9a49a 100644 --- a/docs/reference/search/aggregations/reducer.asciidoc +++ b/docs/reference/search/aggregations/reducer.asciidoc @@ -1,3 +1,4 @@ [[search-aggregations-reducer]] include::reducer/derivative.asciidoc[] +include::reducer/movavg-reducer.asciidoc[] From 7bd4654bf397267d6298b1ce909f360f3637be59 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 26 Feb 2015 10:57:57 -0500 Subject: [PATCH 072/236] Snapshot/Restore: make handling of expand_wildcards option consistent Closes #6097 --- .../create/CreateSnapshotRequest.java | 6 +- .../restore/RestoreSnapshotRequest.java | 6 +- .../percolate/MultiPercolateRequest.java | 116 +++--------------- .../action/search/MultiSearchRequest.java | 91 ++++---------- .../action/support/IndicesOptions.java | 49 ++++++-- .../xcontent/support/XContentMapValues.java | 19 +++ .../MultiPercolatorRequestTests.java | 54 ++++++-- .../action/percolate/mpercolate1.json | 4 + .../search/MultiSearchRequestTests.java | 43 +++++-- .../action/search/simple-msearch1.json | 6 + .../action/search/simple-msearch4.json | 6 + 11 files changed, 192 insertions(+), 208 deletions(-) create mode 100644 src/test/java/org/elasticsearch/action/search/simple-msearch4.json diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index f999740624d..284e02e3bc1 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -385,10 +385,6 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest) source, IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed))); return this; } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 5e8c3fe5b62..95582ca65bd 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -519,10 +519,6 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest) source, IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed))); return this; } diff --git a/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java index 1e4438ce7bd..d998bba748d 100644 --- a/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java +++ b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java @@ -37,12 +37,12 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; /** * A multi percolate request that encapsulates multiple {@link PercolateRequest} instances in a single api call. @@ -175,13 +175,7 @@ public class MultiPercolateRequest extends ActionRequest private void parsePercolateAction(XContentParser parser, PercolateRequest percolateRequest, boolean allowExplicitIndex) throws IOException { String globalIndex = indices != null && indices.length > 0 ? indices[0] : null; - Map header = parseToMap(parser); - - IndicesOptions defaultOptions = indicesOptions; - boolean ignoreUnavailable = defaultOptions.ignoreUnavailable(); - boolean allowNoIndices = defaultOptions.allowNoIndices(); - boolean expandWildcardsOpen = defaultOptions.expandWildcardsOpen(); - boolean expandWildcardsClosed = defaultOptions.expandWildcardsClosed(); + Map header = parser.map(); if (header.containsKey("id")) { GetRequest getRequest = new GetRequest(globalIndex); @@ -189,52 +183,27 @@ public class MultiPercolateRequest extends ActionRequest for (Map.Entry entry : header.entrySet()) { Object value = entry.getValue(); if ("id".equals(entry.getKey())) { - getRequest.id((String) value); + getRequest.id(nodeStringValue(value, null)); header.put("id", entry.getValue()); } else if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { if (!allowExplicitIndex) { throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed"); } - getRequest.index((String) value); + getRequest.index(nodeStringValue(value, null)); } else if ("type".equals(entry.getKey())) { - getRequest.type((String) value); + getRequest.type(nodeStringValue(value, null)); } else if ("preference".equals(entry.getKey())) { - getRequest.preference((String) value); + getRequest.preference(nodeStringValue(value, null)); } else if ("routing".equals(entry.getKey())) { - getRequest.routing((String) value); + getRequest.routing(nodeStringValue(value, null)); } else if ("percolate_index".equals(entry.getKey()) || "percolate_indices".equals(entry.getKey()) || "percolateIndex".equals(entry.getKey()) || "percolateIndices".equals(entry.getKey())) { - if (value instanceof String[]) { - percolateRequest.indices((String[]) value); - } else { - percolateRequest.indices(Strings.splitStringByCommaToArray((String) value)); - } + percolateRequest.indices(nodeStringArrayValue(value)); } else if ("percolate_type".equals(entry.getKey()) || "percolateType".equals(entry.getKey())) { - percolateRequest.documentType((String) value); + percolateRequest.documentType(nodeStringValue(value, null)); } else if ("percolate_preference".equals(entry.getKey()) || "percolatePreference".equals(entry.getKey())) { - percolateRequest.preference((String) value); + percolateRequest.preference(nodeStringValue(value, null)); } else if ("percolate_routing".equals(entry.getKey()) || "percolateRouting".equals(entry.getKey())) { - percolateRequest.routing((String) value); - } else if ("ignore_unavailable".equals(entry.getKey()) || "ignoreUnavailable".equals(entry.getKey())) { - ignoreUnavailable = Boolean.valueOf((String) value); - } else if ("allow_no_indices".equals(entry.getKey()) || "allowNoIndices".equals(entry.getKey())) { - allowNoIndices = Boolean.valueOf((String) value); - } else if ("expand_wildcards".equals(entry.getKey()) || "expandWildcards".equals(entry.getKey())) { - String[] wildcards; - if (value instanceof String[]) { - wildcards = (String[]) value; - } else { - wildcards = Strings.splitStringByCommaToArray((String) value); - } - - for (String wildcard : wildcards) { - if ("open".equals(wildcard)) { - expandWildcardsOpen = true; - } else if ("closed".equals(wildcard)) { - expandWildcardsClosed = true; - } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); - } - } + percolateRequest.routing(nodeStringValue(value, null)); } } @@ -258,68 +227,17 @@ public class MultiPercolateRequest extends ActionRequest if (!allowExplicitIndex) { throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed"); } - if (value instanceof String[]) { - percolateRequest.indices((String[]) value); - } else { - percolateRequest.indices(Strings.splitStringByCommaToArray((String) value)); - } + percolateRequest.indices(nodeStringArrayValue(value)); } else if ("type".equals(entry.getKey())) { - percolateRequest.documentType((String) value); + percolateRequest.documentType(nodeStringValue(value, null)); } else if ("preference".equals(entry.getKey())) { - percolateRequest.preference((String) value); + percolateRequest.preference(nodeStringValue(value, null)); } else if ("routing".equals(entry.getKey())) { - percolateRequest.routing((String) value); - } else if ("ignore_unavailable".equals(entry.getKey()) || "ignoreUnavailable".equals(entry.getKey())) { - ignoreUnavailable = Boolean.valueOf((String) value); - } else if ("allow_no_indices".equals(entry.getKey()) || "allowNoIndices".equals(entry.getKey())) { - allowNoIndices = Boolean.valueOf((String) value); - } else if ("expand_wildcards".equals(entry.getKey()) || "expandWildcards".equals(entry.getKey())) { - String[] wildcards; - if (value instanceof String[]) { - wildcards = (String[]) value; - } else { - wildcards = Strings.splitStringByCommaToArray((String) value); - } - - for (String wildcard : wildcards) { - if ("open".equals(wildcard)) { - expandWildcardsOpen = true; - } else if ("closed".equals(wildcard)) { - expandWildcardsClosed = true; - } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); - } - } + percolateRequest.routing(nodeStringValue(value, null)); } } } - percolateRequest.indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed, defaultOptions)); - } - - private Map parseToMap(XContentParser parser) throws IOException { - Map header = new HashMap<>(); - - String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - header.put(currentFieldName, parser.text()); - } else if (token == XContentParser.Token.START_ARRAY) { - header.put(currentFieldName, parseArray(parser)); - } - } - return header; - } - - private String[] parseArray(XContentParser parser) throws IOException { - final List list = new ArrayList<>(); - assert parser.currentToken() == XContentParser.Token.START_ARRAY; - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - list.add(parser.text()); - } - return list.toArray(new String[list.size()]); + percolateRequest.indicesOptions(IndicesOptions.fromMap(header, indicesOptions)); } private int findNextMarker(byte marker, int from, BytesReference data, int length) { diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index ba62cd9f65d..a27c4a590ee 100644 --- a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -40,8 +40,12 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; /** * A multi search API request. @@ -111,82 +115,35 @@ public class MultiSearchRequest extends ActionRequest implem searchRequest.searchType(searchType); IndicesOptions defaultOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); - boolean ignoreUnavailable = defaultOptions.ignoreUnavailable(); - boolean allowNoIndices = defaultOptions.allowNoIndices(); - boolean expandWildcardsOpen = defaultOptions.expandWildcardsOpen(); - boolean expandWildcardsClosed = defaultOptions.expandWildcardsClosed(); + // now parse the action if (nextMarker - from > 0) { try (XContentParser parser = xContent.createParser(data.slice(from, nextMarker - from))) { - // Move to START_OBJECT, if token is null, its an empty data - XContentParser.Token token = parser.nextToken(); - if (token != null) { - assert token == XContentParser.Token.START_OBJECT; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if ("index".equals(currentFieldName) || "indices".equals(currentFieldName)) { - if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi search is not allowed"); - } - searchRequest.indices(Strings.splitStringByCommaToArray(parser.text())); - } else if ("type".equals(currentFieldName) || "types".equals(currentFieldName)) { - searchRequest.types(Strings.splitStringByCommaToArray(parser.text())); - } else if ("search_type".equals(currentFieldName) || "searchType".equals(currentFieldName)) { - searchRequest.searchType(parser.text()); - } else if ("query_cache".equals(currentFieldName) || "queryCache".equals(currentFieldName)) { - searchRequest.queryCache(parser.booleanValue()); - } else if ("preference".equals(currentFieldName)) { - searchRequest.preference(parser.text()); - } else if ("routing".equals(currentFieldName)) { - searchRequest.routing(parser.text()); - } else if ("ignore_unavailable".equals(currentFieldName) || "ignoreUnavailable".equals(currentFieldName)) { - ignoreUnavailable = parser.booleanValue(); - } else if ("allow_no_indices".equals(currentFieldName) || "allowNoIndices".equals(currentFieldName)) { - allowNoIndices = parser.booleanValue(); - } else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) { - String[] wildcards = Strings.splitStringByCommaToArray(parser.text()); - for (String wildcard : wildcards) { - if ("open".equals(wildcard)) { - expandWildcardsOpen = true; - } else if ("closed".equals(wildcard)) { - expandWildcardsClosed = true; - } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); - } - } - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("index".equals(currentFieldName) || "indices".equals(currentFieldName)) { - if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi search is not allowed"); - } - searchRequest.indices(parseArray(parser)); - } else if ("type".equals(currentFieldName) || "types".equals(currentFieldName)) { - searchRequest.types(parseArray(parser)); - } else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) { - String[] wildcards = parseArray(parser); - for (String wildcard : wildcards) { - if ("open".equals(wildcard)) { - expandWildcardsOpen = true; - } else if ("closed".equals(wildcard)) { - expandWildcardsClosed = true; - } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); - } - } - } else { - throw new ElasticsearchParseException(currentFieldName + " doesn't support arrays"); - } + Map source = parser.map(); + for (Map.Entry entry : source.entrySet()) { + Object value = entry.getValue(); + if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { + if (!allowExplicitIndex) { + throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed"); } + searchRequest.indices(nodeStringArrayValue(value)); + } else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) { + searchRequest.types(nodeStringArrayValue(value)); + } else if ("search_type".equals(entry.getKey()) || "searchType".equals(entry.getKey())) { + searchRequest.searchType(nodeStringValue(value, null)); + } else if ("query_cache".equals(entry.getKey()) || "queryCache".equals(entry.getKey())) { + searchRequest.queryCache(nodeBooleanValue(value)); + } else if ("preference".equals(entry.getKey())) { + searchRequest.preference(nodeStringValue(value, null)); + } else if ("routing".equals(entry.getKey())) { + searchRequest.routing(nodeStringValue(value, null)); } } + defaultOptions = IndicesOptions.fromMap(source, defaultOptions); } } - searchRequest.indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed, defaultOptions)); + searchRequest.indicesOptions(defaultOptions); // move pointers from = nextMarker + 1; diff --git a/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 6a66acd54ea..84e458ded21 100644 --- a/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -25,6 +25,10 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestRequest; import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue; /** * Controls how to deal with unavailable concrete indices (closed or missing), how wildcard expressions are expanded @@ -136,20 +140,33 @@ public class IndicesOptions { } public static IndicesOptions fromRequest(RestRequest request, IndicesOptions defaultSettings) { - String sWildcards = request.param("expand_wildcards"); - String sIgnoreUnavailable = request.param("ignore_unavailable"); - String sAllowNoIndices = request.param("allow_no_indices"); - if (sWildcards == null && sIgnoreUnavailable == null && sAllowNoIndices == null) { + return fromParameters( + request.param("expand_wildcards"), + request.param("ignore_unavailable"), + request.param("allow_no_indices"), + defaultSettings); + } + + public static IndicesOptions fromMap(Map map, IndicesOptions defaultSettings) { + return fromParameters( + map.containsKey("expand_wildcards") ? map.get("expand_wildcards") : map.get("expandWildcards"), + map.containsKey("ignore_unavailable") ? map.get("ignore_unavailable") : map.get("ignoreUnavailable"), + map.containsKey("allow_no_indices") ? map.get("allow_no_indices") : map.get("allowNoIndices"), + defaultSettings); + } + + public static IndicesOptions fromParameters(Object wildcardsString, Object ignoreUnavailableString, Object allowNoIndicesString, IndicesOptions defaultSettings) { + if (wildcardsString == null && ignoreUnavailableString == null && allowNoIndicesString == null) { return defaultSettings; } boolean expandWildcardsOpen = false; boolean expandWildcardsClosed = false; - if (sWildcards == null) { + if (wildcardsString == null) { expandWildcardsOpen = defaultSettings.expandWildcardsOpen(); expandWildcardsClosed = defaultSettings.expandWildcardsClosed(); } else { - String[] wildcards = Strings.splitStringByCommaToArray(sWildcards); + String[] wildcards = nodeStringArrayValue(wildcardsString); for (String wildcard : wildcards) { if ("open".equals(wildcard)) { expandWildcardsOpen = true; @@ -169,8 +186,8 @@ public class IndicesOptions { //note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use) return fromOptions( - toBool(sIgnoreUnavailable, defaultSettings.ignoreUnavailable()), - toBool(sAllowNoIndices, defaultSettings.allowNoIndices()), + nodeBooleanValue(ignoreUnavailableString, defaultSettings.ignoreUnavailable()), + nodeBooleanValue(allowNoIndicesString, defaultSettings.allowNoIndices()), expandWildcardsOpen, expandWildcardsClosed, defaultSettings.allowAliasesToMultipleIndices(), @@ -245,10 +262,16 @@ public class IndicesOptions { return id; } - private static boolean toBool(String sValue, boolean defaultValue) { - if (sValue == null) { - return defaultValue; - } - return !(sValue.equals("false") || sValue.equals("0") || sValue.equals("off")); + @Override + public String toString() { + return "IndicesOptions[" + + "id=" + id + + ", ignore_unavailable=" + ignoreUnavailable() + + ", allow_no_indices=" + allowNoIndices() + + ", expand_wildcards_open=" + expandWildcardsOpen() + + ", expand_wildcards_closed=" + expandWildcardsClosed() + + ", allow_alisases_to_multiple_indices=" + allowAliasesToMultipleIndices() + + ", forbid_closed_indices=" + forbidClosedIndices() + + ']'; } } diff --git a/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java index dbd11e1bc43..3a69f911fa0 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java +++ b/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java @@ -387,4 +387,23 @@ public class XContentMapValues { throw new ElasticsearchParseException(desc + " should be a hash but was of type: " + node.getClass()); } } + + /** + * Returns an array of string value from a node value. + * + * If the node represents an array the corresponding array of strings is returned. + * Otherwise the node is treated as a comma-separated string. + */ + public static String[] nodeStringArrayValue(Object node) { + if (isArray(node)) { + List list = (List) node; + String[] arr = new String[list.size()]; + for (int i = 0; i < arr.length; i++) { + arr[i] = nodeStringValue(list.get(i), null); + } + return arr; + } else { + return Strings.splitStringByCommaToArray(node.toString()); + } + } } diff --git a/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java b/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java index 26af18c90d0..50f915bfe67 100644 --- a/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java +++ b/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java @@ -38,7 +38,7 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate1.json"); MultiPercolateRequest request = new MultiPercolateRequest().add(data, 0, data.length); - assertThat(request.requests().size(), equalTo(6)); + assertThat(request.requests().size(), equalTo(8)); PercolateRequest percolateRequest = request.requests().get(0); assertThat(percolateRequest.indices()[0], equalTo("my-index1")); assertThat(percolateRequest.documentType(), equalTo("my-type1")); @@ -61,8 +61,8 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(false)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value2").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value2").map())); percolateRequest = request.requests().get(2); assertThat(percolateRequest.indices()[0], equalTo("my-index4")); @@ -74,8 +74,8 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(true)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value3").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value3").map())); percolateRequest = request.requests().get(3); assertThat(percolateRequest.indices()[0], equalTo("my-index6")); @@ -114,8 +114,40 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(false)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value4").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value4").map())); + + percolateRequest = request.requests().get(6); + assertThat(percolateRequest.indices()[0], equalTo("percolate-index1")); + assertThat(percolateRequest.documentType(), equalTo("other-type")); + assertThat(percolateRequest.routing(), equalTo("percolate-routing-1")); + assertThat(percolateRequest.preference(), equalTo("_local")); + assertThat(percolateRequest.getRequest(), notNullValue()); + assertThat(percolateRequest.getRequest().indices()[0], equalTo("my-index9")); + assertThat(percolateRequest.getRequest().type(), equalTo("my-type1")); + assertThat(percolateRequest.getRequest().routing(), nullValue()); + assertThat(percolateRequest.getRequest().preference(), nullValue()); + assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed())); + assertThat(percolateRequest.onlyCount(), equalTo(false)); + assertThat(percolateRequest.source(), notNullValue()); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), nullValue()); + + percolateRequest = request.requests().get(7); + assertThat(percolateRequest.indices()[0], equalTo("my-index10")); + assertThat(percolateRequest.documentType(), equalTo("my-type1")); + assertThat(percolateRequest.routing(), nullValue()); + assertThat(percolateRequest.preference(), nullValue()); + assertThat(percolateRequest.getRequest(), notNullValue()); + assertThat(percolateRequest.getRequest().indices()[0], equalTo("my-index10")); + assertThat(percolateRequest.getRequest().type(), equalTo("my-type1")); + assertThat(percolateRequest.getRequest().routing(), nullValue()); + assertThat(percolateRequest.getRequest().preference(), nullValue()); + assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, false, true, false, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(percolateRequest.onlyCount(), equalTo(false)); + assertThat(percolateRequest.source(), notNullValue()); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), nullValue()); } @Test @@ -147,8 +179,8 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(false)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value2").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value2").map())); percolateRequest = request.requests().get(2); assertThat(percolateRequest.indices()[0], equalTo("my-index1")); @@ -157,8 +189,8 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(false)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value3").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value3").map())); } } diff --git a/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json b/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json index ceb4acae44e..44079390bfc 100644 --- a/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json +++ b/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json @@ -10,3 +10,7 @@ {} {"percolate" : {"index" : "my-index8", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "primary"}} {"doc" : {"field1" : "value4"}} +{"percolate" : {"id" : "3", "index" : "my-index9", "type" : "my-type1", "percolate_index": "percolate-index1", "percolate_type": "other-type", "percolate_preference": "_local", "percolate_routing": "percolate-routing-1"}} +{} +{"percolate" : {"id" : "4", "index" : "my-index10", "type" : "my-type1", "allow_no_indices": false, "expand_wildcards" : ["open"]}} +{} diff --git a/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index d4d82ede283..ee520760b53 100644 --- a/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -35,20 +35,26 @@ public class MultiSearchRequestTests extends ElasticsearchTestCase { public void simpleAdd() throws Exception { byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json"); MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); - assertThat(request.requests().size(), equalTo(5)); + assertThat(request.requests().size(), equalTo(8)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); assertThat(request.requests().get(0).types().length, equalTo(0)); assertThat(request.requests().get(1).indices()[0], equalTo("test")); assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); assertThat(request.requests().get(1).types()[0], equalTo("type1")); - assertThat(request.requests().get(2).indices(), nullValue()); - assertThat(request.requests().get(2).types().length, equalTo(0)); - assertThat(request.requests().get(3).indices(), nullValue()); - assertThat(request.requests().get(3).types().length, equalTo(0)); - assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); - assertThat(request.requests().get(4).indices(), nullValue()); - assertThat(request.requests().get(4).types().length, equalTo(0)); + assertThat(request.requests().get(2).indices()[0], equalTo("test")); + assertThat(request.requests().get(2).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(3).indices()[0], equalTo("test")); + assertThat(request.requests().get(3).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(4).indices()[0], equalTo("test")); + assertThat(request.requests().get(4).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, false, false, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(5).indices(), nullValue()); + assertThat(request.requests().get(5).types().length, equalTo(0)); + assertThat(request.requests().get(6).indices(), nullValue()); + assertThat(request.requests().get(6).types().length, equalTo(0)); + assertThat(request.requests().get(6).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); + assertThat(request.requests().get(7).indices(), nullValue()); + assertThat(request.requests().get(7).types().length, equalTo(0)); } @Test @@ -87,4 +93,25 @@ public class MultiSearchRequestTests extends ElasticsearchTestCase { assertThat(request.requests().get(3).types().length, equalTo(0)); assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); } + + @Test + public void simpleAdd4() throws Exception { + byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch4.json"); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + assertThat(request.requests().size(), equalTo(3)); + assertThat(request.requests().get(0).indices()[0], equalTo("test0")); + assertThat(request.requests().get(0).indices()[1], equalTo("test1")); + assertThat(request.requests().get(0).queryCache(), equalTo(true)); + assertThat(request.requests().get(0).preference(), nullValue()); + assertThat(request.requests().get(1).indices()[0], equalTo("test2")); + assertThat(request.requests().get(1).indices()[1], equalTo("test3")); + assertThat(request.requests().get(1).types()[0], equalTo("type1")); + assertThat(request.requests().get(1).queryCache(), nullValue()); + assertThat(request.requests().get(1).preference(), equalTo("_local")); + assertThat(request.requests().get(2).indices()[0], equalTo("test4")); + assertThat(request.requests().get(2).indices()[1], equalTo("test1")); + assertThat(request.requests().get(2).types()[0], equalTo("type2")); + assertThat(request.requests().get(2).types()[1], equalTo("type1")); + assertThat(request.requests().get(2).routing(), equalTo("123")); + } } diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch1.json b/src/test/java/org/elasticsearch/action/search/simple-msearch1.json index 6d31863fa3c..3d98f375153 100644 --- a/src/test/java/org/elasticsearch/action/search/simple-msearch1.json +++ b/src/test/java/org/elasticsearch/action/search/simple-msearch1.json @@ -2,6 +2,12 @@ {"query" : {"match_all" {}}} {"index" : "test", "type" : "type1", "expand_wildcards" : ["open", "closed"]} {"query" : {"match_all" {}}} +{"index":"test", "ignore_unavailable" : false, "expand_wildcards" : ["open"]}} +{"query" : {"match_all" {}}} +{"index":"test", "ignore_unavailable" : true, "allow_no_indices": true, "expand_wildcards" : ["open", "closed"]}} +{"query" : {"match_all" {}}} +{"index":"test", "ignore_unavailable" : true, "allow_no_indices": false, "expand_wildcards" : ["closed"]}} +{"query" : {"match_all" {}}} {} {"query" : {"match_all" {}}} {"search_type" : "dfs_query_then_fetch"} diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch4.json b/src/test/java/org/elasticsearch/action/search/simple-msearch4.json new file mode 100644 index 00000000000..ab6b8206b01 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/search/simple-msearch4.json @@ -0,0 +1,6 @@ +{"index":["test0", "test1"], "query_cache": true} +{"query" : {"match_all" {}}} +{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"query" : {"match_all" {}}} +{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"} +{"query" : {"match_all" {}}} From 5705537ecf839d4de2ebcaff8dcbf14676995ac4 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 10 Apr 2015 01:14:48 +0200 Subject: [PATCH 073/236] Added field stats api The field stats api returns field level statistics such as lowest, highest values and number of documents that have at least one value for a field. An api like this can be useful to explore a data set you don't know much about. For example you can figure at with the lowest and highest response times are, so that you can create a histogram or range aggregation with sane settings. This api doesn't run a search to figure this statistics out, but rather use the Lucene index look these statics up (using Terms class in Lucene). So finding out these stats for fields is cheap and quick. The min/max values are based on the type of the field. So for a numeric field min/max are numbers and date field the min/max date and other fields the min/max are term based. Closes #10523 --- docs/reference/search.asciidoc | 2 + docs/reference/search/field-stats.asciidoc | 170 +++++++ rest-api-spec/api/field_stats.json | 46 ++ rest-api-spec/test/field_stats/10_basics.yaml | 52 ++ .../elasticsearch/action/ActionModule.java | 4 + .../action/fieldstats/FieldStats.java | 455 ++++++++++++++++++ .../action/fieldstats/FieldStatsAction.java | 45 ++ .../action/fieldstats/FieldStatsRequest.java | 77 +++ .../fieldstats/FieldStatsRequestBuilder.java | 48 ++ .../action/fieldstats/FieldStatsResponse.java | 87 ++++ .../fieldstats/FieldStatsShardRequest.java | 59 +++ .../fieldstats/FieldStatsShardResponse.java | 71 +++ .../TransportFieldStatsTransportAction.java | 184 +++++++ .../java/org/elasticsearch/client/Client.java | 9 + .../client/support/AbstractClient.java | 19 + .../index/mapper/FieldMapper.java | 9 +- .../mapper/core/AbstractFieldMapper.java | 22 +- .../index/mapper/core/ByteFieldMapper.java | 11 + .../index/mapper/core/DateFieldMapper.java | 11 + .../index/mapper/core/DoubleFieldMapper.java | 21 +- .../index/mapper/core/FloatFieldMapper.java | 25 +- .../index/mapper/core/IntegerFieldMapper.java | 11 + .../index/mapper/core/LongFieldMapper.java | 21 +- .../index/mapper/core/ShortFieldMapper.java | 11 + .../rest/action/RestActionModule.java | 3 + .../fieldstats/RestFieldStatsAction.java | 84 ++++ .../FieldStatsIntegrationTests.java | 214 ++++++++ .../fieldstats/FieldStatsTests.java | 194 ++++++++ 28 files changed, 1937 insertions(+), 28 deletions(-) create mode 100644 docs/reference/search/field-stats.asciidoc create mode 100644 rest-api-spec/api/field_stats.json create mode 100644 rest-api-spec/test/field_stats/10_basics.yaml create mode 100644 src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java create mode 100644 src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java create mode 100644 src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java create mode 100644 src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java create mode 100644 src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java create mode 100644 src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java create mode 100644 src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java create mode 100644 src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java create mode 100644 src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java create mode 100644 src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java create mode 100644 src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 51b199c19f7..79d3c7a93fd 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -105,3 +105,5 @@ include::search/percolate.asciidoc[] include::search/more-like-this.asciidoc[] +include::search/field-stats.asciidoc[] + diff --git a/docs/reference/search/field-stats.asciidoc b/docs/reference/search/field-stats.asciidoc new file mode 100644 index 00000000000..b6cd5db4967 --- /dev/null +++ b/docs/reference/search/field-stats.asciidoc @@ -0,0 +1,170 @@ +[[search-field-stats]] +== Field stats API + +experimental[] + +The field stats api allows one to find statistical properties of a field without executing a search, but +looking up measurements that are natively available in the Lucene index. This can be useful to explore a dataset which +you don't know much about. For example, this allows creating a histogram aggregation with meaningful intervals. + +The field stats api by defaults executes on all indices, but can execute on specific indices too. + +All indices: + +[source,js] +-------------------------------------------------- +curl -XGET "http://localhost:9200/_field_stats?fields=rating" +-------------------------------------------------- + +Specific indices: + +[source,js] +-------------------------------------------------- +curl -XGET "http://localhost:9200/index1,index2/_field_stats?fields=rating" +-------------------------------------------------- + +Supported request options: +* `fields` - A list of fields to compute stats for. +* `level` - Defines if field stats should be returned on a per index level or on a cluster wide level. Valid values are + `indices` and `cluster`. Defaults to `cluster`. + +==== Field statistics + +The field stats api is supported on string based, number based and date based fields and can return the following statistics per field: + +* `max_doc` - The total number of documents. +* `doc_count` - The number of documents that have at least one term for this field, or -1 if this measurement isn't available on one or more shards. +* `density` - The percentage of documents that have at least one value for this field. This is a derived statistic and is based on the `max_doc` and `doc_count`. +* `sum_doc_freq` - The sum of each term's document frequency in this field, or -1 if this measurement isn't available on one or more shards. + Document frequency is the number of documents containing a particular term. +* `sum_total_term_freq` - The sum of the term frequencies of all terms in this field across all documents, or -1 if this measurement isn't available on one or more shards. + Term frequency is the total number of occurrences of a term in a particular document and field. +* `min_value` - The lowest value in the field represented in a displayable form. +* `max_value` - The highest value in the field represented in a displayable form. + +Note that for all the mentioned statistics, documents marked as deleted aren't taken into account. The documents marked +as deleted are are only taken into account when the segments these documents reside on are merged away. + +==== Example + +[source,js] +-------------------------------------------------- +curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creation_date,display_name" +-------------------------------------------------- + +[source,js] +-------------------------------------------------- +{ + "_shards": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "indices": { + "_all": { <1> + "fields": { + "creation_date": { + "max_doc": 1326564, + "doc_count": 564633, + "density": 42, + "sum_doc_freq": 2258532, + "sum_total_term_freq": -1, + "min_value": "2008-08-01T16:37:51.513Z", + "max_value": "2013-06-02T03:23:11.593Z" + }, + "display_name": { + "max_doc": 1326564, + "doc_count": 126741, + "density": 9, + "sum_doc_freq": 166535, + "sum_total_term_freq": 166616, + "min_value": "0", + "max_value": "정혜선" + }, + "answer_count": { + "max_doc": 1326564, + "doc_count": 139885, + "density": 10, + "sum_doc_freq": 559540, + "sum_total_term_freq": -1, + "min_value": 0, + "max_value": 160 + }, + "rating": { + "max_doc": 1326564, + "doc_count": 437892, + "density": 33, + "sum_doc_freq": 1751568, + "sum_total_term_freq": -1, + "min_value": -14, + "max_value": 1277 + } + } + } + } +} +-------------------------------------------------- + +<1> The `_all` key indicates that it contains the field stats of all indices in the cluster. + +With level set to `indices`: + +[source,js] +-------------------------------------------------- +curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creation_date,display_name&level=indices" +-------------------------------------------------- + +[source,js] +-------------------------------------------------- +{ + "_shards": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "indices": { + "stack": { <1> + "fields": { + "creation_date": { + "max_doc": 1326564, + "doc_count": 564633, + "density": 42, + "sum_doc_freq": 2258532, + "sum_total_term_freq": -1, + "min_value": "2008-08-01T16:37:51.513Z", + "max_value": "2013-06-02T03:23:11.593Z" + }, + "display_name": { + "max_doc": 1326564, + "doc_count": 126741, + "density": 9, + "sum_doc_freq": 166535, + "sum_total_term_freq": 166616, + "min_value": "0", + "max_value": "정혜선" + }, + "answer_count": { + "max_doc": 1326564, + "doc_count": 139885, + "density": 10, + "sum_doc_freq": 559540, + "sum_total_term_freq": -1, + "min_value": 0, + "max_value": 160 + }, + "rating": { + "max_doc": 1326564, + "doc_count": 437892, + "density": 33, + "sum_doc_freq": 1751568, + "sum_total_term_freq": -1, + "min_value": -14, + "max_value": 1277 + } + } + } + } +} +-------------------------------------------------- + +<1> The `stack` key means it contains all field stats for the `stack` index. \ No newline at end of file diff --git a/rest-api-spec/api/field_stats.json b/rest-api-spec/api/field_stats.json new file mode 100644 index 00000000000..e3c5e6d45df --- /dev/null +++ b/rest-api-spec/api/field_stats.json @@ -0,0 +1,46 @@ +{ + "field_stats": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-stats.html", + "methods": ["GET", "POST"], + "url": { + "path": "/_field_stats", + "paths": [ + "/_field_stats", + "/{index}/_field_stats" + ], + "parts": { + "index": { + "type" : "list", + "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" + } + }, + "params": { + "fields": { + "type" : "list", + "description" : "A comma-separated list of fields for to get field statistics for (min value, max value, and more)" + }, + "level": { + "type" : "enum", + "options" : ["indices", "cluster"], + "default" : "cluster", + "description" : "Defines if field stats should be returned on a per index level or on a cluster wide level" + }, + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "open", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + } + } + }, + "body": null + } +} diff --git a/rest-api-spec/test/field_stats/10_basics.yaml b/rest-api-spec/test/field_stats/10_basics.yaml new file mode 100644 index 00000000000..61a575603da --- /dev/null +++ b/rest-api-spec/test/field_stats/10_basics.yaml @@ -0,0 +1,52 @@ +--- +"Basic field stats": + - do: + index: + index: test_1 + type: test + id: id_1 + body: { foo: "bar", number: 123 } + + - do: + indices.refresh: {} + + - do: + field_stats: + index: test_1 + fields: [foo, number] + + - match: { indices._all.fields.foo.max_doc: 1 } + - match: { indices._all.fields.foo.doc_count: 1 } + - match: { indices._all.fields.foo.min_value: "bar" } + - match: { indices._all.fields.foo.max_value: "bar" } + - match: { indices._all.fields.number.max_doc: 1 } + - match: { indices._all.fields.number.doc_count: 1 } + - match: { indices._all.fields.number.min_value: 123 } + - match: { indices._all.fields.number.max_value: 123 } + +--- +"Basic field stats with level set to indices": + - do: + index: + index: test_1 + type: test + id: id_1 + body: { foo: "bar", number: 123 } + + - do: + indices.refresh: {} + + - do: + field_stats: + index: test_1 + fields: [foo, number] + level: indices + + - match: { indices.test_1.fields.foo.max_doc: 1 } + - match: { indices.test_1.fields.foo.doc_count: 1 } + - match: { indices.test_1.fields.foo.min_value: "bar" } + - match: { indices.test_1.fields.foo.max_value: "bar" } + - match: { indices.test_1.fields.number.max_doc: 1 } + - match: { indices.test_1.fields.number.doc_count: 1 } + - match: { indices.test_1.fields.number.min_value: 123 } + - match: { indices.test_1.fields.number.max_value: 123 } diff --git a/src/main/java/org/elasticsearch/action/ActionModule.java b/src/main/java/org/elasticsearch/action/ActionModule.java index 21df6223a28..49d841567b4 100644 --- a/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/src/main/java/org/elasticsearch/action/ActionModule.java @@ -134,6 +134,8 @@ import org.elasticsearch.action.exists.ExistsAction; import org.elasticsearch.action.exists.TransportExistsAction; import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.TransportExplainAction; +import org.elasticsearch.action.fieldstats.FieldStatsAction; +import org.elasticsearch.action.fieldstats.TransportFieldStatsTransportAction; import org.elasticsearch.action.get.*; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.TransportIndexAction; @@ -312,6 +314,8 @@ public class ActionModule extends AbstractModule { registerAction(GetIndexedScriptAction.INSTANCE, TransportGetIndexedScriptAction.class); registerAction(DeleteIndexedScriptAction.INSTANCE, TransportDeleteIndexedScriptAction.class); + registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsTransportAction.class); + // register Name -> GenericAction Map that can be injected to instances. MapBinder actionsBinder = MapBinder.newMapBinder(binder(), String.class, GenericAction.class); diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java new file mode 100644 index 00000000000..ea2d4cca90d --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java @@ -0,0 +1,455 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentBuilderString; + +import java.io.IOException; + +public abstract class FieldStats implements Streamable, ToXContent { + + private byte type; + private long maxDoc; + private long docCount; + private long sumDocFreq; + private long sumTotalTermFreq; + protected T minValue; + protected T maxValue; + + protected FieldStats() { + } + + protected FieldStats(int type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq) { + this.type = (byte) type; + this.maxDoc = maxDoc; + this.docCount = docCount; + this.sumDocFreq = sumDocFreq; + this.sumTotalTermFreq = sumTotalTermFreq; + } + + byte getType() { + return type; + } + + /** + * @return the total number of documents. + * + * Note that, documents marked as deleted that haven't yet been merged way aren't taken into account. + */ + public long getMaxDoc() { + return maxDoc; + } + + /** + * @return the number of documents that have at least one term for this field, or -1 if this measurement isn't available. + * + * Note that, documents marked as deleted that haven't yet been merged way aren't taken into account. + */ + public long getDocCount() { + return docCount; + } + + /** + * @return The percentage of documents that have at least one value for this field. + * + * This is a derived statistic and is based on: 'doc_count / max_doc' + */ + public int getDensity() { + if (docCount < 0 || maxDoc <= 0) { + return -1; + } + return (int) (docCount * 100 / maxDoc); + } + + /** + * @return the sum of each term's document frequency in this field, or -1 if this measurement isn't available. + * Document frequency is the number of documents containing a particular term. + * + * Note that, documents marked as deleted that haven't yet been merged way aren't taken into account. + */ + public long getSumDocFreq() { + return sumDocFreq; + } + + /** + * @return the sum of the term frequencies of all terms in this field across all documents, or -1 if this measurement + * isn't available. Term frequency is the total number of occurrences of a term in a particular document and field. + * + * Note that, documents marked as deleted that haven't yet been merged way aren't taken into account. + */ + public long getSumTotalTermFreq() { + return sumTotalTermFreq; + } + + /** + * @return the lowest value in the field represented as a string. + * + * Note that, documents marked as deleted that haven't yet been merged way aren't taken into account. + */ + public abstract String getMinValue(); + + /** + * @return the highest value in the field represented as a string. + * + * Note that, documents marked as deleted that haven't yet been merged way aren't taken into account. + */ + public abstract String getMaxValue(); + + /** + * Merges the provided stats into this stats instance. + */ + public void append(FieldStats stats) { + this.maxDoc += stats.maxDoc; + if (stats.docCount == -1) { + this.docCount = -1; + } else if (this.docCount != -1) { + this.docCount += stats.docCount; + } + if (stats.sumDocFreq == -1) { + this.sumDocFreq = -1; + } else if (this.sumDocFreq != -1) { + this.sumDocFreq += stats.sumDocFreq; + } + if (stats.sumTotalTermFreq == -1) { + this.sumTotalTermFreq = -1; + } else if (this.sumTotalTermFreq != -1) { + this.sumTotalTermFreq += stats.sumTotalTermFreq; + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Fields.MAX_DOC, maxDoc); + builder.field(Fields.DOC_COUNT, docCount); + builder.field(Fields.DENSITY, getDensity()); + builder.field(Fields.SUM_DOC_FREQ, sumDocFreq); + builder.field(Fields.SUM_TOTAL_TERM_FREQ, sumTotalTermFreq); + toInnerXContent(builder); + builder.endObject(); + return builder; + } + + protected void toInnerXContent(XContentBuilder builder) throws IOException { + builder.field(Fields.MIN_VALUE, minValue); + builder.field(Fields.MAX_VALUE, maxValue); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + maxDoc = in.readVLong(); + docCount = in.readLong(); + sumDocFreq = in.readLong(); + sumTotalTermFreq = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(type); + out.writeVLong(maxDoc); + out.writeLong(docCount); + out.writeLong(sumDocFreq); + out.writeLong(sumTotalTermFreq); + } + + public static class Long extends FieldStats { + + public Long() { + } + + public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) { + this(0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue); + } + + protected Long(int type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) { + super(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq); + this.minValue = minValue; + this.maxValue = maxValue; + } + + @Override + public String getMinValue() { + return String.valueOf(minValue.longValue()); + } + + @Override + public String getMaxValue() { + return String.valueOf(maxValue.longValue()); + } + + @Override + public void append(FieldStats stats) { + super.append(stats); + Long other = (Long) stats; + this.minValue = Math.min(other.minValue, minValue); + this.maxValue = Math.max(other.maxValue, maxValue); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + minValue = in.readLong(); + maxValue = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeLong(minValue); + out.writeLong(maxValue); + } + + } + + public static final class Float extends FieldStats { + + public Float() { + } + + public Float(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, float minValue, float maxValue) { + super(1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq); + this.minValue = minValue; + this.maxValue = maxValue; + } + + @Override + public String getMinValue() { + return String.valueOf(minValue.floatValue()); + } + + @Override + public String getMaxValue() { + return String.valueOf(maxValue.floatValue()); + } + + @Override + public void append(FieldStats stats) { + super.append(stats); + Float other = (Float) stats; + this.minValue = Math.min(other.minValue, minValue); + this.maxValue = Math.max(other.maxValue, maxValue); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + minValue = in.readFloat(); + maxValue = in.readFloat(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeFloat(minValue); + out.writeFloat(maxValue); + } + + } + + public static final class Double extends FieldStats { + + public Double() { + } + + public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, double minValue, double maxValue) { + super(2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq); + this.minValue = minValue; + this.maxValue = maxValue; + } + + @Override + public String getMinValue() { + return String.valueOf(minValue.doubleValue()); + } + + @Override + public String getMaxValue() { + return String.valueOf(maxValue.doubleValue()); + } + + @Override + public void append(FieldStats stats) { + super.append(stats); + Double other = (Double) stats; + this.minValue = Math.min(other.minValue, minValue); + this.maxValue = Math.max(other.maxValue, maxValue); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + minValue = in.readDouble(); + maxValue = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeDouble(minValue); + out.writeDouble(maxValue); + } + + } + + public static final class Text extends FieldStats { + + public Text() { + } + + public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, BytesRef minValue, BytesRef maxValue) { + super(3, maxDoc, docCount, sumDocFreq, sumTotalTermFreq); + this.minValue = minValue; + this.maxValue = maxValue; + } + + @Override + public String getMinValue() { + return minValue.utf8ToString(); + } + + @Override + public String getMaxValue() { + return maxValue.utf8ToString(); + } + + @Override + public void append(FieldStats stats) { + super.append(stats); + Text other = (Text) stats; + if (other.minValue.compareTo(minValue) < 0) { + minValue = other.minValue; + } + if (other.maxValue.compareTo(maxValue) > 0) { + maxValue = other.maxValue; + } + } + + @Override + protected void toInnerXContent(XContentBuilder builder) throws IOException { + builder.field(Fields.MIN_VALUE, getMinValue()); + builder.field(Fields.MAX_VALUE, getMaxValue()); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + minValue = in.readBytesRef(); + maxValue = in.readBytesRef(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBytesRef(minValue); + out.writeBytesRef(maxValue); + } + + } + + public static final class Date extends Long { + + private FormatDateTimeFormatter dateFormatter; + + public Date() { + } + + public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue, FormatDateTimeFormatter dateFormatter) { + super(4, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue); + this.dateFormatter = dateFormatter; + } + + @Override + public String getMinValue() { + return dateFormatter.printer().print(minValue); + } + + @Override + public String getMaxValue() { + return dateFormatter.printer().print(maxValue); + } + + @Override + protected void toInnerXContent(XContentBuilder builder) throws IOException { + builder.field(Fields.MIN_VALUE, getMinValue()); + builder.field(Fields.MAX_VALUE, getMaxValue()); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + dateFormatter = Joda.forPattern(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(dateFormatter.format()); + } + + } + + public static FieldStats read(StreamInput in) throws IOException { + FieldStats stats; + byte type = in.readByte(); + switch (type) { + case 0: + stats = new Long(); + break; + case 1: + stats = new Float(); + break; + case 2: + stats = new Double(); + break; + case 3: + stats = new Text(); + break; + case 4: + stats = new Date(); + break; + default: + throw new IllegalArgumentException("Illegal type [" + type + "]"); + } + stats.type = type; + stats.readFrom(in); + return stats; + } + + private final static class Fields { + + final static XContentBuilderString MAX_DOC = new XContentBuilderString("max_doc"); + final static XContentBuilderString DOC_COUNT = new XContentBuilderString("doc_count"); + final static XContentBuilderString DENSITY = new XContentBuilderString("density"); + final static XContentBuilderString SUM_DOC_FREQ = new XContentBuilderString("sum_doc_freq"); + final static XContentBuilderString SUM_TOTAL_TERM_FREQ = new XContentBuilderString("sum_total_term_freq"); + final static XContentBuilderString MIN_VALUE = new XContentBuilderString("min_value"); + final static XContentBuilderString MAX_VALUE = new XContentBuilderString("max_value"); + + } + +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java new file mode 100644 index 00000000000..fb4a3f77833 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.ClientAction; +import org.elasticsearch.client.Client; + +/** + */ +public class FieldStatsAction extends ClientAction { + + public static final FieldStatsAction INSTANCE = new FieldStatsAction(); + public static final String NAME = "indices:data/read/field_stats"; + + private FieldStatsAction() { + super(NAME); + } + + @Override + public FieldStatsResponse newResponse() { + return new FieldStatsResponse(); + } + + @Override + public FieldStatsRequestBuilder newRequestBuilder(Client client) { + return new FieldStatsRequestBuilder(client); + } +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java new file mode 100644 index 00000000000..ff61fe88ee9 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + */ +public class FieldStatsRequest extends BroadcastOperationRequest { + + public final static String DEFAULT_LEVEL = "cluster"; + + private String[] fields; + private String level = DEFAULT_LEVEL; + + public String[] fields() { + return fields; + } + + public void fields(String[] fields) { + this.fields = fields; + } + + public String level() { + return level; + } + + public void level(String level) { + this.level = level; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if ("cluster".equals(level) == false && "indices".equals(level) == false) { + validationException = ValidateActions.addValidationError("invalid level option [" + level + "]", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + fields = in.readStringArray(); + level = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArrayNullable(fields); + out.writeString(level); + } +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java new file mode 100644 index 00000000000..4c8d0b6c73b --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.client.Client; + +/** + */ +public class FieldStatsRequestBuilder extends BroadcastOperationRequestBuilder { + + public FieldStatsRequestBuilder(Client client) { + super(client, new FieldStatsRequest()); + } + + public FieldStatsRequestBuilder setFields(String... fields) { + request().fields(fields); + return this; + } + + public FieldStatsRequestBuilder setLevel(String level) { + request().level(level); + return this; + } + + @Override + protected void doExecute(ActionListener listener) { + client.fieldStats(request, listener); + } +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java new file mode 100644 index 00000000000..e6f69e9791a --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + */ +public class FieldStatsResponse extends BroadcastOperationResponse { + + private Map> indicesMergedFieldStats; + + public FieldStatsResponse() { + } + + public FieldStatsResponse(int totalShards, int successfulShards, int failedShards, List shardFailures, Map> indicesMergedFieldStats) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.indicesMergedFieldStats = indicesMergedFieldStats; + } + + @Nullable + public Map getAllFieldStats() { + return indicesMergedFieldStats.get("_all"); + } + + public Map> getIndicesMergedFieldStats() { + return indicesMergedFieldStats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + final int size = in.readVInt(); + indicesMergedFieldStats = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String key = in.readString(); + int indexSize = in.readVInt(); + Map indexFieldStats = new HashMap<>(indexSize); + indicesMergedFieldStats.put(key, indexFieldStats); + for (int j = 0; j < indexSize; j++) { + key = in.readString(); + FieldStats value = FieldStats.read(in); + indexFieldStats.put(key, value); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(indicesMergedFieldStats.size()); + for (Map.Entry> entry1 : indicesMergedFieldStats.entrySet()) { + out.writeString(entry1.getKey()); + out.writeVInt(entry1.getValue().size()); + for (Map.Entry entry2 : entry1.getValue().entrySet()) { + out.writeString(entry2.getKey()); + entry2.getValue().writeTo(out); + } + } + } +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java new file mode 100644 index 00000000000..fb46ff66d3b --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; + +/** + */ +public class FieldStatsShardRequest extends BroadcastShardOperationRequest { + + private String[] fields; + + public FieldStatsShardRequest() { + } + + public FieldStatsShardRequest(ShardId shardId, FieldStatsRequest request) { + super(shardId, request); + this.fields = request.fields(); + } + + public String[] getFields() { + return fields; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + fields = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArrayNullable(fields); + } + +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java new file mode 100644 index 00000000000..ada4552e94c --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + */ +public class FieldStatsShardResponse extends BroadcastShardOperationResponse { + + private Map fieldStats; + + public FieldStatsShardResponse() { + } + + public FieldStatsShardResponse(ShardId shardId, Map fieldStats) { + super(shardId); + this.fieldStats = fieldStats; + } + + public Map getFieldStats() { + return fieldStats; + } + + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + final int size = in.readVInt(); + fieldStats = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String key = in.readString(); + FieldStats value = FieldStats.read(in); + fieldStats.put(key, value); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(fieldStats.size()); + for (Map.Entry entry : fieldStats.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java b/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java new file mode 100644 index 00000000000..a726c56e957 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java @@ -0,0 +1,184 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.Terms; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.FieldMappers; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.*; +import java.util.concurrent.atomic.AtomicReferenceArray; + +public class TransportFieldStatsTransportAction extends TransportBroadcastOperationAction { + + private final IndicesService indicesService; + + @Inject + public TransportFieldStatsTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndicesService indicesService) { + super(settings, FieldStatsAction.NAME, threadPool, clusterService, transportService, actionFilters); + this.indicesService = indicesService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected FieldStatsRequest newRequestInstance() { + return new FieldStatsRequest(); + } + + @Override + protected FieldStatsResponse newResponse(FieldStatsRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { + int successfulShards = 0; + int failedShards = 0; + Map> indicesMergedFieldStats = new HashMap<>(); + List shardFailures = new ArrayList<>(); + for (int i = 0; i < shardsResponses.length(); i++) { + Object shardValue = shardsResponses.get(i); + if (shardValue == null) { + // simply ignore non active shards + } else if (shardValue instanceof BroadcastShardOperationFailedException) { + failedShards++; + shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardValue)); + } else { + successfulShards++; + FieldStatsShardResponse shardResponse = (FieldStatsShardResponse) shardValue; + + final String indexName; + if ("cluster".equals(request.level())) { + indexName = "_all"; + } else if ("indices".equals(request.level())) { + indexName = shardResponse.getIndex(); + } else { + // should already have been catched by the FieldStatsRequest#validate(...) + throw new ElasticsearchIllegalArgumentException("Illegal level option [" + request.level() + "]"); + } + + Map indexMergedFieldStats = indicesMergedFieldStats.get(indexName); + if (indexMergedFieldStats == null) { + indicesMergedFieldStats.put(indexName, indexMergedFieldStats = new HashMap<>()); + } + + Map fieldStats = shardResponse.getFieldStats(); + for (Map.Entry entry : fieldStats.entrySet()) { + FieldStats existing = indexMergedFieldStats.get(entry.getKey()); + if (existing != null) { + if (existing.getType() != entry.getValue().getType()) { + throw new ElasticsearchIllegalStateException( + "trying to merge the field stats of field [" + entry.getKey() + "] from index [" + shardResponse.getIndex() + "] but the field type is incompatible, try to set the 'level' option to 'indices'" + ); + } + + existing.append(entry.getValue()); + } else { + indexMergedFieldStats.put(entry.getKey(), entry.getValue()); + } + } + } + } + return new FieldStatsResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, indicesMergedFieldStats); + } + + @Override + protected FieldStatsShardRequest newShardRequest() { + return new FieldStatsShardRequest(); + } + + @Override + protected FieldStatsShardRequest newShardRequest(int numShards, ShardRouting shard, FieldStatsRequest request) { + return new FieldStatsShardRequest(shard.shardId(), request); + } + + @Override + protected FieldStatsShardResponse newShardResponse() { + return new FieldStatsShardResponse(); + } + + @Override + protected FieldStatsShardResponse shardOperation(FieldStatsShardRequest request) throws ElasticsearchException { + ShardId shardId = request.shardId(); + Map fieldStats = new HashMap<>(); + IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex()); + MapperService mapperService = indexServices.mapperService(); + IndexShard shard = indexServices.shardSafe(shardId.id()); + shard.readAllowed(); + try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) { + for (String field : request.getFields()) { + FieldMappers fieldMappers = mapperService.fullName(field); + if (fieldMappers != null) { + IndexReader reader = searcher.reader(); + Terms terms = MultiFields.getTerms(reader, field); + if (terms != null) { + fieldStats.put(field, fieldMappers.mapper().stats(terms, reader.maxDoc())); + } + } else { + throw new IllegalArgumentException("field [" + field + "] doesn't exist"); + } + } + } catch (IOException e) { + throw ExceptionsHelper.convertToElastic(e); + } + return new FieldStatsShardResponse(shardId, fieldStats); + } + + @Override + protected GroupShardsIterator shards(ClusterState clusterState, FieldStatsRequest request, String[] concreteIndices) { + return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, null, null); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, FieldStatsRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, FieldStatsRequest request, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices); + } +} diff --git a/src/main/java/org/elasticsearch/client/Client.java b/src/main/java/org/elasticsearch/client/Client.java index 55b8a83428a..7f763e50d35 100644 --- a/src/main/java/org/elasticsearch/client/Client.java +++ b/src/main/java/org/elasticsearch/client/Client.java @@ -38,6 +38,9 @@ import org.elasticsearch.action.exists.ExistsResponse; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainRequestBuilder; import org.elasticsearch.action.explain.ExplainResponse; +import org.elasticsearch.action.fieldstats.FieldStatsRequest; +import org.elasticsearch.action.fieldstats.FieldStatsRequestBuilder; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; import org.elasticsearch.action.get.*; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -682,6 +685,12 @@ public interface Client extends ElasticsearchClient, Releasable { */ void clearScroll(ClearScrollRequest request, ActionListener listener); + FieldStatsRequestBuilder prepareFieldStats(); + + ActionFuture fieldStats(FieldStatsRequest request); + + void fieldStats(FieldStatsRequest request, ActionListener listener); + /** * Returns this clients settings */ diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 55f74434b0f..ccfab027fd4 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -44,6 +44,10 @@ import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainRequestBuilder; import org.elasticsearch.action.explain.ExplainResponse; +import org.elasticsearch.action.fieldstats.FieldStatsAction; +import org.elasticsearch.action.fieldstats.FieldStatsRequest; +import org.elasticsearch.action.fieldstats.FieldStatsRequestBuilder; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; import org.elasticsearch.action.get.*; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; @@ -550,4 +554,19 @@ public abstract class AbstractClient implements Client { public ClearScrollRequestBuilder prepareClearScroll() { return new ClearScrollRequestBuilder(this); } + + @Override + public void fieldStats(FieldStatsRequest request, ActionListener listener) { + execute(FieldStatsAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture fieldStats(FieldStatsRequest request) { + return execute(FieldStatsAction.INSTANCE, request); + } + + @Override + public FieldStatsRequestBuilder prepareFieldStats() { + return new FieldStatsRequestBuilder(this); + } } diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index a17a2852757..09b6b58137b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -20,14 +20,15 @@ package org.elasticsearch.index.mapper; import com.google.common.base.Strings; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.fielddata.FieldDataType; @@ -35,6 +36,7 @@ import org.elasticsearch.index.mapper.core.AbstractFieldMapper; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.similarity.SimilarityProvider; +import java.io.IOException; import java.util.List; /** @@ -295,4 +297,9 @@ public interface FieldMapper extends Mapper { * */ public boolean isGenerated(); + /** + * @return a {@link FieldStats} instance that maps to the type of this field based on the provided {@link Terms} instance. + */ + FieldStats stats(Terms terms, int maxDoc) throws IOException; + } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index a9ab088d285..7d8712ea3db 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -38,10 +38,15 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.index.Terms; +import org.apache.lucene.queries.TermFilter; +import org.apache.lucene.queries.TermsFilter; +import org.apache.lucene.search.*; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lucene.BytesRefs; @@ -53,16 +58,7 @@ import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataType; -import org.elasticsearch.index.mapper.ContentPath; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.FieldMapperListener; -import org.elasticsearch.index.mapper.FieldMappers; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; -import org.elasticsearch.index.mapper.MergeMappingException; -import org.elasticsearch.index.mapper.ObjectMapperListener; -import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; @@ -1121,4 +1117,10 @@ public abstract class AbstractFieldMapper implements FieldMapper { return false; } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + return new FieldStats.Text( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), terms.getMin(), terms.getMax() + ); + } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 66d87a77aea..58a419a6e42 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; @@ -30,6 +31,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -354,6 +356,15 @@ public class ByteFieldMapper extends NumberFieldMapper { } } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + long minValue = NumericUtils.getMinInt(terms); + long maxValue = NumericUtils.getMaxInt(terms); + return new FieldStats.Long( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue + ); + } + public static class CustomByteNumericField extends CustomNumericField { private final byte number; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 8e5c88a9636..3de8b803f41 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -23,6 +23,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; @@ -31,6 +32,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.ToStringUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -535,6 +537,15 @@ public class DateFieldMapper extends NumberFieldMapper { } } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + long minValue = NumericUtils.getMinLong(terms); + long maxValue = NumericUtils.getMaxLong(terms); + return new FieldStats.Date( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue, dateTimeFormatter + ); + } + private long parseStringValue(String value) { try { return dateTimeFormatter.parser().parseMillis(value); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 6f6058439bf..95c3546b0a4 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -27,6 +27,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; @@ -34,6 +35,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -170,7 +172,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { if (value instanceof BytesRef) { return Numbers.bytesToDouble((BytesRef) value); } - return Double.parseDouble(value.toString()); + return java.lang.Double.parseDouble(value.toString()); } @Override @@ -183,7 +185,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { @Override public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { - double iValue = Double.parseDouble(value); + double iValue = java.lang.Double.parseDouble(value); double iSim = fuzziness.asDouble(); return NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, iValue - iSim, @@ -254,13 +256,13 @@ public class DoubleFieldMapper extends NumberFieldMapper { } value = nullValue; } else { - value = Double.parseDouble(sExternalValue); + value = java.lang.Double.parseDouble(sExternalValue); } } else { value = ((Number) externalValue).doubleValue(); } if (context.includeInAll(includeInAll, this)) { - context.allEntries().addText(names.fullName(), Double.toString(value), boost); + context.allEntries().addText(names.fullName(), java.lang.Double.toString(value), boost); } } else { XContentParser parser = context.parser(); @@ -360,6 +362,15 @@ public class DoubleFieldMapper extends NumberFieldMapper { } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + double minValue = NumericUtils.sortableLongToDouble(NumericUtils.getMinLong(terms)); + double maxValue = NumericUtils.sortableLongToDouble(NumericUtils.getMaxLong(terms)); + return new FieldStats.Double( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue + ); + } + public static class CustomDoubleNumericField extends CustomNumericField { private final double number; @@ -382,7 +393,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { @Override public String numericAsString() { - return Double.toString(number); + return java.lang.Double.toString(number); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index ab1391e9698..05531e629f8 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -27,6 +27,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; @@ -34,6 +35,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -170,7 +172,7 @@ public class FloatFieldMapper extends NumberFieldMapper { if (value instanceof BytesRef) { return Numbers.bytesToFloat((BytesRef) value); } - return Float.parseFloat(value.toString()); + return java.lang.Float.parseFloat(value.toString()); } @Override @@ -186,14 +188,14 @@ public class FloatFieldMapper extends NumberFieldMapper { return ((Number) value).floatValue(); } if (value instanceof BytesRef) { - return Float.parseFloat(((BytesRef) value).utf8ToString()); + return java.lang.Float.parseFloat(((BytesRef) value).utf8ToString()); } - return Float.parseFloat(value.toString()); + return java.lang.Float.parseFloat(value.toString()); } @Override public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { - float iValue = Float.parseFloat(value); + float iValue = java.lang.Float.parseFloat(value); final float iSim = fuzziness.asFloat(); return NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, iValue - iSim, @@ -260,13 +262,13 @@ public class FloatFieldMapper extends NumberFieldMapper { } value = nullValue; } else { - value = Float.parseFloat(sExternalValue); + value = java.lang.Float.parseFloat(sExternalValue); } } else { value = ((Number) externalValue).floatValue(); } if (context.includeInAll(includeInAll, this)) { - context.allEntries().addText(names.fullName(), Float.toString(value), boost); + context.allEntries().addText(names.fullName(), java.lang.Float.toString(value), boost); } } else { XContentParser parser = context.parser(); @@ -367,6 +369,15 @@ public class FloatFieldMapper extends NumberFieldMapper { } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + float minValue = NumericUtils.sortableIntToFloat(NumericUtils.getMinInt(terms)); + float maxValue = NumericUtils.sortableIntToFloat(NumericUtils.getMaxInt(terms)); + return new FieldStats.Float( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue + ); + } + public static class CustomFloatNumericField extends CustomNumericField { private final float number; @@ -389,7 +400,7 @@ public class FloatFieldMapper extends NumberFieldMapper { @Override public String numericAsString() { - return Float.toString(number); + return java.lang.Float.toString(number); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index eec2d84d0b9..f670a863dea 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; @@ -31,6 +32,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -357,6 +359,15 @@ public class IntegerFieldMapper extends NumberFieldMapper { } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + long minValue = NumericUtils.getMinInt(terms); + long maxValue = NumericUtils.getMaxInt(terms); + return new FieldStats.Long( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue + ); + } + public static class CustomIntegerNumericField extends CustomNumericField { private final int number; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index c10fdf79af6..d12e0ab0733 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; @@ -31,6 +32,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -165,7 +167,7 @@ public class LongFieldMapper extends NumberFieldMapper { if (value instanceof BytesRef) { return Numbers.bytesToLong((BytesRef) value); } - return Long.parseLong(value.toString()); + return java.lang.Long.parseLong(value.toString()); } @Override @@ -177,7 +179,7 @@ public class LongFieldMapper extends NumberFieldMapper { @Override public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { - long iValue = Long.parseLong(value); + long iValue = java.lang.Long.parseLong(value); final long iSim = fuzziness.asLong(); return NumericRangeQuery.newLongRange(names.indexName(), precisionStep, iValue - iSim, @@ -244,13 +246,13 @@ public class LongFieldMapper extends NumberFieldMapper { } value = nullValue; } else { - value = Long.parseLong(sExternalValue); + value = java.lang.Long.parseLong(sExternalValue); } } else { value = ((Number) externalValue).longValue(); } if (context.includeInAll(includeInAll, this)) { - context.allEntries().addText(names.fullName(), Long.toString(value), boost); + context.allEntries().addText(names.fullName(), java.lang.Long.toString(value), boost); } } else { XContentParser parser = context.parser(); @@ -338,6 +340,15 @@ public class LongFieldMapper extends NumberFieldMapper { } } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + long minValue = NumericUtils.getMinLong(terms); + long maxValue = NumericUtils.getMaxLong(terms); + return new FieldStats.Long( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue + ); + } + public static class CustomLongNumericField extends CustomNumericField { private final long number; @@ -360,7 +371,7 @@ public class LongFieldMapper extends NumberFieldMapper { @Override public String numericAsString() { - return Long.toString(number); + return java.lang.Long.toString(number); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index b16518769d1..bcacc56773a 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; @@ -31,6 +32,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -355,6 +357,15 @@ public class ShortFieldMapper extends NumberFieldMapper { } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + long minValue = NumericUtils.getMinInt(terms); + long maxValue = NumericUtils.getMaxInt(terms); + return new FieldStats.Long( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue + ); + } + public static class CustomShortNumericField extends CustomNumericField { private final short number; diff --git a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java index 9bc0d989058..b8724b8a75b 100644 --- a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java @@ -84,6 +84,7 @@ import org.elasticsearch.rest.action.cat.*; import org.elasticsearch.rest.action.delete.RestDeleteAction; import org.elasticsearch.rest.action.deletebyquery.RestDeleteByQueryAction; import org.elasticsearch.rest.action.explain.RestExplainAction; +import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; import org.elasticsearch.rest.action.get.RestGetAction; import org.elasticsearch.rest.action.get.RestGetSourceAction; import org.elasticsearch.rest.action.get.RestHeadAction; @@ -229,6 +230,8 @@ public class RestActionModule extends AbstractModule { bind(RestDeleteIndexedScriptAction.class).asEagerSingleton(); + bind(RestFieldStatsAction.class).asEagerSingleton(); + // cat API Multibinder catActionMultibinder = Multibinder.newSetBinder(binder(), AbstractCatAction.class); catActionMultibinder.addBinding().to(RestAllocationAction.class).asEagerSingleton(); diff --git a/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java b/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java new file mode 100644 index 00000000000..fd45c5a56d4 --- /dev/null +++ b/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.fieldstats; + +import org.elasticsearch.action.fieldstats.FieldStats; +import org.elasticsearch.action.fieldstats.FieldStatsRequest; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.*; +import org.elasticsearch.rest.action.support.RestBuilderListener; + +import java.util.Map; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader; + +/** + */ +public class RestFieldStatsAction extends BaseRestHandler { + + @Inject + public RestFieldStatsAction(Settings settings, RestController controller, Client client) { + super(settings, controller, client); + controller.registerHandler(GET, "/_field_stats", this); + controller.registerHandler(POST, "/_field_stats", this); + controller.registerHandler(GET, "/{index}/_field_stats", this); + controller.registerHandler(POST, "/{index}/_field_stats", this); + } + + @Override + public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + final FieldStatsRequest fieldStatsRequest = new FieldStatsRequest(); + fieldStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); + fieldStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, fieldStatsRequest.indicesOptions())); + fieldStatsRequest.fields(Strings.splitStringByCommaToArray(request.param("fields"))); + fieldStatsRequest.level(request.param("level", FieldStatsRequest.DEFAULT_LEVEL)); + fieldStatsRequest.listenerThreaded(false); + + client.fieldStats(fieldStatsRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(FieldStatsResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + buildBroadcastShardsHeader(builder, response); + + builder.startObject("indices"); + for (Map.Entry> entry1 : response.getIndicesMergedFieldStats().entrySet()) { + builder.startObject(entry1.getKey()); + builder.startObject("fields"); + for (Map.Entry entry2 : entry1.getValue().entrySet()) { + builder.field(entry2.getKey()); + entry2.getValue().toXContent(builder, request); + } + builder.endObject(); + builder.endObject(); + } + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java new file mode 100644 index 00000000000..e31b60a2380 --- /dev/null +++ b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java @@ -0,0 +1,214 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.fieldstats; + +import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldstats.FieldStats; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.test.ElasticsearchIntegrationTest; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +/** + */ +public class FieldStatsIntegrationTests extends ElasticsearchIntegrationTest { + + public void testRandom() throws Exception { + assertAcked(prepareCreate("test").addMapping( + "test", "string", "type=string", "date", "type=date", "double", "type=double", "double", "type=double", + "float", "type=float", "long", "type=long", "integer", "type=integer", "short", "type=short", "byte", "type=byte" + )); + + byte minByte = Byte.MAX_VALUE; + byte maxByte = Byte.MIN_VALUE; + short minShort = Short.MAX_VALUE; + short maxShort = Short.MIN_VALUE; + int minInt = Integer.MAX_VALUE; + int maxInt = Integer.MIN_VALUE; + long minLong = Long.MAX_VALUE; + long maxLong = Long.MIN_VALUE; + float minFloat = Float.MAX_VALUE; + float maxFloat = Float.MIN_VALUE; + double minDouble = Double.MAX_VALUE; + double maxDouble = Double.MIN_VALUE; + String minString = new String(Character.toChars(1114111)); + String maxString = "0"; + + int numDocs = scaledRandomIntBetween(128, 1024); + List request = new ArrayList<>(numDocs); + for (int doc = 0; doc < numDocs; doc++) { + byte b = randomByte(); + minByte = (byte) Math.min(minByte, b); + maxByte = (byte) Math.max(maxByte, b); + short s = randomShort(); + minShort = (short) Math.min(minShort, s); + maxShort = (short) Math.max(maxShort, s); + int i = randomInt(); + minInt = Math.min(minInt, i); + maxInt = Math.max(maxInt, i); + long l = randomLong(); + minLong = Math.min(minLong, l); + maxLong = Math.max(maxLong, l); + float f = randomFloat(); + minFloat = Math.min(minFloat, f); + maxFloat = Math.max(maxFloat, f); + double d = randomDouble(); + minDouble = Math.min(minDouble, d); + maxDouble = Math.max(maxDouble, d); + String str = randomRealisticUnicodeOfLength(3); + if (str.compareTo(minString) < 0) { + minString = str; + } + if (str.compareTo(maxString) > 0) { + maxString = str; + } + + request.add(client().prepareIndex("test", "test", Integer.toString(doc)) + .setSource("byte", b, "short", s, "integer", i, "long", l, "float", f, "double", d, "string", str) + ); + } + indexRandom(true, false, request); + + FieldStatsResponse response = client().prepareFieldStats().setFields("byte", "short", "integer", "long", "float", "double", "string").get(); + assertAllSuccessful(response); + + for (FieldStats stats : response.getAllFieldStats().values()) { + assertThat(stats.getMaxDoc(), equalTo((long) numDocs)); + assertThat(stats.getDocCount(), equalTo((long) numDocs)); + assertThat(stats.getDensity(), equalTo(100)); + } + + assertThat(response.getAllFieldStats().get("byte").getMinValue(), equalTo(Byte.toString(minByte))); + assertThat(response.getAllFieldStats().get("byte").getMaxValue(), equalTo(Byte.toString(maxByte))); + assertThat(response.getAllFieldStats().get("short").getMinValue(), equalTo(Short.toString(minShort))); + assertThat(response.getAllFieldStats().get("short").getMaxValue(), equalTo(Short.toString(maxShort))); + assertThat(response.getAllFieldStats().get("integer").getMinValue(), equalTo(Integer.toString(minInt))); + assertThat(response.getAllFieldStats().get("integer").getMaxValue(), equalTo(Integer.toString(maxInt))); + assertThat(response.getAllFieldStats().get("long").getMinValue(), equalTo(Long.toString(minLong))); + assertThat(response.getAllFieldStats().get("long").getMaxValue(), equalTo(Long.toString(maxLong))); + assertThat(response.getAllFieldStats().get("float").getMinValue(), equalTo(Float.toString(minFloat))); + assertThat(response.getAllFieldStats().get("float").getMaxValue(), equalTo(Float.toString(maxFloat))); + assertThat(response.getAllFieldStats().get("double").getMinValue(), equalTo(Double.toString(minDouble))); + assertThat(response.getAllFieldStats().get("double").getMaxValue(), equalTo(Double.toString(maxDouble))); + } + + public void testFieldStatsIndexLevel() throws Exception { + assertAcked(prepareCreate("test1").addMapping( + "test", "value", "type=long" + )); + assertAcked(prepareCreate("test2").addMapping( + "test", "value", "type=long" + )); + assertAcked(prepareCreate("test3").addMapping( + "test", "value", "type=long" + )); + + indexRange("test1", -10, 100); + indexRange("test2", 101, 200); + indexRange("test3", 201, 300); + + // default: + FieldStatsResponse response = client().prepareFieldStats().setFields("value").get(); + assertAllSuccessful(response); + assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(Long.toString(300))); + assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1)); + assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(Long.toString(300))); + + // Level: cluster + response = client().prepareFieldStats().setFields("value").setLevel("cluster").get(); + assertAllSuccessful(response); + assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(Long.toString(300))); + assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1)); + assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(Long.toString(300))); + + // Level: indices + response = client().prepareFieldStats().setFields("value").setLevel("indices").get(); + assertAllSuccessful(response); + assertThat(response.getAllFieldStats(), nullValue()); + assertThat(response.getIndicesMergedFieldStats().size(), equalTo(3)); + assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(Long.toString(100))); + assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(Long.toString(101))); + assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(Long.toString(200))); + assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(Long.toString(201))); + assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(Long.toString(300))); + + // Illegal level option: + try { + client().prepareFieldStats().setFields("value").setLevel("illegal").get(); + fail(); + } catch (ActionRequestValidationException e) { + assertThat(e.getMessage(), equalTo("Validation Failed: 1: invalid level option [illegal];")); + } + } + + public void testIncompatibleFieldTypes() { + assertAcked(prepareCreate("test1").addMapping( + "test", "value", "type=long" + )); + assertAcked(prepareCreate("test2").addMapping( + "test", "value", "type=string" + )); + + client().prepareIndex("test1", "test").setSource("value", 1l).get(); + client().prepareIndex("test1", "test").setSource("value", 2l).get(); + client().prepareIndex("test2", "test").setSource("value", "a").get(); + client().prepareIndex("test2", "test").setSource("value", "b").get(); + refresh(); + + try { + client().prepareFieldStats().setFields("value").get(); + fail(); + } catch (ElasticsearchIllegalStateException e){ + assertThat(e.getMessage(), containsString("trying to merge the field stats of field [value]")); + } + + FieldStatsResponse response = client().prepareFieldStats().setFields("value").setLevel("indices").get(); + assertAllSuccessful(response); + assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2)); + assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(Long.toString(1))); + assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(Long.toString(2))); + assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo("a")); + assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo("b")); + } + + private void indexRange(String index, long from, long to) throws ExecutionException, InterruptedException { + List requests = new ArrayList<>(); + for (long value = from; value <= to; value++) { + requests.add(client().prepareIndex(index, "test").setSource("value", value)); + } + indexRandom(true, false, requests); + } + +} diff --git a/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java b/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java new file mode 100644 index 00000000000..cd5ececc18a --- /dev/null +++ b/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.fieldstats; + +import org.elasticsearch.action.fieldstats.FieldStats; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.test.ElasticsearchSingleNodeTest; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.hamcrest.Matchers.*; + +/** + */ +public class FieldStatsTests extends ElasticsearchSingleNodeTest { + + public void testByte() { + testNumberRange("field1", "byte", 12, 18); + testNumberRange("field1", "byte", -5, 5); + testNumberRange("field1", "byte", -18, -12); + } + + public void testShort() { + testNumberRange("field1", "short", 256, 266); + testNumberRange("field1", "short", -5, 5); + testNumberRange("field1", "short", -266, -256); + } + + public void testInteger() { + testNumberRange("field1", "integer", 56880, 56890); + testNumberRange("field1", "integer", -5, 5); + testNumberRange("field1", "integer", -56890, -56880); + } + + public void testLong() { + testNumberRange("field1", "long", 312321312312412l, 312321312312422l); + testNumberRange("field1", "long", -5, 5); + testNumberRange("field1", "long", -312321312312422l, -312321312312412l); + } + + public void testString() { + createIndex("test", ImmutableSettings.EMPTY, "field", "value", "type=string"); + for (int value = 0; value <= 10; value++) { + client().prepareIndex("test", "test").setSource("field", String.format(Locale.ENGLISH, "%03d", value)).get(); + } + client().admin().indices().prepareRefresh().get(); + + FieldStatsResponse result = client().prepareFieldStats().setFields("field").get(); + assertThat(result.getAllFieldStats().get("field").getMaxDoc(), equalTo(11l)); + assertThat(result.getAllFieldStats().get("field").getDocCount(), equalTo(11l)); + assertThat(result.getAllFieldStats().get("field").getDensity(), equalTo(100)); + assertThat(result.getAllFieldStats().get("field").getMinValue(), equalTo(String.format(Locale.ENGLISH, "%03d", 0))); + assertThat(result.getAllFieldStats().get("field").getMaxValue(), equalTo(String.format(Locale.ENGLISH, "%03d", 10))); + } + + public void testDouble() { + String fieldName = "field"; + createIndex("test", ImmutableSettings.EMPTY, fieldName, "value", "type=double"); + for (double value = -1; value <= 9; value++) { + client().prepareIndex("test", "test").setSource(fieldName, value).get(); + } + client().admin().indices().prepareRefresh().get(); + + FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get(); + assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l)); + assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l)); + assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100)); + assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(Double.toString(-1))); + assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(Double.toString(9))); + } + + public void testFloat() { + String fieldName = "field"; + createIndex("test", ImmutableSettings.EMPTY, fieldName, "value", "type=float"); + for (float value = -1; value <= 9; value++) { + client().prepareIndex("test", "test").setSource(fieldName, value).get(); + } + client().admin().indices().prepareRefresh().get(); + + FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get(); + assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l)); + assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l)); + assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100)); + assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(Float.toString(-1))); + assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(Float.toString(9))); + } + + private void testNumberRange(String fieldName, String fieldType, long min, long max) { + createIndex("test", ImmutableSettings.EMPTY, fieldName, "value", "type=" + fieldType); + for (long value = min; value <= max; value++) { + client().prepareIndex("test", "test").setSource(fieldName, value).get(); + } + client().admin().indices().prepareRefresh().get(); + + FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get(); + long numDocs = max - min + 1; + assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(numDocs)); + assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(numDocs)); + assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100)); + assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(java.lang.Long.toString(min))); + assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(java.lang.Long.toString(max))); + client().admin().indices().prepareDelete("test").get(); + } + + public void testMerge() { + List stats = new ArrayList<>(); + stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l)); + stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l)); + stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l)); + + FieldStats stat = new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l); + for (FieldStats otherStat : stats) { + stat.append(otherStat); + } + assertThat(stat.getMaxDoc(), equalTo(4l)); + assertThat(stat.getDocCount(), equalTo(4l)); + assertThat(stat.getSumDocFreq(), equalTo(4l)); + assertThat(stat.getSumTotalTermFreq(), equalTo(4l)); + } + + public void testMerge_notAvailable() { + List stats = new ArrayList<>(); + stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l)); + stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l)); + stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l)); + + FieldStats stat = new FieldStats.Long(1, -1l, -1l, -1l, 1l, 1l); + for (FieldStats otherStat : stats) { + stat.append(otherStat); + } + assertThat(stat.getMaxDoc(), equalTo(4l)); + assertThat(stat.getDocCount(), equalTo(-1l)); + assertThat(stat.getSumDocFreq(), equalTo(-1l)); + assertThat(stat.getSumTotalTermFreq(), equalTo(-1l)); + + stats.add(new FieldStats.Long(1, -1l, -1l, -1l, 1l, 1l)); + stat = stats.remove(0); + for (FieldStats otherStat : stats) { + stat.append(otherStat); + } + assertThat(stat.getMaxDoc(), equalTo(4l)); + assertThat(stat.getDocCount(), equalTo(-1l)); + assertThat(stat.getSumDocFreq(), equalTo(-1l)); + assertThat(stat.getSumTotalTermFreq(), equalTo(-1l)); + } + + public void testInvalidField() { + createIndex("test1", ImmutableSettings.EMPTY, "field1", "value", "type=string"); + client().prepareIndex("test1", "test").setSource("field1", "a").get(); + client().prepareIndex("test1", "test").setSource("field1", "b").get(); + + createIndex("test2", ImmutableSettings.EMPTY, "field2", "value", "type=string"); + client().prepareIndex("test2", "test").setSource("field2", "a").get(); + client().prepareIndex("test2", "test").setSource("field2", "b").get(); + client().admin().indices().prepareRefresh().get(); + + FieldStatsResponse result = client().prepareFieldStats().setFields("field1", "field2").get(); + assertThat(result.getFailedShards(), equalTo(2)); + assertThat(result.getTotalShards(), equalTo(2)); + assertThat(result.getSuccessfulShards(), equalTo(0)); + assertThat(result.getShardFailures()[0].reason(), either(containsString("field [field1] doesn't exist")).or(containsString("field [field2] doesn't exist"))); + assertThat(result.getIndicesMergedFieldStats().size(), equalTo(0)); + + // will only succeed on the 'test2' shard, because there the field does exist + result = client().prepareFieldStats().setFields("field1").get(); + assertThat(result.getFailedShards(), equalTo(1)); + assertThat(result.getTotalShards(), equalTo(2)); + assertThat(result.getSuccessfulShards(), equalTo(1)); + assertThat(result.getShardFailures()[0].reason(), either(containsString("field [field1] doesn't exist")).or(containsString("field [field2] doesn't exist"))); + assertThat(result.getIndicesMergedFieldStats().get("_all").get("field1").getMinValue(), equalTo("a")); + assertThat(result.getIndicesMergedFieldStats().get("_all").get("field1").getMaxValue(), equalTo("b")); + } + +} \ No newline at end of file From 6a2f9c26820a962aa025111bcbba59c11c19b63b Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 23 Apr 2015 09:57:31 +0200 Subject: [PATCH 074/236] docs: fixed title out of sequence --- docs/reference/search/field-stats.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/field-stats.asciidoc b/docs/reference/search/field-stats.asciidoc index b6cd5db4967..ab6076ebdce 100644 --- a/docs/reference/search/field-stats.asciidoc +++ b/docs/reference/search/field-stats.asciidoc @@ -28,7 +28,7 @@ Supported request options: * `level` - Defines if field stats should be returned on a per index level or on a cluster wide level. Valid values are `indices` and `cluster`. Defaults to `cluster`. -==== Field statistics +=== Field statistics The field stats api is supported on string based, number based and date based fields and can return the following statistics per field: @@ -45,7 +45,7 @@ The field stats api is supported on string based, number based and date based fi Note that for all the mentioned statistics, documents marked as deleted aren't taken into account. The documents marked as deleted are are only taken into account when the segments these documents reside on are merged away. -==== Example +=== Example [source,js] -------------------------------------------------- From 38cb747c694b87452550a85a42bb675ccf0d8637 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 23 Apr 2015 10:04:24 +0200 Subject: [PATCH 075/236] field stats: removed redundant package prefixes --- .../index/mapper/core/DoubleFieldMapper.java | 12 ++++++------ .../index/mapper/core/FloatFieldMapper.java | 14 +++++++------- .../index/mapper/core/LongFieldMapper.java | 10 +++++----- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 95c3546b0a4..5ea9288c05a 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -172,7 +172,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { if (value instanceof BytesRef) { return Numbers.bytesToDouble((BytesRef) value); } - return java.lang.Double.parseDouble(value.toString()); + return Double.parseDouble(value.toString()); } @Override @@ -185,7 +185,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { @Override public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { - double iValue = java.lang.Double.parseDouble(value); + double iValue = Double.parseDouble(value); double iSim = fuzziness.asDouble(); return NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, iValue - iSim, @@ -256,13 +256,13 @@ public class DoubleFieldMapper extends NumberFieldMapper { } value = nullValue; } else { - value = java.lang.Double.parseDouble(sExternalValue); + value = Double.parseDouble(sExternalValue); } } else { value = ((Number) externalValue).doubleValue(); } if (context.includeInAll(includeInAll, this)) { - context.allEntries().addText(names.fullName(), java.lang.Double.toString(value), boost); + context.allEntries().addText(names.fullName(), Double.toString(value), boost); } } else { XContentParser parser = context.parser(); @@ -393,7 +393,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { @Override public String numericAsString() { - return java.lang.Double.toString(number); + return Double.toString(number); } } @@ -401,7 +401,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { public static final FieldType TYPE = new FieldType(); static { - TYPE.setDocValuesType(DocValuesType.BINARY); + TYPE.setDocValuesType(DocValuesType.BINARY); TYPE.freeze(); } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index 05531e629f8..95dcddb52ec 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -172,7 +172,7 @@ public class FloatFieldMapper extends NumberFieldMapper { if (value instanceof BytesRef) { return Numbers.bytesToFloat((BytesRef) value); } - return java.lang.Float.parseFloat(value.toString()); + return Float.parseFloat(value.toString()); } @Override @@ -188,14 +188,14 @@ public class FloatFieldMapper extends NumberFieldMapper { return ((Number) value).floatValue(); } if (value instanceof BytesRef) { - return java.lang.Float.parseFloat(((BytesRef) value).utf8ToString()); + return Float.parseFloat(((BytesRef) value).utf8ToString()); } - return java.lang.Float.parseFloat(value.toString()); + return Float.parseFloat(value.toString()); } @Override public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { - float iValue = java.lang.Float.parseFloat(value); + float iValue = Float.parseFloat(value); final float iSim = fuzziness.asFloat(); return NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, iValue - iSim, @@ -262,13 +262,13 @@ public class FloatFieldMapper extends NumberFieldMapper { } value = nullValue; } else { - value = java.lang.Float.parseFloat(sExternalValue); + value = Float.parseFloat(sExternalValue); } } else { value = ((Number) externalValue).floatValue(); } if (context.includeInAll(includeInAll, this)) { - context.allEntries().addText(names.fullName(), java.lang.Float.toString(value), boost); + context.allEntries().addText(names.fullName(), Float.toString(value), boost); } } else { XContentParser parser = context.parser(); @@ -400,7 +400,7 @@ public class FloatFieldMapper extends NumberFieldMapper { @Override public String numericAsString() { - return java.lang.Float.toString(number); + return Float.toString(number); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index d12e0ab0733..7a3f90b7d1f 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -167,7 +167,7 @@ public class LongFieldMapper extends NumberFieldMapper { if (value instanceof BytesRef) { return Numbers.bytesToLong((BytesRef) value); } - return java.lang.Long.parseLong(value.toString()); + return Long.parseLong(value.toString()); } @Override @@ -179,7 +179,7 @@ public class LongFieldMapper extends NumberFieldMapper { @Override public Query fuzzyQuery(String value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { - long iValue = java.lang.Long.parseLong(value); + long iValue = Long.parseLong(value); final long iSim = fuzziness.asLong(); return NumericRangeQuery.newLongRange(names.indexName(), precisionStep, iValue - iSim, @@ -246,13 +246,13 @@ public class LongFieldMapper extends NumberFieldMapper { } value = nullValue; } else { - value = java.lang.Long.parseLong(sExternalValue); + value = Long.parseLong(sExternalValue); } } else { value = ((Number) externalValue).longValue(); } if (context.includeInAll(includeInAll, this)) { - context.allEntries().addText(names.fullName(), java.lang.Long.toString(value), boost); + context.allEntries().addText(names.fullName(), Long.toString(value), boost); } } else { XContentParser parser = context.parser(); @@ -371,7 +371,7 @@ public class LongFieldMapper extends NumberFieldMapper { @Override public String numericAsString() { - return java.lang.Long.toString(number); + return Long.toString(number); } } } From 1a1ddceb47f7842241de43cf668112da67f07ea7 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 23 Apr 2015 09:42:05 +0100 Subject: [PATCH 076/236] Muted failing MovAvgTests --- .../reducers/moving/avg/MovAvgTests.java | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index d6fd7750346..bab47aadb80 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -21,6 +21,8 @@ package org.elasticsearch.search.aggregations.reducers.moving.avg; import com.google.common.collect.EvictingQueue; + +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; @@ -32,7 +34,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram. import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder; import org.elasticsearch.search.aggregations.reducers.BucketHelpers; import org.elasticsearch.search.aggregations.reducers.SimpleValue; -import org.elasticsearch.search.aggregations.reducers.movavg.models.*; +import org.elasticsearch.search.aggregations.reducers.movavg.models.DoubleExpModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.LinearModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.models.SimpleModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.SingleExpModel; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -40,13 +46,22 @@ import java.util.ArrayList; import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.*; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; +import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.max; +import static org.elasticsearch.search.aggregations.AggregationBuilders.min; +import static org.elasticsearch.search.aggregations.AggregationBuilders.range; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.movingAvg; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsNull.notNullValue; @ElasticsearchIntegrationTest.SuiteScopeTest +@AwaitsFix(bugUrl = "Gap test logic seems to fail a lot of the time on CI build") public class MovAvgTests extends ElasticsearchIntegrationTest { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; From dbeb4aaacf5e50585b3c4786807d48482cf0522d Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 23 Apr 2015 10:50:01 +0200 Subject: [PATCH 077/236] docs: make sure that the options are rendered correctly --- docs/reference/search/field-stats.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/reference/search/field-stats.asciidoc b/docs/reference/search/field-stats.asciidoc index ab6076ebdce..65dc2c2a6bf 100644 --- a/docs/reference/search/field-stats.asciidoc +++ b/docs/reference/search/field-stats.asciidoc @@ -24,6 +24,7 @@ curl -XGET "http://localhost:9200/index1,index2/_field_stats?fields=rating" -------------------------------------------------- Supported request options: + * `fields` - A list of fields to compute stats for. * `level` - Defines if field stats should be returned on a per index level or on a cluster wide level. Valid values are `indices` and `cluster`. Defaults to `cluster`. From c6cdf7781b27e1340770cf57201370592bd3f43e Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 21 Apr 2015 15:30:35 +0200 Subject: [PATCH 078/236] Mappings: simplify dynamic mappings updates. While dynamic mappings updates are using the same code path as updates from the API when applied on a data node since #10593, they were still using a different code path on the master node. This commit makes dynamic updates processed the same way as updates from the API, which also seems to do a better way at acknowledgements (I could not reproduce the ConcurrentDynamicTemplateTests failure anymore). It also adds more checks, like for instance that indexing on replicas should not trigger dynamic mapping updates since they should have been handled on the primary before. Close #10720 --- .../action/bulk/TransportShardBulkAction.java | 87 +++-- .../action/index/TransportIndexAction.java | 90 +++-- ...nsportShardReplicationOperationAction.java | 2 +- .../action/index/MappingUpdatedAction.java | 364 +++--------------- .../metadata/MetaDataMappingService.java | 41 -- .../index/gateway/IndexShardGateway.java | 2 +- .../termvectors/ShardTermVectorsService.java | 30 +- .../recovery/RecoverySourceHandler.java | 2 +- .../java/org/elasticsearch/node/Node.java | 3 +- .../percolator/PercolatorService.java | 2 +- .../ConcurrentDynamicTemplateTests.java | 1 - 11 files changed, 201 insertions(+), 423 deletions(-) diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 245d7d16033..9e354e82836 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -52,6 +52,7 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.DocumentAlreadyExistsException; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; @@ -352,23 +353,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } - private void applyMappingUpdate(IndexService indexService, String type, Mapping update) throws Throwable { - // HACK: Rivers seem to have something specific that triggers potential - // deadlocks when doing concurrent indexing. So for now they keep the - // old behaviour of updating mappings locally first and then - // asynchronously notifying the master - // this can go away when rivers are removed - final String indexName = indexService.index().name(); - final String indexUUID = indexService.indexUUID(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true); - mappingUpdatedAction.updateMappingOnMaster(indexName, indexUUID, type, update, null); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexUUID, type, update); - indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true); - } - } - private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, ClusterState clusterState, IndexShard indexShard, IndexService indexService, boolean processed) throws Throwable { @@ -392,20 +376,54 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation Engine.IndexingOperation op; if (indexRequest.opType() == IndexRequest.OpType.INDEX) { Engine.Index index = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); - if (index.parsedDoc().dynamicMappingsUpdate() != null) { - applyMappingUpdate(indexService, indexRequest.type(), index.parsedDoc().dynamicMappingsUpdate()); + Mapping update = index.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + final String indexName = indexService.index().name(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. Because we would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document + // So we have no choice but to index first and send mappings afterwards + MapperService mapperService = indexService.mapperService(); + mapperService.merge(indexRequest.type(), new CompressedString(update.toBytes()), true); + indexShard.index(index); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, indexRequest.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexRequest.type(), update); + indexShard.index(index); + } + } else { + indexShard.index(index); } - indexShard.index(index); version = index.version(); op = index; created = index.created(); } else { Engine.Create create = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId()); - if (create.parsedDoc().dynamicMappingsUpdate() != null) { - applyMappingUpdate(indexService, indexRequest.type(), create.parsedDoc().dynamicMappingsUpdate()); + Mapping update = create.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + final String indexName = indexService.index().name(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. Because we would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document + // So we have no choice but to index first and send mappings afterwards + MapperService mapperService = indexService.mapperService(); + mapperService.merge(indexRequest.type(), new CompressedString(update.toBytes()), true); + indexShard.create(create); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, indexRequest.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexRequest.type(), update); + indexShard.create(create); + } + } else { + indexShard.create(create); } - indexShard.create(create); version = create.version(); op = create; created = true; @@ -528,8 +546,9 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); + protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) throws Exception { + IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); + IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); final BulkShardRequest request = shardRequest.request; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; @@ -544,11 +563,29 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation if (indexRequest.opType() == IndexRequest.OpType.INDEX) { Engine.Index index = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); + if (index.parsedDoc().dynamicMappingsUpdate() != null) { + if (indexService.index().name().equals(RiverIndexName.Conf.indexName(settings))) { + // mappings updates on the _river are not validated synchronously so we can't + // assume they are here when indexing on a replica + indexService.mapperService().merge(indexRequest.type(), new CompressedString(index.parsedDoc().dynamicMappingsUpdate().toBytes()), true); + } else { + throw new ElasticsearchIllegalStateException("Index operations on replicas should not trigger dynamic mappings updates: [" + index.parsedDoc().dynamicMappingsUpdate() + "]"); + } + } indexShard.index(index); } else { Engine.Create create = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId()); + if (create.parsedDoc().dynamicMappingsUpdate() != null) { + if (indexService.index().name().equals(RiverIndexName.Conf.indexName(settings))) { + // mappings updates on the _river are not validated synchronously so we can't + // assume they are here when indexing on a replica + indexService.mapperService().merge(indexRequest.type(), new CompressedString(create.parsedDoc().dynamicMappingsUpdate().toBytes()), true); + } else { + throw new ElasticsearchIllegalStateException("Index operations on replicas should not trigger dynamic mappings updates: [" + create.parsedDoc().dynamicMappingsUpdate() + "]"); + } + } indexShard.create(create); } } catch (Throwable e) { diff --git a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 79ea496c317..494f70708cb 100644 --- a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; +import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.RoutingMissingException; @@ -42,6 +43,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; @@ -51,6 +53,8 @@ import org.elasticsearch.river.RiverIndexName; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + /** * Performs the index operation. *

    @@ -167,23 +171,6 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi .indexShards(clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing()); } - private void applyMappingUpdate(IndexService indexService, String type, Mapping update) throws Throwable { - // HACK: Rivers seem to have something specific that triggers potential - // deadlocks when doing concurrent indexing. So for now they keep the - // old behaviour of updating mappings locally first and then - // asynchronously notifying the master - // this can go away when rivers are removed - final String indexName = indexService.index().name(); - final String indexUUID = indexService.indexUUID(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true); - mappingUpdatedAction.updateMappingOnMaster(indexName, indexUUID, type, update, null); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexUUID, type, update); - indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true); - } - } - @Override protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { final IndexRequest request = shardRequest.request; @@ -206,19 +193,53 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi if (request.opType() == IndexRequest.OpType.INDEX) { Engine.Index index = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates()); - if (index.parsedDoc().dynamicMappingsUpdate() != null) { - applyMappingUpdate(indexService, request.type(), index.parsedDoc().dynamicMappingsUpdate()); + Mapping update = index.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + final String indexName = indexService.index().name(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. Because we would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document + // So we have no choice but to index first and send mappings afterwards + MapperService mapperService = indexService.mapperService(); + mapperService.merge(request.type(), new CompressedString(update.toBytes()), true); + indexShard.index(index); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); + indexShard.index(index); + } + } else { + indexShard.index(index); } - indexShard.index(index); version = index.version(); created = index.created(); } else { Engine.Create create = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates(), request.autoGeneratedId()); - if (create.parsedDoc().dynamicMappingsUpdate() != null) { - applyMappingUpdate(indexService, request.type(), create.parsedDoc().dynamicMappingsUpdate()); + Mapping update = create.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + final String indexName = indexService.index().name(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. Because we would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document + // So we have no choice but to index first and send mappings afterwards + MapperService mapperService = indexService.mapperService(); + mapperService.merge(request.type(), new CompressedString(update.toBytes()), true); + indexShard.create(create); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); + indexShard.create(create); + } + } else { + indexShard.create(create); } - indexShard.create(create); version = create.version(); created = true; } @@ -239,17 +260,36 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi } @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); + protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) throws IOException { + IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); + IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); IndexRequest request = shardRequest.request; SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).type(request.type()).id(request.id()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); if (request.opType() == IndexRequest.OpType.INDEX) { Engine.Index index = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates()); + if (index.parsedDoc().dynamicMappingsUpdate() != null) { + if (indexService.index().name().equals(RiverIndexName.Conf.indexName(settings))) { + // mappings updates on the _river are not validated synchronously so we can't + // assume they are here when indexing on a replica + indexService.mapperService().merge(request.type(), new CompressedString(index.parsedDoc().dynamicMappingsUpdate().toBytes()), true); + } else { + throw new ElasticsearchIllegalStateException("Index operations on replicas should not trigger dynamic mappings updates: [" + index.parsedDoc().dynamicMappingsUpdate() + "]"); + } + } indexShard.index(index); } else { Engine.Create create = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates(), request.autoGeneratedId()); + if (create.parsedDoc().dynamicMappingsUpdate() != null) { + if (indexService.index().name().equals(RiverIndexName.Conf.indexName(settings))) { + // mappings updates on the _river are not validated synchronously so we can't + // assume they are here when indexing on a replica + indexService.mapperService().merge(request.type(), new CompressedString(create.parsedDoc().dynamicMappingsUpdate().toBytes()), true); + } else { + throw new ElasticsearchIllegalStateException("Index operations on replicas should not trigger dynamic mappings updates: [" + create.parsedDoc().dynamicMappingsUpdate() + "]"); + } + } indexShard.create(create); } if (request.refresh()) { diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index df99d045177..c5a0fc95efe 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -117,7 +117,7 @@ public abstract class TransportShardReplicationOperationAction shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable; - protected abstract void shardOperationOnReplica(ReplicaOperationRequest shardRequest); + protected abstract void shardOperationOnReplica(ReplicaOperationRequest shardRequest) throws Exception; protected abstract ShardIterator shards(ClusterState clusterState, InternalRequest request) throws ElasticsearchException; diff --git a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 6c5e92b3799..44727699354 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -19,61 +19,31 @@ package org.elasticsearch.cluster.action.index; -import com.google.common.collect.ImmutableMap; - -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.MasterNodeOperationRequest; -import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaDataMappingService; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.compress.CompressedString; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.node.settings.NodeSettingsService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import java.io.IOException; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; /** * Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated * in the cluster state meta data (and broadcast to all members). */ -public class MappingUpdatedAction extends TransportMasterNodeOperationAction { +public class MappingUpdatedAction extends AbstractComponent { public static final String INDICES_MAPPING_DYNAMIC_TIMEOUT = "indices.mapping.dynamic_timeout"; - public static final String ACTION_NAME = "internal:cluster/mapping_updated"; - - private final MetaDataMappingService metaDataMappingService; - - private volatile MasterMappingUpdater masterMappingUpdater; + private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; class ApplySettings implements NodeSettingsService.Listener { @@ -89,44 +59,58 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationActionof())); - final CompressedString mappingSource = new CompressedString(builder.endObject().bytes()); - masterMappingUpdater.add(new MappingChange(index, indexUUID, type, mappingSource, listener)); - } catch (IOException bogus) { - throw new AssertionError("Cannot happen", bogus); + return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString()) + .setMasterNodeTimeout(timeout).setTimeout(timeout); + } + + public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) { + final PutMappingRequestBuilder request = updateMappingRequest(index, type, mappingUpdate, timeout); + if (listener == null) { + request.execute(); + } else { + final ActionListener actionListener = new ActionListener() { + @Override + public void onResponse(PutMappingResponse response) { + if (response.isAcknowledged()) { + listener.onMappingUpdate(); + } else { + listener.onFailure(new TimeoutException("Failed to acknowledge the mapping response within [" + timeout + "]")); + } + } + + @Override + public void onFailure(Throwable e) { + listener.onFailure(e); + } + }; + request.execute(actionListener); } } + public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Throwable { + updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null); + } + /** * Same as {@link #updateMappingOnMasterSynchronously(String, String, String, Mapping, TimeValue)} * using the default timeout. */ - public void updateMappingOnMasterSynchronously(String index, String indexUUID, String type, Mapping mappingUpdate) throws Throwable { - updateMappingOnMasterSynchronously(index, indexUUID, type, mappingUpdate, dynamicMappingUpdateTimeout); + public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Throwable { + updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout); } /** @@ -134,179 +118,9 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction listener) throws ElasticsearchException { - metaDataMappingService.updateMapping(request.index(), request.indexUUID(), request.type(), request.mappingSource(), request.nodeId, new ActionListener() { - @Override - public void onResponse(ClusterStateUpdateResponse response) { - listener.onResponse(new MappingUpdatedResponse()); - } - - @Override - public void onFailure(Throwable t) { - logger.warn("[{}] update-mapping [{}] failed to dynamically update the mapping in cluster_state from shard", t, request.index(), request.type()); - listener.onFailure(t); - } - }); - } - - public static class MappingUpdatedResponse extends ActionResponse { - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } - } - - public static class MappingUpdatedRequest extends MasterNodeOperationRequest implements IndicesRequest { - - private String index; - private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; - private String type; - private CompressedString mappingSource; - private String nodeId = null; // null means not set - - MappingUpdatedRequest() { - } - - public MappingUpdatedRequest(String index, String indexUUID, String type, CompressedString mappingSource, String nodeId) { - this.index = index; - this.indexUUID = indexUUID; - this.type = type; - this.mappingSource = mappingSource; - this.nodeId = nodeId; - } - - public String index() { - return index; - } - - @Override - public IndicesOptions indicesOptions() { - return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - } - - @Override - public String[] indices() { - return new String[]{index}; - } - - public String indexUUID() { - return indexUUID; - } - - public String type() { - return type; - } - - public CompressedString mappingSource() { - return mappingSource; - } - - /** - * Returns null for not set. - */ - public String nodeId() { - return this.nodeId; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - type = in.readString(); - mappingSource = CompressedString.readCompressedString(in); - indexUUID = in.readString(); - nodeId = in.readOptionalString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(index); - out.writeString(type); - mappingSource.writeTo(out); - out.writeString(indexUUID); - out.writeOptionalString(nodeId); - } - - @Override - public String toString() { - return "index [" + index + "], indexUUID [" + indexUUID + "], type [" + type + "] and source [" + mappingSource + "]"; - } - } - - private static class MappingChange { - public final String index; - public final String indexUUID; - public final String type; - public final CompressedString mappingSource; - public final MappingUpdateListener listener; - - MappingChange(String index, String indexUUID, String type, CompressedString mappingSource, MappingUpdateListener listener) { - this.index = index; - this.indexUUID = indexUUID; - this.type = type; - this.mappingSource = mappingSource; - this.listener = listener; + public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Throwable { + if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) { + throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]"); } } @@ -319,90 +133,4 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction - * It also allows to reduce multiple mapping updates on the same index(UUID) and type into one update - * (refreshSource + sending to master), which allows to offload the number of times mappings are updated - * and sent to master for heavy single index requests that each introduce a new mapping, and when - * multiple shards exists on the same nodes, allowing to work on the index level in this case. - */ - private class MasterMappingUpdater extends Thread { - - private volatile boolean running = true; - private final BlockingQueue queue = ConcurrentCollections.newBlockingQueue(); - - public MasterMappingUpdater(String name) { - super(name); - } - - public void add(MappingChange change) { - queue.add(change); - } - - public void close() { - running = false; - this.interrupt(); - } - - @Override - public void run() { - while (running) { - MappingUpdateListener listener = null; - try { - final MappingChange change = queue.poll(10, TimeUnit.MINUTES); - if (change == null) { - continue; - } - listener = change.listener; - - final MappingUpdatedAction.MappingUpdatedRequest mappingRequest; - try { - DiscoveryNode node = clusterService.localNode(); - mappingRequest = new MappingUpdatedAction.MappingUpdatedRequest( - change.index, change.indexUUID, change.type, change.mappingSource, node != null ? node.id() : null - ); - } catch (Throwable t) { - logger.warn("Failed to update master on updated mapping for index [" + change.index + "], type [" + change.type + "]", t); - if (change.listener != null) { - change.listener.onFailure(t); - } - continue; - } - logger.trace("sending mapping updated to master: {}", mappingRequest); - execute(mappingRequest, new ActionListener() { - @Override - public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) { - logger.debug("successfully updated master with mapping update: {}", mappingRequest); - if (change.listener != null) { - change.listener.onMappingUpdate(); - } - } - - @Override - public void onFailure(Throwable e) { - logger.warn("failed to update master on updated mapping for {}", e, mappingRequest); - if (change.listener != null) { - change.listener.onFailure(e); - } - } - }); - } catch (Throwable t) { - if (listener != null) { - // even if the failure is expected, eg. if we got interrupted, - // we need to notify the listener as there might be a latch - // waiting for it to be called - listener.onFailure(t); - } - if (t instanceof InterruptedException && !running) { - // all is well, we are shutting down - } else { - logger.warn("failed to process mapping update", t); - } - } - } - } - } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 690dcceb534..1749e6e271d 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -331,47 +331,6 @@ public class MetaDataMappingService extends AbstractComponent { }); } - public void updateMapping(final String index, final String indexUUID, final String type, final CompressedString mappingSource, final String nodeId, final ActionListener listener) { - final long insertOrder; - synchronized (refreshOrUpdateMutex) { - insertOrder = ++refreshOrUpdateInsertOrder; - refreshOrUpdateQueue.add(new UpdateTask(index, indexUUID, type, mappingSource, nodeId, listener)); - } - clusterService.submitStateUpdateTask("update-mapping [" + index + "][" + type + "] / node [" + nodeId + "]", Priority.HIGH, new ProcessedClusterStateUpdateTask() { - private volatile List allTasks; - - @Override - public void onFailure(String source, Throwable t) { - listener.onFailure(t); - } - - @Override - public ClusterState execute(final ClusterState currentState) throws Exception { - Tuple> tuple = executeRefreshOrUpdate(currentState, insertOrder); - this.allTasks = tuple.v2(); - return tuple.v1(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (allTasks == null) { - return; - } - for (Object task : allTasks) { - if (task instanceof UpdateTask) { - UpdateTask uTask = (UpdateTask) task; - ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true); - try { - uTask.listener.onResponse(response); - } catch (Throwable t) { - logger.debug("failed to ping back on response of mapping processing for task [{}]", t, uTask.listener); - } - } - } - } - }); - } - public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener listener) { clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask(request, listener) { diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java index 05a38b138d4..1cbfaab0672 100644 --- a/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java +++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java @@ -165,7 +165,7 @@ public class IndexShardGateway extends AbstractIndexShardComponent implements Cl private void validateMappingUpdate(final String type, Mapping update) { final CountDownLatch latch = new CountDownLatch(1); final AtomicReference error = new AtomicReference<>(); - mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), indexService.indexUUID(), type, update, new MappingUpdatedAction.MappingUpdateListener() { + mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), type, update, waitForMappingUpdatePostRecovery, new MappingUpdatedAction.MappingUpdateListener() { @Override public void onMappingUpdate() { latch.countDown(); diff --git a/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorsService.java b/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorsService.java index 6d60d21b1fe..9ca66a65ec7 100644 --- a/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorsService.java +++ b/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorsService.java @@ -20,7 +20,12 @@ package org.elasticsearch.index.termvectors; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.index.*; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; import org.apache.lucene.index.memory.MemoryIndex; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.termvectors.TermVectorsFilter; @@ -40,18 +45,30 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.index.mapper.*; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.dfs.AggregatedDfs; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; import static org.elasticsearch.index.mapper.SourceToParse.source; @@ -285,7 +302,6 @@ public class ShardTermVectorsService extends AbstractIndexShardComponent { private ParsedDocument parseDocument(String index, String type, BytesReference doc) throws Throwable { MapperService mapperService = indexShard.mapperService(); - IndexService indexService = indexShard.indexService(); // TODO: make parsing not dynamically create fields not in the original mapping Tuple docMapper = mapperService.documentMapperWithAutoCreate(type); @@ -294,7 +310,7 @@ public class ShardTermVectorsService extends AbstractIndexShardComponent { parsedDocument.addDynamicMappingsUpdate(docMapper.v2()); } if (parsedDocument.dynamicMappingsUpdate() != null) { - mappingUpdatedAction.updateMappingOnMasterSynchronously(index, indexService.indexUUID(), type, parsedDocument.dynamicMappingsUpdate()); + mappingUpdatedAction.updateMappingOnMasterSynchronously(index, type, parsedDocument.dynamicMappingsUpdate()); } return parsedDocument; } diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 54e11c55556..50fd53a0f98 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -567,7 +567,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { } }; for (DocumentMapper documentMapper : documentMappersToUpdate) { - mappingUpdatedAction.updateMappingOnMaster(indexService.index().getName(), indexService.indexUUID(), documentMapper.type(), documentMapper.mapping(), listener); + mappingUpdatedAction.updateMappingOnMaster(indexService.index().getName(), documentMapper.type(), documentMapper.mapping(), recoverySettings.internalActionTimeout(), listener); } cancellableThreads.execute(new Interruptable() { @Override diff --git a/src/main/java/org/elasticsearch/node/Node.java b/src/main/java/org/elasticsearch/node/Node.java index bed0eb2a8df..bf3a81487b8 100644 --- a/src/main/java/org/elasticsearch/node/Node.java +++ b/src/main/java/org/elasticsearch/node/Node.java @@ -242,7 +242,7 @@ public class Node implements Releasable { injector.getInstance(plugin).start(); } - injector.getInstance(MappingUpdatedAction.class).start(); + injector.getInstance(MappingUpdatedAction.class).setClient(client); injector.getInstance(IndicesService.class).start(); injector.getInstance(IndexingMemoryController.class).start(); injector.getInstance(IndicesClusterStateService.class).start(); @@ -285,7 +285,6 @@ public class Node implements Releasable { injector.getInstance(HttpServer.class).stop(); } - injector.getInstance(MappingUpdatedAction.class).stop(); injector.getInstance(RiversManager.class).stop(); injector.getInstance(SnapshotsService.class).stop(); diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index da26759d7c1..6723372f1c8 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -287,7 +287,7 @@ public class PercolatorService extends AbstractComponent { doc.addDynamicMappingsUpdate(docMapper.v2()); } if (doc.dynamicMappingsUpdate() != null) { - mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), documentIndexService.indexUUID(), request.documentType(), doc.dynamicMappingsUpdate()); + mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(), doc.dynamicMappingsUpdate()); } // the document parsing exists the "doc" object, so we need to set the new current field. currentFieldName = parser.currentName(); diff --git a/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java b/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java index 50bbd8e9e2d..28bcde323d2 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java @@ -46,7 +46,6 @@ public class ConcurrentDynamicTemplateTests extends ElasticsearchIntegrationTest private final String mappingType = "test-mapping"; @Test // see #3544 - @AwaitsFix(bugUrl = "adrien is looking into this") public void testConcurrentDynamicMapping() throws Exception { final String fieldName = "field"; final String mapping = "{ \"" + mappingType + "\": {" + From 0ff4827e55457d802ca6110d62e4b91816a09087 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 23 Apr 2015 10:44:23 +0100 Subject: [PATCH 079/236] Fix MaxBucketReducer to use gapPolicy Also moved gapPolicy and format ParseField constants to common class --- .../search/aggregations/reducers/Reducer.java | 3 + .../bucketmetrics/MaxBucketBuilder.java | 11 ++++ .../bucketmetrics/MaxBucketParser.java | 6 +- .../bucketmetrics/MaxBucketReducer.java | 15 +++-- .../reducers/derivative/DerivativeParser.java | 7 +-- .../reducers/movavg/MovAvgParser.java | 5 +- .../aggregations/reducers/MaxBucketTests.java | 61 +++++++++++++++++++ 7 files changed, 93 insertions(+), 15 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java index 5ec45064c7f..8daa4d6180a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/Reducer.java @@ -47,6 +47,9 @@ public abstract class Reducer implements Streamable { public static final ParseField BUCKETS_PATH = new ParseField("buckets_path"); + public static final ParseField FORMAT = new ParseField("format"); + public static final ParseField GAP_POLICY = new ParseField("gap_policy"); + /** * @return The reducer type this parser is associated with. */ diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java index eb04617e548..7fbcd54f789 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java @@ -20,13 +20,16 @@ package org.elasticsearch.search.aggregations.reducers.bucketmetrics; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; import java.io.IOException; public class MaxBucketBuilder extends ReducerBuilder { private String format; + private GapPolicy gapPolicy; public MaxBucketBuilder(String name) { super(name, MaxBucketReducer.TYPE.name()); @@ -37,11 +40,19 @@ public class MaxBucketBuilder extends ReducerBuilder { return this; } + public MaxBucketBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { if (format != null) { builder.field(MaxBucketParser.FORMAT.getPreferredName(), format); } + if (gapPolicy != null) { + builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } return builder; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java index 2a9dab3b6bd..7d773747a8d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.reducers.bucketmetrics; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.support.format.ValueFormat; @@ -46,6 +47,7 @@ public class MaxBucketParser implements Reducer.Parser { String currentFieldName = null; String[] bucketsPaths = null; String format = null; + GapPolicy gapPolicy = GapPolicy.IGNORE; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -55,6 +57,8 @@ public class MaxBucketParser implements Reducer.Parser { format = parser.text(); } else if (BUCKETS_PATH.match(currentFieldName)) { bucketsPaths = new String[] { parser.text() }; + } else if (GAP_POLICY.match(currentFieldName)) { + gapPolicy = GapPolicy.parse(context, parser.text()); } else { throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + currentFieldName + "]."); @@ -86,7 +90,7 @@ public class MaxBucketParser implements Reducer.Parser { formatter = ValueFormat.Patternable.Number.format(format).formatter(); } - return new MaxBucketReducer.Factory(reducerName, bucketsPaths, formatter); + return new MaxBucketReducer.Factory(reducerName, bucketsPaths, gapPolicy, formatter); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java index e209684797c..b325697568e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java @@ -61,6 +61,7 @@ public class MaxBucketReducer extends SiblingReducer { }; private ValueFormatter formatter; + private GapPolicy gapPolicy; public static void registerStreams() { ReducerStreams.registerStream(STREAM, TYPE.stream()); @@ -69,8 +70,10 @@ public class MaxBucketReducer extends SiblingReducer { private MaxBucketReducer() { } - protected MaxBucketReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, Map metaData) { + protected MaxBucketReducer(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter, + Map metaData) { super(name, bucketsPaths, metaData); + this.gapPolicy = gapPolicy; this.formatter = formatter; } @@ -90,7 +93,7 @@ public class MaxBucketReducer extends SiblingReducer { List buckets = multiBucketsAgg.getBuckets(); for (int i = 0; i < buckets.size(); i++) { Bucket bucket = buckets.get(i); - Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, GapPolicy.IGNORE); + Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy); if (bucketValue != null) { if (bucketValue > maxValue) { maxBucketKeys.clear(); @@ -110,25 +113,29 @@ public class MaxBucketReducer extends SiblingReducer { @Override public void doReadFrom(StreamInput in) throws IOException { formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); } @Override public void doWriteTo(StreamOutput out) throws IOException { ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); } public static class Factory extends ReducerFactory { private final ValueFormatter formatter; + private final GapPolicy gapPolicy; - public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter) { + public Factory(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter) { super(name, TYPE.name(), bucketsPaths); + this.gapPolicy = gapPolicy; this.formatter = formatter; } @Override protected Reducer createInternal(Map metaData) throws IOException { - return new MaxBucketReducer(name, bucketsPaths, formatter, metaData); + return new MaxBucketReducer(name, bucketsPaths, gapPolicy, formatter, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index c4d3aa2a229..cfca5c60978 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -19,9 +19,9 @@ package org.elasticsearch.search.aggregations.reducers.derivative; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.support.format.ValueFormat; @@ -32,13 +32,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; - public class DerivativeParser implements Reducer.Parser { - public static final ParseField FORMAT = new ParseField("format"); - public static final ParseField GAP_POLICY = new ParseField("gap_policy"); - @Override public String type() { return DerivativeReducer.TYPE.name(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java index c1cdadf91ea..5d79b1d1e7a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModel; @@ -37,12 +38,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; - public class MovAvgParser implements Reducer.Parser { - public static final ParseField FORMAT = new ParseField("format"); - public static final ParseField GAP_POLICY = new ParseField("gap_policy"); public static final ParseField MODEL = new ParseField("model"); public static final ParseField WINDOW = new ParseField("window"); public static final ParseField SETTINGS = new ParseField("settings"); diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java index 48d93766bfc..84e559e4970 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -244,6 +245,66 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { List termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() != 0) { + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() > maxValue) { + maxValue = sum.value(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + } + + InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + } + + @Test + public void testMetric_asSubAggWithInsertZeros() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) + .execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + for (int i = 0; i < interval; ++i) { Terms.Bucket termsBucket = termsBuckets.get(i); assertThat(termsBucket, notNullValue()); From dbbfe39415757845f846b6ea4cc4238c360e9df6 Mon Sep 17 00:00:00 2001 From: Alexander Date: Thu, 16 Apr 2015 11:36:14 +0900 Subject: [PATCH 080/236] [Docs] fix typo in scripting module Closes #10622 --- docs/reference/modules/scripting.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index c0ea9368c23..26b50c97ac1 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -69,7 +69,7 @@ GET /_search { "script_fields": { "my_field": { - "script_file": "my_test", + "script_file": "my_script", "params": { "my_var": 2 } From 9eabcd7c0f44fec7b59c37d6ce3a0acf2b29c891 Mon Sep 17 00:00:00 2001 From: Mal Curtis Date: Thu, 16 Apr 2015 16:17:34 +1200 Subject: [PATCH 081/236] Docs: Fix missing comma in context suggester docs Closes #10623 --- docs/reference/search/suggesters/context-suggest.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index bdc8157af00..c09659a43a9 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -106,7 +106,7 @@ or as reference to another field within the documents indexed: "context": { "color": { "type": "category", - "default": "red" + "default": "red", "path": "color_field" } } From 2e2e345dcb90e1ec1c48c6bb5348469fc514aea1 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 23 Apr 2015 14:53:09 +0200 Subject: [PATCH 082/236] Tests: Mute RiverTests.testMultipleRiversStart. --- src/test/java/org/elasticsearch/river/RiverTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/java/org/elasticsearch/river/RiverTests.java b/src/test/java/org/elasticsearch/river/RiverTests.java index 1d7c308d497..9cf698d661d 100644 --- a/src/test/java/org/elasticsearch/river/RiverTests.java +++ b/src/test/java/org/elasticsearch/river/RiverTests.java @@ -46,6 +46,7 @@ public class RiverTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl="occasionally fails apparently due to synchronous mappings updates") public void testMultipleRiversStart() throws Exception { int nbRivers = between(2,10); logger.info("--> testing with {} rivers...", nbRivers); From adc0807c686b37bf1d96bd3eb9d26face380fe44 Mon Sep 17 00:00:00 2001 From: tlrx Date: Thu, 8 Jan 2015 18:31:04 +0100 Subject: [PATCH 083/236] Internal: Add METADATA_READ and METADATA_WRITE blocks This commit splits the current ClusterBlockLevel.METADATA into two disctins ClusterBlockLevel.METADATA_READ and ClusterBlockLevel.METADATA_WRITE blocks. It allows to make a distinction between an operation that modifies the index or cluster metadata and an operation that does not change any metadata. Before this commit, many operations where blocked when the cluster was read-only: Cluster Stats, Get Mappings, Get Snapshot, Get Index Settings, etc. Now those operations are allowed even when the cluster or the index is read-only. Related to #8102, #2833 Closes #3703 Closes #5855 Closes #10521 Closes #10522 --- .../indices.exists/20_read_only_index.yaml | 30 ++++ .../TransportNodesShutdownAction.java | 3 +- .../TransportDeleteRepositoryAction.java | 2 +- .../get/TransportGetRepositoriesAction.java | 2 +- .../put/TransportPutRepositoryAction.java | 2 +- .../TransportVerifyRepositoryAction.java | 2 +- .../TransportClusterRerouteAction.java | 2 +- .../TransportClusterUpdateSettingsAction.java | 2 +- .../TransportClusterSearchShardsAction.java | 2 +- .../create/TransportCreateSnapshotAction.java | 2 +- .../delete/TransportDeleteSnapshotAction.java | 2 +- .../get/TransportGetSnapshotsAction.java | 2 +- .../TransportRestoreSnapshotAction.java | 8 +- .../TransportSnapshotsStatusAction.java | 2 +- .../TransportPendingClusterTasksAction.java | 2 +- .../alias/TransportIndicesAliasesAction.java | 2 +- .../exists/TransportAliasesExistAction.java | 2 +- .../alias/get/TransportGetAliasesAction.java | 2 +- .../TransportClearIndicesCacheAction.java | 4 +- .../close/TransportCloseIndexAction.java | 2 +- .../create/TransportCreateIndexAction.java | 2 +- .../delete/TransportDeleteIndexAction.java | 2 +- .../indices/TransportIndicesExistsAction.java | 2 +- .../types/TransportTypesExistsAction.java | 2 +- .../indices/flush/TransportFlushAction.java | 4 +- .../indices/get/TransportGetIndexAction.java | 2 +- .../TransportGetFieldMappingsIndexAction.java | 9 +- .../get/TransportGetMappingsAction.java | 2 +- .../put/TransportPutMappingAction.java | 2 +- .../open/TransportOpenIndexAction.java | 2 +- .../optimize/TransportOptimizeAction.java | 4 +- .../refresh/TransportRefreshAction.java | 4 +- .../TransportIndicesSegmentsAction.java | 4 +- .../get/TransportGetSettingsAction.java | 2 +- .../put/TransportUpdateSettingsAction.java | 4 +- .../stats/TransportIndicesStatsAction.java | 4 +- .../TransportDeleteIndexTemplateAction.java | 2 +- .../get/TransportGetIndexTemplatesAction.java | 2 +- .../put/TransportPutIndexTemplateAction.java | 2 +- .../delete/TransportDeleteWarmerAction.java | 2 +- .../warmer/get/TransportGetWarmersAction.java | 3 +- .../warmer/put/TransportPutWarmerAction.java | 8 +- .../cluster/block/ClusterBlock.java | 4 +- .../cluster/block/ClusterBlockLevel.java | 44 ++++- .../cluster/metadata/IndexMetaData.java | 4 +- .../cluster/metadata/MetaData.java | 2 +- .../discovery/DiscoverySettings.java | 2 +- .../org/elasticsearch/tribe/TribeService.java | 2 +- .../repositories/RepositoryBlocksTests.java | 115 +++++++++++++ .../snapshots/SnapshotBlocksTests.java | 159 ++++++++++++++++++ .../tasks/PendingTasksBlocksTests.java | 57 +++++++ .../clear/ClearIndicesCacheBlocksTests.java | 64 +++++++ .../indices/create/CreateIndexTests.java | 11 ++ .../delete/DeleteIndexBlocksTests.java | 43 +++++ .../admin/indices/flush/FlushBlocksTests.java | 82 +++++++++ .../admin/indices/get/GetIndexTests.java | 34 +++- .../indices/optimize/OptimizeBlocksTests.java | 82 +++++++++ .../indices/refresh/RefreshBlocksTests.java | 78 +++++++++ .../segments/IndicesSegmentsBlocksTests.java | 65 +++++++ .../stats/IndicesStatsBlocksTests.java | 62 +++++++ .../aliases/IndexAliasesTests.java | 61 ++++++- .../blocks/SimpleBlocksTests.java | 13 +- .../cluster/BlockClusterStatsTests.java | 117 ------------- .../allocation/ClusterRerouteTests.java | 62 ++++++- .../cluster/block/ClusterBlockTests.java | 84 +++++++++ .../settings/ClusterSettingsTests.java | 39 +++++ .../shards/ClusterSearchShardsTests.java | 38 ++++- .../gateway/RecoverAfterNodesTests.java | 32 ++-- .../IndicesOptionsIntegrationTests.java | 19 --- .../exists/indices/IndicesExistsTests.java | 80 +++++++++ .../exists/types/TypesExistsTests.java | 30 ++++ .../mapping/SimpleGetFieldMappingsTests.java | 32 +++- .../mapping/SimpleGetMappingsTests.java | 29 ++++ .../indices/mapping/UpdateMappingTests.java | 29 +++- .../settings/GetSettingsBlocksTests.java | 65 +++++++ .../indices/settings/UpdateSettingsTests.java | 34 +++- .../indices/state/OpenCloseIndexTests.java | 62 ++++++- .../template/IndexTemplateBlocksTests.java | 66 ++++++++ .../warmer/IndicesWarmerBlocksTests.java | 158 +++++++++++++++++ .../test/ElasticsearchIntegrationTest.java | 18 ++ .../hamcrest/ElasticsearchAssertions.java | 39 +++++ 81 files changed, 1912 insertions(+), 252 deletions(-) create mode 100644 rest-api-spec/test/indices.exists/20_read_only_index.yaml create mode 100644 src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java delete mode 100644 src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java create mode 100644 src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java create mode 100644 src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java create mode 100644 src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java create mode 100644 src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java diff --git a/rest-api-spec/test/indices.exists/20_read_only_index.yaml b/rest-api-spec/test/indices.exists/20_read_only_index.yaml new file mode 100644 index 00000000000..24d2dcdc08f --- /dev/null +++ b/rest-api-spec/test/indices.exists/20_read_only_index.yaml @@ -0,0 +1,30 @@ +--- +"Test indices.exists on a read only index": + + - do: + indices.create: + index: test_index_ro + + - do: + indices.put_settings: + index: test_index_ro + body: + index.blocks.read_only: true + + - do: + indices.exists: + index: test_index_ro + + - is_true: '' + + - do: + indices.put_settings: + index: test_index_ro + body: + index.blocks.read_only: false + + - do: + indices.exists: + index: test_index_ro + + - is_true: '' diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java index 34fc6891b23..3199f62a6ce 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java @@ -75,7 +75,8 @@ public class TransportNodesShutdownAction extends TransportMasterNodeOperationAc @Override protected ClusterBlockException checkBlock(NodesShutdownRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + // Stopping a node impacts the cluster state, so we check for the METADATA_WRITE block here + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index e6b3778d1ce..4ddac8e58b4 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -65,7 +65,7 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeOperatio @Override protected ClusterBlockException checkBlock(DeleteRepositoryRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, ""); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index a48d5293b46..a91c7c91ae4 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -65,7 +65,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadOpera @Override protected ClusterBlockException checkBlock(GetRepositoriesRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, ""); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 1826f751705..698c3277986 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -65,7 +65,7 @@ public class TransportPutRepositoryAction extends TransportMasterNodeOperationAc @Override protected ClusterBlockException checkBlock(PutRepositoryRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, ""); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 308db280367..2c5810d02be 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -69,7 +69,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeOperatio @Override protected ClusterBlockException checkBlock(VerifyRepositoryRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, ""); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 5606983d334..db66ad3aa72 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -58,7 +58,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeOperationA @Override protected ClusterBlockException checkBlock(ClusterRerouteRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 40a491de253..2f7b816482d 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -76,7 +76,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && request.transientSettings().get(MetaData.SETTING_READ_ONLY) != null) { return null; } - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index c0ba33554cc..2a83f0f08b8 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -58,7 +58,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadO @Override protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 4742959f563..db9f4226c55 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -65,7 +65,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeOperationA @Override protected ClusterBlockException checkBlock(CreateSnapshotRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, ""); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 0e5ec8ed3fb..28993564147 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -64,7 +64,7 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationA @Override protected ClusterBlockException checkBlock(DeleteSnapshotRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, ""); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 4420777bd52..421af7832d1 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -67,7 +67,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAct @Override protected ClusterBlockException checkBlock(GetSnapshotsRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, ""); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index 5a8a06ff3bc..a815271ff1f 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -65,7 +65,13 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeOperation @Override protected ClusterBlockException checkBlock(RestoreSnapshotRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + // Restoring a snapshot might change the global state and create/change an index, + // so we need to check for METADATA_WRITE and WRITE blocks + ClusterBlockException blockException = state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, ""); + if (blockException != null) { + return blockException; + } + return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, ""); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 967ac3808ec..338592710b8 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -70,7 +70,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation @Override protected ClusterBlockException checkBlock(SnapshotsStatusRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 2e46a5af070..03edcf55c1b 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -53,7 +53,7 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadO @Override protected ClusterBlockException checkBlock(PendingClusterTasksRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 360765c9358..5820cffe5f7 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -78,7 +78,7 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA indices.add(index); } } - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, indices.toArray(new String[indices.size()])); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indices.toArray(new String[indices.size()])); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java index 327a5e16e0f..89c8afc3c4c 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java @@ -49,7 +49,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadOperatio @Override protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index dcb817f2191..e699580b704 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -52,7 +52,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadOperationA @Override protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index e49c577463d..2b79f98aca5 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -175,12 +175,12 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio @Override protected ClusterBlockException checkGlobalBlock(ClusterState state, ClearIndicesCacheRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @Override protected ClusterBlockException checkRequestBlock(ClusterState state, ClearIndicesCacheRequest request, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index ae2fd4c1450..4be9d842d26 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -76,7 +76,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio @Override protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 3a283d68041..9edd9b74664 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -67,7 +67,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi @Override protected ClusterBlockException checkBlock(CreateIndexRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, request.index()); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.index()); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 6272926cce7..72d27374614 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -76,7 +76,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi @Override protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index e6ca1ae7947..a8064a78164 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -65,7 +65,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadOperati protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) { //make sure through indices options that the concrete indices call never throws IndexMissingException IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed()); - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, clusterService.state().metaData().concreteIndices(indicesOptions, request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, clusterService.state().metaData().concreteIndices(indicesOptions, request.indices())); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index b7d10299204..a703cfbb741 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -62,7 +62,7 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadOperation @Override protected ClusterBlockException checkBlock(TypesExistsRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 94fe3a41cae..ffdd71e6936 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -120,11 +120,11 @@ public class TransportFlushAction extends TransportBroadcastOperationAction levels = new ArrayList<>(); for (int i = 0; i < len; i++) { - levels.add(ClusterBlockLevel.fromId(in.readVInt())); + levels.addAll(ClusterBlockLevel.fromId(in.readVInt())); } this.levels = EnumSet.copyOf(levels); retryable = in.readBoolean(); @@ -145,7 +145,7 @@ public class ClusterBlock implements Serializable, Streamable, ToXContent { out.writeString(description); out.writeVInt(levels.size()); for (ClusterBlockLevel level : levels) { - out.writeVInt(level.id()); + out.writeVInt(level.toId(out.getVersion())); } out.writeBoolean(retryable); out.writeBoolean(disableStatePersistence); diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java index 2c88da8986d..3ed8d999ad5 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.block; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.Version; import java.util.EnumSet; @@ -29,9 +30,18 @@ import java.util.EnumSet; public enum ClusterBlockLevel { READ(0), WRITE(1), - METADATA(2); - public static final EnumSet ALL = EnumSet.of(READ, WRITE, METADATA); + /** + * Since 1.6.0, METADATA has been split into two distincts cluster block levels + * @deprecated Use METADATA_READ or METADATA_WRITE instead. + */ + @Deprecated + METADATA(2), + + METADATA_READ(3), + METADATA_WRITE(4); + + public static final EnumSet ALL = EnumSet.of(READ, WRITE, METADATA_READ, METADATA_WRITE); public static final EnumSet READ_WRITE = EnumSet.of(READ, WRITE); private final int id; @@ -44,13 +54,35 @@ public enum ClusterBlockLevel { return this.id; } - public static ClusterBlockLevel fromId(int id) { + /** + * Returns the ClusterBlockLevel's id according to a given version, this to ensure backward compatibility. + * + * @param version the version + * @return the ClusterBlockLevel's id + */ + public int toId(Version version) { + assert version != null : "Version shouldn't be null"; + // Since 1.6.0, METADATA has been split into two distincts cluster block levels + if (version.before(Version.V_1_6_0)) { + if (this == ClusterBlockLevel.METADATA_READ || this == ClusterBlockLevel.METADATA_WRITE) { + return ClusterBlockLevel.METADATA.id(); + } + } + return id(); + } + + static EnumSet fromId(int id) { if (id == 0) { - return READ; + return EnumSet.of(READ); } else if (id == 1) { - return WRITE; + return EnumSet.of(WRITE); } else if (id == 2) { - return METADATA; + // Since 1.6.0, METADATA has been split into two distincts cluster block levels + return EnumSet.of(METADATA_READ, METADATA_WRITE); + } else if (id == 3) { + return EnumSet.of(METADATA_READ); + } else if (id == 4) { + return EnumSet.of(METADATA_WRITE); } throw new ElasticsearchIllegalArgumentException("No cluster block level matching [" + id + "]"); } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index d84250ebcd7..1543151fad0 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -117,10 +117,10 @@ public class IndexMetaData { return factory; } - public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA)); + public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); - public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA)); + public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ)); public static enum State { OPEN((byte) 0), diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 2026a148b26..51793b1d27b 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -127,7 +127,7 @@ public class MetaData implements Iterable { public static final String SETTING_READ_ONLY = "cluster.blocks.read_only"; - public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA)); + public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final MetaData EMPTY_META_DATA = builder().build(); diff --git a/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index b8d48b16129..420e6a399d1 100644 --- a/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -44,7 +44,7 @@ public class DiscoverySettings extends AbstractComponent { public final static int NO_MASTER_BLOCK_ID = 2; public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); - public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA)); + public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); private volatile ClusterBlock noMasterBlock; private volatile TimeValue publishTimeout = DEFAULT_PUBLISH_TIMEOUT; diff --git a/src/main/java/org/elasticsearch/tribe/TribeService.java b/src/main/java/org/elasticsearch/tribe/TribeService.java index 90989561eff..3994cbacaff 100644 --- a/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -73,7 +73,7 @@ import java.util.Set; */ public class TribeService extends AbstractLifecycleComponent { - public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA)); + public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE)); public static Settings processSettings(Settings settings) { diff --git a/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java new file mode 100644 index 00000000000..a901b103516 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.repositories; + +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only. + * + * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". + */ +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class RepositoryBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testPutRepositoryWithBlocks() { + logger.info("--> registering a repository is blocked when the cluster is read only"); + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().preparePutRepository("test-repo-blocks") + .setType("fs") + .setVerify(false) + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())), MetaData.CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + + logger.info("--> registering a repository is allowed when the cluster is not read only"); + assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks") + .setType("fs") + .setVerify(false) + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir()))); + } + + @Test + public void testVerifyRepositoryWithBlocks() { + assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks") + .setType("fs") + .setVerify(false) + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir()))); + + // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. + try { + setClusterReadOnly(true); + VerifyRepositoryResponse response = client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute().actionGet(); + assertThat(response.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + } finally { + setClusterReadOnly(false); + } + } + + @Test + public void testDeleteRepositoryWithBlocks() { + assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks") + .setType("fs") + .setVerify(false) + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir()))); + + logger.info("--> deleting a repository is blocked when the cluster is read only"); + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareDeleteRepository("test-repo-blocks"), MetaData.CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + + logger.info("--> deleting a repository is allowed when the cluster is not read only"); + assertAcked(client().admin().cluster().prepareDeleteRepository("test-repo-blocks")); + } + + @Test + public void testGetRepositoryWithBlocks() { + assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks") + .setType("fs") + .setVerify(false) + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir()))); + + // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. + try { + setClusterReadOnly(true); + GetRepositoriesResponse response = client().admin().cluster().prepareGetRepositories("test-repo-blocks").execute().actionGet(); + assertThat(response.repositories(), hasSize(1)); + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java new file mode 100644 index 00000000000..e6ad356c21c --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java @@ -0,0 +1,159 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.cluster.snapshots; + +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Before; +import org.junit.Test; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only. + * + * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". + */ +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class SnapshotBlocksTests extends ElasticsearchIntegrationTest { + + protected static final String INDEX_NAME = "test-blocks"; + protected static final String REPOSITORY_NAME = "repo-" + INDEX_NAME; + protected static final String SNAPSHOT_NAME = "snapshot-0"; + + @Before + protected void setUpRepository() throws Exception { + createIndex(INDEX_NAME); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex(INDEX_NAME, "type").setSource("test", "init").execute().actionGet(); + } + + logger.info("--> register a repository"); + assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME) + .setType("fs") + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir()))); + + logger.info("--> verify the repository"); + VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get(); + assertThat(verifyResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + + logger.info("--> create a snapshot"); + CreateSnapshotResponse snapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .execute().actionGet(); + assertThat(snapshotResponse.status(), equalTo(RestStatus.OK)); + ensureSearchable(); + } + + @Test + public void testCreateSnapshotWithBlocks() { + logger.info("--> creating a snapshot is blocked when the cluster is read only"); + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1"), MetaData.CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + + logger.info("--> creating a snapshot is allowed when the cluster is not read only"); + CreateSnapshotResponse response = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + .setWaitForCompletion(true) + .execute().actionGet(); + assertThat(response.status(), equalTo(RestStatus.OK)); + } + + @Test + public void testDeleteSnapshotWithBlocks() { + logger.info("--> deleting a snapshot is blocked when the cluster is read only"); + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareDeleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), MetaData.CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + + logger.info("--> deleting a snapshot is allowed when the cluster is not read only"); + DeleteSnapshotResponse response = client().admin().cluster().prepareDeleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME).execute().actionGet(); + assertThat(response.isAcknowledged(), equalTo(true)); + } + + @Test + public void testRestoreSnapshotWithBlocks() { + assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); + assertFalse(client().admin().indices().prepareExists(INDEX_NAME).get().isExists()); + + logger.info("--> restoring a snapshot is blocked when the cluster is read only"); + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), MetaData.CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + + logger.info("--> creating a snapshot is allowed when the cluster is not read only"); + RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) + .setWaitForCompletion(true) + .execute().actionGet(); + assertThat(response.status(), equalTo(RestStatus.OK)); + assertTrue(client().admin().indices().prepareExists(INDEX_NAME).get().isExists()); + } + + @Test + public void testGetSnapshotWithBlocks() { + // This test checks that the Get Snapshot operation is never blocked, even if the cluster is read only. + try { + setClusterReadOnly(true); + GetSnapshotsResponse response = client().admin().cluster().prepareGetSnapshots(REPOSITORY_NAME).execute().actionGet(); + assertThat(response.getSnapshots(), hasSize(1)); + assertThat(response.getSnapshots().get(0).name(), equalTo(SNAPSHOT_NAME)); + } finally { + setClusterReadOnly(false); + } + } + + @Test + public void testSnapshotStatusWithBlocks() { + // This test checks that the Snapshot Status operation is never blocked, even if the cluster is read only. + try { + setClusterReadOnly(true); + SnapshotsStatusResponse response = client().admin().cluster().prepareSnapshotStatus(REPOSITORY_NAME) + .setSnapshots(SNAPSHOT_NAME) + .execute().actionGet(); + assertThat(response.getSnapshots(), hasSize(1)); + assertThat(response.getSnapshots().get(0).getState().completed(), equalTo(true)); + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java new file mode 100644 index 00000000000..004f2c85daf --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.tasks; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class PendingTasksBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testPendingTasksWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + // This test checks that the Pending Cluster Tasks operation is never blocked, even if an index is read only or whatever. + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet(); + assertNotNull(response.getPendingTasks()); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + try { + setClusterReadOnly(true); + PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet(); + assertNotNull(response.getPendingTasks()); + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java new file mode 100644 index 00000000000..b9d4b2e685c --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.cache.clear; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testClearIndicesCacheWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + NumShards numShards = getNumShards("test"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setIdCache(true).execute().actionGet(); + assertNoFailures(clearIndicesCacheResponse); + assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setIdCache(true)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java b/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java index 638c9a61fb4..1b0102f0071 100644 --- a/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java +++ b/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java @@ -34,6 +34,7 @@ import org.junit.Test; import java.util.HashMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.*; import static org.hamcrest.core.IsNull.notNullValue; @@ -140,4 +141,14 @@ public class CreateIndexTests extends ElasticsearchIntegrationTest{ e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true)); } } + + @Test + public void testCreateIndexWithBlocks() { + try { + setClusterReadOnly(true); + assertBlocked(prepareCreate("test")); + } finally { + setClusterReadOnly(false); + } + } } diff --git a/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java new file mode 100644 index 00000000000..6973f63a227 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.delete; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class DeleteIndexBlocksTests extends ElasticsearchIntegrationTest{ + + @Test + public void testDeleteIndexWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + try { + setClusterReadOnly(true); + assertBlocked(client().admin().indices().prepareDelete("test")); + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java new file mode 100644 index 00000000000..b9fa6bcd8b5 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class FlushBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testFlushWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + NumShards numShards = getNumShards("test"); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + FlushResponse response = client().admin().indices().prepareFlush("test").execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareFlush("test")); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Flushing all indices is blocked when the cluster is read-only + try { + FlushResponse response = client().admin().indices().prepareFlush().execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + + setClusterReadOnly(true); + assertBlocked(client().admin().indices().prepareFlush()); + } finally { + setClusterReadOnly(false); + } + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java b/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java index f335d40e79b..2b4f4bcab6b 100644 --- a/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java +++ b/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.get; import com.google.common.collect.ImmutableList; - import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -34,12 +33,13 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.*; @ElasticsearchIntegrationTest.SuiteScopeTest public class GetIndexTests extends ElasticsearchIntegrationTest { @@ -205,6 +205,32 @@ public class GetIndexTests extends ElasticsearchIntegrationTest { assertEmptyWarmers(response); } + @Test + public void testGetIndexWithBlocks() { + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("idx", block); + GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices("idx") + .addFeatures(Feature.MAPPINGS, Feature.ALIASES).get(); + String[] indices = response.indices(); + assertThat(indices, notNullValue()); + assertThat(indices.length, equalTo(1)); + assertThat(indices[0], equalTo("idx")); + assertMappings(response, "idx"); + assertAliases(response, "idx"); + } finally { + disableIndexBlock("idx", block); + } + } + + try { + enableIndexBlock("idx", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("idx", SETTING_BLOCKS_METADATA); + } + } + private GetIndexResponse runWithRandomFeatureMethod(GetIndexRequestBuilder requestBuilder, Feature... features) { if (randomBoolean()) { return requestBuilder.addFeatures(features).get(); diff --git a/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java new file mode 100644 index 00000000000..47b5fbe0da8 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.optimize; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class OptimizeBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testOptimizeWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + NumShards numShards = getNumShards("test"); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + OptimizeResponse response = client().admin().indices().prepareOptimize("test").execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareOptimize("test")); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Optimizing all indices is blocked when the cluster is read-only + try { + OptimizeResponse response = client().admin().indices().prepareOptimize().execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + + setClusterReadOnly(true); + assertBlocked(client().admin().indices().prepareFlush()); + } finally { + setClusterReadOnly(false); + } + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java new file mode 100644 index 00000000000..fc83f96eb3c --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.refresh; + + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class RefreshBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testRefreshWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + NumShards numShards = getNumShards("test"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + RefreshResponse response = client().admin().indices().prepareRefresh("test").execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareRefresh("test")); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Refreshing all indices is blocked when the cluster is read-only + try { + RefreshResponse response = client().admin().indices().prepareRefresh().execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + + setClusterReadOnly(true); + assertBlocked(client().admin().indices().prepareRefresh()); + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java new file mode 100644 index 00000000000..a3f2f9f1044 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.segments; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndicesSegmentsBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testIndicesSegmentsWithBlocks() { + createIndex("test-blocks"); + ensureGreen("test-blocks"); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex("test-blocks", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + client().admin().indices().prepareFlush("test-blocks").get(); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test-blocks", blockSetting); + IndicesSegmentResponse response = client().admin().indices().prepareSegments("test-blocks").execute().actionGet(); + assertNoFailures(response); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareSegments("test-blocks")); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java new file mode 100644 index 00000000000..d6dba10e696 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.stats; + +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndicesStatsBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testIndicesStatsWithBlocks() { + createIndex("ro"); + ensureGreen("ro"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("ro", blockSetting); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("ro").execute().actionGet(); + assertNotNull(indicesStatsResponse.getIndex("ro")); + } finally { + disableIndexBlock("ro", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + client().admin().indices().prepareStats("ro").execute().actionGet(); + fail("Exists should fail when " + IndexMetaData.SETTING_BLOCKS_METADATA + " is true"); + } catch (ClusterBlockException e) { + // Ok, a ClusterBlockException is expected + } finally { + disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + } + } +} diff --git a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java index 3a23269d055..4cdafec11a4 100644 --- a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java +++ b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; +import java.util.Arrays; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -57,14 +58,13 @@ import java.util.concurrent.TimeUnit; import static com.google.common.collect.Sets.newHashSet; import static org.elasticsearch.client.Requests.createIndexRequest; import static org.elasticsearch.client.Requests.indexRequest; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.index.query.FilterBuilders.*; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; import static org.elasticsearch.test.hamcrest.CollectionAssertions.hasKey; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.*; /** @@ -908,10 +908,10 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("test") .addMapping("type", "field", "type=string") .setAliases("{\n" + - " \"alias1\" : {},\n" + - " \"alias2\" : {\"filter\" : {\"term\": {\"field\":\"value\"}}},\n" + - " \"alias3\" : { \"index_routing\" : \"index\", \"search_routing\" : \"search\"}\n" + - "}")); + " \"alias1\" : {},\n" + + " \"alias2\" : {\"filter\" : {\"term\": {\"field\":\"value\"}}},\n" + + " \"alias3\" : { \"index_routing\" : \"index\", \"search_routing\" : \"search\"}\n" + + "}")); checkAliases(); } @@ -989,13 +989,56 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { @Test public void testAliasesFilterWithHasChildQuery() throws Exception { assertAcked(prepareCreate("my-index") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent") + .addMapping("parent") + .addMapping("child", "_parent", "type=parent") ); assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", hasChildFilter("child", matchAllQuery()))); assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter2", hasParentFilter("child", matchAllQuery()))); } + @Test + public void testAliasesWithBlocks() { + createIndex("test"); + ensureGreen(); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", block); + + assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1").addAlias("test", "alias2")); + assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1")); + assertThat(admin().indices().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1)); + assertThat(admin().indices().prepareAliasesExist("alias2").get().exists(), equalTo(true)); + } finally { + disableIndexBlock("test", block); + } + } + + try { + enableIndexBlock("test", SETTING_READ_ONLY); + + assertBlocked(admin().indices().prepareAliases().addAlias("test", "alias3"), INDEX_READ_ONLY_BLOCK); + assertBlocked(admin().indices().prepareAliases().removeAlias("test", "alias2"), INDEX_READ_ONLY_BLOCK); + assertThat(admin().indices().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1)); + assertThat(admin().indices().prepareAliasesExist("alias2").get().exists(), equalTo(true)); + + } finally { + disableIndexBlock("test", SETTING_READ_ONLY); + } + + try { + enableIndexBlock("test", SETTING_BLOCKS_METADATA); + + assertBlocked(admin().indices().prepareAliases().addAlias("test", "alias3"), INDEX_METADATA_BLOCK); + assertBlocked(admin().indices().prepareAliases().removeAlias("test", "alias2"), INDEX_METADATA_BLOCK); + assertBlocked(admin().indices().prepareGetAliases("alias2"), INDEX_METADATA_BLOCK); + assertBlocked(admin().indices().prepareAliasesExist("alias2"), INDEX_METADATA_BLOCK); + + } finally { + disableIndexBlock("test", SETTING_BLOCKS_METADATA); + } + } + private void checkAliases() { GetAliasesResponse getAliasesResponse = admin().indices().prepareGetAliases("alias1").get(); assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); diff --git a/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java b/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java index a68fccf4f58..3fbdfa142ac 100644 --- a/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java +++ b/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java @@ -49,14 +49,14 @@ public class SimpleBlocksTests extends ElasticsearchIntegrationTest { canIndexExists("test1"); // cluster.read_only = true: block write and metadata - setClusterReadOnly("true"); + setClusterReadOnly(true); canNotCreateIndex("test2"); // even if index has index.read_only = false canNotIndexDocument("test1"); - canNotIndexExists("test1"); + canIndexExists("test1"); // cluster.read_only = false: removes the block - setClusterReadOnly("false"); + setClusterReadOnly(false); canCreateIndex("test2"); canIndexDocument("test2"); canIndexDocument("test1"); @@ -71,7 +71,7 @@ public class SimpleBlocksTests extends ElasticsearchIntegrationTest { // adds index write and metadata block setIndexReadOnly( "ro", "true"); canNotIndexDocument("ro"); - canNotIndexExists("ro"); + canIndexExists("ro"); // other indices not blocked canCreateIndex("rw"); @@ -156,11 +156,6 @@ public class SimpleBlocksTests extends ElasticsearchIntegrationTest { } } - private void setClusterReadOnly(String value) { - Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet(); - } - private void setIndexReadOnly(String index, Object value) { HashMap newSettings = new HashMap<>(); newSettings.put(IndexMetaData.SETTING_READ_ONLY, value); diff --git a/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java b/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java deleted file mode 100644 index 31abbc2c020..00000000000 --- a/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.cluster; - -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.block.ClusterBlock; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; - -import static org.elasticsearch.test.ElasticsearchIntegrationTest.*; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.*; - -/** - * Scoped as test, because the if the test with cluster read only block fails, all other tests fail as well, as this is not cleaned up properly - */ -@ClusterScope(scope= Scope.TEST) -public class BlockClusterStatsTests extends ElasticsearchIntegrationTest { - - @Test - public void testBlocks() throws Exception { - assertAcked(prepareCreate("foo").addAlias(new Alias("foo-alias"))); - try { - assertAcked(client().admin().indices().prepareUpdateSettings("foo").setSettings( - ImmutableSettings.settingsBuilder().put("index.blocks.read_only", true))); - ClusterUpdateSettingsResponse updateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTransientSettings( - ImmutableSettings.settingsBuilder().put("cluster.blocks.read_only", true).build()).get(); - assertThat(updateSettingsResponse.isAcknowledged(), is(true)); - - ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().setLocal(true).clear().setBlocks(true).get(); - assertThat(clusterStateResponseUnfiltered.getState().blocks().global(), hasSize(1)); - assertThat(clusterStateResponseUnfiltered.getState().blocks().indices().size(), is(1)); - ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get(); - assertThat(clusterStateResponse.getState().blocks().global(), hasSize(0)); - assertThat(clusterStateResponse.getState().blocks().indices().size(), is(0)); - - try { - client().admin().indices().prepareClose("foo-alias").get(); - fail("close index should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().preparePutMapping("foo-alias").setType("type1").setSource("field1", "type=string").get(); - fail("put mapping should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().preparePutWarmer("foo-alias").setSearchRequest(Requests.searchRequest("foo-alias")).get(); - fail("put warmer should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().prepareDeleteWarmer().setIndices("foo-alias").setNames("warmer1").get(); - fail("delete warmer should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().prepareTypesExists("foo-alias").setTypes("test").get(); - fail("types exists should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().prepareExists("foo-alias").get(); - fail("indices exists should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - } finally { - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings( - ImmutableSettings.settingsBuilder().put("cluster.blocks.read_only", false).build()).get()); - assertAcked(client().admin().indices().prepareUpdateSettings("foo").setSettings( - ImmutableSettings.settingsBuilder().put("index.blocks.read_only", false))); - } - } - - private void assertClusterAndIndexBlocks(ClusterBlockException e) { - assertThat(e.blocks().size(), equalTo(2)); - for (ClusterBlock clusterBlock : e.blocks()) { - assertThat(clusterBlock.status(), equalTo(RestStatus.FORBIDDEN)); - assertThat(clusterBlock.id(), either(equalTo(5)).or(equalTo(6))); - assertThat(clusterBlock.description(), either(containsString("cluster read-only (api)")).or(containsString("index read-only (api)"))); - } - } -} diff --git a/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java b/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java index ffe9e4ea86a..69098eeafde 100644 --- a/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java +++ b/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RerouteExplanation; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; @@ -31,7 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCo import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; @@ -47,9 +49,14 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.List; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; /** */ @@ -70,7 +77,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest { @Test public void rerouteWithCommands_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) .build(); rerouteWithCommands(commonSettings); } @@ -148,7 +155,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest { @Test public void rerouteWithAllocateLocalGateway_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) .build(); rerouteWithAllocateLocalGateway(commonSettings); } @@ -241,7 +248,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest { logger.info("--> disable allocation"); Settings newSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) .build(); client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet(); @@ -264,4 +271,51 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest { assertThat(explanation.decisions().type(), equalTo(Decision.Type.YES)); } + @Test + public void testClusterRerouteWithBlocks() throws Exception { + List nodesIds = internalCluster().startNodesAsync(2).get(); + + logger.info("--> create an index with 1 shard and 0 replicas"); + assertAcked(prepareCreate("test-blocks").setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))); + ensureGreen("test-blocks"); + + logger.info("--> check that the index has 1 shard"); + ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); + List shards = state.routingTable().allShards("test-blocks"); + assertThat(shards, hasSize(1)); + + logger.info("--> check that the shard is allocated"); + ShardRouting shard = shards.get(0); + assertThat(shard.assignedToNode(), equalTo(true)); + + logger.info("--> retrieve the node where the shard is allocated"); + DiscoveryNode node = state.nodes().resolveNode(shard.currentNodeId()); + assertNotNull(node); + + // toggle is used to mve the shard from one node to another + int toggle = nodesIds.indexOf(node.getName()); + + // Rerouting shards is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test-blocks", blockSetting); + assertAcked(client().admin().cluster().prepareReroute() + .add(new MoveAllocationCommand(new ShardId("test-blocks", 0), nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); + + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet(); + assertThat(healthResponse.isTimedOut(), equalTo(false)); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Rerouting shards is blocked when the cluster is read only + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareReroute() + .add(new MoveAllocationCommand(new ShardId("test-blocks", 1), nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); + } finally { + setClusterReadOnly(false); + } + } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java new file mode 100644 index 00000000000..e704fb3ecbc --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.block; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.util.EnumSet; + +import static org.elasticsearch.cluster.block.ClusterBlockLevel.*; +import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ClusterBlockTests extends ElasticsearchTestCase { + + @Test + public void testSerialization() throws Exception { + int iterations = randomIntBetween(10, 100); + for (int i = 0; i < iterations; i++) { + // Get a random version + Version version = randomVersion(random()); + + // Get a random list of ClusterBlockLevels + EnumSet levels = EnumSet.noneOf(ClusterBlockLevel.class); + int nbLevels = randomIntBetween(1, ClusterBlockLevel.values().length); + for (int j = 0; j < nbLevels; j++) { + levels.add(randomFrom(ClusterBlockLevel.values())); + } + + ClusterBlock clusterBlock = new ClusterBlock(randomInt(), "cluster block #" + randomInt(), randomBoolean(), + randomBoolean(), randomFrom(RestStatus.values()), levels); + + BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(version); + clusterBlock.writeTo(out); + + BytesStreamInput in = new BytesStreamInput(out.bytes()); + in.setVersion(version); + ClusterBlock result = ClusterBlock.readClusterBlock(in); + + assertThat(result.id(), equalTo(clusterBlock.id())); + assertThat(result.status(), equalTo(clusterBlock.status())); + assertThat(result.description(), equalTo(clusterBlock.description())); + assertThat(result.retryable(), equalTo(clusterBlock.retryable())); + assertThat(result.disableStatePersistence(), equalTo(clusterBlock.disableStatePersistence())); + + // This enum set is used to count the expected serialized/deserialized number of blocks + EnumSet expected = EnumSet.noneOf(ClusterBlockLevel.class); + + for (ClusterBlockLevel level : clusterBlock.levels()) { + if (level == METADATA) { + assertTrue(result.levels().contains(METADATA_READ)); + assertTrue(result.levels().contains(METADATA_WRITE)); + } else { + assertTrue(result.levels().contains(level)); + } + + expected.addAll(ClusterBlockLevel.fromId(level.toId(version))); + } + assertThat(result.levels().size(), equalTo(expected.size())); + } + } +} diff --git a/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java b/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java index 9947c1a12b8..c49f046a996 100644 --- a/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java +++ b/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.cluster.settings; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; @@ -28,9 +30,11 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; import org.junit.Test; +import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.*; @ClusterScope(scope = TEST) @@ -141,4 +145,39 @@ public class ClusterSettingsTests extends ElasticsearchIntegrationTest { assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); } + + @Test + public void testClusterUpdateSettingsWithBlocks() { + String key1 = "cluster.routing.allocation.enable"; + Settings transientSettings = ImmutableSettings.builder().put(key1, false).build(); + + String key2 = "cluster.routing.allocation.node_concurrent_recoveries"; + Settings persistentSettings = ImmutableSettings.builder().put(key2, "5").build(); + + ClusterUpdateSettingsRequestBuilder request = client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(transientSettings) + .setPersistentSettings(persistentSettings); + + // Cluster settings updates are blocked when the cluster is read only + try { + setClusterReadOnly(true); + assertBlocked(request, MetaData.CLUSTER_READ_ONLY_BLOCK); + + // But it's possible to update the settings to update the "cluster.blocks.read_only" setting + Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, false).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get()); + + } finally { + setClusterReadOnly(false); + } + + // It should work now + ClusterUpdateSettingsResponse response = request.execute().actionGet(); + + assertAcked(response); + assertThat(response.getTransientSettings().get(key1), notNullValue()); + assertThat(response.getTransientSettings().get(key2), nullValue()); + assertThat(response.getPersistentSettings().get(key1), nullValue()); + assertThat(response.getPersistentSettings().get(key2), notNullValue()); + } } diff --git a/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java index 596d8289d17..013c4a728e1 100644 --- a/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java +++ b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java @@ -27,8 +27,12 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.junit.Test; +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; -import static org.elasticsearch.test.ElasticsearchIntegrationTest.*; +import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; /** @@ -123,4 +127,36 @@ public class ClusterSearchShardsTests extends ElasticsearchIntegrationTest { assertThat(seenTest2, equalTo(true)); assertThat(response.getNodes().length, equalTo(2)); } + + @Test + public void testClusterSearchShardsWithBlocks() { + createIndex("test-blocks"); + + NumShards numShards = getNumShards("test-blocks"); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex("test-blocks", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + ensureGreen("test-blocks"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test-blocks", blockSetting); + ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test-blocks").execute().actionGet(); + assertThat(response.getGroups().length, equalTo(numShards.numPrimaries)); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().cluster().prepareSearchShards("test-blocks")); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + } + } } diff --git a/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java b/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java index 00e6fcdf4f1..6e7e986eb93 100644 --- a/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java +++ b/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java @@ -51,7 +51,7 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest { ImmutableSet blocks; do { blocks = nodeClient.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA); + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE); } while (!blocks.isEmpty() && (System.currentTimeMillis() - start) < timeout.millis()); return blocks; @@ -67,17 +67,17 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest { logger.info("--> start node (1)"); Client clientNode1 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3)); assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start node (2)"); Client clientNode2 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3)); Thread.sleep(BLOCK_WAIT_TIMEOUT.millis()); assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(clientNode2.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start node (3)"); @@ -93,28 +93,28 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest { logger.info("--> start master_node (1)"); Client master1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", false).put("node.master", true)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start data_node (1)"); Client data1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start data_node (2)"); Client data2 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data2.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start master_node (2)"); @@ -130,28 +130,28 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest { logger.info("--> start master_node (1)"); Client master1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start data_node (1)"); Client data1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", true).put("node.master", false)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start master_node (2)"); Client master2 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true)); assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start data_node (2)"); diff --git a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java index d5798aa94f1..9f10f503002 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java @@ -731,25 +731,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(0)); } - @Test - // Indices exists never throws IndexMissingException, the indices options control its behaviour (return true or false) - public void testIndicesExists() throws Exception { - assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false)); - assertThat(client().admin().indices().prepareExists("foo").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(false)); - assertThat(client().admin().indices().prepareExists("foo*").setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)).get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); - - createIndex("foo", "foobar", "bar", "barbaz"); - ensureYellow(); - - assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("bar*").get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(true)); - } - @Test public void testPutMapping() throws Exception { verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), true); diff --git a/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java b/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java new file mode 100644 index 00000000000..b0a549f3cc7 --- /dev/null +++ b/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.exists.indices; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.hamcrest.Matchers.equalTo; + +public class IndicesExistsTests extends ElasticsearchIntegrationTest { + + @Test + // Indices exists never throws IndexMissingException, the indices options control its behaviour (return true or false) + public void testIndicesExists() throws Exception { + assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false)); + assertThat(client().admin().indices().prepareExists("foo").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(false)); + assertThat(client().admin().indices().prepareExists("foo*").setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)).get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); + + createIndex("foo", "foobar", "bar", "barbaz"); + ensureYellow(); + + assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("bar*").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(true)); + } + + @Test + public void testIndicesExistsWithBlocks() { + createIndex("ro"); + ensureYellow(); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("ro", blockSetting); + assertThat(client().admin().indices().prepareExists("ro").execute().actionGet().isExists(), equalTo(true)); + } finally { + disableIndexBlock("ro", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + assertThat(client().admin().indices().prepareExists("ro").execute().actionGet().isExists(), equalTo(true)); + fail("Exists should fail when " + IndexMetaData.SETTING_BLOCKS_METADATA + " is true"); + } catch (ClusterBlockException e) { + // Ok, a ClusterBlockException is expected + } finally { + disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + } + } +} diff --git a/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java b/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java index 778723be68c..f72609298e4 100644 --- a/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java +++ b/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java @@ -21,11 +21,18 @@ package org.elasticsearch.indices.exists.types; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; +import java.io.IOException; +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; public class TypesExistsTests extends ElasticsearchIntegrationTest { @@ -69,4 +76,27 @@ public class TypesExistsTests extends ElasticsearchIntegrationTest { assertThat(response.isExists(), equalTo(false)); } + @Test + public void testTypesExistsWithBlocks() throws IOException { + assertAcked(prepareCreate("ro").addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())); + ensureGreen("ro"); + + // Request is not blocked + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("ro", block); + assertThat(client().admin().indices().prepareTypesExists("ro").setTypes("type1").execute().actionGet().isExists(), equalTo(true)); + } finally { + disableIndexBlock("ro", block); + } + } + + // Request is blocked + try { + enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareTypesExists("ro").setTypes("type1")); + } finally { + disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + } + } } diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java index e718c8b318d..119157bcfc1 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java @@ -21,16 +21,21 @@ package org.elasticsearch.indices.mapping; import com.google.common.collect.Maps; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; -import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; import org.junit.Test; import java.io.IOException; +import java.util.Arrays; import java.util.Map; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.*; public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest { @@ -174,4 +179,29 @@ public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest { assertThat(responseStrings, not(equalTo(prettyJsonBuilder.string()))); } + + @Test + public void testGetFieldMappingsWithBlocks() throws Exception { + assertAcked(prepareCreate("test") + .addMapping("typeA", getMappingForType("typeA")) + .addMapping("typeB", getMappingForType("typeB"))); + ensureYellow(); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test", block); + GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("test").setTypes("typeA").setFields("field1", "obj.subfield").get(); + assertThat(response.fieldMappings("test", "typeA", "field1").fullName(), equalTo("field1")); + } finally { + disableIndexBlock("test", block); + } + } + + try { + enableIndexBlock("test", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetMappings(), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("test", SETTING_BLOCKS_METADATA); + } + } } diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java index adfd37a0c15..4757aea8600 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java @@ -28,8 +28,11 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.junit.Test; import java.io.IOException; +import java.util.Arrays; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -143,4 +146,30 @@ public class SimpleGetMappingsTests extends ElasticsearchIntegrationTest { assertThat(response.mappings().get("indexb").get("Btype"), notNullValue()); } + @Test + public void testGetMappingsWithBlocks() throws IOException { + client().admin().indices().prepareCreate("test") + .addMapping("typeA", getMappingForType("typeA")) + .addMapping("typeB", getMappingForType("typeB")) + .execute().actionGet(); + ensureGreen(); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test", block); + GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet(); + assertThat(response.mappings().size(), equalTo(1)); + assertThat(response.mappings().get("test").size(), equalTo(2)); + } finally { + disableIndexBlock("test", block); + } + } + + try { + enableIndexBlock("test", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetMappings(), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("test", SETTING_BLOCKS_METADATA); + } + } } diff --git a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java index d225c936121..2a82d92efa0 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java @@ -41,15 +41,16 @@ import org.hamcrest.Matchers; import org.junit.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.*; @ClusterScope(randomDynamicTemplates = false) @@ -440,4 +441,28 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { } } + + @Test + public void testPutMappingsWithBlocks() throws Exception { + createIndex("test"); + ensureGreen(); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", block); + assertAcked(client().admin().indices().preparePutMapping("test").setType("doc").setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}")); + } finally { + disableIndexBlock("test", block); + } + } + + for (String block : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", block); + assertBlocked(client().admin().indices().preparePutMapping("test").setType("doc").setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}")); + } finally { + disableIndexBlock("test", block); + } + } + } } diff --git a/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java b/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java new file mode 100644 index 00000000000..7ecf5eb465d --- /dev/null +++ b/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.settings; + +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class GetSettingsBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testGetSettingsWithBlocks() throws Exception { + assertAcked(prepareCreate("test") + .setSettings(ImmutableSettings.settingsBuilder() + .put("index.refresh_interval", -1) + .put("index.merge.policy.expunge_deletes_allowed", "30") + .put("index.mapper.dynamic", false))); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test", block); + GetSettingsResponse response = client().admin().indices().prepareGetSettings("test").get(); + assertThat(response.getIndexToSettings().size(), greaterThanOrEqualTo(1)); + assertThat(response.getSetting("test", "index.refresh_interval"), equalTo("-1")); + assertThat(response.getSetting("test", "index.merge.policy.expunge_deletes_allowed"), equalTo("30")); + assertThat(response.getSetting("test", "index.mapper.dynamic"), equalTo("false")); + } finally { + disableIndexBlock("test", block); + } + } + + try { + enableIndexBlock("test", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetSettings("test")); + } finally { + disableIndexBlock("test", SETTING_BLOCKS_METADATA); + } + } +} diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java index 38a7a9d09ae..73d08959622 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; @@ -41,8 +42,10 @@ import org.elasticsearch.index.store.support.AbstractIndexStore; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -384,4 +387,31 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { rootLogger.setLevel(savedLevel); } } + + @Test + public void testUpdateSettingsWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + Settings.Builder builder = ImmutableSettings.builder().put("index.refresh_interval", -1); + + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Closing an index is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + } } diff --git a/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java b/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java index 8b7d7932298..4ba97227750 100644 --- a/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java +++ b/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java @@ -30,21 +30,18 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.store.MockFSDirectoryService; import org.junit.Test; import java.io.IOException; +import java.util.Arrays; import java.util.concurrent.ExecutionException; -import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -338,5 +335,58 @@ public class OpenCloseIndexTests extends ElasticsearchIntegrationTest { assertHitCount(searchResponse, docs); } + @Test + public void testOpenCloseIndexWithBlocks() { + createIndex("test"); + ensureGreen("test"); + int docs = between(10, 100); + for (int i = 0; i < docs ; i++) { + client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + + // Closing an index is not blocked + CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet(); + assertAcked(closeIndexResponse); + assertIndexIsClosed("test"); + + // Opening an index is not blocked + OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").execute().actionGet(); + assertAcked(openIndexResponse); + assertIndexIsOpened("test"); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Closing an index is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareClose("test")); + assertIndexIsOpened("test"); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet(); + assertAcked(closeIndexResponse); + assertIndexIsClosed("test"); + + // Opening an index is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareOpen("test")); + assertIndexIsClosed("test"); + } finally { + disableIndexBlock("test", blockSetting); + } + } + } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java b/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java new file mode 100644 index 00000000000..e8fddf2c1aa --- /dev/null +++ b/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.template; + +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.io.IOException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.hasSize; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndexTemplateBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testIndexTemplatesWithBlocks() throws IOException { + // creates a simple index template + client().admin().indices().preparePutTemplate("template_blocks") + .setTemplate("te*") + .setOrder(0) + .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") + .startObject("field1").field("type", "string").field("store", "yes").endObject() + .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject() + .endObject().endObject().endObject()) + .execute().actionGet(); + + try { + setClusterReadOnly(true); + + GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates("template_blocks").execute().actionGet(); + assertThat(response.getIndexTemplates(), hasSize(1)); + + assertBlocked(client().admin().indices().preparePutTemplate("template_blocks_2") + .setTemplate("block*") + .setOrder(0) + .addAlias(new Alias("alias_1"))); + + assertBlocked(client().admin().indices().prepareDeleteTemplate("template_blocks")); + + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java b/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java new file mode 100644 index 00000000000..a2735d6134e --- /dev/null +++ b/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.warmer; + + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.collect.ImmutableList; +import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.warmer.IndexWarmersMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndicesWarmerBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testPutWarmerWithBlocks() { + createIndex("test-blocks"); + ensureGreen("test-blocks"); + + // Index reads are blocked, the warmer can't be registered + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_READ); + assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") + .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_BLOCK); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_READ); + } + + // Index writes are blocked, the warmer can be registered + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE); + assertAcked(client().admin().indices().preparePutWarmer("warmer_acked") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE); + } + + // Index metadata changes are blocked, the warmer can't be registered + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") + .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + } + + // Index metadata changes are blocked, the warmer can't be registered + try { + enableIndexBlock("test-blocks", SETTING_READ_ONLY); + assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") + .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_ONLY_BLOCK); + } finally { + disableIndexBlock("test-blocks", SETTING_READ_ONLY); + } + + // Adding a new warmer is not possible when the cluster is read-only + try { + setClusterReadOnly(true); + assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + } + + @Test + public void testGetWarmerWithBlocks() { + createIndex("test-blocks"); + ensureGreen("test-blocks"); + + assertAcked(client().admin().indices().preparePutWarmer("warmer_block") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test-blocks", blockSetting); + GetWarmersResponse response = client().admin().indices().prepareGetWarmers("test-blocks").get(); + assertThat(response.warmers().size(), equalTo(1)); + + ObjectObjectCursor> entry = response.warmers().iterator().next(); + assertThat(entry.key, equalTo("test-blocks")); + assertThat(entry.value.size(), equalTo(1)); + assertThat(entry.value.iterator().next().name(), equalTo("warmer_block")); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetWarmers("test-blocks"), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + } + } + + @Test + public void testDeleteWarmerWithBlocks() { + createIndex("test-blocks"); + ensureGreen("test-blocks"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + assertAcked(client().admin().indices().preparePutWarmer("warmer_block") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); + + enableIndexBlock("test-blocks", blockSetting); + assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block")); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + assertAcked(client().admin().indices().preparePutWarmer("warmer_block") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); + + enableIndexBlock("test-blocks", blockSetting); + assertBlocked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block")); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + } +} diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index b2c1f87b757..1c0c11bb5cd 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -1421,6 +1421,24 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); } + /** Disables an index block for the specified index */ + public static void disableIndexBlock(String index, String block) { + Settings settings = ImmutableSettings.builder().put(block, false).build(); + client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); + } + + /** Enables an index block for the specified index */ + public static void enableIndexBlock(String index, String block) { + Settings settings = ImmutableSettings.builder().put(block, true).build(); + client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); + } + + /** Sets or unsets the cluster read_only mode **/ + public static void setClusterReadOnly(boolean value) { + Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get()); + } + private static CountDownLatch newLatch(List latches) { CountDownLatch l = new CountDownLatch(1); latches.add(l); diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 271a71466c2..c691f3c5834 100644 --- a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -54,6 +54,8 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; @@ -83,6 +85,7 @@ import java.util.concurrent.TimeUnit; import static com.google.common.base.Predicates.isNull; import static org.elasticsearch.test.ElasticsearchTestCase.*; import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.*; @@ -118,6 +121,42 @@ public class ElasticsearchAssertions { assertVersionSerializable(response); } + /** + * Executes the request and fails if the request has not been blocked. + * + * @param builder the request builder + */ + public static void assertBlocked(ActionRequestBuilder builder) { + assertBlocked(builder, null); + } + + /** + * Executes the request and fails if the request has not been blocked by a specific {@link ClusterBlock}. + * + * @param builder the request builder + * @param expectedBlock the expected block + */ + public static void assertBlocked(ActionRequestBuilder builder, ClusterBlock expectedBlock) { + try { + builder.get(); + fail("Request executed with success but a ClusterBlockException was expected"); + } catch (ClusterBlockException e) { + assertThat(e.blocks().size(), greaterThan(0)); + assertThat(e.status(), equalTo(RestStatus.FORBIDDEN)); + + if (expectedBlock != null) { + boolean found = false; + for (ClusterBlock clusterBlock : e.blocks()) { + if (clusterBlock.id() == expectedBlock.id()) { + found = true; + break; + } + } + assertThat("Request should have been blocked by [" + expectedBlock + "] instead of " + e.blocks(), found, equalTo(true)); + } + } + } + public static String formatShardStatus(BroadcastOperationResponse response) { String msg = " Total shards: " + response.getTotalShards() + " Successful shards: " + response.getSuccessfulShards() + " & " + response.getFailedShards() + " shard failures:"; From a66cf85f37a016d30a473dd29951e05c5857f24d Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 23 Apr 2015 11:28:18 -0400 Subject: [PATCH 084/236] refactor SSD/FileStore logic out of NodeEnvironment --- .../org/elasticsearch/env/ESFileStore.java | 172 ++++++++++++++++++ .../org/elasticsearch/env/Environment.java | 42 ++++- .../elasticsearch/env/NodeEnvironment.java | 73 +------- 3 files changed, 210 insertions(+), 77 deletions(-) create mode 100644 src/main/java/org/elasticsearch/env/ESFileStore.java diff --git a/src/main/java/org/elasticsearch/env/ESFileStore.java b/src/main/java/org/elasticsearch/env/ESFileStore.java new file mode 100644 index 00000000000..d8ffcfedc15 --- /dev/null +++ b/src/main/java/org/elasticsearch/env/ESFileStore.java @@ -0,0 +1,172 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.env; + +import org.apache.lucene.util.Constants; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.io.PathUtils; + +import java.io.IOException; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttributeView; +import java.nio.file.attribute.FileStoreAttributeView; + +/** + * Implementation of FileStore that supports + * additional features, such as SSD detection and better + * filesystem information for the root filesystem. + * @see Environment#getFileStore(Path) + */ +class ESFileStore extends FileStore { + /** Underlying filestore */ + final FileStore in; + /** Cached result of Lucene's {@code IOUtils.spins} on path. */ + final Boolean spins; + + ESFileStore(FileStore in) { + this.in = in; + Boolean spins; + // Lucene's IOUtils.spins only works on Linux today: + if (Constants.LINUX) { + try { + spins = IOUtils.spins(PathUtils.get(getMountPointLinux(in))); + } catch (Exception e) { + spins = null; + } + } else { + spins = null; + } + this.spins = spins; + } + + // these are hacks that are not guaranteed + private static String getMountPointLinux(FileStore store) { + String desc = store.toString(); + int index = desc.lastIndexOf(" ("); + if (index != -1) { + return desc.substring(0, index); + } else { + return desc; + } + } + + /** Files.getFileStore(Path) useless here! Don't complain, just try it yourself. */ + static FileStore getMatchingFileStore(Path path, FileStore fileStores[]) throws IOException { + FileStore store = Files.getFileStore(path); + + if (Constants.WINDOWS) { + return store; // be defensive, don't even try to do anything fancy. + } + + try { + String mount = getMountPointLinux(store); + FileStore sameMountPoint = null; + for (FileStore fs : fileStores) { + if (mount.equals(getMountPointLinux(fs))) { + if (sameMountPoint == null) { + sameMountPoint = fs; + } else { + // more than one filesystem has the same mount point; something is wrong! + // fall back to crappy one we got from Files.getFileStore + return store; + } + } + } + + if (sameMountPoint != null) { + // ok, we found only one, use it: + return sameMountPoint; + } else { + // fall back to crappy one we got from Files.getFileStore + return store; + } + } catch (Exception e) { + // ignore + } + + // fall back to crappy one we got from Files.getFileStore + return store; + } + + @Override + public String name() { + return in.name(); + } + + @Override + public String type() { + return in.type(); + } + + @Override + public boolean isReadOnly() { + return in.isReadOnly(); + } + + @Override + public long getTotalSpace() throws IOException { + return in.getTotalSpace(); + } + + @Override + public long getUsableSpace() throws IOException { + return in.getUsableSpace(); + } + + @Override + public long getUnallocatedSpace() throws IOException { + return in.getUnallocatedSpace(); + } + + @Override + public boolean supportsFileAttributeView(Class type) { + return in.supportsFileAttributeView(type); + } + + @Override + public boolean supportsFileAttributeView(String name) { + if ("lucene".equals(name)) { + return true; + } else { + return in.supportsFileAttributeView(name); + } + } + + @Override + public V getFileStoreAttributeView(Class type) { + return in.getFileStoreAttributeView(type); + } + + @Override + public Object getAttribute(String attribute) throws IOException { + if ("lucene:spins".equals(attribute)) { + return spins; + } else { + return in.getAttribute(attribute); + } + } + + @Override + public String toString() { + return in.toString(); + } +} diff --git a/src/main/java/org/elasticsearch/env/Environment.java b/src/main/java/org/elasticsearch/env/Environment.java index 87a356774f1..dab2d22f3f3 100644 --- a/src/main/java/org/elasticsearch/env/Environment.java +++ b/src/main/java/org/elasticsearch/env/Environment.java @@ -20,22 +20,16 @@ package org.elasticsearch.env; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; -import com.google.common.base.Charsets; - -import java.io.File; import java.io.IOException; -import java.io.InputStreamReader; import java.net.MalformedURLException; -import java.net.URI; -import java.net.URISyntaxException; import java.net.URL; -import java.nio.file.*; -import java.util.Collections; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; import static org.elasticsearch.common.Strings.cleanPath; import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS; @@ -63,6 +57,9 @@ public class Environment { private final Path logsFile; + /** List of filestores on the system */ + private final FileStore[] fileStores; + public Environment() { this(EMPTY_SETTINGS); } @@ -112,6 +109,13 @@ public class Environment { } else { logsFile = homeFile.resolve("logs"); } + + // gather information about filesystems + ArrayList allStores = new ArrayList<>(); + for (FileStore store : PathUtils.getDefaultFileSystem().getFileStores()) { + allStores.add(new ESFileStore(store)); + } + fileStores = allStores.toArray(new ESFileStore[allStores.size()]); } /** @@ -177,6 +181,24 @@ public class Environment { return logsFile; } + /** + * Looks up the filestore associated with a Path. + *

    + * This is an enhanced version of {@link Files#getFileStore(Path)}: + *

    + */ + public FileStore getFileStore(Path path) throws IOException { + return ESFileStore.getMatchingFileStore(path, fileStores); + } + public URL resolveConfig(String path) throws FailedToResolveConfigException { String origPath = path; // first, try it as a path on the file system diff --git a/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/src/main/java/org/elasticsearch/env/NodeEnvironment.java index c2c6755ecdc..c9dbf42d3fd 100644 --- a/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -22,7 +22,6 @@ package org.elasticsearch.env; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; import org.apache.lucene.store.*; -import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchIllegalStateException; @@ -64,23 +63,15 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { * not running on Linux, or we hit an exception trying), True means the device possibly spins and False means it does not. */ public final Boolean spins; - public NodePath(Path path) throws IOException { + public NodePath(Path path, Environment environment) throws IOException { this.path = path; this.indicesPath = path.resolve(INDICES_FOLDER); - this.fileStore = getFileStore(path); - Boolean spins; - - // Lucene's IOUtils.spins only works on Linux today: - if (Constants.LINUX) { - try { - spins = IOUtils.spins(path); - } catch (Exception e) { - spins = null; - } + this.fileStore = environment.getFileStore(path); + if (fileStore.supportsFileAttributeView("lucene")) { + this.spins = (Boolean) fileStore.getAttribute("lucene:spins"); } else { - spins = null; + this.spins = null; } - this.spins = spins; } /** @@ -157,7 +148,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { Lock tmpLock = luceneDir.makeLock(NODE_LOCK_FILENAME); boolean obtained = tmpLock.obtain(); if (obtained) { - nodePaths[dirIndex] = new NodePath(dir); + nodePaths[dirIndex] = new NodePath(dir, environment); locks[dirIndex] = tmpLock; localNodeId = possibleLockId; } else { @@ -289,58 +280,6 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { return b.toString(); } - - // TODO: move somewhere more "util"? But, this is somewhat hacky code ... not great to publicize it any more: - - // NOTE: poached from Lucene's IOUtils: - - /** Files.getFileStore(Path) useless here! Don't complain, just try it yourself. */ - private static FileStore getFileStore(Path path) throws IOException { - FileStore store = Files.getFileStore(path); - - try { - String mount = getMountPoint(store); - FileStore sameMountPoint = null; - for (FileStore fs : path.getFileSystem().getFileStores()) { - if (mount.equals(getMountPoint(fs))) { - if (sameMountPoint == null) { - sameMountPoint = fs; - } else { - // more than one filesystem has the same mount point; something is wrong! - // fall back to crappy one we got from Files.getFileStore - return store; - } - } - } - - if (sameMountPoint != null) { - // ok, we found only one, use it: - return sameMountPoint; - } else { - // fall back to crappy one we got from Files.getFileStore - return store; - } - } catch (Exception e) { - // ignore - } - - // fall back to crappy one we got from Files.getFileStore - return store; - } - - // NOTE: poached from Lucene's IOUtils: - - // these are hacks that are not guaranteed - private static String getMountPoint(FileStore store) { - String desc = store.toString(); - int index = desc.lastIndexOf(" ("); - if (index != -1) { - return desc.substring(0, index); - } else { - return desc; - } - } - /** * Deletes a shard data directory iff the shards locks were successfully acquired. * From 60721b2a176bf74e6b07a65745628ad4aa44bbcb Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 26 Feb 2015 11:30:02 -0500 Subject: [PATCH 085/236] Snapshot/Restore: remove obsolete expand_wildcards_open and expand_wildcards_close options In #6097 we made snapshot/restore index option consistent with other API. Now we can remove old style options from master. Closes #10743 --- docs/reference/migration/migrate_2_0.asciidoc | 7 +++++++ .../snapshots/create/CreateSnapshotRequest.java | 10 +--------- .../snapshots/restore/RestoreSnapshotRequest.java | 11 +---------- 3 files changed, 9 insertions(+), 19 deletions(-) diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index b20e1960fee..37471d32a0b 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -389,3 +389,10 @@ favour or `bool`. The `execution` option of the `terms` filter is now deprecated and ignored if provided. + +=== Snapshot and Restore + +The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close` are no longer +supported by the snapshot and restore operations. These parameters have been replaced by +a single `expand_wildcards` parameter. See <> for more. + diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 284e02e3bc1..0bc0e75b4e5 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -371,10 +371,6 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest entry : ((Map) source).entrySet()) { String name = entry.getKey(); if (name.equals("indices")) { @@ -385,10 +381,6 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest) source, IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed))); + indicesOptions(IndicesOptions.fromMap((Map) source, IndicesOptions.lenientExpandOpen())); return this; } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 95582ca65bd..27dd0672ca5 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -504,11 +504,6 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest entry : ((Map) source).entrySet()) { String name = entry.getKey(); if (name.equals("indices")) { @@ -519,10 +514,6 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest) source, IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed))); + indicesOptions(IndicesOptions.fromMap((Map) source, IndicesOptions.lenientExpandOpen())); return this; } From 0865d220f4d55d9dca858710d2f8240b0c87e7d2 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 23 Apr 2015 15:04:58 -0400 Subject: [PATCH 086/236] Remove crazy permissions for filestores, ssds, now that this logic has been refactored. Log a warning when security is disabled. --- .../elasticsearch/bootstrap/Bootstrap.java | 2 +- .../org/elasticsearch/bootstrap/Security.java | 35 ++----------------- 2 files changed, 4 insertions(+), 33 deletions(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 72c13efa0f4..f69a9c35409 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -98,7 +98,7 @@ public class Bootstrap { Security.configure(environment); logger.info("security enabled"); } else { - logger.info("security disabled"); + logger.warn("security disabled"); } } diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 35de7edbe9f..287cfa060e1 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -19,11 +19,9 @@ package org.elasticsearch.bootstrap; -import org.apache.lucene.util.Constants; -import org.elasticsearch.common.io.PathUtils; +import org.apache.lucene.util.StringHelper; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; @@ -32,7 +30,6 @@ import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.nio.charset.StandardCharsets; -import java.nio.file.FileStore; import java.nio.file.Files; import java.nio.file.Path; import java.util.HashSet; @@ -51,6 +48,8 @@ class Security { * Can only happen once! */ static void configure(Environment environment) throws IOException { + // init lucene random seed. it will use /dev/urandom where available. + StringHelper.randomId(); Path newConfig = processTemplate(environment.configFile().resolve("security.policy"), environment); System.setProperty("java.security.policy", newConfig.toString()); System.setSecurityManager(new SecurityManager()); @@ -102,34 +101,6 @@ class Security { addPath(writer, encode(path), "read,readlink,write,delete"); addRecursivePath(writer, encode(path), "read,readlink,write,delete"); } - - // on *nix, try to grant read perms to file stores / SSD detection - if (!Constants.WINDOWS) { - Set stores = new HashSet<>(); - for (FileStore store : PathUtils.getDefaultFileSystem().getFileStores()) { - try { - String mount = NodeEnvironment.getMountPoint(store); - // mount point for fstat() calls against it - if (mount.startsWith("/")) { - stores.add(mount); - } - // block device: add it for SSD detection - if (store.name().startsWith("/")) { - stores.add(store.name()); - } - } catch (Throwable t) { - // these are hacks that are not guaranteed - } - } - for (String store : stores) { - addPath(writer, encode(store), "read,readlink"); - } - addRecursivePath(writer, "/sys/block", "read,readlink"); - addRecursivePath(writer, "/sys/devices", "read,readlink"); - addRecursivePath(writer, "/dev", "read,readlink"); - addRecursivePath(writer, "/devices", "read,readlink"); - } - writer.write("};"); writer.write(System.lineSeparator()); } From 4a94e1f14bc272d1d3584fc387deadea48331e7d Mon Sep 17 00:00:00 2001 From: Benoit Delbosc Date: Fri, 17 Apr 2015 14:59:24 +0200 Subject: [PATCH 087/236] Docs: Warning about the conflict with the Standard Tokenizer The examples given requires a specific Tokenizer to work. Closes: 10645 --- .../word-delimiter-tokenfilter.asciidoc | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc index 9ce81e1ac9f..edb3f3b5590 100644 --- a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc @@ -16,27 +16,27 @@ ignored: "//hello---there, 'dude'" -> "hello", "there", "dude" Parameters include: -`generate_word_parts`:: +`generate_word_parts`:: If `true` causes parts of words to be generated: "PowerShot" => "Power" "Shot". Defaults to `true`. -`generate_number_parts`:: +`generate_number_parts`:: If `true` causes number subwords to be generated: "500-42" => "500" "42". Defaults to `true`. -`catenate_words`:: +`catenate_words`:: If `true` causes maximum runs of word parts to be catenated: "wi-fi" => "wifi". Defaults to `false`. -`catenate_numbers`:: +`catenate_numbers`:: If `true` causes maximum runs of number parts to be catenated: "500-42" => "50042". Defaults to `false`. -`catenate_all`:: +`catenate_all`:: If `true` causes all subword parts to be catenated: "wi-fi-4000" => "wifi4000". Defaults to `false`. -`split_on_case_change`:: +`split_on_case_change`:: If `true` causes "PowerShot" to be two tokens; ("Power-Shot" remains two parts regards). Defaults to `true`. @@ -44,29 +44,29 @@ Parameters include: If `true` includes original words in subwords: "500-42" => "500-42" "500" "42". Defaults to `false`. -`split_on_numerics`:: +`split_on_numerics`:: If `true` causes "j2se" to be three tokens; "j" "2" "se". Defaults to `true`. -`stem_english_possessive`:: +`stem_english_possessive`:: If `true` causes trailing "'s" to be removed for each subword: "O'Neil's" => "O", "Neil". Defaults to `true`. Advance settings include: -`protected_words`:: +`protected_words`:: A list of protected words from being delimiter. Either an array, or also can set `protected_words_path` which resolved to a file configured with protected words (one on each line). Automatically resolves to `config/` based location if exists. -`type_table`:: +`type_table`:: A custom type mapping table, for example (when configured using `type_table_path`): [source,js] -------------------------------------------------- - # Map the $, %, '.', and ',' characters to DIGIT + # Map the $, %, '.', and ',' characters to DIGIT # This might be useful for financial data. $ => DIGIT % => DIGIT @@ -78,3 +78,9 @@ Advance settings include: # see http://en.wikipedia.org/wiki/Zero-width_joiner \\u200D => ALPHANUM -------------------------------------------------- + +NOTE: Using a tokenizer like the `standard` tokenizer may interfere with +the `catenate_*` and `preserve_original` parameters, as the original +string may already have lost punctuation during tokenization. Instead, +you may want to use the `whitespace` tokenizer. + From e2861bd7be0e9534d1145fab138edb577ee1723d Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 23 Apr 2015 15:32:00 -0400 Subject: [PATCH 088/236] ensure we only pull system filestores once time --- .../org/elasticsearch/env/Environment.java | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/main/java/org/elasticsearch/env/Environment.java b/src/main/java/org/elasticsearch/env/Environment.java index dab2d22f3f3..b19407cf262 100644 --- a/src/main/java/org/elasticsearch/env/Environment.java +++ b/src/main/java/org/elasticsearch/env/Environment.java @@ -58,7 +58,20 @@ public class Environment { private final Path logsFile; /** List of filestores on the system */ - private final FileStore[] fileStores; + private static final FileStore[] fileStores; + + /** + * We have to do this in clinit instead of init, because ES code is pretty messy, + * and makes these environments, throws them away, makes them again, etc. + */ + static { + // gather information about filesystems + ArrayList allStores = new ArrayList<>(); + for (FileStore store : PathUtils.getDefaultFileSystem().getFileStores()) { + allStores.add(new ESFileStore(store)); + } + fileStores = allStores.toArray(new ESFileStore[allStores.size()]); + } public Environment() { this(EMPTY_SETTINGS); @@ -109,13 +122,6 @@ public class Environment { } else { logsFile = homeFile.resolve("logs"); } - - // gather information about filesystems - ArrayList allStores = new ArrayList<>(); - for (FileStore store : PathUtils.getDefaultFileSystem().getFileStores()) { - allStores.add(new ESFileStore(store)); - } - fileStores = allStores.toArray(new ESFileStore[allStores.size()]); } /** From 573e81d2ea914f0aeb4c97128de8961e8bc68816 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 23 Apr 2015 16:01:16 -0400 Subject: [PATCH 089/236] Ensure paths exist (or more permissions are needed later) --- src/main/java/org/elasticsearch/bootstrap/Security.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 287cfa060e1..2242b970259 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -76,6 +76,7 @@ class Security { paths.add(environment.workFile()); paths.add(environment.workWithClusterFile()); for (Path path : environment.dataFiles()) { + System.out.println("datapath=" + path); paths.add(path); } for (Path path : environment.dataWithClusterFiles()) { @@ -97,6 +98,8 @@ class Security { writer.write("grant {"); writer.write(System.lineSeparator()); for (Path path : paths) { + // data paths actually may not exist yet. + Files.createDirectories(path); // add each path twice: once for itself, again for files underneath it addPath(writer, encode(path), "read,readlink,write,delete"); addRecursivePath(writer, encode(path), "read,readlink,write,delete"); From 5d2153fe9be8ca4b8454a42666ed9821898ce55d Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 23 Apr 2015 16:03:19 -0400 Subject: [PATCH 090/236] remove stray sop --- src/main/java/org/elasticsearch/bootstrap/Security.java | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 2242b970259..ac2bf6b0913 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -76,7 +76,6 @@ class Security { paths.add(environment.workFile()); paths.add(environment.workWithClusterFile()); for (Path path : environment.dataFiles()) { - System.out.println("datapath=" + path); paths.add(path); } for (Path path : environment.dataWithClusterFiles()) { From 114d10e5a96b071121ac7862e176b1e15112aadb Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 23 Apr 2015 17:51:17 -0400 Subject: [PATCH 091/236] [TEST] Restructure MovAvgTests to be more generic, robust --- .../reducers/ReducerTestHelpers.java | 131 +++ .../reducers/moving/avg/MovAvgTests.java | 1045 ++++++++--------- 2 files changed, 646 insertions(+), 530 deletions(-) create mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerTestHelpers.java diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerTestHelpers.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerTestHelpers.java new file mode 100644 index 00000000000..8496b93e7ea --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerTestHelpers.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + + +import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxBuilder; +import org.elasticsearch.search.aggregations.metrics.min.MinBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.SumBuilder; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.util.ArrayList; + +/** + * Provides helper methods and classes for use in Reducer tests, such as creating mock histograms or computing + * simple metrics + */ +public class ReducerTestHelpers extends ElasticsearchTestCase { + + /** + * Generates a mock histogram to use for testing. Each MockBucket holds a doc count, key and document values + * which can later be used to compute metrics and compare against the real aggregation results. Gappiness can be + * controlled via parameters + * + * @param interval Interval between bucket keys + * @param size Size of mock histogram to generate (in buckets) + * @param gapProbability Probability of generating an empty bucket. 0.0-1.0 inclusive + * @param runProbability Probability of extending a gap once one has been created. 0.0-1.0 inclusive + * @return + */ + public static ArrayList generateHistogram(int interval, int size, double gapProbability, double runProbability) { + ArrayList values = new ArrayList<>(size); + + boolean lastWasGap = false; + + for (int i = 0; i < size; i++) { + MockBucket bucket = new MockBucket(); + if (randomDouble() < gapProbability) { + // start a gap + bucket.count = 0; + bucket.docValues = new double[0]; + + lastWasGap = true; + + } else if (lastWasGap && randomDouble() < runProbability) { + // add to the existing gap + bucket.count = 0; + bucket.docValues = new double[0]; + + lastWasGap = true; + } else { + bucket.count = randomIntBetween(1, 50); + bucket.docValues = new double[bucket.count]; + for (int j = 0; j < bucket.count; j++) { + bucket.docValues[j] = randomDouble() * randomIntBetween(-20,20); + } + lastWasGap = false; + } + + bucket.key = i * interval; + values.add(bucket); + } + + return values; + } + + /** + * Simple mock bucket container + */ + public static class MockBucket { + public int count; + public double[] docValues; + public long key; + } + + /** + * Computes a simple agg metric (min, sum, etc) from the provided values + * + * @param values Array of values to compute metric for + * @param metric A metric builder which defines what kind of metric should be returned for the values + * @return + */ + public static double calculateMetric(double[] values, ValuesSourceMetricsAggregationBuilder metric) { + + if (metric instanceof MinBuilder) { + double accumulator = Double.MAX_VALUE; + for (double value : values) { + accumulator = Math.min(accumulator, value); + } + return accumulator; + } else if (metric instanceof MaxBuilder) { + double accumulator = Double.MIN_VALUE; + for (double value : values) { + accumulator = Math.max(accumulator, value); + } + return accumulator; + } else if (metric instanceof SumBuilder) { + double accumulator = 0; + for (double value : values) { + accumulator += value; + } + return accumulator; + } else if (metric instanceof AvgBuilder) { + double accumulator = 0; + for (double value : values) { + accumulator += value; + } + return accumulator / values.length; + } + + return 0.0; + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index bab47aadb80..c92f0b1cc2e 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder; import org.elasticsearch.search.aggregations.reducers.BucketHelpers; +import org.elasticsearch.search.aggregations.reducers.ReducerTestHelpers; import org.elasticsearch.search.aggregations.reducers.SimpleValue; import org.elasticsearch.search.aggregations.reducers.movavg.models.DoubleExpModel; import org.elasticsearch.search.aggregations.reducers.movavg.models.LinearModel; @@ -40,10 +41,10 @@ import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelB import org.elasticsearch.search.aggregations.reducers.movavg.models.SimpleModel; import org.elasticsearch.search.aggregations.reducers.movavg.models.SingleExpModel; import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.hamcrest.Matchers; import org.junit.Test; -import java.util.ArrayList; -import java.util.List; +import java.util.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; @@ -59,32 +60,56 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; @ElasticsearchIntegrationTest.SuiteScopeTest @AwaitsFix(bugUrl = "Gap test logic seems to fail a lot of the time on CI build") public class MovAvgTests extends ElasticsearchIntegrationTest { - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - private static final String SINGLE_VALUED_VALUE_FIELD_NAME = "v_value"; + private static final String INTERVAL_FIELD = "l_value"; + private static final String VALUE_FIELD = "v_value"; private static final String GAP_FIELD = "g_value"; static int interval; - static int numValueBuckets; - static int numFilledValueBuckets; + static int numBuckets; static int windowSize; + static double alpha; + static double beta; static BucketHelpers.GapPolicy gapPolicy; + static ValuesSourceMetricsAggregationBuilder metric; + static List mockHisto; - static long[] docCounts; - static long[] docValues; - static Double[] simpleDocCounts; - static Double[] linearDocCounts; - static Double[] singleDocCounts; - static Double[] doubleDocCounts; + static Map> testValues; + + + enum MovAvgType { + SIMPLE ("simple"), LINEAR("linear"), SINGLE("single"), DOUBLE("double"); + + private final String name; + + MovAvgType(String s) { + name = s; + } + + public String toString(){ + return name; + } + } + + enum MetricTarget { + VALUE ("value"), COUNT("count"); + + private final String name; + + MetricTarget(String s) { + name = s; + } + + public String toString(){ + return name; + } + } - static Double[] simpleDocValues; - static Double[] linearDocValues; - static Double[] singleDocValues; - static Double[] doubleDocValues; @Override public void setupSuiteScopeCluster() throws Exception { @@ -92,297 +117,191 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { createIndex("idx_unmapped"); List builders = new ArrayList<>(); + interval = 5; - numValueBuckets = randomIntBetween(6, 80); - numFilledValueBuckets = numValueBuckets; - windowSize = randomIntBetween(3,10); + numBuckets = randomIntBetween(6, 80); + windowSize = randomIntBetween(3, 10); + alpha = randomDouble(); + beta = randomDouble(); + gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; - - - docCounts = new long[numValueBuckets]; - docValues = new long[numValueBuckets]; - for (int i = 0; i < numValueBuckets; i++) { - docCounts[i] = randomIntBetween(0, 20); - docValues[i] = randomIntBetween(1,20); //this will be used as a constant for all values within a bucket - } + metric = randomMetric("the_metric", VALUE_FIELD); + mockHisto = ReducerTestHelpers.generateHistogram(interval, numBuckets, randomDouble(), randomDouble()); - // Used for the gap tests - builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() - .field("gap_test", 0) - .field(GAP_FIELD, 1).endObject())); - builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() - .field("gap_test", (numValueBuckets - 1) * interval) - .field(GAP_FIELD, 1).endObject())); + testValues = new HashMap<>(8); - this.setupSimple(); - this.setupLinear(); - this.setupSingle(); - this.setupDouble(); - - for (int i = 0; i < numValueBuckets; i++) { - for (int docs = 0; docs < docCounts[i]; docs++) { - builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, i * interval) - .field(SINGLE_VALUED_VALUE_FIELD_NAME, docValues[i]).endObject())); + for (MovAvgType type : MovAvgType.values()) { + for (MetricTarget target : MetricTarget.values()) { + setupExpected(type, target); } } + for (ReducerTestHelpers.MockBucket mockBucket : mockHisto) { + for (double value : mockBucket.docValues) { + builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() + .field(INTERVAL_FIELD, mockBucket.key) + .field(VALUE_FIELD, value).endObject())); + } + } + + // Used for specially crafted gap tests + builders.add(client().prepareIndex("idx", "gap_type").setSource(jsonBuilder().startObject() + .field(INTERVAL_FIELD, 0) + .field(GAP_FIELD, 1).endObject())); + + builders.add(client().prepareIndex("idx", "gap_type").setSource(jsonBuilder().startObject() + .field(INTERVAL_FIELD, 49) + .field(GAP_FIELD, 1).endObject())); + indexRandom(true, builders); ensureSearchable(); } - private void setupSimple() { - simpleDocCounts = new Double[numValueBuckets]; + /** + * Calculates the moving averages for a specific (model, target) tuple based on the previously generated mock histogram. + * Computed values are stored in the testValues map. + * + * @param type The moving average model to use + * @param target The document field "target", e.g. _count or a field value + */ + private void setupExpected(MovAvgType type, MetricTarget target) { + ArrayList values = new ArrayList<>(numBuckets); EvictingQueue window = EvictingQueue.create(windowSize); - for (int i = 0; i < numValueBuckets; i++) { - if (docCounts[i] == 0 && gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { - continue; - } - window.offer((double)docCounts[i]); - double movAvg = 0; - for (double value : window) { - movAvg += value; - } - movAvg /= window.size(); + for (ReducerTestHelpers.MockBucket mockBucket : mockHisto) { + double metricValue; + double[] docValues = mockBucket.docValues; - simpleDocCounts[i] = movAvg; - } - - window.clear(); - simpleDocValues = new Double[numValueBuckets]; - for (int i = 0; i < numValueBuckets; i++) { - if (docCounts[i] == 0) { + // Gaps only apply to metric values, not doc _counts + if (mockBucket.count == 0 && target.equals(MetricTarget.VALUE)) { // If there was a gap in doc counts and we are ignoring, just skip this bucket if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + values.add(null); continue; } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { // otherwise insert a zero instead of the true value - window.offer(0.0); + metricValue = 0.0; } else { - window.offer((double) docValues[i]); + metricValue = ReducerTestHelpers.calculateMetric(docValues, metric); } + } else { - //if there are docs in this bucket, insert the regular value - window.offer((double) docValues[i]); + // If this isn't a gap, or is a _count, just insert the value + metricValue = target.equals(MetricTarget.VALUE) ? ReducerTestHelpers.calculateMetric(docValues, metric) : mockBucket.count; } - double movAvg = 0; - for (double value : window) { - movAvg += value; + window.offer(metricValue); + switch (type) { + case SIMPLE: + values.add(simple(window)); + break; + case LINEAR: + values.add(linear(window)); + break; + case SINGLE: + values.add(singleExp(window)); + break; + case DOUBLE: + values.add(doubleExp(window)); + break; } - movAvg /= window.size(); - - simpleDocValues[i] = movAvg; } - + testValues.put(type.toString() + "_" + target.toString(), values); } - private void setupLinear() { - EvictingQueue window = EvictingQueue.create(windowSize); - linearDocCounts = new Double[numValueBuckets]; - window.clear(); - for (int i = 0; i < numValueBuckets; i++) { - if (docCounts[i] == 0 && gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { - continue; - } - window.offer((double)docCounts[i]); - - double avg = 0; - long totalWeight = 1; - long current = 1; - - for (double value : window) { - avg += value * current; - totalWeight += current; - current += 1; - } - linearDocCounts[i] = avg / totalWeight; - } - - window.clear(); - linearDocValues = new Double[numValueBuckets]; - - for (int i = 0; i < numValueBuckets; i++) { - if (docCounts[i] == 0) { - // If there was a gap in doc counts and we are ignoring, just skip this bucket - if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { - continue; - } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { - // otherwise insert a zero instead of the true value - window.offer(0.0); - } else { - window.offer((double) docValues[i]); - } - } else { - //if there are docs in this bucket, insert the regular value - window.offer((double) docValues[i]); - } - - double avg = 0; - long totalWeight = 1; - long current = 1; - - for (double value : window) { - avg += value * current; - totalWeight += current; - current += 1; - } - linearDocValues[i] = avg / totalWeight; + /** + * Simple, unweighted moving average + * + * @param window Window of values to compute movavg for + * @return + */ + private double simple(Collection window) { + double movAvg = 0; + for (double value : window) { + movAvg += value; } + movAvg /= window.size(); + return movAvg; } - private void setupSingle() { - EvictingQueue window = EvictingQueue.create(windowSize); - singleDocCounts = new Double[numValueBuckets]; - for (int i = 0; i < numValueBuckets; i++) { - if (docCounts[i] == 0 && gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { - continue; - } - window.offer((double)docCounts[i]); + /** + * Linearly weighted moving avg + * + * @param window Window of values to compute movavg for + * @return + */ + private double linear(Collection window) { + double avg = 0; + long totalWeight = 1; + long current = 1; - double avg = 0; - double alpha = 0.5; - boolean first = true; - - for (double value : window) { - if (first) { - avg = value; - first = false; - } else { - avg = (value * alpha) + (avg * (1 - alpha)); - } - } - singleDocCounts[i] = avg ; + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; } - - singleDocValues = new Double[numValueBuckets]; - window.clear(); - - for (int i = 0; i < numValueBuckets; i++) { - if (docCounts[i] == 0) { - // If there was a gap in doc counts and we are ignoring, just skip this bucket - if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { - continue; - } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { - // otherwise insert a zero instead of the true value - window.offer(0.0); - } else { - window.offer((double) docValues[i]); - } - } else { - //if there are docs in this bucket, insert the regular value - window.offer((double) docValues[i]); - } - - double avg = 0; - double alpha = 0.5; - boolean first = true; - - for (double value : window) { - if (first) { - avg = value; - first = false; - } else { - avg = (value * alpha) + (avg * (1 - alpha)); - } - } - singleDocValues[i] = avg ; - } - + return avg / totalWeight; } - private void setupDouble() { - EvictingQueue window = EvictingQueue.create(windowSize); - doubleDocCounts = new Double[numValueBuckets]; + /** + * Single exponential moving avg + * + * @param window Window of values to compute movavg for + * @return + */ + private double singleExp(Collection window) { + double avg = 0; + boolean first = true; - for (int i = 0; i < numValueBuckets; i++) { - if (docCounts[i] == 0 && gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { - continue; - } - window.offer((double)docCounts[i]); - - double s = 0; - double last_s = 0; - - // Trend value - double b = 0; - double last_b = 0; - - double alpha = 0.5; - double beta = 0.5; - int counter = 0; - - double last; - for (double value : window) { - last = value; - if (counter == 1) { - s = value; - b = value - last; - } else { - s = alpha * value + (1.0d - alpha) * (last_s + last_b); - b = beta * (s - last_s) + (1 - beta) * last_b; - } - - counter += 1; - last_s = s; - last_b = b; - } - - doubleDocCounts[i] = s + (0 * b) ; - } - - doubleDocValues = new Double[numValueBuckets]; - window.clear(); - - for (int i = 0; i < numValueBuckets; i++) { - if (docCounts[i] == 0) { - // If there was a gap in doc counts and we are ignoring, just skip this bucket - if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { - continue; - } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { - // otherwise insert a zero instead of the true value - window.offer(0.0); - } else { - window.offer((double) docValues[i]); - } + for (double value : window) { + if (first) { + avg = value; + first = false; } else { - //if there are docs in this bucket, insert the regular value - window.offer((double) docValues[i]); + avg = (value * alpha) + (avg * (1 - alpha)); } - - double s = 0; - double last_s = 0; - - // Trend value - double b = 0; - double last_b = 0; - - double alpha = 0.5; - double beta = 0.5; - int counter = 0; - - double last; - for (double value : window) { - last = value; - if (counter == 1) { - s = value; - b = value - last; - } else { - s = alpha * value + (1.0d - alpha) * (last_s + last_b); - b = beta * (s - last_s) + (1 - beta) * last_b; - } - - counter += 1; - last_s = s; - last_b = b; - } - - doubleDocValues[i] = s + (0 * b) ; } + return avg; } + /** + * Double exponential moving avg + * @param window Window of values to compute movavg for + * @return + */ + private double doubleExp(Collection window) { + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + return s + (0 * b) ; + } + + + + /** * test simple moving average on single value field */ @@ -390,11 +309,11 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { public void simpleSingleValuedField() { SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(new SimpleModel.SimpleModelBuilder()) @@ -413,33 +332,40 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("movavg_counts"); - assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(simpleDocCounts[i])); + List expectedCounts = testValues.get(MovAvgType.SIMPLE.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.SIMPLE.toString() + "_" + MetricTarget.VALUE.toString()); - SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); - assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(simpleDocValues[i])); + Iterator actualIter = buckets.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + ReducerTestHelpers.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); } } - /** - * test linear moving average on single value field - */ @Test public void linearSingleValuedField() { SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(new LinearModel.LinearModelBuilder()) @@ -458,41 +384,48 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("movavg_counts"); - assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(linearDocCounts[i])); + List expectedCounts = testValues.get(MovAvgType.LINEAR.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.LINEAR.toString() + "_" + MetricTarget.VALUE.toString()); - SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); - assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(linearDocValues[i])); + Iterator actualIter = buckets.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + ReducerTestHelpers.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); } } - /** - * test single exponential moving average on single value field - */ @Test - public void singleExpSingleValuedField() { + public void singleSingleValuedField() { SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") .window(windowSize) - .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) + .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(alpha)) .gapPolicy(gapPolicy) .setBucketsPaths("_count")) .subAggregation(movingAvg("movavg_values") .window(windowSize) - .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(0.5)) + .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(alpha)) .gapPolicy(gapPolicy) .setBucketsPaths("the_metric")) ).execute().actionGet(); @@ -503,41 +436,48 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("movavg_counts"); - assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(singleDocCounts[i])); + List expectedCounts = testValues.get(MovAvgType.SINGLE.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.SINGLE.toString() + "_" + MetricTarget.VALUE.toString()); - SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); - assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(singleDocValues[i])); + Iterator actualIter = buckets.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + ReducerTestHelpers.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); } } - /** - * test double exponential moving average on single value field - */ @Test - public void doubleExpSingleValuedField() { + public void doubleSingleValuedField() { SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") .window(windowSize) - .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) + .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta)) .gapPolicy(gapPolicy) .setBucketsPaths("_count")) .subAggregation(movingAvg("movavg_values") .window(windowSize) - .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(0.5).beta(0.5)) + .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta)) .gapPolicy(gapPolicy) .setBucketsPaths("the_metric")) ).execute().actionGet(); @@ -548,18 +488,28 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, docCounts[i]); - SimpleValue docCountMovAvg = bucket.getAggregations().get("movavg_counts"); - assertThat(docCountMovAvg, notNullValue()); - assertThat(docCountMovAvg.value(), equalTo(doubleDocCounts[i])); + List expectedCounts = testValues.get(MovAvgType.DOUBLE.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.DOUBLE.toString() + "_" + MetricTarget.VALUE.toString()); - SimpleValue valuesMovAvg = bucket.getAggregations().get("movavg_values"); - assertThat(valuesMovAvg, notNullValue()); - assertThat(valuesMovAvg.value(), equalTo(doubleDocValues[i])); + Iterator actualIter = buckets.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + ReducerTestHelpers.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); } } @@ -567,11 +517,11 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { public void testSizeZeroWindow() { try { client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") .window(0) .modelBuilder(new SimpleModel.SimpleModelBuilder()) @@ -581,9 +531,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { fail("MovingAvg should not accept a window that is zero"); } catch (SearchPhaseExecutionException exception) { - //Throwable rootCause = exception.unwrapCause(); - //assertThat(rootCause, instanceOf(SearchParseException.class)); - //assertThat("[window] value must be a positive, non-zero integer. Value supplied was [0] in [movingAvg].", equalTo(exception.getMessage())); + // All good } } @@ -591,10 +539,10 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { public void testBadParent() { try { client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - range("histo").field(SINGLE_VALUED_FIELD_NAME).addRange(0, 10) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + range("histo").field(INTERVAL_FIELD).addRange(0, 10) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") .window(0) .modelBuilder(new SimpleModel.SimpleModelBuilder()) @@ -604,7 +552,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { fail("MovingAvg should not accept non-histogram as parent"); } catch (SearchPhaseExecutionException exception) { - // All good + // All good } } @@ -612,11 +560,11 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { public void testNegativeWindow() { try { client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") .window(-10) .modelBuilder(new SimpleModel.SimpleModelBuilder()) @@ -636,11 +584,11 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { public void testNoBucketsInHistogram() { SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( histogram("histo").field("test").interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(new SimpleModel.SimpleModelBuilder()) @@ -657,15 +605,41 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(buckets.size(), equalTo(0)); } + @Test + public void testNoBucketsInHistogramWithPredict() { + int numPredictions = randomIntBetween(1,10); + SearchResponse response = client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field("test").interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) + .subAggregation(movingAvg("movavg_counts") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric") + .predict(numPredictions)) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + } + @Test public void testZeroPrediction() { try { client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) @@ -676,7 +650,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { fail("MovingAvg should not accept a prediction size that is zero"); } catch (SearchPhaseExecutionException exception) { - // All Good + // All Good } } @@ -684,11 +658,11 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { public void testNegativePrediction() { try { client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", SINGLE_VALUED_VALUE_FIELD_NAME)) + histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") .window(windowSize) .modelBuilder(randomModelBuilder()) @@ -705,7 +679,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { /** * This test uses the "gap" dataset, which is simply a doc at the beginning and end of - * the SINGLE_VALUED_FIELD_NAME range. These docs have a value of 1 in the `g_field`. + * the INTERVAL_FIELD range. These docs have a value of 1 in GAP_FIELD. * This test verifies that large gaps don't break things, and that the mov avg roughly works * in the correct manner (checks direction of change, but not actual values) */ @@ -713,12 +687,11 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { public void testGiantGap() { SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("gap_type") .addAggregation( - histogram("histo").field("gap_test").interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", GAP_FIELD)) - .subAggregation(movingAvg("movavg_counts") + histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) + .subAggregation(min("the_metric").field(GAP_FIELD)) + .subAggregation(movingAvg("movavg_values") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) @@ -731,26 +704,38 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50)); - double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_counts"))).value(); + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_values"))).value(); assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0)); double currentValue; - for (int i = 1; i < numValueBuckets - 2; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); + for (int i = 1; i < 49; i++) { + SimpleValue current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); - // Since there are only two values in this test, at the beginning and end, the moving average should - // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing - // without actually verifying the computed values. Should work for all types of moving avgs and - // gap policies - assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); - lastValue = currentValue; + // Since there are only two values in this test, at the beginning and end, the moving average should + // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing + // without actually verifying the computed values. Should work for all types of moving avgs and + // gap policies + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } } - // The last bucket has a real value, so this should always increase the moving avg - currentValue = ((SimpleValue)(buckets.get(numValueBuckets - 1).getAggregations().get("movavg_counts"))).value(); - assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); + + SimpleValue current = buckets.get(49).getAggregations().get("movavg_values"); + assertThat(current, notNullValue()); + currentValue = current.value(); + + if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + // if we are ignoring, movavg could go up (double_exp) or stay the same (simple, linear, single_exp) + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // If we insert zeros, this should always increase the moving avg since the last bucket has a real value + assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); + } } /** @@ -758,21 +743,19 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { */ @Test public void testGiantGapWithPredict() { - - MovAvgModelBuilder model = randomModelBuilder(); int numPredictions = randomIntBetween(1, 10); + SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("gap_type") .addAggregation( - histogram("histo").field("gap_test").interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) - .subAggregation(randomMetric("the_metric", GAP_FIELD)) - .subAggregation(movingAvg("movavg_counts") + histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) + .subAggregation(min("the_metric").field(GAP_FIELD)) + .subAggregation(movingAvg("movavg_values") .window(windowSize) - .modelBuilder(model) + .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) - .predict(numPredictions) - .setBucketsPaths("the_metric")) + .setBucketsPaths("the_metric") + .predict(numPredictions)) ).execute().actionGet(); assertSearchResponse(response); @@ -781,32 +764,43 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets + numPredictions)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions)); - double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_counts"))).value(); + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_values"))).value(); assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0)); double currentValue; - for (int i = 1; i < numValueBuckets - 2; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); + for (int i = 1; i < 49; i++) { + SimpleValue current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); - // Since there are only two values in this test, at the beginning and end, the moving average should - // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing - // without actually verifying the computed values. Should work for all types of moving avgs and - // gap policies - assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); - lastValue = currentValue; + // Since there are only two values in this test, at the beginning and end, the moving average should + // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing + // without actually verifying the computed values. Should work for all types of moving avgs and + // gap policies + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } } - // The last bucket has a real value, so this should always increase the moving avg - currentValue = ((SimpleValue)(buckets.get(numValueBuckets - 1).getAggregations().get("movavg_counts"))).value(); - assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); + SimpleValue current = buckets.get(49).getAggregations().get("movavg_values"); + assertThat(current, notNullValue()); + currentValue = current.value(); + + if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + // If we ignore missing, there will only be two values in this histo, so movavg will stay the same + assertThat(Double.compare(lastValue, currentValue), equalTo(0)); + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // If we insert zeros, this should always increase the moving avg since the last bucket has a real value + assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); + } // Now check predictions - for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { + for (int i = 50; i < 50 + numPredictions; i++) { // Unclear at this point which direction the predictions will go, just verify they are // not null, and that we don't have the_metric anymore - assertThat((buckets.get(i).getAggregations().get("movavg_counts")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue()); assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); } } @@ -818,22 +812,19 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { */ @Test public void testLeftGap() { - SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("gap_type") .addAggregation( - filter("filtered").filter(new RangeFilterBuilder("gap_test").from(1)).subAggregation( - histogram("histo").field("gap_test").interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).from(1)).subAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) .subAggregation(randomMetric("the_metric", GAP_FIELD)) - .subAggregation(movingAvg("movavg_counts") + .subAggregation(movingAvg("movavg_values") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) .setBucketsPaths("the_metric")) - ) - - ).execute().actionGet(); + )) + .execute().actionGet(); assertSearchResponse(response); @@ -842,44 +833,42 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(filtered.getName(), equalTo("filtered")); InternalHistogram histo = filtered.getAggregations().get("histo"); - assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50)); + + double lastValue = 0; double currentValue; - double lastValue = 0.0; - for (int i = 0; i < numValueBuckets - 1; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); + for (int i = 0; i < 50; i++) { + SimpleValue current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); - assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); - lastValue = currentValue; + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + lastValue = currentValue; + } } - } @Test - public void testLeftGapWithPrediction() { - - int numPredictions = randomIntBetween(0, 10); - + public void testLeftGapWithPredict() { + int numPredictions = randomIntBetween(1, 10); SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("gap_type") .addAggregation( - filter("filtered").filter(new RangeFilterBuilder("gap_test").from(1)).subAggregation( - histogram("histo").field("gap_test").interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).from(1)).subAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) .subAggregation(randomMetric("the_metric", GAP_FIELD)) - .subAggregation(movingAvg("movavg_counts") + .subAggregation(movingAvg("movavg_values") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) - .predict(numPredictions) - .setBucketsPaths("the_metric")) - ) - - ).execute().actionGet(); + .setBucketsPaths("the_metric") + .predict(numPredictions)) + )) + .execute().actionGet(); assertSearchResponse(response); @@ -888,26 +877,29 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(filtered.getName(), equalTo("filtered")); InternalHistogram histo = filtered.getAggregations().get("histo"); - assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets + numPredictions)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions)); + + double lastValue = 0; double currentValue; - double lastValue = 0.0; - for (int i = 0; i < numValueBuckets - 1; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); + for (int i = 0; i < 50; i++) { + SimpleValue current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); - assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); - lastValue = currentValue; + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + lastValue = currentValue; + } } // Now check predictions - for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { + for (int i = 50; i < 50 + numPredictions; i++) { // Unclear at this point which direction the predictions will go, just verify they are // not null, and that we don't have the_metric anymore - assertThat((buckets.get(i).getAggregations().get("movavg_counts")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue()); assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); } } @@ -919,22 +911,19 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { */ @Test public void testRightGap() { - SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("gap_type") .addAggregation( - filter("filtered").filter(new RangeFilterBuilder("gap_test").to((interval * (numValueBuckets - 1) - interval))).subAggregation( - histogram("histo").field("gap_test").interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).to(1)).subAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) .subAggregation(randomMetric("the_metric", GAP_FIELD)) - .subAggregation(movingAvg("movavg_counts") + .subAggregation(movingAvg("movavg_values") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) .setBucketsPaths("the_metric")) - ) - - ).execute().actionGet(); + )) + .execute().actionGet(); assertSearchResponse(response); @@ -943,44 +932,46 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(filtered.getName(), equalTo("filtered")); InternalHistogram histo = filtered.getAggregations().get("histo"); - assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50)); + + + SimpleValue current = buckets.get(0).getAggregations().get("movavg_values"); + assertThat(current, notNullValue()); + + double lastValue = current.value(); double currentValue; - double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_counts"))).value(); - for (int i = 1; i < numValueBuckets - 1; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); + for (int i = 1; i < 50; i++) { + current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); - assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); - lastValue = currentValue; + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } } - } @Test - public void testRightGapWithPredictions() { - - int numPredictions = randomIntBetween(0, 10); - + public void testRightGapWithPredict() { + int numPredictions = randomIntBetween(1, 10); SearchResponse response = client() - .prepareSearch("idx") + .prepareSearch("idx").setTypes("gap_type") .addAggregation( - filter("filtered").filter(new RangeFilterBuilder("gap_test").to((interval * (numValueBuckets - 1) - interval))).subAggregation( - histogram("histo").field("gap_test").interval(interval).minDocCount(0) - .extendedBounds(0L, (long) (interval * (numValueBuckets - 1))) + filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).to(1)).subAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) .subAggregation(randomMetric("the_metric", GAP_FIELD)) - .subAggregation(movingAvg("movavg_counts") + .subAggregation(movingAvg("movavg_values") .window(windowSize) .modelBuilder(randomModelBuilder()) .gapPolicy(gapPolicy) - .predict(numPredictions) - .setBucketsPaths("the_metric")) - ) - - ).execute().actionGet(); + .setBucketsPaths("the_metric") + .predict(numPredictions)) + )) + .execute().actionGet(); assertSearchResponse(response); @@ -989,75 +980,69 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(filtered.getName(), equalTo("filtered")); InternalHistogram histo = filtered.getAggregations().get("histo"); - assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets + numPredictions)); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions)); + + + SimpleValue current = buckets.get(0).getAggregations().get("movavg_values"); + assertThat(current, notNullValue()); + + double lastValue = current.value(); double currentValue; - double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_counts"))).value(); - for (int i = 1; i < numValueBuckets - 1; i++) { - currentValue = ((SimpleValue)(buckets.get(i).getAggregations().get("movavg_counts"))).value(); + for (int i = 1; i < 50; i++) { + current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); - assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); - lastValue = currentValue; + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } } // Now check predictions - for (int i = numValueBuckets; i < numValueBuckets + numPredictions; i++) { + for (int i = 50; i < 50 + numPredictions; i++) { // Unclear at this point which direction the predictions will go, just verify they are // not null, and that we don't have the_metric anymore - assertThat((buckets.get(i).getAggregations().get("movavg_counts")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue()); assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); } } - @Test - public void testPredictWithNoBuckets() { - int numPredictions = randomIntBetween(0, 10); - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - // Filter so we are above all values - filter("filtered").filter(new RangeFilterBuilder("gap_test").from((interval * (numValueBuckets - 1) + interval))).subAggregation( - histogram("histo").field("gap_test").interval(interval).minDocCount(0) - .subAggregation(randomMetric("the_metric", GAP_FIELD)) - .subAggregation(movingAvg("movavg_counts") - .window(windowSize) - .modelBuilder(randomModelBuilder()) - .gapPolicy(gapPolicy) - .predict(numPredictions) - .setBucketsPaths("the_metric")) - ) - - ).execute().actionGet(); - - assertSearchResponse(response); - - InternalFilter filtered = response.getAggregations().get("filtered"); - assertThat(filtered, notNullValue()); - assertThat(filtered.getName(), equalTo("filtered")); - - InternalHistogram histo = filtered.getAggregations().get("histo"); - - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(0)); + private void assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) { + if (!expectedBucketIter.hasNext()) { + fail("`expectedBucketIter` iterator ended before `actual` iterator, size mismatch"); + } + if (!expectedCountsIter.hasNext()) { + fail("`expectedCountsIter` iterator ended before `actual` iterator, size mismatch"); + } + if (!expectedValuesIter.hasNext()) { + fail("`expectedValuesIter` iterator ended before `actual` iterator, size mismatch"); + } } - - private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey, - long expectedDocCount) { - if (expectedDocCount == -1) { - expectedDocCount = 0; + private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, Double expectedValue) { + // This is a gap bucket + SimpleValue countMovAvg = actual.getAggregations().get("movavg_counts"); + if (expectedCount == null) { + assertThat("[_count] movavg is not null", countMovAvg, nullValue()); + } else { + assertThat("[_count] movavg is null", countMovAvg, notNullValue()); + assertThat("[_count] movavg does not match expected ["+countMovAvg.value()+" vs "+expectedCount+"]", + Math.abs(countMovAvg.value() - expectedCount) <= 0.000001, equalTo(true)); + } + + // This is a gap bucket + SimpleValue valuesMovAvg = actual.getAggregations().get("movavg_values"); + if (expectedValue == null) { + assertThat("[value] movavg is not null", valuesMovAvg, Matchers.nullValue()); + } else { + assertThat("[value] movavg is null", valuesMovAvg, notNullValue()); + assertThat("[value] movavg does not match expected ["+valuesMovAvg.value()+" vs "+expectedValue+"]", Math.abs(valuesMovAvg.value() - expectedValue) <= 0.000001, equalTo(true)); } - assertThat(msg, bucket, notNullValue()); - assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey)); - assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount)); } private MovAvgModelBuilder randomModelBuilder() { @@ -1069,9 +1054,9 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { case 1: return new LinearModel.LinearModelBuilder(); case 2: - return new SingleExpModel.SingleExpModelBuilder().alpha(randomDouble()); + return new SingleExpModel.SingleExpModelBuilder().alpha(alpha); case 3: - return new DoubleExpModel.DoubleExpModelBuilder().alpha(randomDouble()).beta(randomDouble()); + return new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta); default: return new SimpleModel.SimpleModelBuilder(); } From a218d59ce1ebb53c93ffb049d1d921fb0609711e Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 23 Apr 2015 17:51:43 -0400 Subject: [PATCH 092/236] Fix bug where MovAvgReducer would allow NaN's to "corrupt" the moving avg --- .../search/aggregations/reducers/movavg/MovAvgReducer.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java index 4bd2ff4c50a..8b9f73ebf55 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java @@ -115,10 +115,9 @@ public class MovAvgReducer extends Reducer { Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); currentKey = bucket.getKey(); - if (thisBucketValue != null) { + if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) { values.offer(thisBucketValue); - // TODO handle "edge policy" double movavg = model.next(values); List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); From 8435d9226f41c97658ca5c16cdca71fc82474dd5 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Thu, 23 Apr 2015 19:13:27 -0400 Subject: [PATCH 093/236] Fix bug in GiantGapWithPrediction, due to "slow start" of double exp --- .../aggregations/reducers/moving/avg/MovAvgTests.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index c92f0b1cc2e..eaedfe4e597 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -20,9 +20,9 @@ package org.elasticsearch.search.aggregations.reducers.moving.avg; +import com.carrotsearch.randomizedtesting.annotations.Seed; import com.google.common.collect.EvictingQueue; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; @@ -58,12 +58,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @ElasticsearchIntegrationTest.SuiteScopeTest -@AwaitsFix(bugUrl = "Gap test logic seems to fail a lot of the time on CI build") public class MovAvgTests extends ElasticsearchIntegrationTest { private static final String INTERVAL_FIELD = "l_value"; @@ -789,8 +787,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { currentValue = current.value(); if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { - // If we ignore missing, there will only be two values in this histo, so movavg will stay the same - assertThat(Double.compare(lastValue, currentValue), equalTo(0)); + // if we are ignoring, movavg could go up (double_exp) or stay the same (simple, linear, single_exp) + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { // If we insert zeros, this should always increase the moving avg since the last bucket has a real value assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); From b2850bff47a3fffe005c8fd05adcacc973ffcbaa Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 23 Apr 2015 21:18:56 -0400 Subject: [PATCH 094/236] remove logging statements for another bikeshed --- src/main/java/org/elasticsearch/bootstrap/Bootstrap.java | 4 ---- src/main/java/org/elasticsearch/bootstrap/Security.java | 7 ++----- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index f69a9c35409..55d3712af71 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -93,12 +93,8 @@ public class Bootstrap { } private void setupSecurity(Settings settings, Environment environment) throws Exception { - ESLogger logger = Loggers.getLogger(Bootstrap.class); if (settings.getAsBoolean("security.enabled", true)) { Security.configure(environment); - logger.info("security enabled"); - } else { - logger.warn("security disabled"); } } diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index ac2bf6b0913..0a2bfec614e 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -19,6 +19,7 @@ package org.elasticsearch.bootstrap; +import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.Environment; @@ -53,11 +54,7 @@ class Security { Path newConfig = processTemplate(environment.configFile().resolve("security.policy"), environment); System.setProperty("java.security.policy", newConfig.toString()); System.setSecurityManager(new SecurityManager()); - try { - Files.delete(newConfig); - } catch (Exception e) { - Loggers.getLogger(Security.class).warn("unable to remove temporary file: " + newConfig, e); - } + IOUtils.deleteFilesIgnoringExceptions(newConfig); // TODO: maybe log something if it fails? } // package-private for testing From 500c956b45f25a8bc6a6d7ff640fa38350d6b093 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 23 Apr 2015 22:02:57 -0400 Subject: [PATCH 095/236] Remove policy config file, its a resource. Remove exposed boolean to turn off security. Add unit test --- config/elasticsearch.yml | 10 ---- pom.xml | 2 +- .../elasticsearch/bootstrap/Bootstrap.java | 2 +- .../org/elasticsearch/bootstrap/Security.java | 20 ++++++-- .../elasticsearch/bootstrap}/security.policy | 0 .../bootstrap/SecurityTests.java | 48 +++++++++++++++++++ 6 files changed, 66 insertions(+), 16 deletions(-) rename {config => src/main/resources/org/elasticsearch/bootstrap}/security.policy (100%) create mode 100644 src/test/java/org/elasticsearch/bootstrap/SecurityTests.java diff --git a/config/elasticsearch.yml b/config/elasticsearch.yml index 8842b93e35c..35383a4c5ac 100644 --- a/config/elasticsearch.yml +++ b/config/elasticsearch.yml @@ -231,16 +231,6 @@ # #http.enabled: false -################################### Security ################################## - -# SecurityManager runs elasticsearch with a lower set of priviledges. -# For more information, see -# . - -# Disable security completely: -# -# security.enabled: false - ################################### Gateway ################################### # The gateway allows for persisting the cluster state between full cluster diff --git a/pom.xml b/pom.xml index 6dd7050bbd6..a6c0d2f8517 100644 --- a/pom.xml +++ b/pom.xml @@ -630,7 +630,7 @@ ${tests.compatibility} true - ${basedir}/config/security.policy + ${basedir}/src/main/resources/org/elasticsearch/bootstrap/security.policy diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 55d3712af71..6d1652f2525 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -93,7 +93,7 @@ public class Bootstrap { } private void setupSecurity(Settings settings, Environment environment) throws Exception { - if (settings.getAsBoolean("security.enabled", true)) { + if (settings.getAsBoolean("security.manager.enabled", true)) { Security.configure(environment); } } diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 0a2bfec614e..7e5739761c7 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -19,19 +19,22 @@ package org.elasticsearch.bootstrap; +import com.google.common.io.ByteStreams; + import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.Environment; import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.HashSet; import java.util.Set; @@ -44,6 +47,9 @@ import java.util.Set; */ class Security { + /** template policy file, the one used in tests */ + static final String POLICY_RESOURCE = "security.policy"; + /** * Initializes securitymanager for the environment * Can only happen once! @@ -51,18 +57,24 @@ class Security { static void configure(Environment environment) throws IOException { // init lucene random seed. it will use /dev/urandom where available. StringHelper.randomId(); - Path newConfig = processTemplate(environment.configFile().resolve("security.policy"), environment); + InputStream config = Security.class.getResourceAsStream(POLICY_RESOURCE); + if (config == null) { + throw new NoSuchFileException(POLICY_RESOURCE); + } + Path newConfig = processTemplate(config, environment); System.setProperty("java.security.policy", newConfig.toString()); System.setSecurityManager(new SecurityManager()); IOUtils.deleteFilesIgnoringExceptions(newConfig); // TODO: maybe log something if it fails? } // package-private for testing - static Path processTemplate(Path template, Environment environment) throws IOException { + static Path processTemplate(InputStream template, Environment environment) throws IOException { Path processed = Files.createTempFile(null, null); try (OutputStream output = new BufferedOutputStream(Files.newOutputStream(processed))) { // copy the template as-is. - Files.copy(template, output); + try (InputStream in = template) { + ByteStreams.copy(in, output); + } // add permissions for all configured paths. Set paths = new HashSet<>(); diff --git a/config/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy similarity index 100% rename from config/security.policy rename to src/main/resources/org/elasticsearch/bootstrap/security.policy diff --git a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java new file mode 100644 index 00000000000..d3a27f56b1f --- /dev/null +++ b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.Collections; + +public class SecurityTests extends ElasticsearchTestCase { + + /** backslash escaping (e.g. windows paths) */ + public void testEncode() { + assertEquals("c:\\\\foobar", Security.encode("c:\\foobar")); + } + + /** test template processing */ + public void testTemplateProcessing() throws Exception { + Path path = createTempDir(); + + byte results[] = Security.createPermissions(Collections.singleton(path)); + String unicode = new String(results, StandardCharsets.UTF_8); + // try not to make this test too fragile or useless + assertTrue(unicode.contains("grant {")); + assertTrue(unicode.contains(Security.encode(path))); + assertTrue(unicode.contains("read")); + assertTrue(unicode.contains("write")); + } + +} From de109bdb3c59d03af33456e2690de31fb3290a82 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 23 Apr 2015 22:15:16 -0400 Subject: [PATCH 096/236] Buffer this inputstream out of paranoia. guava copy goes byte-by-byte... --- src/main/java/org/elasticsearch/bootstrap/Security.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 7e5739761c7..7ac7e3b5e95 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -25,6 +25,7 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; import org.elasticsearch.env.Environment; +import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -72,7 +73,7 @@ class Security { Path processed = Files.createTempFile(null, null); try (OutputStream output = new BufferedOutputStream(Files.newOutputStream(processed))) { // copy the template as-is. - try (InputStream in = template) { + try (InputStream in = new BufferedInputStream(template)) { ByteStreams.copy(in, output); } From 1f5bdca8cca1791d8028bf57829337e466e88921 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 22 Apr 2015 13:27:15 -0700 Subject: [PATCH 097/236] Mappings: Restrict murmur3 field type to sane options Disabling doc values or trying to index hash values are not correct uses of this the murmur3 field type, and just cause problems. This disallows changing doc values or index options for 2.0+. closes #10465 --- docs/reference/migration/migrate_2_0.asciidoc | 4 + .../index/mapper/core/Murmur3FieldMapper.java | 13 ++ .../mapper/core/Murmur3FieldMapperTests.java | 132 ++++++++++++++++++ 3 files changed, 149 insertions(+) create mode 100644 src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 37471d32a0b..91462645bd5 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -302,6 +302,10 @@ the user-friendly representation of boolean fields: `false`/`true`: ] --------------- +=== Murmur3 Fields +Fields of type `murmur3` can no longer change `doc_values` or `index` setting. +They are always stored with doc values, and not indexed. + === Codecs It is no longer possible to specify per-field postings and doc values formats diff --git a/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java index 68158c6a316..7c9c920a3c6 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.core; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.hash.MurmurHash3; @@ -35,6 +36,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.index.mapper.MapperBuilders.murmur3Field; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; @@ -69,6 +71,17 @@ public class Murmur3FieldMapper extends LongFieldMapper { @SuppressWarnings("unchecked") public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { Builder builder = murmur3Field(name); + + // tweaking these settings is no longer allowed, the entire purpose of murmur3 fields is to store a hash + if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) { + if (node.get("doc_values") != null) { + throw new MapperParsingException("Setting [doc_values] cannot be modified for field [" + name + "]"); + } + if (node.get("index") != null) { + throw new MapperParsingException("Setting [index] cannot be modified for field [" + name + "]"); + } + } + parseNumberField(builder, name, node, parserContext); // Because this mapper extends LongFieldMapper the null_value field will be added to the JSON when transferring cluster state // between nodes so we have to remove the entry here so that the validation doesn't fail diff --git a/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java new file mode 100644 index 00000000000..fd502a04ed9 --- /dev/null +++ b/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper.core; + +import org.apache.lucene.index.IndexOptions; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.test.ElasticsearchSingleNodeTest; +import org.junit.Before; + +public class Murmur3FieldMapperTests extends ElasticsearchSingleNodeTest { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void before() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + public void testDocValuesSettingNotAllowed() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "murmur3") + .field("doc_values", false) + .endObject().endObject().endObject().endObject().string(); + try { + parser.parse(mapping); + fail("expected a mapper parsing exception"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("Setting [doc_values] cannot be modified")); + } + + // even setting to the default is not allowed, the setting is invalid + mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "murmur3") + .field("doc_values", true) + .endObject().endObject().endObject().endObject().string(); + try { + parser.parse(mapping); + fail("expected a mapper parsing exception"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("Setting [doc_values] cannot be modified")); + } + } + + public void testIndexSettingNotAllowed() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "murmur3") + .field("index", "not_analyzed") + .endObject().endObject().endObject().endObject().string(); + try { + parser.parse(mapping); + fail("expected a mapper parsing exception"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("Setting [index] cannot be modified")); + } + + // even setting to the default is not allowed, the setting is invalid + mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "murmur3") + .field("index", "no") + .endObject().endObject().endObject().endObject().string(); + try { + parser.parse(mapping); + fail("expected a mapper parsing exception"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("Setting [index] cannot be modified")); + } + } + + public void testDocValuesSettingBackcompat() throws Exception { + Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); + indexService = createIndex("test_bwc", settings); + parser = indexService.mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "murmur3") + .field("doc_values", false) + .endObject().endObject().endObject().endObject().string(); + + DocumentMapper docMapper = parser.parse(mapping); + Murmur3FieldMapper mapper = (Murmur3FieldMapper)docMapper.mappers().getMapper("field"); + assertFalse(mapper.hasDocValues()); + } + + public void testIndexSettingBackcompat() throws Exception { + Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); + indexService = createIndex("test_bwc", settings); + parser = indexService.mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "murmur3") + .field("index", "not_analyzed") + .endObject().endObject().endObject().endObject().string(); + + DocumentMapper docMapper = parser.parse(mapping); + Murmur3FieldMapper mapper = (Murmur3FieldMapper)docMapper.mappers().getMapper("field"); + assertEquals(IndexOptions.DOCS, mapper.fieldType().indexOptions()); + } + + // TODO: add more tests +} From c9d72431a3277786f3d597d2b90d4cafbc455687 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 22 Apr 2015 16:34:28 +0200 Subject: [PATCH 098/236] Test: add afterIfFailed & afterIfSuccessful to ElasticsearchTestCase Also use afterIfFailed to log more info from the rest tests --- .../test/ElasticsearchTestCase.java | 208 ++++++++++-------- .../test/rest/ElasticsearchRestTestCase.java | 7 + 2 files changed, 129 insertions(+), 86 deletions(-) diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 1f3d0eaa54b..55b4b15af01 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -28,8 +28,8 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; import com.google.common.base.Predicate; - import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.uninverting.UninvertingReader; import org.apache.lucene.util.LuceneTestCase; @@ -57,23 +57,15 @@ import org.elasticsearch.test.junit.listeners.LoggingListener; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; import org.elasticsearch.test.search.MockSearchService; import org.elasticsearch.threadpool.ThreadPool; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; +import org.junit.*; +import org.junit.rules.RuleChain; import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Field; import java.nio.file.FileSystem; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Formatter; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Random; +import java.util.*; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -86,8 +78,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllS * Base testcase for randomized unit testing with Elasticsearch */ @Listeners({ - ReproduceInfoPrinter.class, - LoggingListener.class + ReproduceInfoPrinter.class, + LoggingListener.class }) @ThreadLeakScope(Scope.SUITE) @ThreadLeakLingering(linger = 5000) // 5 sec lingering @@ -96,39 +88,63 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllS // we suppress pretty much all the lucene codecs for now, except asserting // assertingcodec is the winner for a codec here: it finds bugs and gives clear exceptions. @SuppressCodecs({ - "SimpleText", "Memory", "CheapBastard", "Direct", "Compressing", "FST50", "FSTOrd50", - "TestBloomFilteredLucenePostings", "MockRandom", "BlockTreeOrds", "LuceneFixedGap", - "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", "Lucene50" + "SimpleText", "Memory", "CheapBastard", "Direct", "Compressing", "FST50", "FSTOrd50", + "TestBloomFilteredLucenePostings", "MockRandom", "BlockTreeOrds", "LuceneFixedGap", + "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", "Lucene50" }) @LuceneTestCase.SuppressReproduceLine public abstract class ElasticsearchTestCase extends LuceneTestCase { - + static { SecurityHack.ensureInitialized(); } - + protected final ESLogger logger = Loggers.getLogger(getClass()); // ----------------------------------------------------------------- // Suite and test case setup/cleanup. // ----------------------------------------------------------------- + @Rule + public RuleChain failureAndSuccessEvents = RuleChain.outerRule(new TestRuleAdapter() { + @Override + protected void afterIfSuccessful() throws Throwable { + ElasticsearchTestCase.this.afterIfSuccessful(); + } + + @Override + protected void afterAlways(List errors) throws Throwable { + if (errors != null && errors.isEmpty() == false) { + ElasticsearchTestCase.this.afterIfFailed(errors); + } + super.afterAlways(errors); + } + }); + + /** called when a test fails, supplying the errors it generated */ + protected void afterIfFailed(List errors) { + } + + /** called after a test is finished, but only if succesfull */ + protected void afterIfSuccessful() { + } + // TODO: Parent/child and other things does not work with the query cache // We must disable query cache for both suite and test to override lucene, but LTC resets it after the suite - + @BeforeClass public static void disableQueryCacheSuite() { IndexSearcher.setDefaultQueryCache(null); } - + @Before public final void disableQueryCache() { IndexSearcher.setDefaultQueryCache(null); } - + // setup mock filesystems for this test run. we change PathUtils // so that all accesses are plumbed thru any mock wrappers - + @BeforeClass public static void setFileSystem() throws Exception { Field field = PathUtils.class.getDeclaredField("DEFAULT"); @@ -137,7 +153,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { field.set(null, mock); assertEquals(mock, PathUtils.getDefaultFileSystem()); } - + @AfterClass public static void restoreFileSystem() throws Exception { Field field1 = PathUtils.class.getDeclaredField("ACTUAL_DEFAULT"); @@ -149,41 +165,41 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { // setup a default exception handler which knows when and how to print a stacktrace private static Thread.UncaughtExceptionHandler defaultHandler; - + @BeforeClass public static void setDefaultExceptionHandler() throws Exception { defaultHandler = Thread.getDefaultUncaughtExceptionHandler(); Thread.setDefaultUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler(defaultHandler)); } - + @AfterClass public static void restoreDefaultExceptionHandler() throws Exception { Thread.setDefaultUncaughtExceptionHandler(defaultHandler); } // randomize content type for request builders - + @BeforeClass public static void setContentType() throws Exception { Requests.CONTENT_TYPE = randomFrom(XContentType.values()); Requests.INDEX_CONTENT_TYPE = randomFrom(XContentType.values()); } - + @AfterClass public static void restoreContentType() { Requests.CONTENT_TYPE = XContentType.SMILE; Requests.INDEX_CONTENT_TYPE = XContentType.JSON; } - + // randomize and override the number of cpus so tests reproduce regardless of real number of cpus - + @BeforeClass public static void setProcessors() { int numCpu = TestUtil.nextInt(random(), 1, 4); System.setProperty(EsExecutors.DEFAULT_SYSPROP, Integer.toString(numCpu)); assertEquals(numCpu, EsExecutors.boundedNumberOfProcessors(ImmutableSettings.EMPTY)); } - + @AfterClass public static void restoreProcessors() { System.clearProperty(EsExecutors.DEFAULT_SYSPROP); @@ -192,7 +208,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { // check some things (like MockDirectoryWrappers) are closed where we currently // manage them. TODO: can we add these to LuceneTestCase.closeAfterSuite directly? // or something else simpler instead of the fake closeables? - + @BeforeClass public static void setAfterSuiteAssertions() throws Exception { closeAfterSuite(new Closeable() { @@ -208,7 +224,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { } }); } - + @After public final void ensureCleanedUp() throws Exception { MockPageCacheRecycler.ensureAllPagesAreReleased(); @@ -228,18 +244,18 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { } }); } - + // mockdirectorywrappers currently set this boolean if checkindex fails // TODO: can we do this cleaner??? - + /** MockFSDirectoryService sets this: */ public static boolean checkIndexFailed; - + @Before public final void resetCheckIndexStatus() throws Exception { checkIndexFailed = false; } - + @After public final void ensureCheckIndexPassed() throws Exception { assertFalse("at least one shard failed CheckIndex", checkIndexFailed); @@ -248,7 +264,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { // ----------------------------------------------------------------- // Test facilities and facades for subclasses. // ----------------------------------------------------------------- - + // TODO: replaces uses of getRandom() with random() // TODO: decide on one set of naming for between/scaledBetween and remove others // TODO: replace frequently() with usually() @@ -258,114 +274,133 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { // TODO: replace uses of this function with random() return random(); } - + /** * Returns a "scaled" random number between min and max (inclusive). + * * @see RandomizedTest#scaledRandomIntBetween(int, int); */ public static int scaledRandomIntBetween(int min, int max) { return RandomizedTest.scaledRandomIntBetween(min, max); } - - /** + + /** * A random integer from min to max (inclusive). + * * @see #scaledRandomIntBetween(int, int) */ public static int randomIntBetween(int min, int max) { - return RandomInts.randomIntBetween(random(), min, max); + return RandomInts.randomIntBetween(random(), min, max); } - + /** * Returns a "scaled" number of iterations for loops which can have a variable - * iteration count. This method is effectively + * iteration count. This method is effectively * an alias to {@link #scaledRandomIntBetween(int, int)}. */ public static int iterations(int min, int max) { return scaledRandomIntBetween(min, max); } - - /** - * An alias for {@link #randomIntBetween(int, int)}. - * + + /** + * An alias for {@link #randomIntBetween(int, int)}. + * * @see #scaledRandomIntBetween(int, int) */ public static int between(int min, int max) { - return randomIntBetween(min, max); + return randomIntBetween(min, max); } - + /** * The exact opposite of {@link #rarely()}. */ public static boolean frequently() { - return !rarely(); + return !rarely(); } - + public static boolean randomBoolean() { return random().nextBoolean(); } - - public static byte randomByte() { return (byte) random().nextInt(); } - public static short randomShort() { return (short) random().nextInt(); } - public static int randomInt() { return random().nextInt(); } - public static float randomFloat() { return random().nextFloat(); } - public static double randomDouble() { return random().nextDouble(); } - public static long randomLong() { return random().nextLong(); } + + public static byte randomByte() { + return (byte) random().nextInt(); + } + + public static short randomShort() { + return (short) random().nextInt(); + } + + public static int randomInt() { + return random().nextInt(); + } + + public static float randomFloat() { + return random().nextFloat(); + } + + public static double randomDouble() { + return random().nextDouble(); + } + + public static long randomLong() { + return random().nextLong(); + } /** A random integer from 0..max (inclusive). */ public static int randomInt(int max) { return RandomizedTest.randomInt(max); } - + /** Pick a random object from the given array. The array must not be empty. */ public static T randomFrom(T... array) { - return RandomPicks.randomFrom(random(), array); + return RandomPicks.randomFrom(random(), array); } /** Pick a random object from the given list. */ public static T randomFrom(List list) { - return RandomPicks.randomFrom(random(), list); + return RandomPicks.randomFrom(random(), list); } - + public static String randomAsciiOfLengthBetween(int minCodeUnits, int maxCodeUnits) { - return RandomizedTest.randomAsciiOfLengthBetween(minCodeUnits, maxCodeUnits); + return RandomizedTest.randomAsciiOfLengthBetween(minCodeUnits, maxCodeUnits); } - + public static String randomAsciiOfLength(int codeUnits) { - return RandomizedTest.randomAsciiOfLength(codeUnits); + return RandomizedTest.randomAsciiOfLength(codeUnits); } - + public static String randomUnicodeOfLengthBetween(int minCodeUnits, int maxCodeUnits) { - return RandomizedTest.randomUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits); + return RandomizedTest.randomUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits); } - + public static String randomUnicodeOfLength(int codeUnits) { - return RandomizedTest.randomUnicodeOfLength(codeUnits); + return RandomizedTest.randomUnicodeOfLength(codeUnits); } public static String randomUnicodeOfCodepointLengthBetween(int minCodePoints, int maxCodePoints) { - return RandomizedTest.randomUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints); + return RandomizedTest.randomUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints); } - + public static String randomUnicodeOfCodepointLength(int codePoints) { - return RandomizedTest.randomUnicodeOfCodepointLength(codePoints); + return RandomizedTest.randomUnicodeOfCodepointLength(codePoints); } public static String randomRealisticUnicodeOfLengthBetween(int minCodeUnits, int maxCodeUnits) { - return RandomizedTest.randomRealisticUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits); + return RandomizedTest.randomRealisticUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits); } - + public static String randomRealisticUnicodeOfLength(int codeUnits) { - return RandomizedTest.randomRealisticUnicodeOfLength(codeUnits); + return RandomizedTest.randomRealisticUnicodeOfLength(codeUnits); } public static String randomRealisticUnicodeOfCodepointLengthBetween(int minCodePoints, int maxCodePoints) { - return RandomizedTest.randomRealisticUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints); + return RandomizedTest.randomRealisticUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints); } - + public static String randomRealisticUnicodeOfCodepointLength(int codePoints) { - return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints); + return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints); } - + public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull) { if (allowNull && random().nextBoolean()) { return null; @@ -425,7 +460,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { throw e; } } - + public static boolean awaitBusy(Predicate breakPredicate) throws InterruptedException { return awaitBusy(breakPredicate, 10, TimeUnit.SECONDS); } @@ -497,9 +532,9 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException { Settings build = ImmutableSettings.builder() - .put(settings) - .put("path.home", createTempDir().toAbsolutePath()) - .putArray("path.data", tmpPaths()).build(); + .put(settings) + .put("path.home", createTempDir().toAbsolutePath()) + .putArray("path.data", tmpPaths()).build(); return new NodeEnvironment(build, new Environment(build)); } @@ -524,7 +559,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { private ElasticsearchUncaughtExceptionHandler(Thread.UncaughtExceptionHandler parent) { this.parent = parent; } - + @Override public void uncaughtException(Thread t, Throwable e) { if (e instanceof EsRejectedExecutionException) { @@ -552,8 +587,9 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { int cnt = 1; final Formatter f = new Formatter(message, Locale.ENGLISH); for (Map.Entry e : threads.entrySet()) { - if (e.getKey().isAlive()) + if (e.getKey().isAlive()) { f.format(Locale.ENGLISH, "\n %2d) %s", cnt++, threadName(e.getKey())).flush(); + } if (e.getValue().length == 0) { message.append("\n at (empty stack)"); } else { diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java index 5e1d5084e41..78a29610d33 100644 --- a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java +++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.node.Node; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; @@ -123,6 +124,12 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration } } + @Override + protected void afterIfFailed(List errors) { + logger.info("Stash dump on failure [{}]", XContentHelper.toString(restTestExecutionContext.stash())); + super.afterIfFailed(errors); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.builder() From 15d58d91f1abb75764428fdb5e436edbe0f4181d Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 24 Apr 2015 09:36:10 +0200 Subject: [PATCH 099/236] [REST] Render REST errors in a structural way This commit adds support for structural errors / failures / exceptions on the elasticsearch REST layer. Exceptions are rendering with at least a `type` and a `reason` corresponding to the exception name and the message. Some expcetions like the ones associated with an index or a shard will have additional information about the index the exception was triggered on or the shard respectivly. Each rendered response will also contain a list of root causes which is a list of distinct shard level errors returned for the request. Root causes are the lowest level elasticsearch exception found per shard response and are intended to be displayed to the user to indicate the soruce of the exception. Shard level response are by-default grouped by their type and reason to reduce the amount of duplicates retunred. Yet, the same exception retunred from different indices will not be grouped. Closes #3303 --- rest-api-spec/test/create/50_parent.yaml | 2 +- rest-api-spec/test/create/75_ttl.yaml | 2 +- .../test/delete/42_missing_parent.yml | 2 +- rest-api-spec/test/index/50_parent.yaml | 2 +- rest-api-spec/test/index/75_ttl.yaml | 2 +- .../test/mget/13_missing_metadata.yaml | 8 +- rest-api-spec/test/mget/15_ids.yaml | 4 +- rest-api-spec/test/mpercolate/10_basic.yaml | 2 +- rest-api-spec/test/msearch/10_basic.yaml | 2 +- rest-api-spec/test/script/10_basic.yaml | 4 +- rest-api-spec/test/template/10_basic.yaml | 2 +- rest-api-spec/test/template/20_search.yaml | 2 +- rest-api-spec/test/update/50_parent.yaml | 2 +- rest-api-spec/test/update/75_ttl.yaml | 2 +- .../elasticsearch/ElasticsearchException.java | 136 ++++++++--- .../org/elasticsearch/ExceptionsHelper.java | 8 +- .../action/ActionWriteResponse.java | 20 +- .../action/bulk/BulkItemResponse.java | 2 +- .../action/bulk/TransportShardBulkAction.java | 8 +- .../search/SearchPhaseExecutionException.java | 101 ++++++++- .../action/search/ShardSearchFailure.java | 36 ++- .../DefaultShardOperationFailedException.java | 22 +- ...nsportIndexReplicationOperationAction.java | 10 +- ...nsportShardReplicationOperationAction.java | 5 +- .../common/io/stream/BytesStreamOutput.java | 5 - .../common/io/stream/StreamInput.java | 11 + .../common/io/stream/StreamOutput.java | 11 +- .../elasticsearch/index/IndexException.java | 24 +- .../index/percolator/PercolatorException.java | 4 - .../index/query/QueryParsingException.java | 1 + .../index/shard/IndexShardException.java | 18 +- .../indices/AliasFilterParsingException.java | 4 - .../indices/IndexMissingException.java | 2 +- .../indices/TypeMissingException.java | 5 - .../elasticsearch/rest/BytesRestResponse.java | 25 ++- .../search/SearchContextException.java | 15 +- .../search/SearchParseException.java | 4 +- .../ElasticsearchExceptionTests.java | 212 ++++++++++++++++++ .../BulkProcessorClusterSettingsTests.java | 2 +- .../cluster/metadata/MetaDataTests.java | 3 +- .../deleteByQuery/DeleteByQueryTests.java | 2 +- .../org/elasticsearch/document/BulkTests.java | 8 +- .../query/SimpleIndexQueryParserTests.java | 4 +- .../breaker/CircuitBreakerServiceTests.java | 12 +- .../template/SimpleIndexTemplateTests.java | 4 +- .../nested/SimpleNestedTests.java | 2 +- .../rest/BytesRestResponseTests.java | 64 +++++- .../script/GroovySandboxScriptTests.java | 3 +- .../script/GroovyScriptTests.java | 24 +- .../script/IndexLookupTests.java | 4 +- .../script/IndexedScriptTests.java | 6 +- .../script/OnDiskScriptTests.java | 8 +- .../script/SandboxDisabledTests.java | 2 +- .../expression/ExpressionScriptTests.java | 58 ++--- .../AggregationsIntegrationTests.java | 2 +- .../bucket/DateHistogramTests.java | 2 +- .../aggregations/bucket/HistogramTests.java | 2 +- .../aggregations/bucket/NestedTests.java | 2 +- .../aggregations/bucket/TopHitsTests.java | 4 +- .../child/SimpleChildQuerySearchTests.java | 25 +-- .../DecayFunctionScoreTests.java | 16 +- .../search/query/SearchQueryTests.java | 8 +- .../search/simple/SimpleSearchTests.java | 2 +- .../search/sort/SimpleSortTests.java | 2 +- .../suggest/CompletionSuggestSearchTests.java | 12 +- .../hamcrest/ElasticsearchAssertions.java | 2 +- 66 files changed, 737 insertions(+), 275 deletions(-) diff --git a/rest-api-spec/test/create/50_parent.yaml b/rest-api-spec/test/create/50_parent.yaml index dcd24d99346..6fe64b7bbed 100644 --- a/rest-api-spec/test/create/50_parent.yaml +++ b/rest-api-spec/test/create/50_parent.yaml @@ -14,7 +14,7 @@ wait_for_status: yellow - do: - catch: /RoutingMissingException/ + catch: /routing_missing_exception/ create: index: test_1 type: test diff --git a/rest-api-spec/test/create/75_ttl.yaml b/rest-api-spec/test/create/75_ttl.yaml index 099ed279020..3354e3b0517 100644 --- a/rest-api-spec/test/create/75_ttl.yaml +++ b/rest-api-spec/test/create/75_ttl.yaml @@ -89,7 +89,7 @@ type: test id: 1 - do: - catch: /AlreadyExpiredException/ + catch: /already_expired_exception/ create: index: test_1 type: test diff --git a/rest-api-spec/test/delete/42_missing_parent.yml b/rest-api-spec/test/delete/42_missing_parent.yml index 8247f8885e9..d72c5a83d59 100644 --- a/rest-api-spec/test/delete/42_missing_parent.yml +++ b/rest-api-spec/test/delete/42_missing_parent.yml @@ -21,7 +21,7 @@ body: { foo: bar } - do: - catch: /RoutingMissingException/ + catch: /routing_missing_exception/ delete: index: test_1 type: test diff --git a/rest-api-spec/test/index/50_parent.yaml b/rest-api-spec/test/index/50_parent.yaml index 551d30d95d7..28ab61cb49b 100644 --- a/rest-api-spec/test/index/50_parent.yaml +++ b/rest-api-spec/test/index/50_parent.yaml @@ -13,7 +13,7 @@ wait_for_status: yellow - do: - catch: /RoutingMissingException/ + catch: /routing_missing_exception/ index: index: test_1 type: test diff --git a/rest-api-spec/test/index/75_ttl.yaml b/rest-api-spec/test/index/75_ttl.yaml index 1d73d4ed40f..554933654fb 100644 --- a/rest-api-spec/test/index/75_ttl.yaml +++ b/rest-api-spec/test/index/75_ttl.yaml @@ -74,7 +74,7 @@ # with timestamp - do: - catch: /AlreadyExpiredException/ + catch: /already_expired_exception/ index: index: test_1 type: test diff --git a/rest-api-spec/test/mget/13_missing_metadata.yaml b/rest-api-spec/test/mget/13_missing_metadata.yaml index 11b4a129406..8d986a330bf 100644 --- a/rest-api-spec/test/mget/13_missing_metadata.yaml +++ b/rest-api-spec/test/mget/13_missing_metadata.yaml @@ -13,27 +13,27 @@ wait_for_status: yellow - do: - catch: /ActionRequestValidationException.+ id is missing/ + catch: /action_request_validation_exception.+ id is missing/ mget: body: docs: - { _index: test_1, _type: test} - do: - catch: /ActionRequestValidationException.+ index is missing/ + catch: /action_request_validation_exception.+ index is missing/ mget: body: docs: - { _type: test, _id: 1} - do: - catch: /ActionRequestValidationException.+ no documents to get/ + catch: /action_request_validation_exception.+ no documents to get/ mget: body: docs: [] - do: - catch: /ActionRequestValidationException.+ no documents to get/ + catch: /action_request_validation_exception.+ no documents to get/ mget: body: {} diff --git a/rest-api-spec/test/mget/15_ids.yaml b/rest-api-spec/test/mget/15_ids.yaml index a86fc2cdc6e..cdd6c5724fe 100644 --- a/rest-api-spec/test/mget/15_ids.yaml +++ b/rest-api-spec/test/mget/15_ids.yaml @@ -59,14 +59,14 @@ - do: - catch: /ActionRequestValidationException.+ no documents to get/ + catch: /action_request_validation_exception.+ no documents to get/ mget: index: test_1 body: ids: [] - do: - catch: /ActionRequestValidationException.+ no documents to get/ + catch: /action_request_validation_exception.+ no documents to get/ mget: index: test_1 body: {} diff --git a/rest-api-spec/test/mpercolate/10_basic.yaml b/rest-api-spec/test/mpercolate/10_basic.yaml index 70118c93da1..9f949c21cd6 100644 --- a/rest-api-spec/test/mpercolate/10_basic.yaml +++ b/rest-api-spec/test/mpercolate/10_basic.yaml @@ -37,5 +37,5 @@ foo: bar - match: { responses.0.total: 1 } - - match: { responses.1.error: "IndexMissingException[[percolator_index1] missing]" } + - match: { responses.1.error: "/IndexMissingException.no.such.index./" } - match: { responses.2.total: 1 } diff --git a/rest-api-spec/test/msearch/10_basic.yaml b/rest-api-spec/test/msearch/10_basic.yaml index c0786229926..a028853429e 100644 --- a/rest-api-spec/test/msearch/10_basic.yaml +++ b/rest-api-spec/test/msearch/10_basic.yaml @@ -39,7 +39,7 @@ match: {foo: bar} - match: { responses.0.hits.total: 3 } - - match: { responses.1.error: "IndexMissingException[[test_2] missing]" } + - match: { responses.1.error: "/IndexMissingException.no.such.index./" } - match: { responses.2.hits.total: 1 } diff --git a/rest-api-spec/test/script/10_basic.yaml b/rest-api-spec/test/script/10_basic.yaml index 822d8879771..ee977429b59 100644 --- a/rest-api-spec/test/script/10_basic.yaml +++ b/rest-api-spec/test/script/10_basic.yaml @@ -60,7 +60,7 @@ - do: - catch: /ElasticsearchIllegalArgumentException.Unable.to.parse.*/ + catch: /Unable.to.parse.*/ put_script: id: "1" lang: "groovy" @@ -74,7 +74,7 @@ body: { "script" : "_score * doc[\"myParent.weight\"].value" } - do: - catch: /ElasticsearchIllegalArgumentException.script_lang.not.supported/ + catch: /script_lang.not.supported/ put_script: id: "1" lang: "foobar" diff --git a/rest-api-spec/test/template/10_basic.yaml b/rest-api-spec/test/template/10_basic.yaml index 838a21d5a56..bd1fd436648 100644 --- a/rest-api-spec/test/template/10_basic.yaml +++ b/rest-api-spec/test/template/10_basic.yaml @@ -50,7 +50,7 @@ body: { "template": { "query": { "match{{}}_all": {}}, "size": "{{my_size}}" } } - do: - catch: /ElasticsearchIllegalArgumentException\SUnable\sto\sparse.*/ + catch: /Unable\sto\sparse.*/ put_template: id: "1" body: { "template": { "query": { "match{{}}_all": {}}, "size": "{{my_size}}" } } diff --git a/rest-api-spec/test/template/20_search.yaml b/rest-api-spec/test/template/20_search.yaml index 55f886c6412..d8e7364d545 100644 --- a/rest-api-spec/test/template/20_search.yaml +++ b/rest-api-spec/test/template/20_search.yaml @@ -37,7 +37,7 @@ - match: { hits.total: 1 } - do: - catch: /ElasticsearchIllegalArgumentException.Unable.to.find.on.disk.script.simple1/ + catch: /Unable.to.find.on.disk.script.simple1/ search_template: body: { "template" : "simple1" } diff --git a/rest-api-spec/test/update/50_parent.yaml b/rest-api-spec/test/update/50_parent.yaml index 3d15ea9f2a8..bc64665e919 100644 --- a/rest-api-spec/test/update/50_parent.yaml +++ b/rest-api-spec/test/update/50_parent.yaml @@ -15,7 +15,7 @@ setup: "Parent": - do: - catch: /RoutingMissingException/ + catch: /routing_missing_exception/ update: index: test_1 type: test diff --git a/rest-api-spec/test/update/75_ttl.yaml b/rest-api-spec/test/update/75_ttl.yaml index f6b05b9eca2..8072c4d400f 100644 --- a/rest-api-spec/test/update/75_ttl.yaml +++ b/rest-api-spec/test/update/75_ttl.yaml @@ -81,7 +81,7 @@ # with timestamp - do: - catch: /AlreadyExpiredException/ + catch: /already_expired_exception/ index: index: test_1 type: test diff --git a/src/main/java/org/elasticsearch/ElasticsearchException.java b/src/main/java/org/elasticsearch/ElasticsearchException.java index fd7c9186875..eed4040793c 100644 --- a/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -22,18 +22,23 @@ package org.elasticsearch; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.HasRestHeaders; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; import java.util.List; import java.util.Map; /** * A base class for all elasticsearch exceptions. */ -public class ElasticsearchException extends RuntimeException { +public class ElasticsearchException extends RuntimeException implements ToXContent { + + public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.skip_cause"; /** * Construct a ElasticsearchException with the specified detail message. @@ -62,12 +67,8 @@ public class ElasticsearchException extends RuntimeException { Throwable cause = unwrapCause(); if (cause == this) { return RestStatus.INTERNAL_SERVER_ERROR; - } else if (cause instanceof ElasticsearchException) { - return ((ElasticsearchException) cause).status(); - } else if (cause instanceof IllegalArgumentException) { - return RestStatus.BAD_REQUEST; } else { - return RestStatus.INTERNAL_SERVER_ERROR; + return ExceptionsHelper.status(cause); } } @@ -114,19 +115,6 @@ public class ElasticsearchException extends RuntimeException { return rootCause; } - /** - * Retrieve the most specific cause of this exception, that is, - * either the innermost cause (root cause) or this exception itself. - *

    Differs from {@link #getRootCause()} in that it falls back - * to the present exception if there is no root cause. - * - * @return the most specific cause (never null) - */ - public Throwable getMostSpecificCause() { - Throwable rootCause = getRootCause(); - return (rootCause != null ? rootCause : this); - } - /** * Check whether this exception contains an exception of the given type: * either it is of the given class itself or it contains a nested cause @@ -175,21 +163,6 @@ public class ElasticsearchException extends RuntimeException { this.headers = headers(headers); } - public WithRestHeaders(String msg, @Nullable ImmutableMap> headers) { - super(msg); - this.headers = headers != null ? headers : ImmutableMap.>of(); - } - - public WithRestHeaders(String msg, Throwable cause, Tuple... headers) { - super(msg, cause); - this.headers = headers(headers); - } - - public WithRestHeaders(String msg, Throwable cause, @Nullable ImmutableMap> headers) { - super(msg, cause); - this.headers = headers != null ? headers : ImmutableMap.>of(); - } - @Override public ImmutableMap> getHeaders() { return headers; @@ -215,4 +188,97 @@ public class ElasticsearchException extends RuntimeException { return ImmutableMap.copyOf(map); } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (this instanceof ElasticsearchWrapperException) { + toXContent(builder, params, this); + } else { + builder.field("type", getExceptionName(this)); + builder.field("reason", getMessage()); + innerToXContent(builder, params); + } + return builder; + } + + /** + * Renders additional per exception information into the xcontent + */ + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + causeToXContent(builder, params); + } + + /** + * Renders a cause exception as xcontent + */ + protected final void causeToXContent(XContentBuilder builder, Params params) throws IOException { + final Throwable cause = getCause(); + if (cause != null && params.paramAsBoolean(REST_EXCEPTION_SKIP_CAUSE, false) == false) { + builder.field("caused_by"); + builder.startObject(); + toXContent(builder, params, cause); + builder.endObject(); + } + } + + /** + * Statis toXContent helper method that also renders non {@link org.elasticsearch.ElasticsearchException} instances as XContent. + */ + public static void toXContent(XContentBuilder builder, Params params, Throwable ex) throws IOException { + ex = ExceptionsHelper.unwrapCause(ex); + if (ex instanceof ElasticsearchException) { + ((ElasticsearchException) ex).toXContent(builder, params); + } else { + builder.field("type", getExceptionName(ex)); + builder.field("reason", ex.getMessage()); + if (ex.getCause() != null) { + builder.field("caused_by"); + builder.startObject(); + toXContent(builder, params, ex.getCause()); + builder.endObject(); + } + } + } + + /** + * Returns the root cause of this exception or mupltiple if different shards caused different exceptions + */ + public ElasticsearchException[] guessRootCauses() { + final Throwable cause = getCause(); + if (cause != null && cause instanceof ElasticsearchException) { + return ((ElasticsearchException) cause).guessRootCauses(); + } + return new ElasticsearchException[] {this}; + } + + /** + * Returns the root cause of this exception or mupltiple if different shards caused different exceptions. + * If the given exception is not an instance of {@link org.elasticsearch.ElasticsearchException} an empty array + * is returned. + */ + public static ElasticsearchException[] guessRootCauses(Throwable t) { + Throwable ex = ExceptionsHelper.unwrapCause(t); + if (ex instanceof ElasticsearchException) { + return ((ElasticsearchException) ex).guessRootCauses(); + } + return new ElasticsearchException[0]; + } + + /** + * Returns a underscore case name for the given exception. This method strips Elasticsearch prefixes from exception names. + */ + public static String getExceptionName(Throwable ex) { + String simpleName = ex.getClass().getSimpleName(); + if (simpleName.startsWith("Elasticsearch")) { + simpleName = simpleName.substring("Elasticsearch".length()); + } + return Strings.toUnderscoreCase(simpleName); + } + + @Override + public String toString() { + return ExceptionsHelper.detailedMessage(this).trim(); + } + + } diff --git a/src/main/java/org/elasticsearch/ExceptionsHelper.java b/src/main/java/org/elasticsearch/ExceptionsHelper.java index 552d339301e..9c29a4dc0aa 100644 --- a/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -54,8 +54,12 @@ public final class ExceptionsHelper { } public static RestStatus status(Throwable t) { - if (t instanceof ElasticsearchException) { - return ((ElasticsearchException) t).status(); + if (t != null) { + if (t instanceof ElasticsearchException) { + return ((ElasticsearchException) t).status(); + } else if (t instanceof IllegalArgumentException) { + return RestStatus.BAD_REQUEST; + } } return RestStatus.INTERNAL_SERVER_ERROR; } diff --git a/src/main/java/org/elasticsearch/action/ActionWriteResponse.java b/src/main/java/org/elasticsearch/action/ActionWriteResponse.java index 63b1f06a9f9..5ddefaf99b2 100644 --- a/src/main/java/org/elasticsearch/action/ActionWriteResponse.java +++ b/src/main/java/org/elasticsearch/action/ActionWriteResponse.java @@ -19,6 +19,9 @@ package org.elasticsearch.action; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.bootstrap.Elasticsearch; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -164,15 +167,15 @@ public abstract class ActionWriteResponse extends ActionResponse { private String index; private int shardId; private String nodeId; - private String reason; + private Throwable cause; private RestStatus status; private boolean primary; - public Failure(String index, int shardId, @Nullable String nodeId, String reason, RestStatus status, boolean primary) { + public Failure(String index, int shardId, @Nullable String nodeId, Throwable cause, RestStatus status, boolean primary) { this.index = index; this.shardId = shardId; this.nodeId = nodeId; - this.reason = reason; + this.cause = cause; this.status = status; this.primary = primary; } @@ -209,7 +212,7 @@ public abstract class ActionWriteResponse extends ActionResponse { */ @Override public String reason() { - return reason; + return ExceptionsHelper.detailedMessage(cause); } /** @@ -233,7 +236,7 @@ public abstract class ActionWriteResponse extends ActionResponse { index = in.readString(); shardId = in.readVInt(); nodeId = in.readOptionalString(); - reason = in.readString(); + cause = in.readThrowable(); status = RestStatus.readFrom(in); primary = in.readBoolean(); } @@ -243,7 +246,7 @@ public abstract class ActionWriteResponse extends ActionResponse { out.writeString(index); out.writeVInt(shardId); out.writeOptionalString(nodeId); - out.writeString(reason); + out.writeThrowable(cause); RestStatus.writeTo(out, status); out.writeBoolean(primary); } @@ -254,7 +257,10 @@ public abstract class ActionWriteResponse extends ActionResponse { builder.field(Fields._INDEX, index); builder.field(Fields._SHARD, shardId); builder.field(Fields._NODE, nodeId); - builder.field(Fields.REASON, reason); + builder.field(Fields.REASON); + builder.startObject(); + ElasticsearchException.toXContent(builder, params, cause); + builder.endObject(); builder.field(Fields.STATUS, status); builder.field(Fields.PRIMARY, primary); builder.endObject(); diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 3ffa1ddcb62..c32f02f0022 100644 --- a/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -51,7 +51,7 @@ public class BulkItemResponse implements Streamable { this.index = index; this.type = type; this.id = id; - this.message = ExceptionsHelper.detailedMessage(t); + this.message = t.toString(); this.status = ExceptionsHelper.status(t); } diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 9e354e82836..4a29cfae58a 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -157,7 +157,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } throw (ElasticsearchException) e; } - if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { logger.trace("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest); } else { logger.debug("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest); @@ -190,7 +190,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } throw (ElasticsearchException) e; } - if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { logger.trace("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest); } else { logger.debug("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest); @@ -279,7 +279,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation case UPSERT: case INDEX: IndexRequest indexRequest = updateResult.request(); - if (t instanceof ElasticsearchException && ((ElasticsearchException) t).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { logger.trace("{} failed to execute bulk item (index) {}", t, shardRequest.shardId, indexRequest); } else { logger.debug("{} failed to execute bulk item (index) {}", t, shardRequest.shardId, indexRequest); @@ -289,7 +289,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation break; case DELETE: DeleteRequest deleteRequest = updateResult.request(); - if (t instanceof ElasticsearchException && ((ElasticsearchException) t).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { logger.trace("{} failed to execute bulk item (delete) {}", t, shardRequest.shardId, deleteRequest); } else { logger.debug("{} failed to execute bulk item (delete) {}", t, shardRequest.shardId, deleteRequest); diff --git a/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java b/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java index 9e3de989bbf..d0202ba219d 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java +++ b/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java @@ -20,25 +20,30 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; +import java.util.*; + /** * */ public class SearchPhaseExecutionException extends ElasticsearchException { - private final String phaseName; private ShardSearchFailure[] shardFailures; public SearchPhaseExecutionException(String phaseName, String msg, ShardSearchFailure[] shardFailures) { - super(buildMessage(phaseName, msg, shardFailures)); + super(msg); this.phaseName = phaseName; this.shardFailures = shardFailures; } public SearchPhaseExecutionException(String phaseName, String msg, Throwable cause, ShardSearchFailure[] shardFailures) { - super(buildMessage(phaseName, msg, shardFailures), cause); + super(msg, cause); this.phaseName = phaseName; this.shardFailures = shardFailures; } @@ -60,10 +65,6 @@ public class SearchPhaseExecutionException extends ElasticsearchException { return status; } - public String phaseName() { - return phaseName; - } - public ShardSearchFailure[] shardFailures() { return shardFailures; } @@ -83,4 +84,90 @@ public class SearchPhaseExecutionException extends ElasticsearchException { } return sb.toString(); } + + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("phase", phaseName); + final boolean group = params.paramAsBoolean("group_shard_failures", true); // we group by default + builder.field("grouped", group); // notify that it's grouped + builder.field("failed_shards"); + builder.startArray(); + ShardSearchFailure[] failures = params.paramAsBoolean("group_shard_failures", true) ? groupBy(shardFailures) : shardFailures; + for (ShardSearchFailure failure : failures) { + builder.startObject(); + failure.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + super.innerToXContent(builder, params); + + } + + private ShardSearchFailure[] groupBy(ShardSearchFailure[] failures) { + List uniqueFailures = new ArrayList<>(); + Set reasons = new HashSet<>(); + for (ShardSearchFailure failure : failures) { + GroupBy reason = new GroupBy(failure.getCause()); + if (reasons.contains(reason) == false) { + reasons.add(reason); + uniqueFailures.add(failure); + } + } + return uniqueFailures.toArray(new ShardSearchFailure[0]); + + } + + @Override + public ElasticsearchException[] guessRootCauses() { + ShardSearchFailure[] failures = groupBy(shardFailures); + List rootCauses = new ArrayList<>(failures.length); + for (ShardSearchFailure failure : failures) { + ElasticsearchException[] guessRootCauses = ElasticsearchException.guessRootCauses(failure.getCause()); + rootCauses.addAll(Arrays.asList(guessRootCauses)); + } + return rootCauses.toArray(new ElasticsearchException[0]); + } + + @Override + public String toString() { + return buildMessage(phaseName, getMessage(), shardFailures); + } + + static class GroupBy { + final String reason; + final Index index; + final Class causeType; + + public GroupBy(Throwable t) { + if (t instanceof IndexException) { + index = ((IndexException) t).index(); + } else { + index = null; + } + reason = t.getMessage(); + causeType = t.getClass(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + GroupBy groupBy = (GroupBy) o; + + if (!causeType.equals(groupBy.causeType)) return false; + if (index != null ? !index.equals(groupBy.index) : groupBy.index != null) return false; + if (reason != null ? !reason.equals(groupBy.reason) : groupBy.reason != null) return false; + + return true; + } + + @Override + public int hashCode() { + int result = reason != null ? reason.hashCode() : 0; + result = 31 * result + (index != null ? index.hashCode() : 0); + result = 31 * result + causeType.hashCode(); + return result; + } + } } diff --git a/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 38bf5693c08..4a6e073e288 100644 --- a/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -25,24 +25,29 @@ import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchException; import org.elasticsearch.search.SearchShardTarget; import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; /** * Represents a failure to search on a specific shard. */ -public class ShardSearchFailure implements ShardOperationFailedException { +public class ShardSearchFailure implements ShardOperationFailedException, ToXContent { public static final ShardSearchFailure[] EMPTY_ARRAY = new ShardSearchFailure[0]; private SearchShardTarget shardTarget; private String reason; private RestStatus status; + private Throwable cause; private ShardSearchFailure() { @@ -59,12 +64,9 @@ public class ShardSearchFailure implements ShardOperationFailedException { } else if (shardTarget != null) { this.shardTarget = shardTarget; } - if (actual != null && actual instanceof ElasticsearchException) { - status = ((ElasticsearchException) actual).status(); - } else { - status = RestStatus.INTERNAL_SERVER_ERROR; - } + status = ExceptionsHelper.status(actual); this.reason = ExceptionsHelper.detailedMessage(t); + this.cause = actual; } public ShardSearchFailure(String reason, SearchShardTarget shardTarget) { @@ -138,6 +140,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { } reason = in.readString(); status = RestStatus.readFrom(in); + cause = in.readThrowable(); } @Override @@ -150,5 +153,26 @@ public class ShardSearchFailure implements ShardOperationFailedException { } out.writeString(reason); RestStatus.writeTo(out, status); + out.writeThrowable(cause); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("shard", shardId()); + builder.field("index", index()); + if (shardTarget != null) { + builder.field("node", shardTarget.nodeId()); + } + if (cause != null) { + builder.field("reason"); + builder.startObject(); + ElasticsearchException.toXContent(builder, params, cause); + builder.endObject(); + } + return builder; + } + + public Throwable getCause() { + return cause; } } diff --git a/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 3a7e90096c1..3be7b7d2aee 100644 --- a/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.support; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,30 +40,25 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile private int shardId; - private String reason; + private Throwable reason; private RestStatus status; private DefaultShardOperationFailedException() { - } public DefaultShardOperationFailedException(IndexShardException e) { this.index = e.shardId().index().name(); this.shardId = e.shardId().id(); - this.reason = detailedMessage(e); + this.reason = e; this.status = e.status(); } public DefaultShardOperationFailedException(String index, int shardId, Throwable t) { this.index = index; this.shardId = shardId; - this.reason = detailedMessage(t); - if (t != null && t instanceof ElasticsearchException) { - status = ((ElasticsearchException) t).status(); - } else { - status = RestStatus.INTERNAL_SERVER_ERROR; - } + this.reason = t; + status = ExceptionsHelper.status(t); } @Override @@ -77,7 +73,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile @Override public String reason() { - return this.reason; + return detailedMessage(reason); } @Override @@ -97,7 +93,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile index = in.readString(); } shardId = in.readVInt(); - reason = in.readString(); + reason = in.readThrowable(); status = RestStatus.readFrom(in); } @@ -110,12 +106,12 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile out.writeString(index); } out.writeVInt(shardId); - out.writeString(reason); + out.writeThrowable(reason); RestStatus.writeTo(out, status); } @Override public String toString() { - return "[" + index + "][" + shardId + "] failed, reason [" + reason + "]"; + return "[" + index + "][" + shardId + "] failed, reason [" + reason() + "]"; } } diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java index 53c4984bfc8..5d0cba209d0 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java @@ -103,14 +103,8 @@ public abstract class TransportIndexReplicationOperationAction entry : shardReplicaFailures.entrySet()) { - String reason = ExceptionsHelper.detailedMessage(entry.getValue()); RestStatus restStatus = ExceptionsHelper.status(entry.getValue()); failuresArray[slot++] = new ActionWriteResponse.ShardInfo.Failure( - shardId.getIndex(), shardId.getId(), entry.getKey(), reason, restStatus, false + shardId.getIndex(), shardId.getId(), entry.getKey(), entry.getValue(), restStatus, false ); } } else { diff --git a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java index 4235d38a21d..2107a9958da 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java @@ -60,11 +60,6 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { this.bytes = bigarrays.newByteArray(expectedSize); } - @Override - public boolean seekPositionSupported() { - return true; - } - @Override public long position() throws IOException { return count; diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index b05d463c58d..fea34cd94c3 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.io.stream; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -32,6 +33,7 @@ import org.joda.time.DateTime; import java.io.IOException; import java.io.InputStream; +import java.io.ObjectInputStream; import java.util.*; /** @@ -463,4 +465,13 @@ public abstract class StreamInput extends InputStream { return null; } } + + public T readThrowable() throws IOException { + try { + ObjectInputStream oin = new ObjectInputStream(this); + return (T) oin.readObject(); + } catch (ClassNotFoundException e) { + throw new IOException("failed to deserialize exception", e); + } + } } diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 208b98ea8a7..754e38ceadb 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.text.Text; import org.joda.time.ReadableInstant; import java.io.IOException; +import java.io.ObjectOutputStream; import java.io.OutputStream; import java.util.Date; import java.util.LinkedHashMap; @@ -50,10 +51,6 @@ public abstract class StreamOutput extends OutputStream { return this; } - public boolean seekPositionSupported() { - return false; - } - public long position() throws IOException { throw new UnsupportedOperationException(); } @@ -432,4 +429,10 @@ public abstract class StreamOutput extends OutputStream { writeBoolean(false); } } + + public void writeThrowable(Throwable throwable) throws IOException { + ObjectOutputStream out = new ObjectOutputStream(this); + out.writeObject(throwable); + out.flush(); + } } diff --git a/src/main/java/org/elasticsearch/index/IndexException.java b/src/main/java/org/elasticsearch/index/IndexException.java index a3d18cfd86e..0f100dcd4f0 100644 --- a/src/main/java/org/elasticsearch/index/IndexException.java +++ b/src/main/java/org/elasticsearch/index/IndexException.java @@ -20,6 +20,9 @@ package org.elasticsearch.index; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; /** * @@ -32,16 +35,25 @@ public class IndexException extends ElasticsearchException { this(index, msg, null); } - public IndexException(Index index, String msg, Throwable cause) { - this(index, true, msg, cause); - } - - protected IndexException(Index index, boolean withSpace, String msg, Throwable cause) { - super("[" + (index == null ? "_na" : index.name()) + "]" + (withSpace ? " " : "") + msg, cause); + protected IndexException(Index index, String msg, Throwable cause) { + super(msg, cause); this.index = index; } public Index index() { return index; } + + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (index != null) { + builder.field("index", index.getName()); + } + super.innerToXContent(builder, params); + } + + @Override + public String toString() { + return "[" + (index == null ? "_na" : index.name()) + "] " + getMessage(); + } } diff --git a/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java b/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java index 462b9104d03..6e0f1d1827c 100644 --- a/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java +++ b/src/main/java/org/elasticsearch/index/percolator/PercolatorException.java @@ -26,10 +26,6 @@ import org.elasticsearch.index.IndexException; */ public class PercolatorException extends IndexException { - public PercolatorException(Index index, String msg) { - super(index, msg); - } - public PercolatorException(Index index, String msg, Throwable cause) { super(index, msg, cause); } diff --git a/src/main/java/org/elasticsearch/index/query/QueryParsingException.java b/src/main/java/org/elasticsearch/index/query/QueryParsingException.java index 8c3a0fbccbe..5bf1407a107 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryParsingException.java +++ b/src/main/java/org/elasticsearch/index/query/QueryParsingException.java @@ -40,4 +40,5 @@ public class QueryParsingException extends IndexException { public RestStatus status() { return RestStatus.BAD_REQUEST; } + } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardException.java index e9217fda4cb..019b4d13bb4 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShardException.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShardException.java @@ -19,8 +19,11 @@ package org.elasticsearch.index.shard; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.IndexException; +import java.io.IOException; + /** * */ @@ -33,11 +36,24 @@ public class IndexShardException extends IndexException { } public IndexShardException(ShardId shardId, String msg, Throwable cause) { - super(shardId == null ? null : shardId.index(), false, "[" + (shardId == null ? "_na" : shardId.id()) + "] " + msg, cause); + super(shardId == null ? null : shardId.index(), msg, cause); this.shardId = shardId; } public ShardId shardId() { return shardId; } + + @Override + public String toString() { + return (shardId == null ? "_na" : shardId) + getMessage(); + } + + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (shardId != null) { + builder.field("shard", shardId.getId()); + } + super.innerToXContent(builder, params); + } } diff --git a/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java b/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java index aa7b559bb30..95d1c0f4d6d 100644 --- a/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java +++ b/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java @@ -27,10 +27,6 @@ import org.elasticsearch.index.IndexException; */ public class AliasFilterParsingException extends IndexException { - public AliasFilterParsingException(Index index, String name, String desc) { - super(index, "[" + name + "], " + desc); - } - public AliasFilterParsingException(Index index, String name, String desc, Throwable ex) { super(index, "[" + name + "], " + desc, ex); } diff --git a/src/main/java/org/elasticsearch/indices/IndexMissingException.java b/src/main/java/org/elasticsearch/indices/IndexMissingException.java index dbadef5a076..1bec1585a7c 100644 --- a/src/main/java/org/elasticsearch/indices/IndexMissingException.java +++ b/src/main/java/org/elasticsearch/indices/IndexMissingException.java @@ -29,7 +29,7 @@ import org.elasticsearch.rest.RestStatus; public class IndexMissingException extends IndexException { public IndexMissingException(Index index) { - super(index, "missing"); + super(index, "no such index"); } @Override diff --git a/src/main/java/org/elasticsearch/indices/TypeMissingException.java b/src/main/java/org/elasticsearch/indices/TypeMissingException.java index ae2830b9575..eb80c057a45 100644 --- a/src/main/java/org/elasticsearch/indices/TypeMissingException.java +++ b/src/main/java/org/elasticsearch/indices/TypeMissingException.java @@ -34,11 +34,6 @@ public class TypeMissingException extends IndexException { super(index, "type[" + Arrays.toString(types) + "] missing"); } - public TypeMissingException(Index index, String[] types, String message) { - super(index, "type[" + Arrays.toString(types) + "] missing: " + message); - } - - @Override public RestStatus status() { return RestStatus.NOT_FOUND; diff --git a/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/src/main/java/org/elasticsearch/rest/BytesRestResponse.java index b72008cf63e..693188307e3 100644 --- a/src/main/java/org/elasticsearch/rest/BytesRestResponse.java +++ b/src/main/java/org/elasticsearch/rest/BytesRestResponse.java @@ -20,13 +20,16 @@ package org.elasticsearch.rest; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; -import static org.elasticsearch.ExceptionsHelper.detailedMessage; public class BytesRestResponse extends RestResponse { @@ -78,7 +81,7 @@ public class BytesRestResponse extends RestResponse { } public BytesRestResponse(RestChannel channel, Throwable t) throws IOException { - this(channel, ((t instanceof ElasticsearchException) ? ((ElasticsearchException) t).status() : RestStatus.INTERNAL_SERVER_ERROR), t); + this(channel, ExceptionsHelper.status(t), t); } public BytesRestResponse(RestChannel channel, RestStatus status, Throwable t) throws IOException { @@ -114,9 +117,22 @@ public class BytesRestResponse extends RestResponse { private static XContentBuilder convert(RestChannel channel, RestStatus status, Throwable t) throws IOException { XContentBuilder builder = channel.newBuilder().startObject(); if (t == null) { - builder.field("error", "Unknown"); + builder.field("error", "unknown"); } else if (channel.detailedErrorsEnabled()) { - builder.field("error", detailedMessage(t)); + builder.field("error"); + builder.startObject(); + final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t); + builder.field("root_cause"); + builder.startArray(); + for (ElasticsearchException rootCause : rootCauses){ + builder.startObject(); + rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), channel.request())); + builder.endObject(); + } + builder.endArray(); + + ElasticsearchException.toXContent(builder, channel.request(), t); + builder.endObject(); if (channel.request().paramAsBoolean("error_trace", false)) { buildErrorTrace(t, builder); } @@ -128,6 +144,7 @@ public class BytesRestResponse extends RestResponse { return builder; } + private static void buildErrorTrace(Throwable t, XContentBuilder builder) throws IOException { builder.startObject("error_trace"); boolean first = true; diff --git a/src/main/java/org/elasticsearch/search/SearchContextException.java b/src/main/java/org/elasticsearch/search/SearchContextException.java index 39bea64188f..599515830e1 100644 --- a/src/main/java/org/elasticsearch/search/SearchContextException.java +++ b/src/main/java/org/elasticsearch/search/SearchContextException.java @@ -35,19 +35,6 @@ public class SearchContextException extends SearchException { } private static String buildMessage(SearchContext context, String msg) { - StringBuilder sb = new StringBuilder(); - sb.append('[').append(context.shardTarget().index()).append("][").append(context.shardTarget().shardId()).append("]: "); - if (context.parsedQuery() != null) { - try { - sb.append("query[").append(context.parsedQuery().query()).append("],"); - } catch (Exception e) { - sb.append("query[_failed_to_string_],"); - } - } - sb.append("from[").append(context.from()).append("],size[").append(context.size()).append("]"); - if (context.sort() != null) { - sb.append(",sort[").append(context.sort()).append("]"); - } - return sb.append(": ").append(msg).toString(); + return msg; } } diff --git a/src/main/java/org/elasticsearch/search/SearchParseException.java b/src/main/java/org/elasticsearch/search/SearchParseException.java index 5f528f2d57a..923532373a5 100644 --- a/src/main/java/org/elasticsearch/search/SearchParseException.java +++ b/src/main/java/org/elasticsearch/search/SearchParseException.java @@ -28,11 +28,11 @@ import org.elasticsearch.search.internal.SearchContext; public class SearchParseException extends SearchContextException { public SearchParseException(SearchContext context, String msg) { - super(context, "Parse Failure [" + msg + "]"); + super(context, msg); } public SearchParseException(SearchContext context, String msg, Throwable cause) { - super(context, "Parse Failure [" + msg + "]", cause); + super(context, msg, cause); } @Override diff --git a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index 2c605f889aa..26447471c2c 100644 --- a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -19,13 +19,27 @@ package org.elasticsearch; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.io.BytesStream; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.transport.RemoteTransportException; import org.junit.Test; +import java.io.EOFException; +import java.io.FileNotFoundException; +import java.io.IOException; + import static org.hamcrest.Matchers.equalTo; public class ElasticsearchExceptionTests extends ElasticsearchTestCase { @@ -43,5 +57,203 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { exception = new RemoteTransportException("test", new IndexMissingException(new Index("test"))); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); + + exception = new RemoteTransportException("test", new IllegalArgumentException("foobar")); + assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); + + exception = new RemoteTransportException("test", new IllegalStateException("foobar")); + assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); } + + public void testGuessRootCause() { + { + ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new ElasticsearchIllegalArgumentException("index is closed", new RuntimeException("foobar")))); + ElasticsearchException[] rootCauses = exception.guessRootCauses(); + assertEquals(rootCauses.length, 1); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "illegal_argument_exception"); + assertEquals(rootCauses[0].getMessage(), "index is closed"); + ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 2)); + SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1}); + if (randomBoolean()) { + rootCauses = (randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex).guessRootCauses(); + } else { + rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex); + } + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "query_parsing_exception"); + assertEquals(rootCauses[0].getMessage(), "foobar"); + + ElasticsearchException oneLevel = new ElasticsearchException("foo", new RuntimeException("foobar")); + rootCauses = oneLevel.guessRootCauses(); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "exception"); + assertEquals(rootCauses[0].getMessage(), "foo"); + } + { + ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 1)); + ShardSearchFailure failure2 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 2)); + SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); + final ElasticsearchException[] rootCauses = ex.guessRootCauses(); + assertEquals(rootCauses.length, 2); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "query_parsing_exception"); + assertEquals(rootCauses[0].getMessage(), "foobar"); + assertEquals(((QueryParsingException)rootCauses[0]).index().name(), "foo"); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), "query_parsing_exception"); + assertEquals(rootCauses[1].getMessage(), "foobar"); + assertEquals(((QueryParsingException)rootCauses[1]).index().name(), "foo1"); + + } + + } + + public void testDeduplicate() throws IOException { + { + ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 2)); + SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1}); + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + ex.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + String expected = "{\n" + + " \"type\" : \"search_phase_execution_exception\",\n" + + " \"reason\" : \"all shards failed\",\n" + + " \"phase\" : \"search\",\n" + + " \"grouped\" : true,\n" + + " \"failed_shards\" : [ {\n" + + " \"shard\" : 1,\n" + + " \"index\" : \"foo\",\n" + + " \"node\" : \"node_1\",\n" + + " \"reason\" : {\n" + + " \"type\" : \"query_parsing_exception\",\n" + + " \"reason\" : \"foobar\",\n" + + " \"index\" : \"foo\"\n" + + " }\n" + + " } ]\n" + + "}"; + assertEquals(expected, builder.string()); + } + { + ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 1)); + ShardSearchFailure failure2 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 2)); + SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + ex.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + String expected = "{\n" + + " \"type\" : \"search_phase_execution_exception\",\n" + + " \"reason\" : \"all shards failed\",\n" + + " \"phase\" : \"search\",\n" + + " \"grouped\" : true,\n" + + " \"failed_shards\" : [ {\n" + + " \"shard\" : 1,\n" + + " \"index\" : \"foo\",\n" + + " \"node\" : \"node_1\",\n" + + " \"reason\" : {\n" + + " \"type\" : \"query_parsing_exception\",\n" + + " \"reason\" : \"foobar\",\n" + + " \"index\" : \"foo\"\n" + + " }\n" + + " }, {\n" + + " \"shard\" : 1,\n" + + " \"index\" : \"foo1\",\n" + + " \"node\" : \"node_1\",\n" + + " \"reason\" : {\n" + + " \"type\" : \"query_parsing_exception\",\n" + + " \"reason\" : \"foobar\",\n" + + " \"index\" : \"foo1\"\n" + + " }\n" + + " } ]\n" + + "}"; + assertEquals(expected, builder.string()); + } + } + + public void testGetRootCause() { + Exception root = new RuntimeException("foobar"); + ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new ElasticsearchIllegalArgumentException("index is closed", root))); + assertEquals(root, exception.getRootCause()); + assertTrue(exception.contains(RuntimeException.class)); + assertFalse(exception.contains(EOFException.class)); + } + + public void testToString() { + ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new ElasticsearchIllegalArgumentException("index is closed", new RuntimeException("foobar")))); + assertEquals("ElasticsearchException[foo]; nested: ElasticsearchException[bar]; nested: ElasticsearchIllegalArgumentException[index is closed]; nested: RuntimeException[foobar];", exception.toString()); + } + + public void testToXContent() throws IOException { + { + ElasticsearchException ex = new ElasticsearchException("foo", new ElasticsearchException("bar", new ElasticsearchIllegalArgumentException("index is closed", new RuntimeException("foobar")))); + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + ex.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + String expected = "{\n" + + " \"type\" : \"exception\",\n" + + " \"reason\" : \"foo\",\n" + + " \"caused_by\" : {\n" + + " \"type\" : \"exception\",\n" + + " \"reason\" : \"bar\",\n" + + " \"caused_by\" : {\n" + + " \"type\" : \"illegal_argument_exception\",\n" + + " \"reason\" : \"index is closed\",\n" + + " \"caused_by\" : {\n" + + " \"type\" : \"runtime_exception\",\n" + + " \"reason\" : \"foobar\"\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + assertEquals(expected, builder.string()); + } + + { + Exception ex = new FileNotFoundException("foo not found"); + if (randomBoolean()) { + // just a wrapper which is omitted + ex = new RemoteTransportException("foobar", ex); + } + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex); + builder.endObject(); + + String expected = "{\n" + + " \"type\" : \"file_not_found_exception\",\n" + + " \"reason\" : \"foo not found\"\n" + + "}"; + assertEquals(expected, builder.string()); + } + + { // test equivalence + ElasticsearchException ex = new RemoteTransportException("foobar", new FileNotFoundException("foo not found")); + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex); + builder.endObject(); + + XContentBuilder otherBuilder = XContentFactory.jsonBuilder().prettyPrint(); + + otherBuilder.startObject(); + ex.toXContent(otherBuilder, ToXContent.EMPTY_PARAMS); + otherBuilder.endObject(); + assertEquals(otherBuilder.string(), builder.string()); + } + } + + public void testSerializeElasticsearchException() throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + QueryParsingException ex = new QueryParsingException(new Index("foo"), "foobar"); + out.writeThrowable(ex); + + BytesStreamInput in = new BytesStreamInput(out.bytes()); + QueryParsingException e = in.readThrowable(); + assertEquals(ex.index(), e.index()); + assertEquals(ex.getMessage(), e.getMessage()); + } + } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java b/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java index 6b9379fe052..5140df378dd 100644 --- a/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java +++ b/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java @@ -48,7 +48,7 @@ public class BulkProcessorClusterSettingsTests extends ElasticsearchIntegrationT assertEquals(3, responses.length); assertFalse("Operation on existing index should succeed", responses[0].isFailed()); assertTrue("Missing index should have been flagged", responses[1].isFailed()); - assertEquals("IndexMissingException[[wontwork] missing]", responses[1].getFailureMessage()); + assertEquals("[wontwork] no such index", responses[1].getFailureMessage()); assertFalse("Operation on existing index should succeed", responses[2].isFailed()); } } diff --git a/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index e63927c7933..3349b33ed60 100644 --- a/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -441,7 +441,8 @@ public class MetaDataTests extends ElasticsearchTestCase { md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foofoo-closed", "foofoobar"); fail(); } catch(IndexClosedException e) { - assertThat(e.getMessage(), containsString("[foofoo-closed] closed")); + assertThat(e.getMessage(), equalTo("closed")); + assertEquals(e.index().getName(), "foofoo-closed"); } String[] results = md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "barbaz"); diff --git a/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java b/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java index ffc8db1d36b..877714681c6 100644 --- a/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java +++ b/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java @@ -120,7 +120,7 @@ public class DeleteByQueryTests extends ElasticsearchIntegrationTest { assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("test").getShardInfo().getFailures().length, equalTo(twitter.numPrimaries)); for (ActionWriteResponse.ShardInfo.Failure failure : response.getIndices().get("test").getShardInfo().getFailures()) { - assertThat(failure.reason(), containsString("[test] [has_child] query and filter unsupported in delete_by_query api")); + assertThat(failure.reason(), containsString("[has_child] query and filter unsupported in delete_by_query api")); assertThat(failure.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(failure.shardId(), greaterThan(-1)); } diff --git a/src/test/java/org/elasticsearch/document/BulkTests.java b/src/test/java/org/elasticsearch/document/BulkTests.java index f49914606dd..380828372bb 100644 --- a/src/test/java/org/elasticsearch/document/BulkTests.java +++ b/src/test/java/org/elasticsearch/document/BulkTests.java @@ -133,7 +133,7 @@ public class BulkTests extends ElasticsearchIntegrationTest { assertThat(bulkResponse.getItems()[1].getResponse(), nullValue()); assertThat(bulkResponse.getItems()[1].getFailure().getIndex(), equalTo("test")); assertThat(bulkResponse.getItems()[1].getFailure().getId(), equalTo("7")); - assertThat(bulkResponse.getItems()[1].getFailure().getMessage(), containsString("DocumentMissingException")); + assertThat(bulkResponse.getItems()[1].getFailure().getMessage(), containsString("document missing")); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("2")); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getIndex(), equalTo("test")); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l)); @@ -173,7 +173,7 @@ public class BulkTests extends ElasticsearchIntegrationTest { .add(client().prepareUpdate("test", "type", "2").setDoc("field", "2")) .add(client().prepareUpdate("test", "type", "1").setVersion(2l).setDoc("field", "3")).get(); - assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version")); + assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("version conflict")); assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l)); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l)); @@ -194,7 +194,7 @@ public class BulkTests extends ElasticsearchIntegrationTest { .add(client().prepareUpdate("test", "type", "e1").setDoc("field", "3").setVersion(20).setVersionType(VersionType.FORCE)) .add(client().prepareUpdate("test", "type", "e1").setDoc("field", "3").setVersion(20).setVersionType(VersionType.INTERNAL)).get(); - assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version")); + assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("version conflict")); assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(20l)); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(21l)); } @@ -325,7 +325,7 @@ public class BulkTests extends ElasticsearchIntegrationTest { int id = i + (numDocs / 2); if (i >= (numDocs / 2)) { assertThat(response.getItems()[i].getFailure().getId(), equalTo(Integer.toString(id))); - assertThat(response.getItems()[i].getFailure().getMessage(), containsString("DocumentMissingException")); + assertThat(response.getItems()[i].getFailure().getMessage(), containsString("document missing")); } else { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(id))); assertThat(response.getItems()[i].getVersion(), equalTo(3l)); diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index 6e928ca4f81..b258420a245 100644 --- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -1191,7 +1191,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { queryParser.parse(query).query(); fail(); } catch (QueryParsingException ex) { - assertThat(ex.getMessage(), equalTo("[test] [terms] query does not support multiple fields")); + assertThat(ex.getMessage(), equalTo("[terms] query does not support multiple fields")); } } @@ -1207,7 +1207,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { queryParser.parse(query).query(); fail(); } catch (QueryParsingException ex) { - assertThat(ex.getMessage(), equalTo("[test] [terms] filter does not support multiple fields")); + assertThat(ex.getMessage(), equalTo("[terms] filter does not support multiple fields")); } } diff --git a/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java b/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java index 311449aaece..f09562f690e 100644 --- a/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java +++ b/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java @@ -241,8 +241,8 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { fail("should have thrown an exception"); } catch (Exception e) { String errMsg = "[fielddata] Data too large, data for [test] would be larger than limit of [10/10b]"; - assertThat("Exception: " + ExceptionsHelper.unwrapCause(e) + " should contain a CircuitBreakingException", - ExceptionsHelper.unwrapCause(e).getMessage().contains(errMsg), equalTo(true)); + assertThat("Exception: " + e.toString() + " should contain a CircuitBreakingException", + e.toString().contains(errMsg), equalTo(true)); } assertFailures(client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC), @@ -263,8 +263,8 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { fail("should have thrown an exception"); } catch (Exception e) { String errMsg = "[parent] Data too large, data for [test] would be larger than limit of [15/15b]"; - assertThat("Exception: " + ExceptionsHelper.unwrapCause(e) + " should contain a CircuitBreakingException", - ExceptionsHelper.unwrapCause(e).getMessage().contains(errMsg), equalTo(true)); + assertThat("Exception: " +e.toString() + " should contain a CircuitBreakingException", + e.toString().contains(errMsg), equalTo(true)); } } @@ -297,8 +297,8 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { fail("aggregation should have tripped the breaker"); } catch (Exception e) { String errMsg = "CircuitBreakingException[[request] Data too large, data for [] would be larger than limit of [10/10b]]"; - assertThat("Exception: " + ExceptionsHelper.unwrapCause(e) + " should contain a CircuitBreakingException", - ExceptionsHelper.unwrapCause(e).getMessage().contains(errMsg), equalTo(true)); + assertThat("Exception: " + e.toString() + " should contain a CircuitBreakingException", + e.toString().contains(errMsg), equalTo(true)); } } diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index 39a54516e9b..e02c2bef8b4 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -495,7 +495,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { } catch(ElasticsearchIllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); assertThat(e.getCause(), instanceOf(QueryParsingException.class)); - assertThat(e.getCause().getMessage(), equalTo("[test] No filter registered for [invalid]")); + assertThat(e.getCause().getMessage(), equalTo("No filter registered for [invalid]")); } } @@ -530,7 +530,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { createIndex("test"); fail("index creation should have failed due to alias with existing index name in mathching index template"); } catch(InvalidAliasNameException e) { - assertThat(e.getMessage(), equalTo("[test] Invalid alias name [index], an index exists with the same name as the alias")); + assertThat(e.getMessage(), equalTo("Invalid alias name [index], an index exists with the same name as the alias")); } } diff --git a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java index 9ee0ecba47f..fc95131d250 100644 --- a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java +++ b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java @@ -707,7 +707,7 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { .execute().actionGet(); Assert.fail("SearchPhaseExecutionException should have been thrown"); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("type [string] doesn't support mode [SUM]")); + assertThat(e.toString(), containsString("type [string] doesn't support mode [SUM]")); } } diff --git a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index 989ea7194fc..784015e9db0 100644 --- a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -20,11 +20,18 @@ package org.elasticsearch.rest; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.transport.RemoteTransportException; import org.junit.Test; import java.io.FileNotFoundException; +import java.io.IOException; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -70,8 +77,8 @@ public class BytesRestResponseTests extends ElasticsearchTestCase { Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); BytesRestResponse response = new BytesRestResponse(channel, t); String text = response.content().toUtf8(); - assertThat(text, containsString("ElasticsearchException[an error occurred reading data]")); - assertThat(text, containsString("FileNotFoundException[/foo/bar]")); + assertThat(text, containsString("{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}")); + assertThat(text, containsString("{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}")); } @Test @@ -97,11 +104,21 @@ public class BytesRestResponseTests extends ElasticsearchTestCase { Throwable t = new Throwable("an error occurred reading data", new FileNotFoundException("/foo/bar")); BytesRestResponse response = new BytesRestResponse(channel, t); String text = response.content().toUtf8(); - assertThat(text, containsString("\"error\":\"Throwable[an error occurred reading data]")); - assertThat(text, containsString("FileNotFoundException[/foo/bar]")); + assertThat(text, containsString("\"type\":\"throwable\",\"reason\":\"an error occurred reading data\"")); + assertThat(text, containsString("{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}")); assertThat(text, containsString("\"error_trace\":{\"message\":\"an error occurred reading data\"")); } + public void testGuessRootCause() throws IOException { + RestRequest request = new FakeRestRequest(); + RestChannel channel = new DetailedExceptionRestChannel(request); + + Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); + BytesRestResponse response = new BytesRestResponse(channel, t); + String text = response.content().toUtf8(); + assertThat(text, containsString("{\"root_cause\":[{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}]")); + } + @Test public void testNullThrowable() throws Exception { RestRequest request = new FakeRestRequest(); @@ -109,10 +126,47 @@ public class BytesRestResponseTests extends ElasticsearchTestCase { BytesRestResponse response = new BytesRestResponse(channel, null); String text = response.content().toUtf8(); - assertThat(text, containsString("\"error\":\"Unknown\"")); + assertThat(text, containsString("\"error\":\"unknown\"")); assertThat(text, not(containsString("error_trace"))); } + @Test + public void testConvert() throws IOException { + RestRequest request = new FakeRestRequest(); + request.params().put("pretty", "true"); + RestChannel channel = new DetailedExceptionRestChannel(request); + ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 2)); + SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] {failure, failure1}); + BytesRestResponse response = new BytesRestResponse(channel, new RemoteTransportException("foo", ex)); + String text = response.content().toUtf8(); + String expected = "{\n" + + " \"error\" : {\n" + + " \"root_cause\" : [ {\n" + + " \"type\" : \"query_parsing_exception\",\n" + + " \"reason\" : \"foobar\",\n" + + " \"index\" : \"foo\"\n" + + " } ],\n" + + " \"type\" : \"search_phase_execution_exception\",\n" + + " \"reason\" : \"all shards failed\",\n" + + " \"phase\" : \"search\",\n" + + " \"grouped\" : true,\n" + + " \"failed_shards\" : [ {\n" + + " \"shard\" : 1,\n" + + " \"index\" : \"foo\",\n" + + " \"node\" : \"node_1\",\n" + + " \"reason\" : {\n" + + " \"type\" : \"query_parsing_exception\",\n" + + " \"reason\" : \"foobar\",\n" + + " \"index\" : \"foo\"\n" + + " }\n" + + " } ]\n" + + " },\n" + + " \"status\" : 400\n" + + "}"; + assertEquals(expected.trim(), text.trim()); + } + private static class ExceptionWithHeaders extends ElasticsearchException.WithRestHeaders { ExceptionWithHeaders() { diff --git a/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java b/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java index 8e521a6f392..c575d9152a1 100644 --- a/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java +++ b/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java @@ -153,8 +153,7 @@ public class GroovySandboxScriptTests extends ElasticsearchIntegrationTest { "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); fail("script: " + script + " failed to be caught be the sandbox!"); } catch (SearchPhaseExecutionException e) { - String msg = ExceptionsHelper.detailedMessage(ExceptionsHelper.unwrapCause(e)); - assertThat("script failed, but with incorrect message: " + msg, msg.contains(failMessage), equalTo(true)); + assertThat("script failed, but with incorrect message: " + e.toString(), e.toString().contains(failMessage), equalTo(true)); } } } diff --git a/src/test/java/org/elasticsearch/script/GroovyScriptTests.java b/src/test/java/org/elasticsearch/script/GroovyScriptTests.java index 801f4b36e40..657d2bc361e 100644 --- a/src/test/java/org/elasticsearch/script/GroovyScriptTests.java +++ b/src/test/java/org/elasticsearch/script/GroovyScriptTests.java @@ -76,12 +76,12 @@ public class GroovyScriptTests extends ElasticsearchIntegrationTest { client().prepareSearch("test").setQuery(constantScoreQuery(scriptFilter("1 == not_found").lang(GroovyScriptEngineService.NAME))).get(); fail("should have thrown an exception"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should not contained NotSerializableTransportException", - ExceptionsHelper.detailedMessage(e).contains("NotSerializableTransportException"), equalTo(false)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained GroovyScriptExecutionException", - ExceptionsHelper.detailedMessage(e).contains("GroovyScriptExecutionException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained not_found", - ExceptionsHelper.detailedMessage(e).contains("No such property: not_found"), equalTo(true)); + assertThat(e.toString()+ "should not contained NotSerializableTransportException", + e.toString().contains("NotSerializableTransportException"), equalTo(false)); + assertThat(e.toString()+ "should have contained GroovyScriptExecutionException", + e.toString().contains("GroovyScriptExecutionException"), equalTo(true)); + assertThat(e.toString()+ "should have contained not_found", + e.toString().contains("No such property: not_found"), equalTo(true)); } try { @@ -89,12 +89,12 @@ public class GroovyScriptTests extends ElasticsearchIntegrationTest { scriptFilter("assert false").lang("groovy"))).get(); fail("should have thrown an exception"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should not contained NotSerializableTransportException", - ExceptionsHelper.detailedMessage(e).contains("NotSerializableTransportException"), equalTo(false)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained GroovyScriptExecutionException", - ExceptionsHelper.detailedMessage(e).contains("GroovyScriptExecutionException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained an assert error", - ExceptionsHelper.detailedMessage(e).contains("PowerAssertionError[assert false"), equalTo(true)); + assertThat(e.toString()+ "should not contained NotSerializableTransportException", + e.toString().contains("NotSerializableTransportException"), equalTo(false)); + assertThat(e.toString()+ "should have contained GroovyScriptExecutionException", + e.toString().contains("GroovyScriptExecutionException"), equalTo(true)); + assertThat(e.toString()+ "should have contained an assert error", + e.toString().contains("PowerAssertionError[assert false"), equalTo(true)); } } diff --git a/src/test/java/org/elasticsearch/script/IndexLookupTests.java b/src/test/java/org/elasticsearch/script/IndexLookupTests.java index 85940106a35..a9a61555b3a 100644 --- a/src/test/java/org/elasticsearch/script/IndexLookupTests.java +++ b/src/test/java/org/elasticsearch/script/IndexLookupTests.java @@ -178,8 +178,8 @@ public class IndexLookupTests extends ElasticsearchIntegrationTest { client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet(); } catch (SearchPhaseExecutionException e) { assertThat( - "got: " + e.getDetailedMessage(), - e.getDetailedMessage() + "got: " + e.toString(), + e.toString() .indexOf( "You must call get with all required flags! Instead of _index['int_payload_field'].get('b', _FREQUENCIES) and _index['int_payload_field'].get('b', _POSITIONS) call _index['int_payload_field'].get('b', _FREQUENCIES | _POSITIONS) once]"), Matchers.greaterThan(-1)); diff --git a/src/test/java/org/elasticsearch/script/IndexedScriptTests.java b/src/test/java/org/elasticsearch/script/IndexedScriptTests.java index ac44e4d6dbc..1064bf464c7 100644 --- a/src/test/java/org/elasticsearch/script/IndexedScriptTests.java +++ b/src/test/java/org/elasticsearch/script/IndexedScriptTests.java @@ -158,20 +158,20 @@ public class IndexedScriptTests extends ElasticsearchIntegrationTest { fail("update script should have been rejected"); } catch(Exception e) { assertThat(e.getMessage(), containsString("failed to execute script")); - assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [indexed], operation [update] and lang [expression] are disabled")); + assertThat(e.toString(), containsString("scripts of type [indexed], operation [update] and lang [expression] are disabled")); } try { String query = "{ \"script_fields\" : { \"test1\" : { \"script_id\" : \"script1\", \"lang\":\"expression\" }}}"; client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get(); fail("search script should have been rejected"); } catch(Exception e) { - assertThat(e.getMessage(), containsString("scripts of type [indexed], operation [search] and lang [expression] are disabled")); + assertThat(e.toString(), containsString("scripts of type [indexed], operation [search] and lang [expression] are disabled")); } try { String source = "{\"aggs\": {\"test\": { \"terms\" : { \"script_id\":\"script1\", \"script_lang\":\"expression\" } } } }"; client().prepareSearch("test").setSource(source).get(); } catch(Exception e) { - assertThat(e.getMessage(), containsString("scripts of type [indexed], operation [aggs] and lang [expression] are disabled")); + assertThat(e.toString(), containsString("scripts of type [indexed], operation [aggs] and lang [expression] are disabled")); } } } diff --git a/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java b/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java index 78a28520d4a..88471f712b4 100644 --- a/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java +++ b/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java @@ -107,7 +107,7 @@ public class OnDiskScriptTests extends ElasticsearchIntegrationTest { client().prepareSearch("test").setSource(source).get(); fail("aggs script should have been rejected"); } catch(Exception e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [file], operation [aggs] and lang [expression] are disabled")); + assertThat(e.toString(), containsString("scripts of type [file], operation [aggs] and lang [expression] are disabled")); } String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\", \"lang\":\"expression\" }}, size:1}"; @@ -128,21 +128,21 @@ public class OnDiskScriptTests extends ElasticsearchIntegrationTest { client().prepareSearch("test").setSource(source).get(); fail("aggs script should have been rejected"); } catch(Exception e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [file], operation [aggs] and lang [mustache] are disabled")); + assertThat(e.toString(), containsString("scripts of type [file], operation [aggs] and lang [mustache] are disabled")); } String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\", \"lang\":\"mustache\" }}, size:1}"; try { client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get(); fail("search script should have been rejected"); } catch(Exception e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [file], operation [search] and lang [mustache] are disabled")); + assertThat(e.toString(), containsString("scripts of type [file], operation [search] and lang [mustache] are disabled")); } try { client().prepareUpdate("test", "scriptTest", "1").setScript("script1", ScriptService.ScriptType.FILE).setScriptLang(MustacheScriptEngineService.NAME).get(); fail("update script should have been rejected"); } catch(Exception e) { assertThat(e.getMessage(), containsString("failed to execute script")); - assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [file], operation [update] and lang [mustache] are disabled")); + assertThat(e.getCause().toString(), containsString("scripts of type [file], operation [update] and lang [mustache] are disabled")); } } } diff --git a/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java b/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java index 0a759c07b98..0799fa0ddbd 100644 --- a/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java +++ b/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java @@ -51,7 +51,7 @@ public class SandboxDisabledTests extends ElasticsearchIntegrationTest { "\"sort\":{\"_script\": {\"script\": \"doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); fail("shards should fail because the sandbox and dynamic scripting are disabled"); } catch (Exception e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [inline], operation [search] and lang [groovy] are disabled")); + assertThat(e.toString(), containsString("scripts of type [inline], operation [search] and lang [groovy] are disabled")); } } } diff --git a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java index 9ef64d82477..8ee8d1dcbf1 100644 --- a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java +++ b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java @@ -111,10 +111,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("doc['bogus'].value").get(); fail("Expected missing field to cause failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained missing field error", - ExceptionsHelper.detailedMessage(e).contains("does not exist in mappings"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained missing field error", + e.toString().contains("does not exist in mappings"), equalTo(true)); } } @@ -141,10 +141,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("garbage%@#%@").get(); fail("Expected expression compilation failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained compilation failure", - ExceptionsHelper.detailedMessage(e).contains("Failed to parse expression"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained compilation failure", + e.toString().contains("Failed to parse expression"), equalTo(true)); } } @@ -154,10 +154,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("a", "a", "astring").get(); fail("Expected string parameter to cause failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained non-numeric parameter error", - ExceptionsHelper.detailedMessage(e).contains("must be a numeric type"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained non-numeric parameter error", + e.toString().contains("must be a numeric type"), equalTo(true)); } } @@ -167,10 +167,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("doc['text'].value").get(); fail("Expected text field to cause execution failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained non-numeric field error", - ExceptionsHelper.detailedMessage(e).contains("must be numeric"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained non-numeric field error", + e.toString().contains("must be numeric"), equalTo(true)); } } @@ -180,10 +180,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("bogus").get(); fail("Expected bogus variable to cause execution failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained unknown variable error", - ExceptionsHelper.detailedMessage(e).contains("Unknown variable"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained unknown variable error", + e.toString().contains("Unknown variable"), equalTo(true)); } } @@ -193,10 +193,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("doc").get(); fail("Expected doc variable without field to cause execution failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained a missing specific field error", - ExceptionsHelper.detailedMessage(e).contains("must be used with a specific field"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained a missing specific field error", + e.toString().contains("must be used with a specific field"), equalTo(true)); } } @@ -206,10 +206,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("doc['foo'].bogus").get(); fail("Expected bogus field member to cause execution failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained field member error", - ExceptionsHelper.detailedMessage(e).contains("Invalid member for field"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained field member error", + e.toString().contains("Invalid member for field"), equalTo(true)); } } @@ -260,7 +260,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed message = rsp.getShardFailures()[0].reason(); } catch (SearchPhaseExecutionException e) { - message = ExceptionsHelper.detailedMessage(e); + message = e.toString(); } assertThat(message + "should have contained ExpressionScriptExecutionException", message.contains("ExpressionScriptExecutionException"), equalTo(true)); diff --git a/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java b/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java index e7b1d8fbc59..640ac7ffb81 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java @@ -57,7 +57,7 @@ public class AggregationsIntegrationTests extends ElasticsearchIntegrationTest { client().prepareSearch("index").setSearchType(SearchType.SCAN).setScroll(new TimeValue(500)).addAggregation(terms("f").field("f")).get(); fail(); } catch (SearchPhaseExecutionException e) { - assertTrue(e.getMessage(), e.getMessage().contains("aggregations are not supported with search_type=scan")); + assertTrue(e.toString(), e.toString().contains("aggregations are not supported with search_type=scan")); } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java index b34d09bfc1a..b9f7e3e511d 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java @@ -1314,7 +1314,7 @@ public class DateHistogramTests extends ElasticsearchIntegrationTest { .actionGet(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("IllegalArgumentException")); + assertThat(e.toString(), containsString("IllegalArgumentException")); } } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java index 58a50e8938a..9a6c7c0f9f1 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java @@ -1017,7 +1017,7 @@ public class HistogramTests extends ElasticsearchIntegrationTest { .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(-1).minDocCount(0)).execute().actionGet(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("Missing required field [interval]")); + assertThat(e.toString(), containsString("Missing required field [interval]")); } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java index ac28cd4c8e4..d437eb34915 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java @@ -365,7 +365,7 @@ public class NestedTests extends ElasticsearchIntegrationTest { .execute().actionGet(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("[nested] nested path [incorrect] is not nested")); + assertThat(e.toString(), containsString("[nested] nested path [incorrect] is not nested")); } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java index 0322b6cc8a7..70bedbdb40f 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java @@ -520,7 +520,7 @@ public class TopHitsTests extends ElasticsearchIntegrationTest { ).get(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("No mapping found for [xyz] in order to sort on")); + assertThat(e.toString(), containsString("No mapping found for [xyz] in order to sort on")); } } @@ -553,7 +553,7 @@ public class TopHitsTests extends ElasticsearchIntegrationTest { .get(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("Aggregator [top_tags_hits] of type [top_hits] cannot accept sub-aggregations")); + assertThat(e.toString(), containsString("Aggregator [top_tags_hits] of type [top_hits] cannot accept sub-aggregations")); } } diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index 00aad548d64..a536f64260d 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -108,14 +108,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFa import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.*; /** * @@ -1612,13 +1605,13 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "child1", "c1").setParent("p1").setSource("c_field", "blue").get(); fail(); } catch (ElasticsearchIllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured")); + assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured")); } try { client().prepareIndex("test", "child2", "c2").setParent("p1").setSource("c_field", "blue").get(); fail(); } catch (ElasticsearchIllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured")); + assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured")); } refresh(); @@ -1645,7 +1638,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { .endObject().endObject()).get(); fail(); } catch (MergeMappingException e) { - assertThat(e.getMessage(), equalTo("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]]}")); + assertThat(e.toString(), containsString("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]]}")); } } @@ -2332,7 +2325,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { response = minMaxQuery("none", 3, 2, cutoff); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'")); + assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'")); } // Score mode = SUM @@ -2412,7 +2405,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { response = minMaxQuery("sum", 3, 2, cutoff); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'")); + assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'")); } // Score mode = MAX @@ -2492,7 +2485,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { response = minMaxQuery("max", 3, 2, cutoff); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'")); + assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'")); } // Score mode = AVG @@ -2572,7 +2565,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { response = minMaxQuery("avg", 3, 2, cutoff); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'")); + assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'")); } // HasChildFilter @@ -2652,7 +2645,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { response = minMaxFilter(3, 2, cutoff); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'")); + assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'")); } } diff --git a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java index 59ff93d27d8..315442a0022 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java @@ -874,8 +874,8 @@ public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest { searchSource().query(query))).actionGet(); fail("Should fail with SearchPhaseExecutionException"); } catch (SearchPhaseExecutionException failure) { - assertTrue(failure.getMessage().contains("SearchParseException")); - assertFalse(failure.getMessage().contains("NullPointerException")); + assertTrue(failure.toString().contains("SearchParseException")); + assertFalse(failure.toString().contains("NullPointerException")); } query = "{\n" + @@ -908,26 +908,26 @@ public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest { searchSource().query(query))).actionGet(); fail("Should fail with SearchPhaseExecutionException"); } catch (SearchPhaseExecutionException failure) { - assertTrue(failure.getMessage().contains("SearchParseException")); - assertFalse(failure.getMessage().contains("NullPointerException")); - assertTrue(failure.getMessage().contains("One entry in functions list is missing a function")); + assertTrue(failure.toString().contains("SearchParseException")); + assertFalse(failure.toString().contains("NullPointerException")); + assertTrue(failure.toString().contains("One entry in functions list is missing a function")); } // next test java client try { client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery(FilterBuilders.matchAllFilter(), null)).get(); } catch (ElasticsearchIllegalArgumentException failure) { - assertTrue(failure.getMessage().contains("function must not be null")); + assertTrue(failure.toString().contains("function must not be null")); } try { client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery().add(FilterBuilders.matchAllFilter(), null)).get(); } catch (ElasticsearchIllegalArgumentException failure) { - assertTrue(failure.getMessage().contains("function must not be null")); + assertTrue(failure.toString().contains("function must not be null")); } try { client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery().add(null)).get(); } catch (ElasticsearchIllegalArgumentException failure) { - assertTrue(failure.getMessage().contains("function must not be null")); + assertTrue(failure.toString().contains("function must not be null")); } } diff --git a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java index d0a5a6b357b..9d3b887fd19 100644 --- a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java +++ b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java @@ -483,7 +483,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { client().prepareSearch().setQuery(matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get(); fail("SearchPhaseExecutionException should have been thrown"); } catch (SearchPhaseExecutionException e) { - assertTrue(e.getMessage().contains("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery")); + assertTrue(e.toString().contains("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery")); } cluster().wipeIndices("test"); } catch (MapperParsingException ex) { @@ -563,7 +563,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { fail("expected SearchPhaseExecutionException (total failure)"); } catch (SearchPhaseExecutionException e) { assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(e.getMessage(), containsString("unit [D] not supported for date math")); + assertThat(e.toString(), containsString("unit [D] not supported for date math")); } } @@ -2493,8 +2493,8 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .get(); fail("query is invalid and should have produced a parse exception"); } catch (Exception e) { - assertThat("query could not be parsed due to bad format: " + e.getMessage(), - e.getMessage().contains("Illegal value for id, expecting a string or number, got: START_ARRAY"), + assertThat("query could not be parsed due to bad format: " + e.toString(), + e.toString().contains("Illegal value for id, expecting a string or number, got: START_ARRAY"), equalTo(true)); } } diff --git a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java index 2e3276107fa..f4a2e81df61 100644 --- a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java +++ b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java @@ -234,7 +234,7 @@ public class SimpleSearchTests extends ElasticsearchIntegrationTest { client().prepareSearch("idx").setFrom(Integer.MAX_VALUE).get(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("Result window is too large, from + size must be less than or equal to:")); + assertThat(e.toString(), containsString("Result window is too large, from + size must be less than or equal to:")); } } } diff --git a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java index b40e5547b1a..7972df64741 100644 --- a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java +++ b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java @@ -1131,7 +1131,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { } catch (SearchPhaseExecutionException e) { //we check that it's a parse failure rather than a different shard failure for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { - assertThat(shardSearchFailure.reason(), containsString("Parse Failure [No mapping found for [kkk] in order to sort on]")); + assertThat(shardSearchFailure.toString(), containsString("[No mapping found for [kkk] in order to sort on]")); } } diff --git a/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java index b2528428297..f4c7c74933f 100644 --- a/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java @@ -23,7 +23,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.google.common.collect.Lists; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; @@ -50,7 +49,6 @@ import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionFuzzyBuilder; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.junit.Test; import java.io.IOException; @@ -177,7 +175,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { ).get(); fail("Indexing with a float weight was successful, but should not be"); } catch (MapperParsingException e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString("2.5")); + assertThat(e.toString(), containsString("2.5")); } } @@ -221,7 +219,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { ).get(); fail("Indexing with a non-number representing string as weight was successful, but should not be"); } catch (MapperParsingException e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString("thisIsNotValid")); + assertThat(e.toString(), containsString("thisIsNotValid")); } } @@ -239,7 +237,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { ).get(); fail("Indexing with weight string representing value > Int.MAX_VALUE was successful, but should not be"); } catch (MapperParsingException e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString(weight)); + assertThat(e.toString(), containsString(weight)); } } @@ -774,7 +772,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { fail("Expected an exception due to trying to sort on completion field, but did not happen"); } catch (SearchPhaseExecutionException e) { assertThat(e.status().getStatus(), is(400)); - assertThat(e.getMessage(), containsString("Sorting not supported for field[" + FIELD + "]")); + assertThat(e.toString(), containsString("Sorting not supported for field[" + FIELD + "]")); } } @@ -1096,7 +1094,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { // Exception must be thrown assertFalse(true); } catch (SearchPhaseExecutionException e) { - assertTrue(e.getDetailedMessage().contains("found no fielddata type for field [" + FIELD + "]")); + assertTrue(e.toString().contains("found no fielddata type for field [" + FIELD + "]")); } } diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index c691f3c5834..0a52c36ce32 100644 --- a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -310,7 +310,7 @@ public class ElasticsearchAssertions { assertVersionSerializable(searchResponse); } catch (SearchPhaseExecutionException e) { assertThat(e.status(), equalTo(restStatus)); - assertThat(e.getMessage(), reasonMatcher); + assertThat(e.toString(), reasonMatcher); for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { assertThat(shardSearchFailure.status(), equalTo(restStatus)); assertThat(shardSearchFailure.reason(), reasonMatcher); From dd4a22bfedf4a3c0c34ea598dfd97903d09b6396 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 24 Apr 2015 00:39:31 +0200 Subject: [PATCH 100/236] bulk: Fields defined in the `_default_` mapping of an index template should be picked up when an index alias filter is parsed if a new index is introduced when a document is indexed into an index that doesn't exist yet via the bulk api. Closes #10609 --- .../action/bulk/TransportBulkAction.java | 24 +++++++--- .../template/SimpleIndexTemplateTests.java | 46 +++++++++++++++---- 2 files changed, 55 insertions(+), 15 deletions(-) diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 4cdc4887060..8d3ad3de9cd 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.bulk; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; @@ -100,22 +99,33 @@ public class TransportBulkAction extends HandledTransportAction responses = new AtomicArray<>(bulkRequest.requests.size()); if (autoCreateIndex.needToCheck()) { - final Set indices = Sets.newHashSet(); + // Keep track of all unique indices and all unique types per index for the create index requests: + final Map> indicesAndTypes = new HashMap<>(); for (ActionRequest request : bulkRequest.requests) { if (request instanceof DocumentRequest) { DocumentRequest req = (DocumentRequest) request; - if (!indices.contains(req.index())) { - indices.add(req.index()); + Set types = indicesAndTypes.get(req.index()); + if (types == null) { + indicesAndTypes.put(req.index(), types = new HashSet<>()); } + types.add(req.type()); } else { throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName()); } } - final AtomicInteger counter = new AtomicInteger(indices.size()); + final AtomicInteger counter = new AtomicInteger(indicesAndTypes.size()); ClusterState state = clusterService.state(); - for (final String index : indices) { + for (Map.Entry> entry : indicesAndTypes.entrySet()) { + final String index = entry.getKey(); if (autoCreateIndex.shouldAutoCreate(index, state)) { - createIndexAction.execute(new CreateIndexRequest(bulkRequest).index(index).cause("auto(bulk api)").masterNodeTimeout(bulkRequest.timeout()), new ActionListener() { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(bulkRequest); + createIndexRequest.index(index); + for (String type : entry.getValue()) { + createIndexRequest.mapping(type); + } + createIndexRequest.cause("auto(bulk api)"); + createIndexRequest.masterNodeTimeout(bulkRequest.timeout()); + createIndexAction.execute(createIndexRequest, new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { if (counter.decrementAndGet() == 0) { diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index e02c2bef8b4..1c3f8f8c9ca 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -29,6 +29,8 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.common.Priority; @@ -604,38 +606,66 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exception { // Indexing into a should succeed, because the field mapping for field 'field' is defined in the test mapping. client().admin().indices().preparePutTemplate("template1") - .setTemplate("a") + .setTemplate("a*") .setOrder(0) .addMapping("test", "field", "type=string") .addAlias(new Alias("alias1").filter(termFilter("field", "value"))).get(); // Indexing into b should succeed, because the field mapping for field 'field' is defined in the _default_ mapping and the test type exists. client().admin().indices().preparePutTemplate("template2") - .setTemplate("b") + .setTemplate("b*") .setOrder(0) .addMapping("_default_", "field", "type=string") .addMapping("test") .addAlias(new Alias("alias2").filter(termFilter("field", "value"))).get(); // Indexing into c should succeed, because the field mapping for field 'field' is defined in the _default_ mapping. client().admin().indices().preparePutTemplate("template3") - .setTemplate("c") + .setTemplate("c*") .setOrder(0) .addMapping("_default_", "field", "type=string") .addAlias(new Alias("alias3").filter(termFilter("field", "value"))).get(); // Indexing into d index should fail, since there is field with name 'field' in the mapping client().admin().indices().preparePutTemplate("template4") - .setTemplate("d") + .setTemplate("d*") .setOrder(0) .addAlias(new Alias("alias4").filter(termFilter("field", "value"))).get(); - client().prepareIndex("a", "test", "test").setSource("{}").get(); - client().prepareIndex("b", "test", "test").setSource("{}").get(); - client().prepareIndex("c", "test", "test").setSource("{}").get(); + client().prepareIndex("a1", "test", "test").setSource("{}").get(); + BulkResponse response = client().prepareBulk().add(new IndexRequest("a2", "test", "test").source("{}")).get(); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems()[0].isFailed(), equalTo(false)); + assertThat(response.getItems()[0].getIndex(), equalTo("a2")); + assertThat(response.getItems()[0].getType(), equalTo("test")); + assertThat(response.getItems()[0].getId(), equalTo("test")); + assertThat(response.getItems()[0].getVersion(), equalTo(1l)); + + client().prepareIndex("b1", "test", "test").setSource("{}").get(); + response = client().prepareBulk().add(new IndexRequest("b2", "test", "test").source("{}")).get(); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems()[0].isFailed(), equalTo(false)); + assertThat(response.getItems()[0].getIndex(), equalTo("b2")); + assertThat(response.getItems()[0].getType(), equalTo("test")); + assertThat(response.getItems()[0].getId(), equalTo("test")); + assertThat(response.getItems()[0].getVersion(), equalTo(1l)); + + client().prepareIndex("c1", "test", "test").setSource("{}").get(); + response = client().prepareBulk().add(new IndexRequest("c2", "test", "test").source("{}")).get(); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems()[0].isFailed(), equalTo(false)); + assertThat(response.getItems()[0].getIndex(), equalTo("c2")); + assertThat(response.getItems()[0].getType(), equalTo("test")); + assertThat(response.getItems()[0].getId(), equalTo("test")); + assertThat(response.getItems()[0].getVersion(), equalTo(1l)); + try { - client().prepareIndex("d", "test", "test").setSource("{}").get(); + client().prepareIndex("d1", "test", "test").setSource("{}").get(); fail(); } catch (Exception e) { assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchIllegalArgumentException.class)); assertThat(e.getMessage(), containsString("failed to parse filter for alias [alias4]")); } + response = client().prepareBulk().add(new IndexRequest("d2", "test", "test").source("{}")).get(); + assertThat(response.hasFailures(), is(true)); + assertThat(response.getItems()[0].isFailed(), equalTo(true)); + assertThat(response.getItems()[0].getFailureMessage(), containsString("failed to parse filter for alias [alias4]")); } } From 84549f334d99aad394b41d60b558135712d4591a Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 24 Apr 2015 10:09:24 +0200 Subject: [PATCH 101/236] [TEST] use toString rather than getDetailedMessage() --- src/test/java/org/elasticsearch/script/IndexLookupTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/script/IndexLookupTests.java b/src/test/java/org/elasticsearch/script/IndexLookupTests.java index a9a61555b3a..cc1900a3fb2 100644 --- a/src/test/java/org/elasticsearch/script/IndexLookupTests.java +++ b/src/test/java/org/elasticsearch/script/IndexLookupTests.java @@ -581,8 +581,8 @@ public class IndexLookupTests extends ElasticsearchIntegrationTest { } } catch (SearchPhaseExecutionException ex) { assertThat( - "got " + ex.getDetailedMessage(), - ex.getDetailedMessage().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."), + "got " + ex.toString(), + ex.toString().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."), Matchers.greaterThan(-1)); } } From 3ff0b21c21f7843ecced4b0042bfbc31fb8070b2 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 24 Apr 2015 10:37:55 +0200 Subject: [PATCH 102/236] Remove dead code --- .../TransportShardReplicationOperationAction.java | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index 6b9e4ca14d8..9c7b65bc939 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -247,8 +247,8 @@ public abstract class TransportShardReplicationOperationAction= 0; - //older nodes will send the concrete index as part of the request - shardId = new ShardId(request.index(), shard); - } } @Override From b444d2c31a9f7bfc9a200592e4c1db7881a7791c Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 24 Apr 2015 12:05:58 +0200 Subject: [PATCH 103/236] Test: wait for green --- .../elasticsearch/fieldstats/FieldStatsIntegrationTests.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java index e31b60a2380..491b281b9f9 100644 --- a/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java @@ -45,6 +45,7 @@ public class FieldStatsIntegrationTests extends ElasticsearchIntegrationTest { "test", "string", "type=string", "date", "type=date", "double", "type=double", "double", "type=double", "float", "type=float", "long", "type=long", "integer", "type=integer", "short", "type=short", "byte", "type=byte" )); + ensureGreen("test"); byte minByte = Byte.MAX_VALUE; byte maxByte = Byte.MIN_VALUE; @@ -129,6 +130,7 @@ public class FieldStatsIntegrationTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("test3").addMapping( "test", "value", "type=long" )); + ensureGreen("test1", "test2", "test3"); indexRange("test1", -10, 100); indexRange("test2", 101, 200); @@ -180,6 +182,7 @@ public class FieldStatsIntegrationTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("test2").addMapping( "test", "value", "type=string" )); + ensureGreen("test1", "test2"); client().prepareIndex("test1", "test").setSource("value", 1l).get(); client().prepareIndex("test1", "test").setSource("value", 2l).get(); From 5bdfdc42d99b12e7999fb36c8f1a72d43d7b5606 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 21 Apr 2015 15:52:44 +0200 Subject: [PATCH 104/236] Refactor TransportShardReplicationOperationAction Refactor TransportShardReplicationOperationAction state management into clear separate Primary phase and Replication phase. The primary phase is responsible for routing the request to the node holding the primary, validating it and performing the operation on the primary. The Replication phase is responsible for sending the request to the replicas and managing their responses. This also adds unit test infrastructure for this class, and some basic tests. We can extend later as we continue developing. Closes #10749 --- ...nsportShardReplicationOperationAction.java | 790 ++++++++++-------- .../transport/DummyTransportAddress.java | 5 + .../ShardReplicationOperationTests.java | 586 +++++++++++++ .../test/cluster/TestClusterService.java | 249 ++++++ .../test/transport/CapturingTransport.java | 180 ++++ 5 files changed, 1458 insertions(+), 352 deletions(-) create mode 100644 src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java create mode 100644 src/test/java/org/elasticsearch/test/cluster/TestClusterService.java create mode 100644 src/test/java/org/elasticsearch/test/transport/CapturingTransport.java diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index 9c7b65bc939..529586927fd 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.*; @@ -100,7 +99,7 @@ public abstract class TransportShardReplicationOperationAction listener) { - new AsyncShardOperationAction(request, listener).start(); + new PrimaryPhase(request, listener).run(); } protected abstract Request newRequestInstance(); @@ -112,10 +111,10 @@ public abstract class TransportShardReplicationOperationAction shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable; + protected abstract Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable; protected abstract void shardOperationOnReplica(ReplicaOperationRequest shardRequest) throws Exception; @@ -296,149 +295,163 @@ public abstract class TransportShardReplicationOperationAction + * Note that as soon as we start sending request to replicas, state responsibility is transferred to {@link ReplicationPhase} + */ + final class PrimaryPhase extends AbstractRunnable { private final ActionListener listener; private final InternalRequest internalRequest; - private volatile ShardIterator shardIt; - private final AtomicBoolean primaryOperationStarted = new AtomicBoolean(); - private volatile ClusterStateObserver observer; + private final ClusterStateObserver observer; + private final AtomicBoolean finished = new AtomicBoolean(false); - AsyncShardOperationAction(Request request, ActionListener listener) { + + PrimaryPhase(Request request, ActionListener listener) { this.internalRequest = new InternalRequest(request); this.listener = listener; - } - - public void start() { this.observer = new ClusterStateObserver(clusterService, internalRequest.request().timeout(), logger); - doStart(); } - /** - * Returns true if the action starting to be performed on the primary (or is done). - */ - protected void doStart() throws ElasticsearchException { - try { - ClusterBlockException blockException = checkGlobalBlock(observer.observedState()); - if (blockException != null) { - if (blockException.retryable()) { - logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage()); - retry(blockException); - return; - } else { - throw blockException; - } - } - if (resolveIndex()) { - internalRequest.concreteIndex(observer.observedState().metaData().concreteSingleIndex(internalRequest.request().index(), internalRequest.request().indicesOptions())); - } else { - internalRequest.concreteIndex(internalRequest.request().index()); - } + @Override + public void onFailure(Throwable e) { + finishWithUnexpectedFailure(e); + } - resolveRequest(observer.observedState(), internalRequest, listener); - - blockException = checkRequestBlock(observer.observedState(), internalRequest); - if (blockException != null) { - if (blockException.retryable()) { - logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage()); - retry(blockException); - return; - } else { - throw blockException; - } - } - shardIt = shards(observer.observedState(), internalRequest); - } catch (Throwable e) { - listener.onFailure(e); + protected void doRun() { + if (checkBlocks() == false) { return; } - - // no shardIt, might be in the case between index gateway recovery and shardIt initialization - if (shardIt.size() == 0) { - logger.trace("no shard instances known for shard [{}], scheduling a retry", shardIt.shardId()); + final ShardIterator shardIt = shards(observer.observedState(), internalRequest); + final ShardRouting primary = resolvePrimary(shardIt); + if (primary == null) { retryBecauseUnavailable(shardIt.shardId(), "No active shards."); return; } + if (primary.active() == false) { + logger.trace("primary shard [{}] is not yet active, scheduling a retry.", primary.shardId()); + retryBecauseUnavailable(shardIt.shardId(), "Primary shard is not active or isn't assigned to a known node."); + return; + } + if (observer.observedState().nodes().nodeExists(primary.currentNodeId()) == false) { + logger.trace("primary shard [{}] is assigned to anode we do not know the node, scheduling a retry.", primary.shardId(), primary.currentNodeId()); + retryBecauseUnavailable(shardIt.shardId(), "Primary shard is not active or isn't assigned to a known node."); + return; + } + routeRequestOrPerformLocally(primary, shardIt); + } - boolean foundPrimary = false; - ShardRouting shardX; - while ((shardX = shardIt.nextOrNull()) != null) { - final ShardRouting shard = shardX; - // we only deal with primary shardIt here... - if (!shard.primary()) { - continue; - } - if (!shard.active() || !observer.observedState().nodes().nodeExists(shard.currentNodeId())) { - logger.trace("primary shard [{}] is not yet active or we do not know the node it is assigned to [{}], scheduling a retry.", shard.shardId(), shard.currentNodeId()); - retryBecauseUnavailable(shardIt.shardId(), "Primary shard is not active or isn't assigned to a known node."); - return; - } - - if (!primaryOperationStarted.compareAndSet(false, true)) { - return; - } - - foundPrimary = true; - if (shard.currentNodeId().equals(observer.observedState().nodes().localNodeId())) { - try { - if (internalRequest.request().operationThreaded()) { - threadPool.executor(executor).execute(new Runnable() { - @Override - public void run() { - try { - performOnPrimary(shard.id(), shard); - } catch (Throwable t) { - listener.onFailure(t); - } - } - }); - } else { - performOnPrimary(shard.id(), shard); - } - } catch (Throwable t) { - listener.onFailure(t); - } + /** + * checks for any cluster state blocks. Returns true if operation is OK to proceeded. + * if false is return, no further action is needed. The method takes care of any continuation, by either + * responding to the listener or scheduling a retry + */ + protected boolean checkBlocks() { + ClusterBlockException blockException = checkGlobalBlock(observer.observedState()); + if (blockException != null) { + if (blockException.retryable()) { + logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage()); + retry(blockException); } else { - DiscoveryNode node = observer.observedState().nodes().get(shard.currentNodeId()); - transportService.sendRequest(node, actionName, internalRequest.request(), transportOptions, new BaseTransportResponseHandler() { + finishAsFailed(blockException); + } + return false; + } + if (resolveIndex()) { + internalRequest.concreteIndex(observer.observedState().metaData().concreteSingleIndex(internalRequest.request().index(), internalRequest.request().indicesOptions())); + } else { + internalRequest.concreteIndex(internalRequest.request().index()); + } - @Override - public Response newInstance() { - return newResponseInstance(); - } + resolveRequest(observer.observedState(), internalRequest, listener); - @Override - public String executor() { - return ThreadPool.Names.SAME; - } + blockException = checkRequestBlock(observer.observedState(), internalRequest); + if (blockException != null) { + if (blockException.retryable()) { + logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage()); + retry(blockException); + } else { + finishAsFailed(blockException); + } + return false; + } + return true; + } - @Override - public void handleResponse(Response response) { - listener.onResponse(response); - } + protected ShardRouting resolvePrimary(ShardIterator shardIt) { + // no shardIt, might be in the case between index gateway recovery and shardIt initialization + ShardRouting shard; + while ((shard = shardIt.nextOrNull()) != null) { + // we only deal with primary shardIt here... + if (shard.primary()) { + return shard; + } + } + return null; + } - @Override - public void handleException(TransportException exp) { + /** send the request to the node holding the primary or execute if local */ + protected void routeRequestOrPerformLocally(final ShardRouting primary, final ShardIterator shardsIt) { + if (primary.currentNodeId().equals(observer.observedState().nodes().localNodeId())) { + try { + if (internalRequest.request().operationThreaded()) { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + public void onFailure(Throwable t) { + finishAsFailed(t); + } + + @Override + protected void doRun() throws Exception { + performOnPrimary(primary, shardsIt); + } + }); + } else { + performOnPrimary(primary, shardsIt); + } + } catch (Throwable t) { + // no commit: check threadpool rejection. + finishAsFailed(t); + } + } else { + DiscoveryNode node = observer.observedState().nodes().get(primary.currentNodeId()); + transportService.sendRequest(node, actionName, internalRequest.request(), transportOptions, new BaseTransportResponseHandler() { + + @Override + public Response newInstance() { + return newResponseInstance(); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public void handleResponse(Response response) { + finishOnRemoteSuccess(response); + } + + @Override + public void handleException(TransportException exp) { + try { // if we got disconnected from the node, or the node / shard is not in the right state (being closed) if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException || retryPrimaryException(exp)) { - primaryOperationStarted.set(false); internalRequest.request().setCanHaveDuplicates(); // we already marked it as started when we executed it (removed the listener) so pass false // to re-add to the cluster listener logger.trace("received an error from node the primary was assigned to ({}), scheduling a retry", exp.getMessage()); retry(exp); } else { - listener.onFailure(exp); + finishAsFailed(exp); } + } catch (Throwable t) { + finishWithUnexpectedFailure(t); } - }); - } - break; - } - // we won't find a primary if there are no shards in the shard iterator, retry... - if (!foundPrimary) { - logger.trace("couldn't find a eligible primary shard, scheduling for retry."); - retryBecauseUnavailable(shardIt.shardId(), "No active shards."); + } + }); } } @@ -446,7 +459,7 @@ public abstract class TransportShardReplicationOperationAction primaryResponse = shardOperationOnPrimary(clusterState, por); - performReplicas(por, primaryResponse); + PrimaryOperationRequest por = new PrimaryOperationRequest(primary.id(), internalRequest.concreteIndex(), internalRequest.request()); + Tuple primaryResponse = shardOperationOnPrimary(observer.observedState(), por); + logger.trace("operation completed on primary [{}]", primary); + replicationPhase = new ReplicationPhase(shardsIt, primaryResponse.v2(), primaryResponse.v1(), observer, primary, internalRequest, listener); } catch (Throwable e) { internalRequest.request.setCanHaveDuplicates(); // shard has not been allocated yet, retry it here if (retryPrimaryException(e)) { - primaryOperationStarted.set(false); logger.trace("had an error while performing operation on primary ({}), scheduling a retry.", e.getMessage()); retry(e); return; } if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { if (logger.isTraceEnabled()) { - logger.trace(shard.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e); + logger.trace(primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e); } } else { if (logger.isDebugEnabled()) { - logger.debug(shard.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e); + logger.debug(primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e); } } - listener.onFailure(e); - } - } - - void performReplicas(PrimaryOperationRequest por, Tuple primaryResponse) { - ShardRouting shard; - // we double check on the state, if it got changed we need to make sure we take the latest one cause - // maybe a replica shard started its recovery process and we need to apply it there... - - // we also need to make sure if the new state has a new primary shard (that we indexed to before) started - // and assigned to another node (while the indexing happened). In that case, we want to apply it on the - // new primary shard as well... - ClusterState newState = clusterService.state(); - ShardRouting newPrimaryShard = null; - int numberOfUnassignedReplicas = 0; - if (observer.observedState() != newState) { - shardIt.reset(); - ShardRouting originalPrimaryShard = null; - while ((shard = shardIt.nextOrNull()) != null) { - if (shard.primary()) { - originalPrimaryShard = shard; - break; - } - } - if (originalPrimaryShard == null || !originalPrimaryShard.active()) { - throw new ElasticsearchIllegalStateException("unexpected state, failed to find primary shard on an index operation that succeeded"); - } - - observer.reset(newState); - shardIt = shards(newState, internalRequest); - while ((shard = shardIt.nextOrNull()) != null) { - if (shard.primary()) { - if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId())) { - newPrimaryShard = null; - } else { - newPrimaryShard = shard; - } - } - - if (!shard.primary() && shard.unassigned()) { - numberOfUnassignedReplicas++; - } - } - shardIt.reset(); - internalRequest.request().setCanHaveDuplicates(); // safe side, cluster state changed, we might have dups - } else { - shardIt.reset(); - while ((shard = shardIt.nextOrNull()) != null) { - if (shard.state() != ShardRoutingState.STARTED) { - internalRequest.request().setCanHaveDuplicates(); - } - if (!shard.primary() && shard.unassigned()) { - numberOfUnassignedReplicas++; - } - } - shardIt.reset(); - } - - int numberOfPendingShardInstances = shardIt.assignedReplicasIncludingRelocating(); - if (newPrimaryShard != null) { - numberOfPendingShardInstances++; - } - ReplicationState replicationState = new ReplicationState(por, shardIt, primaryResponse.v1(), primaryResponse.v2(), listener, numberOfPendingShardInstances, numberOfUnassignedReplicas); - if (numberOfPendingShardInstances == 0) { - replicationState.forceFinish(); + finishAsFailed(e); return; } - IndexMetaData indexMetaData = observer.observedState().metaData().index(internalRequest.concreteIndex()); - if (newPrimaryShard != null) { - performOnReplica(replicationState, newPrimaryShard, newPrimaryShard.currentNodeId(), indexMetaData); - } - - shardIt.reset(); // reset the iterator - while ((shard = shardIt.nextOrNull()) != null) { - // if its unassigned, nothing to do here... - if (shard.unassigned()) { - continue; - } - - // if the shard is primary and relocating, add one to the counter since we perform it on the replica as well - // (and we already did it on the primary) - boolean doOnlyOnRelocating = false; - if (shard.primary()) { - if (shard.relocating()) { - doOnlyOnRelocating = true; - } else { - continue; - } - } - // we index on a replica that is initializing as well since we might not have got the event - // yet that it was started. We will get an exception IllegalShardState exception if its not started - // and that's fine, we will ignore it - if (!doOnlyOnRelocating) { - performOnReplica(replicationState, shard, shard.currentNodeId(), indexMetaData); - } - if (shard.relocating()) { - performOnReplica(replicationState, shard, shard.relocatingNodeId(), indexMetaData); - } - } + finishAndMoveToReplication(replicationPhase); } - void performOnReplica(final ReplicationState state, final ShardRouting shard, final String nodeId, final IndexMetaData indexMetaData) { - // if we don't have that node, it means that it might have failed and will be created again, in - // this case, we don't have to do the operation, and just let it failover - if (!observer.observedState().nodes().nodeExists(nodeId)) { - state.onReplicaFailure(nodeId, null); - return; - } - - final ReplicaOperationRequest shardRequest = new ReplicaOperationRequest(shardIt.shardId(), state.replicaRequest()); - - // If the replicas use shadow replicas, there is no reason to - // perform the action on the replica, so skip it and - // immediately return - if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings())) { - // this delays mapping updates on replicas because they have - // to wait until they get the new mapping through the cluster - // state, which is why we recommend pre-defined mappings for - // indices using shadow replicas - state.onReplicaSuccess(); - return; - } - - if (!nodeId.equals(observer.observedState().nodes().localNodeId())) { - final DiscoveryNode node = observer.observedState().nodes().get(nodeId); - transportService.sendRequest(node, transportReplicaAction, shardRequest, - transportOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty vResponse) { - state.onReplicaSuccess(); - } - - @Override - public void handleException(TransportException exp) { - state.onReplicaFailure(nodeId, exp); - logger.trace("[{}] Transport failure during replica request [{}] ", exp, node, internalRequest.request()); - if (!ignoreReplicaException(exp)) { - logger.warn("Failed to perform " + actionName + " on remote replica " + node + shardIt.shardId(), exp); - shardStateAction.shardFailed(shard, indexMetaData.getUUID(), - "Failed to perform [" + actionName + "] on replica, message [" + ExceptionsHelper.detailedMessage(exp) + "]"); - } - } - - }); - } else { - if (internalRequest.request().operationThreaded()) { - try { - threadPool.executor(executor).execute(new AbstractRunnable() { - @Override - protected void doRun() { - try { - shardOperationOnReplica(shardRequest); - state.onReplicaSuccess(); - } catch (Throwable e) { - state.onReplicaFailure(nodeId, e); - failReplicaIfNeeded(shard.index(), shard.id(), e); - } - } - - // we must never reject on because of thread pool capacity on replicas - @Override - public boolean isForceExecution() { - return true; - } - - @Override - public void onFailure(Throwable t) { - state.onReplicaFailure(nodeId, t); - } - }); - } catch (Throwable e) { - failReplicaIfNeeded(shard.index(), shard.id(), e); - state.onReplicaFailure(nodeId, e); - } - } else { - try { - shardOperationOnReplica(shardRequest); - state.onReplicaSuccess(); - } catch (Throwable e) { - failReplicaIfNeeded(shard.index(), shard.id(), e); - state.onReplicaFailure(nodeId, e); - } - } - } - } - - boolean raiseFailureIfHaveNotEnoughActiveShardCopies(ShardRouting shard, ClusterState state) { - if (!checkWriteConsistency) { - return false; + /** + * checks whether we can perform a write based on the write consistency setting + * returns **null* if OK to proceed, or a string describing the reason to stop + */ + String checkWriteConsistency(ShardRouting shard) { + if (checkWriteConsistency == false) { + return null; } final WriteConsistencyLevel consistencyLevel; @@ -697,11 +574,11 @@ public abstract class TransportShardReplicationOperationAction 2) { // only for more than 2 in the number of shardIt it makes sense, otherwise its 1 shard with 1 replica, quorum is 1 (which is what it is initialized to) requiredNumber = (shardRoutingTable.getSize() / 2) + 1; @@ -722,24 +599,21 @@ public abstract class TransportShardReplicationOperationAction listener; private final AtomicBoolean finished = new AtomicBoolean(false); private final AtomicInteger success = new AtomicInteger(1); // We already wrote into the primary shard private final ConcurrentMap shardReplicaFailures = ConcurrentCollections.newConcurrentMap(); - + private final IndexMetaData indexMetaData; + private final ShardRouting originalPrimaryShard; private final AtomicInteger pending; - private final int numberOfShardInstances; + private final int totalShards; + private final ClusterStateObserver observer; - public ReplicationState(PrimaryOperationRequest por, ShardIterator shardsIter, Response finalResponse, ReplicaRequest replicaRequest, ActionListener listener, int numberOfPendingShardInstances, int numberOfUnassignedReplicas) { - this.request = por.request; - this.finalResponse = finalResponse; + /** + * the constructor doesn't take any action, just calculates state. Call {@link #run()} to start + * replicating. + */ + public ReplicationPhase(ShardIterator originalShardIt, ReplicaRequest replicaRequest, Response finalResponse, + ClusterStateObserver observer, ShardRouting originalPrimaryShard, + InternalRequest internalRequest, ActionListener listener) { this.replicaRequest = replicaRequest; - this.shardId = shardsIter.shardId(); this.listener = listener; - this.numberOfShardInstances = 1 + numberOfPendingShardInstances + numberOfUnassignedReplicas; + this.finalResponse = finalResponse; + this.originalPrimaryShard = originalPrimaryShard; + this.observer = observer; + indexMetaData = observer.observedState().metaData().index(internalRequest.concreteIndex()); + + ShardRouting shard; + // we double check on the state, if it got changed we need to make sure we take the latest one cause + // maybe a replica shard started its recovery process and we need to apply it there... + + // we also need to make sure if the new state has a new primary shard (that we indexed to before) started + // and assigned to another node (while the indexing happened). In that case, we want to apply it on the + // new primary shard as well... + ClusterState newState = clusterService.state(); + + int numberOfUnassignedOrShadowReplicas = 0; + int numberOfPendingShardInstances = 0; + if (observer.observedState() != newState) { + observer.reset(newState); + shardIt = shards(newState, internalRequest); + while ((shard = shardIt.nextOrNull()) != null) { + if (shard.primary()) { + if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId()) == false) { + // there is a new primary, we'll have to replicate to it. + numberOfPendingShardInstances++; + } + if (shard.relocating()) { + numberOfPendingShardInstances++; + } + } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings())) { + // If the replicas use shadow replicas, there is no reason to + // perform the action on the replica, so skip it and + // immediately return + + // this delays mapping updates on replicas because they have + // to wait until they get the new mapping through the cluster + // state, which is why we recommend pre-defined mappings for + // indices using shadow replicas + numberOfUnassignedOrShadowReplicas++; + } else if (shard.unassigned()) { + numberOfUnassignedOrShadowReplicas++; + } else if (shard.relocating()) { + // we need to send to two copies + numberOfPendingShardInstances += 2; + } else { + numberOfPendingShardInstances++; + } + } + internalRequest.request().setCanHaveDuplicates(); // safe side, cluster state changed, we might have dups + } else { + shardIt = originalShardIt; + shardIt.reset(); + while ((shard = shardIt.nextOrNull()) != null) { + if (shard.state() != ShardRoutingState.STARTED) { + replicaRequest.setCanHaveDuplicates(); + } + if (shard.unassigned()) { + numberOfUnassignedOrShadowReplicas++; + } else if (shard.primary()) { + if (shard.relocating()) { + // we have to replicate to the other copy + numberOfPendingShardInstances += 1; + } + } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings())) { + // If the replicas use shadow replicas, there is no reason to + // perform the action on the replica, so skip it and + // immediately return + + // this delays mapping updates on replicas because they have + // to wait until they get the new mapping through the cluster + // state, which is why we recommend pre-defined mappings for + // indices using shadow replicas + numberOfUnassignedOrShadowReplicas++; + } else if (shard.relocating()) { + // we need to send to two copies + numberOfPendingShardInstances += 2; + } else { + numberOfPendingShardInstances++; + } + } + } + + // one for the primary already done + this.totalShards = 1 + numberOfPendingShardInstances + numberOfUnassignedOrShadowReplicas; this.pending = new AtomicInteger(numberOfPendingShardInstances); } - public Request request() { - return this.request; + /** total shard copies */ + int totalShards() { + return totalShards; } - public ReplicaRequest replicaRequest() { - return this.replicaRequest; + /** total successful operations so far */ + int successful() { + return success.get(); } - public void onReplicaFailure(String nodeId, @Nullable Throwable e) { + /** number of pending operations */ + int pending() { + return pending.get(); + } + + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error while replicating for action [{}]. shard [{}]. ", t, actionName, shardIt.shardId()); + forceFinishAsFailed(t); + } + + /** start sending current requests to replicas */ + @Override + protected void doRun() { + if (pending.get() == 0) { + doFinish(); + return; + } + ShardRouting shard; + shardIt.reset(); // reset the iterator + while ((shard = shardIt.nextOrNull()) != null) { + // if its unassigned, nothing to do here... + if (shard.unassigned()) { + continue; + } + + // we index on a replica that is initializing as well since we might not have got the event + // yet that it was started. We will get an exception IllegalShardState exception if its not started + // and that's fine, we will ignore it + if (shard.primary()) { + if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId()) == false) { + // there is a new primary, we'll have to replicate to it. + performOnReplica(shard, shard.currentNodeId()); + } + if (shard.relocating()) { + performOnReplica(shard, shard.relocatingNodeId()); + } + } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings()) == false) { + performOnReplica(shard, shard.currentNodeId()); + if (shard.relocating()) { + performOnReplica(shard, shard.relocatingNodeId()); + } + } + } + } + + /** send operation to the given node or perform it if local */ + void performOnReplica(final ShardRouting shard, final String nodeId) { + // if we don't have that node, it means that it might have failed and will be created again, in + // this case, we don't have to do the operation, and just let it failover + if (!observer.observedState().nodes().nodeExists(nodeId)) { + onReplicaFailure(nodeId, null); + return; + } + + final ReplicaOperationRequest shardRequest = new ReplicaOperationRequest(shardIt.shardId(), replicaRequest); + + if (!nodeId.equals(observer.observedState().nodes().localNodeId())) { + final DiscoveryNode node = observer.observedState().nodes().get(nodeId); + transportService.sendRequest(node, transportReplicaAction, shardRequest, + transportOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleResponse(TransportResponse.Empty vResponse) { + onReplicaSuccess(); + } + + @Override + public void handleException(TransportException exp) { + onReplicaFailure(nodeId, exp); + logger.trace("[{}] transport failure during replica request [{}] ", exp, node, replicaRequest); + if (ignoreReplicaException(exp) == false) { + logger.warn("failed to perform " + actionName + " on remote replica " + node + shardIt.shardId(), exp); + shardStateAction.shardFailed(shard, indexMetaData.getUUID(), + "Failed to perform [" + actionName + "] on replica, message [" + ExceptionsHelper.detailedMessage(exp) + "]"); + } + } + + }); + } else { + if (replicaRequest.operationThreaded()) { + try { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() { + try { + shardOperationOnReplica(shardRequest); + onReplicaSuccess(); + } catch (Throwable e) { + onReplicaFailure(nodeId, e); + failReplicaIfNeeded(shard.index(), shard.id(), e); + } + } + + // we must never reject on because of thread pool capacity on replicas + @Override + public boolean isForceExecution() { + return true; + } + + @Override + public void onFailure(Throwable t) { + onReplicaFailure(nodeId, t); + } + }); + } catch (Throwable e) { + failReplicaIfNeeded(shard.index(), shard.id(), e); + onReplicaFailure(nodeId, e); + } + } else { + try { + shardOperationOnReplica(shardRequest); + onReplicaSuccess(); + } catch (Throwable e) { + failReplicaIfNeeded(shard.index(), shard.id(), e); + onReplicaFailure(nodeId, e); + } + } + } + } + + + void onReplicaFailure(String nodeId, @Nullable Throwable e) { // Only version conflict should be ignored from being put into the _shards header? - if (e != null && !ignoreReplicaException(e)) { + if (e != null && ignoreReplicaException(e) == false) { shardReplicaFailures.put(nodeId, e); } - finishIfNeeded(); + decPendingAndFinishIfNeeded(); } - public void onReplicaSuccess() { + void onReplicaSuccess() { success.incrementAndGet(); - finishIfNeeded(); + decPendingAndFinishIfNeeded(); } - public void forceFinish() { - doFinish(); - } - - private void finishIfNeeded() { - if (pending.decrementAndGet() == 0) { + private void decPendingAndFinishIfNeeded() { + if (pending.decrementAndGet() <= 0) { doFinish(); } } + private void forceFinishAsFailed(Throwable t) { + if (finished.compareAndSet(false, true)) { + listener.onFailure(t); + } + } + private void doFinish() { if (finished.compareAndSet(false, true)) { + final ShardId shardId = shardIt.shardId(); final ActionWriteResponse.ShardInfo.Failure[] failuresArray; if (!shardReplicaFailures.isEmpty()) { int slot = 0; @@ -824,9 +911,8 @@ public abstract class TransportShardReplicationOperationAction void assertListenerThrows(String msg, PlainActionFuture listener, Class klass) throws InterruptedException { + try { + listener.get(); + fail(msg); + } catch (ExecutionException ex) { + assertThat(ex.getCause(), instanceOf(klass)); + } + + } + + @Test + public void testBlocks() throws ExecutionException, InterruptedException { + Request request = new Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + assertFalse("primary phase should stop execution", primaryPhase.checkBlocks()); + assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + listener = new PlainActionFuture<>(); + primaryPhase = action.new PrimaryPhase(new Request().timeout("5ms"), listener); + assertFalse("primary phase should stop execution on retryable block", primaryPhase.checkBlocks()); + assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class); + + + listener = new PlainActionFuture<>(); + primaryPhase = action.new PrimaryPhase(new Request(), listener); + assertFalse("primary phase should stop execution on retryable block", primaryPhase.checkBlocks()); + assertFalse("primary phase should wait on retryable block", listener.isDone()); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertListenerThrows("primary phase should fail operation when moving from a retryable block a non-retryable one", listener, ClusterBlockException.class); + } + + ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) { + int assignedReplicas = randomIntBetween(0, numberOfReplicas); + return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas); + } + + ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) { + ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas]; + // no point in randomizing - node assignment later on does it too. + for (int i = 0; i < assignedReplicas; i++) { + replicaStates[i] = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); + } + for (int i = assignedReplicas; i < replicaStates.length; i++) { + replicaStates[i] = ShardRoutingState.UNASSIGNED; + } + return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates); + + } + + ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) { + final int numberOfReplicas = replicaStates.length; + + int numberOfNodes = numberOfReplicas + 1; + if (primaryState == ShardRoutingState.RELOCATING) { + numberOfNodes++; + } + for (ShardRoutingState state : replicaStates) { + if (state == ShardRoutingState.RELOCATING) { + numberOfNodes++; + } + } + numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures + final ShardId shardId = new ShardId(index, 0); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set unassignedNodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes + 1; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.put(node); + unassignedNodes.add(node.id()); + } + discoBuilder.localNodeId(newNode(0).id()); + discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(ImmutableSettings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId, false); + + String primaryNode = null; + String relocatingNode = null; + if (primaryState != ShardRoutingState.UNASSIGNED) { + if (primaryLocal) { + primaryNode = newNode(0).id(); + unassignedNodes.remove(primaryNode); + } else { + primaryNode = selectAndRemove(unassignedNodes); + } + if (primaryState == ShardRoutingState.RELOCATING) { + relocatingNode = selectAndRemove(unassignedNodes); + } + } + indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, 0, primaryNode, relocatingNode, true, primaryState, 0)); + + for (ShardRoutingState replicaState : replicaStates) { + String replicaNode = null; + relocatingNode = null; + if (replicaState != ShardRoutingState.UNASSIGNED) { + assert primaryNode != null : "a replica is assigned but the primary isn't"; + replicaNode = selectAndRemove(unassignedNodes); + if (replicaState == ShardRoutingState.RELOCATING) { + relocatingNode = selectAndRemove(unassignedNodes); + } + } + indexShardRoutingBuilder.addShard( + new ImmutableShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState, 0)); + + } + + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().put(indexMetaData, false).generateUuidIfNeeded()); + state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build()))); + return state.build(); + } + + private String selectAndRemove(Set strings) { + String selection = randomFrom(strings.toArray(new String[strings.size()])); + strings.remove(selection); + return selection; + } + + @Test + public void testNotStartedPrimary() throws InterruptedException, ExecutionException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + // no replicas in oder to skip the replication part + clusterService.setState(state(index, true, + randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); + + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + + Request request = new Request(shardId).timeout("1ms"); + PlainActionFuture listener = new PlainActionFuture<>(); + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + primaryPhase.run(); + assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class); + + request = new Request(shardId); + listener = new PlainActionFuture<>(); + primaryPhase = action.new PrimaryPhase(request, listener); + primaryPhase.run(); + assertFalse("unassigned primary didn't cause a retry", listener.isDone()); + + clusterService.setState(state(index, true, ShardRoutingState.STARTED)); + logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint()); + + listener.get(); + assertTrue("request wasn't processed on primary, despite of it being assigned", request.processedOnPrimary.get()); + } + + @Test + public void testRoutingToPrimary() { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + + clusterService.setState(stateWithStartedPrimary(index, randomBoolean(), 3)); + + logger.debug("using state: \n{}", clusterService.state().prettyPrint()); + + final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); + final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + assertTrue(primaryPhase.checkBlocks()); + primaryPhase.routeRequestOrPerformLocally(shardRoutingTable.primaryShard(), shardRoutingTable.shardsIt()); + if (primaryNodeId.equals(clusterService.localNode().id())) { + logger.info("--> primary is assigned locally, testing for execution"); + assertTrue("request failed to be processed on a local primary", request.processedOnPrimary.get()); + } else { + logger.info("--> primary is assigned to [{}], checking request forwarded", primaryNodeId); + final List capturedRequests = transport.capturedRequestsByTargetNode().get(primaryNodeId); + assertThat(capturedRequests, notNullValue()); + assertThat(capturedRequests.size(), equalTo(1)); + assertThat(capturedRequests.get(0).action, equalTo("testAction")); + } + } + + @Test + public void testWriteConsistency() { + action = new ActionWithConsistency(ImmutableSettings.EMPTY, "testActionWithConsistency", transportService, clusterService, threadPool); + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + final int assignedReplicas = randomInt(2); + final int unassignedReplicas = randomInt(2); + final int totalShards = 1 + assignedReplicas + unassignedReplicas; + final boolean passesWriteConsistency; + Request request = new Request(shardId).consistencyLevel(randomFrom(WriteConsistencyLevel.values())); + switch (request.consistencyLevel()) { + case ONE: + passesWriteConsistency = true; + break; + case DEFAULT: + case QUORUM: + if (totalShards <= 2) { + passesWriteConsistency = true; // primary is enough + } else { + passesWriteConsistency = assignedReplicas + 1 >= (totalShards / 2) + 1; + } + break; + case ALL: + passesWriteConsistency = unassignedReplicas == 0; + break; + default: + throw new RuntimeException("unknown consistency level [" + request.consistencyLevel() + "]"); + } + ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas]; + for (int i = 0; i < assignedReplicas; i++) { + replicaStates[i] = randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); + } + for (int i = assignedReplicas; i < replicaStates.length; i++) { + replicaStates[i] = ShardRoutingState.UNASSIGNED; + } + + clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates)); + logger.debug("using consistency level of [{}], assigned shards [{}], total shards [{}]. expecting op to [{}]. using state: \n{}", + request.consistencyLevel(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, passesWriteConsistency ? "succeed" : "retry", + clusterService.state().prettyPrint()); + + final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); + PlainActionFuture listener = new PlainActionFuture<>(); + + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + if (passesWriteConsistency) { + assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), nullValue()); + primaryPhase.run(); + assertTrue("operations should have been perform, consistency level is met", request.processedOnPrimary.get()); + } else { + assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), notNullValue()); + primaryPhase.run(); + assertFalse("operations should not have been perform, consistency level is *NOT* met", request.processedOnPrimary.get()); + for (int i = 0; i < replicaStates.length; i++) { + replicaStates[i] = ShardRoutingState.STARTED; + } + clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates)); + assertTrue("once the consistency level met, operation should continue", request.processedOnPrimary.get()); + } + + } + + @Test + public void testReplication() throws ExecutionException, InterruptedException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + + clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5))); + + final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); + int assignedReplicas = 0; + int totalShards = 0; + for (ShardRouting shard : shardRoutingTable) { + totalShards++; + if (shard.primary() == false && shard.assignedToNode()) { + assignedReplicas++; + } + if (shard.relocating()) { + assignedReplicas++; + totalShards++; + } + } + + runReplicateTest(shardRoutingTable, assignedReplicas, totalShards); + } + + @Test + public void testReplicationWithShadowIndex() throws ExecutionException, InterruptedException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + + ClusterState state = stateWithStartedPrimary(index, true, randomInt(5)); + MetaData.Builder metaData = MetaData.builder(state.metaData()); + ImmutableSettings.Builder settings = ImmutableSettings.builder().put(metaData.get(index).settings()); + settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true); + metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings)); + clusterService.setState(ClusterState.builder(state).metaData(metaData)); + + final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); + int assignedReplicas = 0; + int totalShards = 0; + for (ShardRouting shard : shardRoutingTable) { + totalShards++; + if (shard.primary() && shard.relocating()) { + assignedReplicas++; + totalShards++; + } + } + + runReplicateTest(shardRoutingTable, assignedReplicas, totalShards); + } + + + protected void runReplicateTest(IndexShardRoutingTable shardRoutingTable, int assignedReplicas, int totalShards) throws InterruptedException, ExecutionException { + final ShardRouting primaryShard = shardRoutingTable.primaryShard(); + final ShardIterator shardIt = shardRoutingTable.shardsIt(); + final ShardId shardId = shardIt.shardId(); + final Request request = new Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + + logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint()); + + + final TransportShardReplicationOperationAction.InternalRequest internalRequest = action.new InternalRequest(request); + internalRequest.concreteIndex(shardId.index().name()); + TransportShardReplicationOperationAction.ReplicationPhase replicationPhase = + action.new ReplicationPhase(shardIt, request, + new Response(), new ClusterStateObserver(clusterService, logger), + primaryShard, internalRequest, listener); + + assertThat(replicationPhase.totalShards(), equalTo(totalShards)); + assertThat(replicationPhase.pending(), equalTo(assignedReplicas)); + replicationPhase.run(); + final CapturingTransport.CapturedRequest[] capturedRequests = transport.capturedRequests(); + transport.clear(); + assertThat(capturedRequests.length, equalTo(assignedReplicas)); + if (assignedReplicas > 0) { + assertThat("listener is done, but there are outstanding replicas", listener.isDone(), equalTo(false)); + } + int pending = replicationPhase.pending(); + int criticalFailures = 0; // failures that should fail the shard + int successfull = 1; + for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) { + if (randomBoolean()) { + Throwable t; + if (randomBoolean()) { + t = new CorruptIndexException("simulated", (String) null); + criticalFailures++; + } else { + t = new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING); + } + logger.debug("--> simulating failure on {} with [{}]", capturedRequest.node, t.getClass().getSimpleName()); + transport.handleResponse(capturedRequest.requestId, t); + } else { + successfull++; + transport.handleResponse(capturedRequest.requestId, TransportResponse.Empty.INSTANCE); + } + pending--; + assertThat(replicationPhase.pending(), equalTo(pending)); + assertThat(replicationPhase.successful(), equalTo(successfull)); + } + assertThat(listener.isDone(), equalTo(true)); + Response response = listener.get(); + final ActionWriteResponse.ShardInfo shardInfo = response.getShardInfo(); + assertThat(shardInfo.getFailed(), equalTo(criticalFailures)); + assertThat(shardInfo.getFailures(), arrayWithSize(criticalFailures)); + assertThat(shardInfo.getSuccessful(), equalTo(successfull)); + assertThat(shardInfo.getTotal(), equalTo(totalShards)); + + assertThat("failed to see enough shard failures", transport.capturedRequests().length, equalTo(criticalFailures)); + for (CapturingTransport.CapturedRequest capturedRequest : transport.capturedRequests()) { + assertThat(capturedRequest.action, equalTo(ShardStateAction.SHARD_FAILED_ACTION_NAME)); + } + } + + + static class Request extends ShardReplicationOperationRequest { + int shardId; + public AtomicBoolean processedOnPrimary = new AtomicBoolean(); + public AtomicInteger processedOnReplicas = new AtomicInteger(); + + Request() { + this.operationThreaded(false); + } + + Request(ShardId shardId) { + this(); + this.shardId = shardId.id(); + this.index(shardId.index().name()); + // keep things simple + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(shardId); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + shardId = in.readVInt(); + } + } + + static class Response extends ActionWriteResponse { + + } + + static class Action extends TransportShardReplicationOperationAction { + + Action(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool) { + super(settings, actionName, transportService, clusterService, null, threadPool, + new ShardStateAction(settings, clusterService, transportService, null, null), + new ActionFilters(new HashSet())); + } + + @Override + protected Request newRequestInstance() { + return new Request(); + } + + @Override + protected Request newReplicaRequestInstance() { + return new Request(); + } + + @Override + protected Response newResponseInstance() { + return new Response(); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { + boolean executedBefore = shardRequest.request.processedOnPrimary.getAndSet(true); + assert executedBefore == false : "request has already been executed on the primary"; + return new Tuple<>(new Response(), shardRequest.request); + } + + @Override + protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { + shardRequest.request.processedOnReplicas.incrementAndGet(); + } + + @Override + protected ShardIterator shards(ClusterState clusterState, InternalRequest request) throws ElasticsearchException { + return clusterState.getRoutingTable().index(request.concreteIndex()).shard(request.request().shardId).shardsIt(); + } + + @Override + protected boolean checkWriteConsistency() { + return false; + } + + @Override + protected boolean resolveIndex() { + return false; + } + } + + static class ActionWithConsistency extends Action { + + ActionWithConsistency(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { + super(settings, actionName, transportService, clusterService, threadPool); + } + + @Override + protected boolean checkWriteConsistency() { + return true; + } + } + + static DiscoveryNode newNode(int nodeId) { + return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT); + } + + +} diff --git a/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java b/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java new file mode 100644 index 00000000000..27f09489763 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test.cluster; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.cluster.service.PendingClusterTask; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ScheduledFuture; + +/** a class that simulate simple cluster service features, like state storage and listeners */ +public class TestClusterService implements ClusterService { + + volatile ClusterState state; + private final Collection listeners = new CopyOnWriteArrayList<>(); + private final Queue onGoingTimeouts = ConcurrentCollections.newQueue(); + private final ThreadPool threadPool; + + public TestClusterService() { + this(ClusterState.builder(new ClusterName("test")).build()); + } + + public TestClusterService(ThreadPool threadPool) { + this(ClusterState.builder(new ClusterName("test")).build(), threadPool); + } + + public TestClusterService(ClusterState state) { + this(state, null); + } + + public TestClusterService(ClusterState state, @Nullable ThreadPool threadPool) { + if (state.getNodes().size() == 0) { + state = ClusterState.builder(state).nodes( + DiscoveryNodes.builder() + .put(new DiscoveryNode("test_id", DummyTransportAddress.INSTANCE, Version.CURRENT)) + .localNodeId("test_id")).build(); + } + + assert state.getNodes().localNode() != null; + this.state = state; + this.threadPool = threadPool; + + } + + + /** set the current state and trigger any registered listeners about the change */ + public void setState(ClusterState state) { + assert state.getNodes().localNode() != null; + // make sure we have a version increment + state = ClusterState.builder(state).version(this.state.version() + 1).build(); + ClusterChangedEvent event = new ClusterChangedEvent("test", state, this.state); + this.state = state; + for (ClusterStateListener listener : listeners) { + listener.clusterChanged(event); + } + } + + /** set the current state and trigger any registered listeners about the change */ + public void setState(ClusterState.Builder state) { + setState(state.build()); + } + + @Override + public DiscoveryNode localNode() { + return state.getNodes().localNode(); + } + + @Override + public ClusterState state() { + return state; + } + + @Override + public void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException { + throw new UnsupportedOperationException(); + + } + + @Override + public void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException { + throw new UnsupportedOperationException(); + + } + + @Override + public OperationRouting operationRouting() { + return null; + } + + @Override + public void addFirst(ClusterStateListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public void addLast(ClusterStateListener listener) { + listeners.add(listener); + } + + @Override + public void add(ClusterStateListener listener) { + listeners.add(listener); + } + + @Override + public void remove(ClusterStateListener listener) { + listeners.remove(listener); + for (Iterator it = onGoingTimeouts.iterator(); it.hasNext(); ) { + NotifyTimeout timeout = it.next(); + if (timeout.listener.equals(listener)) { + timeout.cancel(); + it.remove(); + } + } + } + + @Override + public void add(LocalNodeMasterListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public void remove(LocalNodeMasterListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public void add(final TimeValue timeout, final TimeoutClusterStateListener listener) { + if (threadPool == null) { + throw new UnsupportedOperationException("TestClusterService wasn't initialized with a thread pool"); + } + NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout); + notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout); + onGoingTimeouts.add(notifyTimeout); + listeners.add(listener); + listener.postAdded(); + } + + @Override + public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) { + throw new UnsupportedOperationException(); + } + + @Override + public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) { + throw new UnsupportedOperationException(); + } + + @Override + public List pendingTasks() { + throw new UnsupportedOperationException(); + + } + + @Override + public int numberOfPendingTasks() { + throw new UnsupportedOperationException(); + } + + @Override + public Lifecycle.State lifecycleState() { + throw new UnsupportedOperationException(); + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeLifecycleListener(LifecycleListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterService start() throws ElasticsearchException { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterService stop() throws ElasticsearchException { + throw new UnsupportedOperationException(); + } + + @Override + public void close() throws ElasticsearchException { + throw new UnsupportedOperationException(); + } + + class NotifyTimeout implements Runnable { + final TimeoutClusterStateListener listener; + final TimeValue timeout; + volatile ScheduledFuture future; + + NotifyTimeout(TimeoutClusterStateListener listener, TimeValue timeout) { + this.listener = listener; + this.timeout = timeout; + } + + public void cancel() { + FutureUtils.cancel(future); + } + + @Override + public void run() { + if (future != null && future.isCancelled()) { + return; + } + listener.onTimeout(this.timeout); + // note, we rely on the listener to remove itself in case of timeout if needed + } + } +} diff --git a/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java b/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java new file mode 100644 index 00000000000..ca4c7950345 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test.transport; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.transport.*; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BlockingQueue; + +/** A transport class that doesn't send anything but rather captures all requests for inspection from tests */ +public class CapturingTransport implements Transport { + private TransportServiceAdapter adapter; + + static public class CapturedRequest { + final public DiscoveryNode node; + final public long requestId; + final public String action; + final public TransportRequest request; + + public CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) { + this.node = node; + this.requestId = requestId; + this.action = action; + this.request = request; + } + } + + private BlockingQueue capturedRequests = ConcurrentCollections.newBlockingQueue(); + + /** returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */ + public CapturedRequest[] capturedRequests() { + return capturedRequests.toArray(new CapturedRequest[0]); + } + + /** + * returns all requests captured so far, grouped by target node. + * Doesn't clear the captured request list. See {@link #clear()} + */ + public Map> capturedRequestsByTargetNode() { + Map> map = new HashMap<>(); + for (CapturedRequest request : capturedRequests) { + List nodeList = map.get(request.node.id()); + if (nodeList == null) { + nodeList = new ArrayList<>(); + map.put(request.node.id(), nodeList); + } + nodeList.add(request); + } + return map; + } + + /** clears captured requests */ + public void clear() { + capturedRequests.clear(); + } + + /** simulate a response for the given requestId */ + public void handleResponse(final long requestId, final TransportResponse response) { + adapter.onResponseReceived(requestId).handleResponse(response); + } + + /** simulate a remote error for the given requesTId */ + public void handleResponse(final long requestId, final Throwable t) { + adapter.onResponseReceived(requestId).handleException(new RemoteTransportException("remote failure", t)); + } + + + @Override + public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { + capturedRequests.add(new CapturedRequest(node, requestId, action, request)); + } + + + @Override + public void transportServiceAdapter(TransportServiceAdapter adapter) { + this.adapter = adapter; + } + + @Override + public BoundTransportAddress boundAddress() { + return null; + } + + @Override + public Map profileBoundAddresses() { + return null; + } + + @Override + public TransportAddress[] addressesFromString(String address) throws Exception { + return new TransportAddress[0]; + } + + @Override + public boolean addressSupported(Class address) { + return false; + } + + @Override + public boolean nodeConnected(DiscoveryNode node) { + return true; + } + + @Override + public void connectToNode(DiscoveryNode node) throws ConnectTransportException { + + } + + @Override + public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException { + + } + + @Override + public void disconnectFromNode(DiscoveryNode node) { + + } + + @Override + public long serverOpen() { + return 0; + } + + @Override + public Lifecycle.State lifecycleState() { + return null; + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + + } + + @Override + public void removeLifecycleListener(LifecycleListener listener) { + + } + + @Override + public Transport start() throws ElasticsearchException { + return null; + } + + @Override + public Transport stop() throws ElasticsearchException { + return null; + } + + @Override + public void close() throws ElasticsearchException { + + } +} From c25ca1ebfb6e2b408ca0ed352d73dc6b6fc30727 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 24 Apr 2015 11:43:23 +0200 Subject: [PATCH 105/236] [CORE] Simplify IndexStore and friends Today we have a lot of bloat in the IndexStore and related classes. THe IndexStore interface is unneeded as we always subclass AbstractIndexStore and it hides circular dependencies that are problematic when added. Guice proxies them if you have an interface which is bad in general. This commit removes most of the bloat classes and unifies all the classes we have into a single one since they all just structural and don't encode any functionality. --- .../org/elasticsearch/index/IndexService.java | 9 +- .../settings/IndexDynamicSettingsModule.java | 6 +- .../store/{fs => }/FsDirectoryService.java | 52 +++++++- .../elasticsearch/index/store/IndexStore.java | 89 +++++++++++-- .../index/store/IndexStoreModule.java | 50 +++---- .../store/fs/DefaultFsDirectoryService.java | 63 --------- .../index/store/fs/DefaultFsIndexStore.java | 46 ------- .../store/fs/DefaultFsIndexStoreModule.java | 34 ----- .../store/fs/MmapFsDirectoryService.java | 49 ------- .../index/store/fs/MmapFsIndexStore.java | 46 ------- .../store/fs/MmapFsIndexStoreModule.java | 34 ----- .../index/store/fs/NioFsDirectoryService.java | 49 ------- .../index/store/fs/NioFsIndexStore.java | 46 ------- .../index/store/fs/NioFsIndexStoreModule.java | 34 ----- .../store/fs/SimpleFsDirectoryService.java | 49 ------- .../index/store/fs/SimpleFsIndexStore.java | 46 ------- .../store/fs/SimpleFsIndexStoreModule.java | 34 ----- .../store/support/AbstractIndexStore.java | 126 ------------------ .../indices/store/IndicesStore.java | 6 +- .../TransportNodesListShardStoreMetaData.java | 3 +- .../transport/TransportClientTests.java | 2 +- .../index/store/IndexStoreTests.java | 86 ++++++++++++ .../indices/settings/UpdateSettingsTests.java | 13 +- .../indices/stats/IndexStatsTests.java | 6 +- .../DedicatedClusterSnapshotRestoreTests.java | 10 +- .../SharedClusterSnapshotRestoreTests.java | 9 +- .../test/InternalTestCluster.java | 3 +- .../test/store/MockDirectoryHelper.java | 31 ++--- .../test/store/MockFSDirectoryService.java | 6 +- .../test/store/MockFSIndexStore.java | 11 +- 30 files changed, 273 insertions(+), 775 deletions(-) rename src/main/java/org/elasticsearch/index/store/{fs => }/FsDirectoryService.java (52%) delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/DefaultFsDirectoryService.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStore.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStoreModule.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java delete mode 100644 src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java delete mode 100644 src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java create mode 100644 src/test/java/org/elasticsearch/index/store/IndexStoreTests.java diff --git a/src/main/java/org/elasticsearch/index/IndexService.java b/src/main/java/org/elasticsearch/index/IndexService.java index 0ce365c2036..878d8162437 100644 --- a/src/main/java/org/elasticsearch/index/IndexService.java +++ b/src/main/java/org/elasticsearch/index/IndexService.java @@ -115,8 +115,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone private final BitsetFilterCache bitsetFilterCache; - private final IndexStore indexStore; - private final IndexSettingsService settingsService; private final NodeEnvironment nodeEnv; @@ -131,7 +129,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone public IndexService(Injector injector, Index index, @IndexSettings Settings indexSettings, NodeEnvironment nodeEnv, AnalysisService analysisService, MapperService mapperService, IndexQueryParserService queryParserService, SimilarityService similarityService, IndexAliasesService aliasesService, IndexCache indexCache, - IndexStore indexStore, IndexSettingsService settingsService, + IndexSettingsService settingsService, IndexFieldDataService indexFieldData, BitsetFilterCache bitSetFilterCache, IndicesService indicesServices) { super(index, indexSettings); this.injector = injector; @@ -143,7 +141,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone this.aliasesService = aliasesService; this.indexCache = indexCache; this.indexFieldData = indexFieldData; - this.indexStore = indexStore; this.settingsService = settingsService; this.bitsetFilterCache = bitSetFilterCache; @@ -214,10 +211,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone return this.settingsService; } - public IndexStore store() { - return indexStore; - } - public IndexCache cache() { return indexCache; } diff --git a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java index 91f8ac4f3bf..3fa975a31ed 100644 --- a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java +++ b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java @@ -36,7 +36,7 @@ import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.search.slowlog.ShardSlowLogSearchService; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.indices.IndicesWarmer; @@ -51,8 +51,8 @@ public class IndexDynamicSettingsModule extends AbstractModule { public IndexDynamicSettingsModule() { indexDynamicSettings = new DynamicSettings(); - indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); - indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE); + indexDynamicSettings.addDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); + indexDynamicSettings.addDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE); indexDynamicSettings.addDynamicSetting(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT); indexDynamicSettings.addDynamicSetting(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT); indexDynamicSettings.addDynamicSetting(ConcurrentMergeSchedulerProvider.AUTO_THROTTLE); diff --git a/src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java similarity index 52% rename from src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java rename to src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index dab9346413e..f67bc340125 100644 --- a/src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java +++ b/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -17,13 +17,18 @@ * under the License. */ -package org.elasticsearch.index.store.fs; +package org.elasticsearch.index.store; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; +import java.util.Set; +import com.google.common.collect.Sets; import org.apache.lucene.store.*; +import org.apache.lucene.util.Constants; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; @@ -31,19 +36,21 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.index.store.StoreException; /** */ -public abstract class FsDirectoryService extends DirectoryService implements StoreRateLimiting.Listener, StoreRateLimiting.Provider { +public class FsDirectoryService extends DirectoryService implements StoreRateLimiting.Listener, StoreRateLimiting.Provider { protected final IndexStore indexStore; private final CounterMetric rateLimitingTimeInNanos = new CounterMetric(); private final ShardPath path; - public FsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath path) { - super(shardId, indexSettings); + @Inject + public FsDirectoryService(@IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath path) { + super(path.getShardId(), indexSettings); this.path = path; this.indexStore = indexStore; } @@ -79,10 +86,45 @@ public abstract class FsDirectoryService extends DirectoryService implements Sto return new RateLimitedFSDirectory(wrapped, this, this) ; } - protected abstract Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException; @Override public void onPause(long nanos) { rateLimitingTimeInNanos.inc(nanos); } + + /* + * We are mmapping docvalues as well as term dictionaries, all other files are served through NIOFS + * this provides good random access performance while not creating unnecessary mmaps for files like stored + * fields etc. + */ + private static final Set PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("dvd", "tim")); + + + protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { + final String storeType = indexSettings.get(IndexStoreModule.STORE_TYPE, IndexStoreModule.Type.DEFAULT.name()); + if (IndexStoreModule.Type.FS.match(storeType) || IndexStoreModule.Type.DEFAULT.match(storeType)) { + final FSDirectory open = FSDirectory.open(location, lockFactory); // use lucene defaults + if (open instanceof MMapDirectory && Constants.WINDOWS == false) { + return newDefaultDir(location, (MMapDirectory) open, lockFactory); + } + return open; + } else if (IndexStoreModule.Type.SIMPLEFS.match(storeType)) { + return new SimpleFSDirectory(location, lockFactory); + } else if (IndexStoreModule.Type.NIOFS.match(storeType)) { + return new NIOFSDirectory(location, lockFactory); + } else if (IndexStoreModule.Type.MMAPFS.match(storeType)) { + return new MMapDirectory(location, lockFactory); + } + throw new IllegalArgumentException("No directory found for type [" + storeType + "]"); + } + + private Directory newDefaultDir(Path location, final MMapDirectory mmapDir, LockFactory lockFactory) throws IOException { + return new FileSwitchDirectory(PRIMARY_EXTENSIONS, mmapDir, new NIOFSDirectory(location, lockFactory), true) { + @Override + public String[] listAll() throws IOException { + // Avoid doing listAll twice: + return mmapDir.listAll(); + } + }; + } } diff --git a/src/main/java/org/elasticsearch/index/store/IndexStore.java b/src/main/java/org/elasticsearch/index/store/IndexStore.java index 161b915e508..0cae510f87b 100644 --- a/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ b/src/main/java/org/elasticsearch/index/store/IndexStore.java @@ -20,28 +20,101 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.StoreRateLimiting; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.settings.IndexSettingsService; +import org.elasticsearch.indices.store.IndicesStore; import java.io.Closeable; -import java.io.IOException; -import java.nio.file.Path; /** - * Index store is an index level information of the {@link Store} each shard will use. + * */ -public interface IndexStore extends Closeable { +public class IndexStore extends AbstractIndexComponent implements Closeable { + + public static final String INDEX_STORE_THROTTLE_TYPE = "index.store.throttle.type"; + public static final String INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC = "index.store.throttle.max_bytes_per_sec"; + + private final IndexSettingsService settingsService; + + class ApplySettings implements IndexSettingsService.Listener { + @Override + public void onRefreshSettings(Settings settings) { + String rateLimitingType = settings.get(INDEX_STORE_THROTTLE_TYPE, IndexStore.this.rateLimitingType); + if (!rateLimitingType.equals(IndexStore.this.rateLimitingType)) { + logger.info("updating index.store.throttle.type from [{}] to [{}]", IndexStore.this.rateLimitingType, rateLimitingType); + if (rateLimitingType.equalsIgnoreCase("node")) { + IndexStore.this.rateLimitingType = rateLimitingType; + IndexStore.this.nodeRateLimiting = true; + } else { + StoreRateLimiting.Type.fromString(rateLimitingType); + IndexStore.this.rateLimitingType = rateLimitingType; + IndexStore.this.nodeRateLimiting = false; + IndexStore.this.rateLimiting.setType(rateLimitingType); + } + } + + ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, IndexStore.this.rateLimitingThrottle); + if (!rateLimitingThrottle.equals(IndexStore.this.rateLimitingThrottle)) { + logger.info("updating index.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", IndexStore.this.rateLimitingThrottle, rateLimitingThrottle, IndexStore.this.rateLimitingType); + IndexStore.this.rateLimitingThrottle = rateLimitingThrottle; + IndexStore.this.rateLimiting.setMaxRate(rateLimitingThrottle); + } + } + } + protected final IndicesStore indicesStore; + + private volatile String rateLimitingType; + private volatile ByteSizeValue rateLimitingThrottle; + private volatile boolean nodeRateLimiting; + + private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); + + private final ApplySettings applySettings = new ApplySettings(); + + @Inject + public IndexStore(Index index, @IndexSettings Settings indexSettings, IndexSettingsService settingsService, IndicesStore indicesStore) { + super(index, indexSettings); + this.indicesStore = indicesStore; + + this.rateLimitingType = indexSettings.get(INDEX_STORE_THROTTLE_TYPE, "none"); + if (rateLimitingType.equalsIgnoreCase("node")) { + nodeRateLimiting = true; + } else { + nodeRateLimiting = false; + rateLimiting.setType(rateLimitingType); + } + this.rateLimitingThrottle = indexSettings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(0)); + rateLimiting.setMaxRate(rateLimitingThrottle); + + logger.debug("using index.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle); + this.settingsService = settingsService; + this.settingsService.addListener(applySettings); + } + + @Override + public void close() throws ElasticsearchException { + settingsService.removeListener(applySettings); + } /** * Returns the rate limiting, either of the index is explicitly configured, or * the node level one (defaults to the node level one). */ - StoreRateLimiting rateLimiting(); + public StoreRateLimiting rateLimiting() { + return nodeRateLimiting ? indicesStore.rateLimiting() : this.rateLimiting; + } /** * The shard store class that should be used for each shard. */ - Class shardDirectory(); - + public Class shardDirectory() { + return FsDirectoryService.class; + } } diff --git a/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java index a4aa4d694a2..c40301d560b 100644 --- a/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java +++ b/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java @@ -20,23 +20,16 @@ package org.elasticsearch.index.store; import com.google.common.collect.ImmutableList; -import org.apache.lucene.store.MMapDirectory; -import org.apache.lucene.util.Constants; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.Modules; -import org.elasticsearch.common.inject.SpawnModules; +import org.elasticsearch.common.inject.*; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.store.fs.DefaultFsIndexStoreModule; -import org.elasticsearch.index.store.fs.MmapFsIndexStoreModule; -import org.elasticsearch.index.store.fs.NioFsIndexStoreModule; -import org.elasticsearch.index.store.fs.SimpleFsIndexStoreModule; /** * */ public class IndexStoreModule extends AbstractModule implements SpawnModules { + public static final String STORE_TYPE = "index.store.type"; + private final Settings settings; public static enum Type { @@ -75,36 +68,23 @@ public class IndexStoreModule extends AbstractModule implements SpawnModules { @Override public Iterable spawnModules() { - Class indexStoreModule = NioFsIndexStoreModule.class; - if ((Constants.WINDOWS || Constants.SUN_OS || Constants.LINUX) - && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { - if (Constants.WINDOWS) { - indexStoreModule = MmapFsIndexStoreModule.class; - } else { - // on linux and friends we only mmap dedicated files - indexStoreModule = DefaultFsIndexStoreModule.class; + final String storeType = settings.get(STORE_TYPE, Type.DEFAULT.name()); + for (Type type : Type.values()) { + if (type.match(storeType)) { + return ImmutableList.of(new DefaultStoreModule()); } - } else if (Constants.WINDOWS) { - indexStoreModule = SimpleFsIndexStoreModule.class; - } - String storeType = settings.get("index.store.type"); - if (Type.FS.match(storeType)) { - // nothing to set here ... (we default to fs) - } else if (Type.SIMPLEFS.match(storeType)) { - indexStoreModule = SimpleFsIndexStoreModule.class; - } else if (Type.NIOFS.match(storeType)) { - indexStoreModule = NioFsIndexStoreModule.class; - } else if (Type.MMAPFS.match(storeType)) { - indexStoreModule = MmapFsIndexStoreModule.class; - } else if (Type.DEFAULT.match(storeType)) { - indexStoreModule = DefaultFsIndexStoreModule.class; - } else if (storeType != null) { - indexStoreModule = settings.getAsClass("index.store.type", indexStoreModule, "org.elasticsearch.index.store.", "IndexStoreModule"); } + final Class indexStoreModule = settings.getAsClass(STORE_TYPE, null, "org.elasticsearch.index.store.", "IndexStoreModule"); return ImmutableList.of(Modules.createModule(indexStoreModule, settings)); } @Override - protected void configure() { + protected void configure() {} + + private static class DefaultStoreModule extends AbstractModule { + @Override + protected void configure() { + bind(IndexStore.class).asEagerSingleton(); + } } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/DefaultFsDirectoryService.java deleted file mode 100644 index ee1ed85f1e7..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsDirectoryService.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import com.google.common.collect.Sets; -import org.apache.lucene.store.*; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.Collections; -import java.util.Set; - -/** - */ -public class DefaultFsDirectoryService extends FsDirectoryService { - /* - * We are mmapping docvalues as well as term dictionaries, all other files are served through NIOFS - * this provides good random access performance while not creating unnecessary mmaps for files like stored - * fields etc. - */ - private static final Set PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("dvd", "tim")); - - @Inject - public DefaultFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath shardPath) { - super(shardId, indexSettings, indexStore, shardPath); - } - - @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - final MMapDirectory mmapDir = new MMapDirectory(location, lockFactory); - return new FileSwitchDirectory(PRIMARY_EXTENSIONS, mmapDir, new NIOFSDirectory(location, lockFactory), true) { - @Override - public String[] listAll() throws IOException { - // Avoid doing listAll twice: - return mmapDir.listAll(); - } - }; - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStore.java deleted file mode 100644 index ccd8369a58e..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStore.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -/** - * - */ -public final class DefaultFsIndexStore extends AbstractIndexStore { - - @Inject - public DefaultFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); - } - - @Override - public Class shardDirectory() { - return DefaultFsDirectoryService.class; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStoreModule.java deleted file mode 100644 index 895ba668a0e..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStoreModule.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.index.store.IndexStore; - -/** - * - */ -public class DefaultFsIndexStoreModule extends AbstractModule { - - @Override - protected void configure() { - bind(IndexStore.class).to(DefaultFsIndexStore.class).asEagerSingleton(); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java deleted file mode 100644 index 9f9102eba4d..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.MMapDirectory; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; - -/** - */ -public class MmapFsDirectoryService extends FsDirectoryService { - - @Inject - public MmapFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath shardPath) { - super(shardId, indexSettings, indexStore, shardPath); - } - - @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - return new MMapDirectory(location, buildLockFactory()); - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java deleted file mode 100644 index 444cd67b713..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -/** - * - */ -public final class MmapFsIndexStore extends AbstractIndexStore { - - @Inject - public MmapFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); - } - - @Override - public Class shardDirectory() { - return MmapFsDirectoryService.class; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java deleted file mode 100644 index 7f655908355..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.index.store.IndexStore; - -/** - * - */ -public class MmapFsIndexStoreModule extends AbstractModule { - - @Override - protected void configure() { - bind(IndexStore.class).to(MmapFsIndexStore.class).asEagerSingleton(); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java deleted file mode 100644 index b2d99fa3b82..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.NIOFSDirectory; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; - -/** - */ -public class NioFsDirectoryService extends FsDirectoryService { - - @Inject - public NioFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath shardPath) { - super(shardId, indexSettings, indexStore, shardPath); - } - - @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - return new NIOFSDirectory(location, lockFactory); - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java deleted file mode 100644 index 7f5cece99b6..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -/** - * - */ -public final class NioFsIndexStore extends AbstractIndexStore { - - @Inject - public NioFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); - } - - @Override - public Class shardDirectory() { - return NioFsDirectoryService.class; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java deleted file mode 100644 index 9db1cbdec6b..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.index.store.IndexStore; - -/** - * - */ -public class NioFsIndexStoreModule extends AbstractModule { - - @Override - protected void configure() { - bind(IndexStore.class).to(NioFsIndexStore.class).asEagerSingleton(); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java deleted file mode 100644 index 051b278cd11..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.SimpleFSDirectory; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; - -/** - */ -public class SimpleFsDirectoryService extends FsDirectoryService { - - @Inject - public SimpleFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath shardPath) { - super(shardId, indexSettings, indexStore, shardPath); - } - - @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - return new SimpleFSDirectory(location, lockFactory); - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java deleted file mode 100644 index 1a9f40b9779..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -/** - * - */ -public final class SimpleFsIndexStore extends AbstractIndexStore { - - @Inject - public SimpleFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); - } - - @Override - public Class shardDirectory() { - return SimpleFsDirectoryService.class; - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java deleted file mode 100644 index c35997aaf0d..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.index.store.IndexStore; - -/** - * - */ -public class SimpleFsIndexStoreModule extends AbstractModule { - - @Override - protected void configure() { - bind(IndexStore.class).to(SimpleFsIndexStore.class).asEagerSingleton(); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java b/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java deleted file mode 100644 index f4e63c55ebf..00000000000 --- a/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.support; - -import org.apache.lucene.store.StoreRateLimiting; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.AbstractIndexComponent; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.settings.IndexSettingsService; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -import java.io.IOException; -import java.nio.file.Path; - -/** - * - */ -public abstract class AbstractIndexStore extends AbstractIndexComponent implements IndexStore { - - public static final String INDEX_STORE_THROTTLE_TYPE = "index.store.throttle.type"; - public static final String INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC = "index.store.throttle.max_bytes_per_sec"; - - class ApplySettings implements IndexSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String rateLimitingType = settings.get(INDEX_STORE_THROTTLE_TYPE, AbstractIndexStore.this.rateLimitingType); - if (!rateLimitingType.equals(AbstractIndexStore.this.rateLimitingType)) { - logger.info("updating index.store.throttle.type from [{}] to [{}]", AbstractIndexStore.this.rateLimitingType, rateLimitingType); - if (rateLimitingType.equalsIgnoreCase("node")) { - AbstractIndexStore.this.rateLimitingType = rateLimitingType; - AbstractIndexStore.this.nodeRateLimiting = true; - } else { - StoreRateLimiting.Type.fromString(rateLimitingType); - AbstractIndexStore.this.rateLimitingType = rateLimitingType; - AbstractIndexStore.this.nodeRateLimiting = false; - AbstractIndexStore.this.rateLimiting.setType(rateLimitingType); - } - } - - ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, AbstractIndexStore.this.rateLimitingThrottle); - if (!rateLimitingThrottle.equals(AbstractIndexStore.this.rateLimitingThrottle)) { - logger.info("updating index.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", AbstractIndexStore.this.rateLimitingThrottle, rateLimitingThrottle, AbstractIndexStore.this.rateLimitingType); - AbstractIndexStore.this.rateLimitingThrottle = rateLimitingThrottle; - AbstractIndexStore.this.rateLimiting.setMaxRate(rateLimitingThrottle); - } - } - } - private final NodeEnvironment nodeEnv; - - private final Path[] locations; - - protected final IndexService indexService; - - protected final IndicesStore indicesStore; - - private volatile String rateLimitingType; - private volatile ByteSizeValue rateLimitingThrottle; - private volatile boolean nodeRateLimiting; - - private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); - - private final ApplySettings applySettings = new ApplySettings(); - - protected AbstractIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings); - this.indexService = indexService; - this.indicesStore = indicesStore; - - this.rateLimitingType = indexSettings.get(INDEX_STORE_THROTTLE_TYPE, "none"); - if (rateLimitingType.equalsIgnoreCase("node")) { - nodeRateLimiting = true; - } else { - nodeRateLimiting = false; - rateLimiting.setType(rateLimitingType); - } - this.rateLimitingThrottle = indexSettings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(0)); - rateLimiting.setMaxRate(rateLimitingThrottle); - - logger.debug("using index.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle); - - indexService.settingsService().addListener(applySettings); - this.nodeEnv = nodeEnv; - if (nodeEnv.hasNodeFile()) { - this.locations = nodeEnv.indexPaths(index); - } else { - this.locations = null; - } - - } - - @Override - public void close() throws ElasticsearchException { - indexService.settingsService().removeListener(applySettings); - } - - @Override - public StoreRateLimiting rateLimiting() { - return nodeRateLimiting ? indicesStore.rateLimiting() : this.rateLimiting; - } -} diff --git a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 643f91172b2..4737271c203 100644 --- a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -89,8 +89,6 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe } } - private final NodeEnvironment nodeEnv; - private final NodeSettingsService nodeSettingsService; private final IndicesService indicesService; @@ -107,10 +105,9 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe private TimeValue deleteShardTimeout; @Inject - public IndicesStore(Settings settings, NodeEnvironment nodeEnv, NodeSettingsService nodeSettingsService, IndicesService indicesService, + public IndicesStore(Settings settings, NodeSettingsService nodeSettingsService, IndicesService indicesService, ClusterService clusterService, TransportService transportService) { super(settings); - this.nodeEnv = nodeEnv; this.nodeSettingsService = nodeSettingsService; this.indicesService = indicesService; this.clusterService = clusterService; @@ -133,7 +130,6 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe IndicesStore() { super(ImmutableSettings.EMPTY); - nodeEnv = null; nodeSettingsService = null; indicesService = null; this.clusterService = null; diff --git a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index 1f0bf84a2f4..26223aac2ea 100644 --- a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -43,6 +43,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.indices.IndicesService; @@ -168,7 +169,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio if (metaData == null) { return new StoreFilesMetaData(false, shardId, ImmutableMap.of()); } - String storeType = metaData.settings().get("index.store.type", "fs"); + String storeType = metaData.settings().get(IndexStoreModule.STORE_TYPE, "fs"); if (!storeType.contains("fs")) { return new StoreFilesMetaData(false, shardId, ImmutableMap.of()); } diff --git a/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java index dcf35d2dff6..8e347935241 100644 --- a/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java +++ b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.node.Node; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; @@ -57,7 +58,6 @@ public class TransportClientTests extends ElasticsearchIntegrationTest { .put("path.home", createTempDir()) .put("node.name", "testNodeVersionIsUpdated") .put("http.enabled", false) - .put("index.store.type", "ram") .put("config.ignore_system_properties", true) // make sure we get what we set :) .build()).clusterName("foobar").build(); node.start(); diff --git a/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java new file mode 100644 index 00000000000..a063edf7a8f --- /dev/null +++ b/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.store; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.lucene.store.*; +import org.apache.lucene.util.Constants; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.io.IOException; +import java.nio.file.Path; + +/** + */ +public class IndexStoreTests extends ElasticsearchTestCase { + + public void testStoreDirectory() throws IOException { + final Path tempDir = createTempDir(); + final IndexStoreModule.Type[] values = IndexStoreModule.Type.values(); + final IndexStoreModule.Type type = RandomPicks.randomFrom(random(), values); + Settings settings = ImmutableSettings.settingsBuilder().put(IndexStoreModule.STORE_TYPE, type.name()).build(); + FsDirectoryService service = new FsDirectoryService(settings, null, new ShardPath(tempDir, tempDir, "foo", new ShardId("foo", 0))); + try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { + switch (type) { + case NIOFS: + assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory); + break; + case MMAPFS: + assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory); + break; + case SIMPLEFS: + assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory); + break; + case FS: + case DEFAULT: + if (Constants.WINDOWS) { + if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { + assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory); + } else { + assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory); + } + } else if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { + assertTrue(type + " " + directory.toString(), directory instanceof FileSwitchDirectory); + } else { + assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory); + } + break; + } + } + } + + public void testStoreDirectoryDefault() throws IOException { + final Path tempDir = createTempDir(); + Settings settings = ImmutableSettings.EMPTY; + FsDirectoryService service = new FsDirectoryService(settings, null, new ShardPath(tempDir, tempDir, "foo", new ShardId("foo", 0))); + try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { + if (Constants.WINDOWS) { + assertTrue(directory.toString(), directory instanceof MMapDirectory || directory instanceof SimpleFSDirectory); + } else { + assertTrue(directory.toString(), directory instanceof FileSwitchDirectory); + } + } + } + + +} diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java index 73d08959622..0d00ea5291e 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java @@ -23,7 +23,6 @@ import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; -import org.apache.lucene.util.LuceneTestCase.Slow; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -37,8 +36,8 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.store.support.AbstractIndexStore; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -137,7 +136,7 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { // No throttling at first, only 1 non-replicated shard, force lots of merging: assertAcked(prepareCreate("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "none") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "none") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2") @@ -175,13 +174,13 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { .indices() .prepareUpdateSettings("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, "1mb")) + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") + .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, "1mb")) .get(); // Make sure setting says it is in fact changed: GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get(); - assertThat(getSettingsResponse.getSetting("test", AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE), equalTo("merge")); + assertThat(getSettingsResponse.getSetting("test", IndexStore.INDEX_STORE_THROTTLE_TYPE), equalTo("merge")); // Also make sure we see throttling kicking in: boolean done = false; @@ -215,7 +214,7 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { .indices() .prepareUpdateSettings("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "none")) + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "none")) .get(); // Optimize does a waitForMerges, which we must do to make sure all in-flight (throttled) merges finish: diff --git a/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java b/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java index df7ba00d390..4ba7f711429 100644 --- a/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java +++ b/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java @@ -44,7 +44,7 @@ import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.query.FilterBuilders; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; @@ -308,7 +308,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { public void nonThrottleStats() throws Exception { assertAcked(prepareCreate("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2") @@ -341,7 +341,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { public void throttleStats() throws Exception { assertAcked(prepareCreate("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2") diff --git a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java index 6cbff427afb..ff8264fdc03 100644 --- a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java @@ -21,11 +21,9 @@ package org.elasticsearch.snapshots; import com.carrotsearch.hppc.IntOpenHashSet; import com.carrotsearch.hppc.IntSet; -import com.carrotsearch.randomizedtesting.LifecycleScope; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ListenableFuture; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; @@ -52,7 +50,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.zen.elect.ElectMasterService; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.snapshots.mockstore.MockRepositoryModule; @@ -699,7 +697,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests logger.info("--> update index settings to back to normal"); assertAcked(client().admin().indices().prepareUpdateSettings("test-*").setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "node") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "node") )); // Make sure that snapshot finished - doesn't matter if it failed or succeeded @@ -745,8 +743,8 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests } assertAcked(client().admin().indices().prepareUpdateSettings(name).setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "all") - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, between(100, 50000)) + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "all") + .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, between(100, 50000)) )); } diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index 947527376b4..be3c44e9cc6 100644 --- a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -52,11 +52,10 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.snapshots.mockstore.MockRepositoryModule; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.nio.channels.SeekableByteChannel; @@ -1486,8 +1485,8 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { // Update settings to make sure that relocation is slow so we can start snapshot before relocation is finished assertAcked(client.admin().indices().prepareUpdateSettings("test-idx").setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "all") - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, 100) + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "all") + .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, 100) )); logger.info("--> start relocations"); @@ -1502,7 +1501,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { // Update settings to back to normal assertAcked(client.admin().indices().prepareUpdateSettings("test-idx").setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "node") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "node") )); logger.info("--> wait for snapshot to complete"); diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index b685448881b..660d228a43e 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -83,6 +83,7 @@ import org.elasticsearch.index.cache.filter.none.NoneFilterCache; import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.shard.IndexShardModule; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; @@ -383,7 +384,7 @@ public final class InternalTestCluster extends TestCluster { .put("cluster.routing.schedule", (30 + random.nextInt(50)) + "ms") .put(SETTING_CLUSTER_NODE_SEED, seed); if (ENABLE_MOCK_MODULES && usually(random)) { - builder.put("index.store.type", MockFSIndexStoreModule.class.getName()); // no RAM dir for now! + builder.put(IndexStoreModule.STORE_TYPE, MockFSIndexStoreModule.class.getName()); // no RAM dir for now! builder.put(IndexShardModule.ENGINE_FACTORY, MockEngineFactory.class); builder.put(PageCacheRecyclerModule.CACHE_IMPL, MockPageCacheRecyclerModule.class.getName()); builder.put(BigArraysModule.IMPL, MockBigArraysModule.class.getName()); diff --git a/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java b/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java index 06b844b950a..90e06d6a5df 100644 --- a/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java +++ b/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java @@ -19,22 +19,21 @@ package org.elasticsearch.test.store; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; -import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.MockDirectoryWrapper.Throttling; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.NRTCachingDirectory; -import org.apache.lucene.util.Constants; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.store.fs.*; +import org.elasticsearch.index.store.IndexStoreModule; import com.carrotsearch.randomizedtesting.SeedUtils; import java.io.IOException; @@ -98,24 +97,10 @@ public class MockDirectoryHelper { } public FsDirectoryService randomDirectorService(IndexStore indexStore, ShardPath path) { - if ((Constants.WINDOWS || Constants.SUN_OS) && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { - return new MmapFsDirectoryService(shardId, indexSettings, indexStore, path); - } else if (Constants.WINDOWS) { - return new SimpleFsDirectoryService(shardId, indexSettings, indexStore, path); - } - switch (random.nextInt(4)) { - case 2: - return new DefaultFsDirectoryService(shardId, indexSettings, indexStore, path); - case 1: - return new MmapFsDirectoryService(shardId, indexSettings, indexStore, path); - case 0: - if (random.nextInt(10) == 0) { - // use simplefs less, it synchronizes all threads reads - return new SimpleFsDirectoryService(shardId, indexSettings, indexStore, path); - } - default: - return new NioFsDirectoryService(shardId, indexSettings, indexStore, path); - } + ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder(); + builder.put(indexSettings); + builder.put(IndexStoreModule.STORE_TYPE, RandomPicks.randomFrom(random, IndexStoreModule.Type.values())); + return new FsDirectoryService(builder.build(), indexStore, path); } public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper { diff --git a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java index 23825b3b3ae..c10c2863db1 100644 --- a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -37,7 +37,7 @@ import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.*; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.store.fs.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchTestCase; @@ -62,8 +62,8 @@ public class MockFSDirectoryService extends FsDirectoryService { private final boolean checkIndexOnClose; @Inject - public MockFSDirectoryService(final ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, final IndicesService service, final ShardPath path) { - super(shardId, indexSettings, indexStore, path); + public MockFSDirectoryService(@IndexSettings Settings indexSettings, IndexStore indexStore, final IndicesService service, final ShardPath path) { + super(indexSettings, indexStore, path); final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l); Random random = new Random(seed); helper = new MockDirectoryHelper(shardId, indexSettings, logger, random, seed); diff --git a/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java b/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java index 27adcc09f46..ade21f3182b 100644 --- a/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -25,16 +25,17 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.store.IndicesStore; -public class MockFSIndexStore extends AbstractIndexStore { +public class MockFSIndexStore extends IndexStore { @Inject - public MockFSIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, - IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); + public MockFSIndexStore(Index index, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, + IndicesStore indicesStore) { + super(index, indexSettings, indexSettingsService, indicesStore); } @Override From cb615ffecfbc7849bcd83386d5f7cb7bd57ea4ba Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 24 Apr 2015 13:18:35 +0200 Subject: [PATCH 106/236] [TEST] Don't use pretty print in tests Windowns has slight differences --- .../ElasticsearchExceptionTests.java | 77 +++---------------- .../rest/BytesRestResponseTests.java | 26 +------ 2 files changed, 11 insertions(+), 92 deletions(-) diff --git a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index 26447471c2c..74ea5f6e460 100644 --- a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -111,26 +111,11 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 2)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1}); - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); ex.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - String expected = "{\n" + - " \"type\" : \"search_phase_execution_exception\",\n" + - " \"reason\" : \"all shards failed\",\n" + - " \"phase\" : \"search\",\n" + - " \"grouped\" : true,\n" + - " \"failed_shards\" : [ {\n" + - " \"shard\" : 1,\n" + - " \"index\" : \"foo\",\n" + - " \"node\" : \"node_1\",\n" + - " \"reason\" : {\n" + - " \"type\" : \"query_parsing_exception\",\n" + - " \"reason\" : \"foobar\",\n" + - " \"index\" : \"foo\"\n" + - " }\n" + - " } ]\n" + - "}"; + String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]}"; assertEquals(expected, builder.string()); } { @@ -138,35 +123,11 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 1)); ShardSearchFailure failure2 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 2)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); ex.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - String expected = "{\n" + - " \"type\" : \"search_phase_execution_exception\",\n" + - " \"reason\" : \"all shards failed\",\n" + - " \"phase\" : \"search\",\n" + - " \"grouped\" : true,\n" + - " \"failed_shards\" : [ {\n" + - " \"shard\" : 1,\n" + - " \"index\" : \"foo\",\n" + - " \"node\" : \"node_1\",\n" + - " \"reason\" : {\n" + - " \"type\" : \"query_parsing_exception\",\n" + - " \"reason\" : \"foobar\",\n" + - " \"index\" : \"foo\"\n" + - " }\n" + - " }, {\n" + - " \"shard\" : 1,\n" + - " \"index\" : \"foo1\",\n" + - " \"node\" : \"node_1\",\n" + - " \"reason\" : {\n" + - " \"type\" : \"query_parsing_exception\",\n" + - " \"reason\" : \"foobar\",\n" + - " \"index\" : \"foo1\"\n" + - " }\n" + - " } ]\n" + - "}"; + String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}},{\"shard\":1,\"index\":\"foo1\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo1\"}}]}"; assertEquals(expected, builder.string()); } } @@ -187,27 +148,12 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { public void testToXContent() throws IOException { { ElasticsearchException ex = new ElasticsearchException("foo", new ElasticsearchException("bar", new ElasticsearchIllegalArgumentException("index is closed", new RuntimeException("foobar")))); - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); ex.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - String expected = "{\n" + - " \"type\" : \"exception\",\n" + - " \"reason\" : \"foo\",\n" + - " \"caused_by\" : {\n" + - " \"type\" : \"exception\",\n" + - " \"reason\" : \"bar\",\n" + - " \"caused_by\" : {\n" + - " \"type\" : \"illegal_argument_exception\",\n" + - " \"reason\" : \"index is closed\",\n" + - " \"caused_by\" : {\n" + - " \"type\" : \"runtime_exception\",\n" + - " \"reason\" : \"foobar\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}"; + String expected = "{\"type\":\"exception\",\"reason\":\"foo\",\"caused_by\":{\"type\":\"exception\",\"reason\":\"bar\",\"caused_by\":{\"type\":\"illegal_argument_exception\",\"reason\":\"index is closed\",\"caused_by\":{\"type\":\"runtime_exception\",\"reason\":\"foobar\"}}}}"; assertEquals(expected, builder.string()); } @@ -217,26 +163,23 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { // just a wrapper which is omitted ex = new RemoteTransportException("foobar", ex); } - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex); builder.endObject(); - String expected = "{\n" + - " \"type\" : \"file_not_found_exception\",\n" + - " \"reason\" : \"foo not found\"\n" + - "}"; + String expected = "{\"type\":\"file_not_found_exception\",\"reason\":\"foo not found\"}"; assertEquals(expected, builder.string()); } { // test equivalence ElasticsearchException ex = new RemoteTransportException("foobar", new FileNotFoundException("foo not found")); - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex); builder.endObject(); - XContentBuilder otherBuilder = XContentFactory.jsonBuilder().prettyPrint(); + XContentBuilder otherBuilder = XContentFactory.jsonBuilder(); otherBuilder.startObject(); ex.toXContent(otherBuilder, ToXContent.EMPTY_PARAMS); diff --git a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index 784015e9db0..8c0203e58de 100644 --- a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -133,37 +133,13 @@ public class BytesRestResponseTests extends ElasticsearchTestCase { @Test public void testConvert() throws IOException { RestRequest request = new FakeRestRequest(); - request.params().put("pretty", "true"); RestChannel channel = new DetailedExceptionRestChannel(request); ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 2)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] {failure, failure1}); BytesRestResponse response = new BytesRestResponse(channel, new RemoteTransportException("foo", ex)); String text = response.content().toUtf8(); - String expected = "{\n" + - " \"error\" : {\n" + - " \"root_cause\" : [ {\n" + - " \"type\" : \"query_parsing_exception\",\n" + - " \"reason\" : \"foobar\",\n" + - " \"index\" : \"foo\"\n" + - " } ],\n" + - " \"type\" : \"search_phase_execution_exception\",\n" + - " \"reason\" : \"all shards failed\",\n" + - " \"phase\" : \"search\",\n" + - " \"grouped\" : true,\n" + - " \"failed_shards\" : [ {\n" + - " \"shard\" : 1,\n" + - " \"index\" : \"foo\",\n" + - " \"node\" : \"node_1\",\n" + - " \"reason\" : {\n" + - " \"type\" : \"query_parsing_exception\",\n" + - " \"reason\" : \"foobar\",\n" + - " \"index\" : \"foo\"\n" + - " }\n" + - " } ]\n" + - " },\n" + - " \"status\" : 400\n" + - "}"; + String expected = "{\"error\":{\"root_cause\":[{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}],\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]},\"status\":400}"; assertEquals(expected.trim(), text.trim()); } From 8dbb79c96af8b2d7efc10c8dc3552cf0e2042407 Mon Sep 17 00:00:00 2001 From: Shay Banon Date: Tue, 21 Apr 2015 10:20:34 +0200 Subject: [PATCH 107/236] refactor and cleanup transport request handling This refactoring and cleanup is that each request handler ends up implementing too many methods that can be provided when the request handler itself is registered, including a prototype like class that can be used to instantiate new request instances for streaming. closes #10730 --- .../health/TransportClusterHealthAction.java | 7 +- .../hotthreads/NodesHotThreadsRequest.java | 5 + .../TransportNodesHotThreadsAction.java | 18 +- .../node/info/TransportNodesInfoAction.java | 18 +- .../liveness/TransportLivenessAction.java | 14 +- .../TransportNodesShutdownAction.java | 21 +- .../node/stats/TransportNodesStatsAction.java | 18 +- .../TransportDeleteRepositoryAction.java | 7 +- .../get/TransportGetRepositoriesAction.java | 7 +- .../put/TransportPutRepositoryAction.java | 7 +- .../TransportVerifyRepositoryAction.java | 7 +- .../TransportClusterRerouteAction.java | 7 +- .../TransportClusterUpdateSettingsAction.java | 7 +- .../TransportClusterSearchShardsAction.java | 7 +- .../create/TransportCreateSnapshotAction.java | 7 +- .../delete/TransportDeleteSnapshotAction.java | 7 +- .../get/TransportGetSnapshotsAction.java | 7 +- .../TransportRestoreSnapshotAction.java | 7 +- .../status/TransportNodesSnapshotsStatus.java | 18 +- .../TransportSnapshotsStatusAction.java | 7 +- .../state/TransportClusterStateAction.java | 7 +- .../cluster/stats/ClusterStatsRequest.java | 3 + .../stats/TransportClusterStatsAction.java | 18 +- .../TransportPendingClusterTasksAction.java | 7 +- .../alias/TransportIndicesAliasesAction.java | 7 +- .../exists/TransportAliasesExistAction.java | 7 +- .../alias/get/TransportGetAliasesAction.java | 7 +- .../analyze/TransportAnalyzeAction.java | 56 +---- .../TransportClearIndicesCacheAction.java | 18 +- .../close/TransportCloseIndexAction.java | 7 +- .../create/TransportCreateIndexAction.java | 7 +- .../delete/TransportDeleteIndexAction.java | 7 +- .../exists/indices/IndicesExistsRequest.java | 5 + .../indices/TransportIndicesExistsAction.java | 7 +- .../types/TransportTypesExistsAction.java | 7 +- .../indices/flush/TransportFlushAction.java | 18 +- .../indices/get/TransportGetIndexAction.java | 7 +- .../get/TransportGetFieldMappingsAction.java | 8 +- .../TransportGetFieldMappingsIndexAction.java | 13 +- .../get/TransportGetMappingsAction.java | 7 +- .../put/TransportPutMappingAction.java | 7 +- .../open/TransportOpenIndexAction.java | 7 +- .../optimize/TransportOptimizeAction.java | 18 +- .../recovery/TransportRecoveryAction.java | 25 +- .../refresh/TransportRefreshAction.java | 18 +- .../TransportIndicesSegmentsAction.java | 18 +- .../get/TransportGetSettingsAction.java | 7 +- .../put/TransportUpdateSettingsAction.java | 7 +- .../stats/TransportIndicesStatsAction.java | 18 +- .../TransportDeleteIndexTemplateAction.java | 7 +- .../get/TransportGetIndexTemplatesAction.java | 7 +- .../put/TransportPutIndexTemplateAction.java | 7 +- .../query/TransportValidateQueryAction.java | 18 +- .../delete/TransportDeleteWarmerAction.java | 7 +- .../warmer/get/TransportGetWarmersAction.java | 7 +- .../warmer/put/TransportPutWarmerAction.java | 7 +- .../action/bulk/TransportBulkAction.java | 11 +- .../action/bulk/TransportShardBulkAction.java | 27 +-- .../action/count/TransportCountAction.java | 21 +- .../action/delete/TransportDeleteAction.java | 25 +- .../TransportDeleteByQueryAction.java | 7 +- .../TransportShardDeleteByQueryAction.java | 26 +-- .../action/exists/TransportExistsAction.java | 21 +- .../explain/TransportExplainAction.java | 13 +- .../TransportFieldStatsTransportAction.java | 17 +- .../action/get/TransportGetAction.java | 13 +- .../action/get/TransportMultiGetAction.java | 7 +- .../get/TransportShardMultiGetAction.java | 13 +- .../action/index/TransportIndexAction.java | 29 +-- .../TransportDeleteIndexedScriptAction.java | 7 +- .../get/TransportGetIndexedScriptAction.java | 7 +- .../put/TransportPutIndexedScriptAction.java | 7 +- .../mlt/TransportMoreLikeThisAction.java | 11 +- .../TransportMultiPercolateAction.java | 7 +- .../percolate/TransportPercolateAction.java | 18 +- .../TransportShardMultiPercolateAction.java | 13 +- .../search/TransportClearScrollAction.java | 10 +- .../search/TransportMultiSearchAction.java | 8 +- .../action/search/TransportSearchAction.java | 7 +- .../search/TransportSearchScrollAction.java | 12 +- .../suggest/TransportSuggestAction.java | 19 +- .../support/HandledTransportAction.java | 32 +-- .../TransportBroadcastOperationAction.java | 26 +-- .../TransportMasterNodeOperationAction.java | 52 +---- ...ransportMasterNodeReadOperationAction.java | 4 +- .../info/TransportClusterInfoAction.java | 4 +- .../nodes/TransportNodesOperationAction.java | 31 +-- .../ShardReplicationOperationRequest.java | 8 +- ...portIndicesReplicationOperationAction.java | 52 +---- ...nsportShardReplicationOperationAction.java | 104 ++------- .../custom/SingleCustomOperationRequest.java | 7 + .../TransportSingleCustomOperationAction.java | 96 ++------ ...ransportInstanceSingleOperationAction.java | 51 +--- .../shard/SingleShardOperationRequest.java | 9 +- .../TransportShardSingleOperationAction.java | 102 +------- .../TransportMultiTermVectorsAction.java | 7 +- .../TransportShardMultiTermsVectorAction.java | 13 +- .../TransportTermVectorsAction.java | 14 +- .../dfs/TransportDfsOnlyAction.java | 18 +- .../action/update/TransportUpdateAction.java | 7 +- .../action/index/NodeIndexDeletedAction.java | 30 +-- .../index/NodeMappingRefreshAction.java | 14 +- .../action/shard/ShardStateAction.java | 30 +-- .../discovery/zen/ZenDiscovery.java | 15 +- .../zen/fd/MasterFaultDetection.java | 14 +- .../discovery/zen/fd/NodesFaultDetection.java | 15 +- .../zen/membership/MembershipAction.java | 44 +--- .../zen/ping/multicast/MulticastZenPing.java | 15 +- .../zen/ping/unicast/UnicastZenPing.java | 16 +- .../publish/PublishClusterStateAction.java | 18 +- .../gateway/LocalAllocateDangledIndices.java | 15 +- .../TransportNodesListGatewayMetaState.java | 18 +- ...ransportNodesListGatewayStartedShards.java | 18 +- .../indices/recovery/RecoverySource.java | 17 +- .../indices/recovery/RecoveryTarget.java | 85 +------ .../indices/store/IndicesStore.java | 14 +- .../TransportNodesListShardStoreMetaData.java | 18 +- .../VerifyNodeRepositoryAction.java | 24 +- .../PublishRiverClusterStateAction.java | 21 +- .../action/SearchServiceTransportAction.java | 211 +++-------------- .../snapshots/RestoreService.java | 17 +- .../snapshots/SnapshotsService.java | 17 +- .../BaseTransportRequestHandler.java | 34 --- .../transport/RequestHandlerRegistry.java | 74 ++++++ .../transport/TransportRequestHandler.java | 9 - .../transport/TransportService.java | 60 +++-- .../transport/TransportServiceAdapter.java | 2 +- .../transport/local/LocalTransport.java | 16 +- .../netty/MessageChannelHandler.java | 26 +-- .../action/IndicesRequestTests.java | 19 +- .../ShardReplicationOperationTests.java | 21 +- .../BenchmarkNettyLargeMessages.java | 12 +- .../transport/TransportBenchmark.java | 12 +- .../test/transport/MockTransportService.java | 4 +- .../AbstractSimpleTransportTests.java | 218 ++---------------- .../transport/ActionNamesTests.java | 10 +- .../netty/NettyScheduledPingTests.java | 14 +- .../transport/netty/NettyTransportTests.java | 31 ++- 138 files changed, 518 insertions(+), 2334 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java create mode 100644 src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 04bb0afae06..9d5812a3810 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -45,7 +45,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati @Inject public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ClusterName clusterName, ActionFilters actionFilters) { - super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterHealthRequest.class); this.clusterName = clusterName; } @@ -60,11 +60,6 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati return null; // we want users to be able to call this even when there are global blocks, just to check the health (are there blocks?) } - @Override - protected ClusterHealthRequest newRequest() { - return new ClusterHealthRequest(); - } - @Override protected ClusterHealthResponse newResponse() { return new ClusterHealthResponse(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 8db432b11e3..fe092d7dc81 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -38,6 +38,11 @@ public class NodesHotThreadsRequest extends NodesOperationRequest nodesInfos = new ArrayList<>(); @@ -70,16 +66,6 @@ public class TransportNodesInfoAction extends TransportNodesOperationAction { +public final class TransportLivenessAction implements TransportRequestHandler { private final ClusterService clusterService; private final ClusterName clusterName; @@ -37,21 +37,11 @@ public final class TransportLivenessAction extends BaseTransportRequestHandler { - - @Override - public NodeShutdownRequest newInstance() { - return new NodeShutdownRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } + private class NodeShutdownRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final NodeShutdownRequest request, TransportChannel channel) throws Exception { diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 339b8c4717d..ee9814eab6b 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -49,15 +49,11 @@ public class TransportNodesStatsAction extends TransportNodesOperationAction nodeStats = Lists.newArrayList(); @@ -70,16 +66,6 @@ public class TransportNodesStatsAction extends TransportNodesOperationAction { + ClusterStatsRequest() { + } + /** * Get stats from nodes based on the nodes ids specified. If none are passed, stats * based on all nodes will be returned. diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index a123bccf6b8..fd689945a58 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -67,16 +67,12 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction nodeStats = new ArrayList<>(responses.length()); @@ -90,16 +86,6 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction { private final IndicesService indicesService; - private final IndicesAnalysisService indicesAnalysisService; private static final Settings DEFAULT_SETTINGS = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); @@ -68,20 +65,9 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction @Inject public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, IndicesAnalysisService indicesAnalysisService, ActionFilters actionFilters) { - super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, AnalyzeRequest.class, ThreadPool.Names.INDEX); this.indicesService = indicesService; this.indicesAnalysisService = indicesAnalysisService; - transportService.registerHandler(AnalyzeAction.NAME, new TransportHandler()); - } - - @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - - @Override - protected AnalyzeRequest newRequest() { - return new AnalyzeRequest(); } @Override @@ -260,44 +246,4 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction return new AnalyzeResponse(tokens); } - - private class TransportHandler extends BaseTransportRequestHandler { - - @Override - public AnalyzeRequest newInstance() { - return newRequest(); - } - - @Override - public void messageReceived(AnalyzeRequest request, final TransportChannel channel) throws Exception { - // no need to have a threaded listener since we just send back a response - request.listenerThreaded(false); - // if we have a local operation, execute it on a thread since we don't spawn - request.operationThreaded(true); - execute(request, new ActionListener() { - @Override - public void onResponse(AnalyzeResponse result) { - try { - channel.sendResponse(result); - } catch (Throwable e) { - onFailure(e); - } - } - - @Override - public void onFailure(Throwable e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn("Failed to send response for get", e1); - } - } - }); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 2b79f98aca5..ef162e205ca 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -58,21 +58,12 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, IndicesQueryCache indicesQueryCache, ActionFilters actionFilters) { - super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, + ClearIndicesCacheRequest.class, ShardClearIndicesCacheRequest.class, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; this.indicesQueryCache = indicesQueryCache; } - @Override - protected String executor() { - return ThreadPool.Names.MANAGEMENT; - } - - @Override - protected ClearIndicesCacheRequest newRequestInstance() { - return new ClearIndicesCacheRequest(); - } - @Override protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { int successfulShards = 0; @@ -95,11 +86,6 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio return new ClearIndicesCacheResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures); } - @Override - protected ShardClearIndicesCacheRequest newShardRequest() { - return new ShardClearIndicesCacheRequest(); - } - @Override protected ShardClearIndicesCacheRequest newShardRequest(int numShards, ShardRouting shard, ClearIndicesCacheRequest request) { return new ShardClearIndicesCacheRequest(shard.shardId(), request); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 4be9d842d26..c0afb720e89 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -47,7 +47,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio @Inject public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexStateService indexStateService, NodeSettingsService nodeSettingsService, ActionFilters actionFilters) { - super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, CloseIndexRequest.class); this.indexStateService = indexStateService; this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService); } @@ -58,11 +58,6 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio return ThreadPool.Names.SAME; } - @Override - protected CloseIndexRequest newRequest() { - return new CloseIndexRequest(); - } - @Override protected CloseIndexResponse newResponse() { return new CloseIndexResponse(); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 9edd9b74664..14e8e1c9a24 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -45,7 +45,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi @Inject public TransportCreateIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataCreateIndexService createIndexService, ActionFilters actionFilters) { - super(settings, CreateIndexAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, CreateIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, CreateIndexRequest.class); this.createIndexService = createIndexService; } @@ -55,11 +55,6 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi return ThreadPool.Names.SAME; } - @Override - protected CreateIndexRequest newRequest() { - return new CreateIndexRequest(); - } - @Override protected CreateIndexResponse newResponse() { return new CreateIndexResponse(); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 72d27374614..67ffc22c400 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -48,7 +48,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService, NodeSettingsService nodeSettingsService, ActionFilters actionFilters) { - super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, DeleteIndexRequest.class); this.deleteIndexService = deleteIndexService; this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService); } @@ -58,11 +58,6 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi return ThreadPool.Names.SAME; } - @Override - protected DeleteIndexRequest newRequest() { - return new DeleteIndexRequest(); - } - @Override protected DeleteIndexResponse newResponse() { return new DeleteIndexResponse(); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java index c5b533df1b7..e104090e962 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java @@ -37,6 +37,11 @@ public class IndicesExistsRequest extends MasterNodeReadOperationRequest { +public class TransportRecoveryAction extends TransportBroadcastOperationAction { private final IndicesService indicesService; @Inject public TransportRecoveryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ActionFilters actionFilters) { - - super(settings, RecoveryAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, RecoveryAction.NAME, threadPool, clusterService, transportService, actionFilters, + RecoveryRequest.class, ShardRecoveryRequest.class, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; } - @Override - protected String executor() { - return ThreadPool.Names.MANAGEMENT; - } - - @Override - protected RecoveryRequest newRequestInstance() { - return new RecoveryRequest(); - } - @Override protected RecoveryResponse newResponse(RecoveryRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { @@ -120,14 +109,8 @@ public class TransportRecoveryAction extends } } - RecoveryResponse response = new RecoveryResponse(shardsResponses.length(), successfulShards, + return new RecoveryResponse(shardsResponses.length(), successfulShards, failedShards, request.detailed(), shardResponses, shardFailures); - return response; - } - - @Override - protected ShardRecoveryRequest newShardRequest() { - return new ShardRecoveryRequest(); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 6e75bebefa4..73af682f4ff 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -53,20 +53,11 @@ public class TransportRefreshAction extends TransportBroadcastOperationAction { private final AutoCreateIndex autoCreateIndex; - private final boolean allowIdGeneration; - private final ClusterService clusterService; - private final TransportShardBulkAction shardBulkAction; - private final TransportCreateIndexAction createIndexAction; @Inject public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction, ActionFilters actionFilters) { - super(settings, BulkAction.NAME, threadPool, transportService, actionFilters); + super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, BulkRequest.class); this.clusterService = clusterService; this.shardBulkAction = shardBulkAction; this.createIndexAction = createIndexAction; @@ -88,11 +84,6 @@ public class TransportBulkAction extends HandledTransportAction listener) { final long startTime = System.currentTimeMillis(); diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 4a29cfae58a..6fac03ac5c9 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -56,6 +56,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.river.RiverIndexName; @@ -83,17 +84,13 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation public TransportShardBulkAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters) { - super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters); + super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + BulkShardRequest.class, BulkShardRequest.class, ThreadPool.Names.BULK); this.mappingUpdatedAction = mappingUpdatedAction; this.updateHelper = updateHelper; this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); } - @Override - protected String executor() { - return ThreadPool.Names.BULK; - } - @Override protected boolean checkWriteConsistency() { return true; @@ -103,17 +100,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation protected TransportRequestOptions transportOptions() { return BulkAction.INSTANCE.transportOptions(settings); } - - @Override - protected BulkShardRequest newRequestInstance() { - return new BulkShardRequest(); - } - - @Override - protected BulkShardRequest newReplicaRequestInstance() { - return newRequestInstance(); - } - @Override protected BulkShardResponse newResponseInstance() { return new BulkShardResponse(); @@ -546,10 +532,9 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) throws Exception { - IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); - IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); - final BulkShardRequest request = shardRequest.request; + protected void shardOperationOnReplica(ShardId shardId, BulkShardRequest request) throws Exception { + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + IndexShard indexShard = indexService.shardSafe(shardId.id()); for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; if (item == null || item.isIgnoreOnReplica()) { diff --git a/src/main/java/org/elasticsearch/action/count/TransportCountAction.java b/src/main/java/org/elasticsearch/action/count/TransportCountAction.java index adc78226688..0c2496cda1a 100644 --- a/src/main/java/org/elasticsearch/action/count/TransportCountAction.java +++ b/src/main/java/org/elasticsearch/action/count/TransportCountAction.java @@ -65,18 +65,16 @@ import static org.elasticsearch.search.internal.SearchContext.DEFAULT_TERMINATE_ public class TransportCountAction extends TransportBroadcastOperationAction { private final IndicesService indicesService; - private final ScriptService scriptService; - private final PageCacheRecycler pageCacheRecycler; - private final BigArrays bigArrays; @Inject public TransportCountAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters) { - super(settings, CountAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, CountAction.NAME, threadPool, clusterService, transportService, actionFilters, + CountRequest.class, ShardCountRequest.class, ThreadPool.Names.SEARCH); this.indicesService = indicesService; this.scriptService = scriptService; this.pageCacheRecycler = pageCacheRecycler; @@ -89,21 +87,6 @@ public class TransportCountAction extends TransportBroadcastOperationAction { private final AutoCreateIndex autoCreateIndex; - private final TransportCreateIndexAction createIndexAction; @Inject public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, TransportCreateIndexAction createIndexAction, ActionFilters actionFilters) { - super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters); + super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + DeleteRequest.class, DeleteRequest.class, ThreadPool.Names.INDEX); this.createIndexAction = createIndexAction; this.autoCreateIndex = new AutoCreateIndex(settings); } - @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - @Override protected void doExecute(final DeleteRequest request, final ActionListener listener) { if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { @@ -125,16 +121,6 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct return true; } - @Override - protected DeleteRequest newRequestInstance() { - return new DeleteRequest(); - } - - @Override - protected DeleteRequest newReplicaRequestInstance() { - return newRequestInstance(); - } - @Override protected DeleteResponse newResponseInstance() { return new DeleteResponse(); @@ -165,9 +151,8 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct } @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - DeleteRequest request = shardRequest.request; - IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); + protected void shardOperationOnReplica(ShardId shardId, DeleteRequest request) { + IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id()); Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.REPLICA); indexShard.delete(delete); diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java index 0800a639a81..8566e32ea26 100644 --- a/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java +++ b/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java @@ -48,7 +48,7 @@ public class TransportDeleteByQueryAction extends TransportIndicesReplicationOpe public TransportDeleteByQueryAction(Settings settings, ClusterService clusterService, TransportService transportService, ThreadPool threadPool, TransportIndexDeleteByQueryAction indexDeleteByQueryAction, NodeSettingsService nodeSettingsService, ActionFilters actionFilters) { - super(settings, DeleteByQueryAction.NAME, transportService, clusterService, threadPool, indexDeleteByQueryAction, actionFilters); + super(settings, DeleteByQueryAction.NAME, transportService, clusterService, threadPool, indexDeleteByQueryAction, actionFilters, DeleteByQueryRequest.class); this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService); } @@ -63,11 +63,6 @@ public class TransportDeleteByQueryAction extends TransportIndicesReplicationOpe return clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); } - @Override - protected DeleteByQueryRequest newRequestInstance() { - return new DeleteByQueryRequest(); - } - @Override protected DeleteByQueryResponse newResponseInstance(DeleteByQueryRequest request, AtomicReferenceArray indexResponses) { DeleteByQueryResponse response = new DeleteByQueryResponse(); diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java index 6e364302e83..99add9e6504 100644 --- a/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java +++ b/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java @@ -36,6 +36,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.DefaultSearchContext; @@ -62,7 +63,8 @@ public class TransportShardDeleteByQueryAction extends TransportShardReplication ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters) { - super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters); + super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + ShardDeleteByQueryRequest.class, ShardDeleteByQueryRequest.class, ThreadPool.Names.INDEX); this.scriptService = scriptService; this.pageCacheRecycler = pageCacheRecycler; this.bigArrays = bigArrays; @@ -73,21 +75,6 @@ public class TransportShardDeleteByQueryAction extends TransportShardReplication return true; } - @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - - @Override - protected ShardDeleteByQueryRequest newRequestInstance() { - return new ShardDeleteByQueryRequest(); - } - - @Override - protected ShardDeleteByQueryRequest newReplicaRequestInstance() { - return newRequestInstance(); - } - @Override protected ShardDeleteByQueryResponse newResponseInstance() { return new ShardDeleteByQueryResponse(); @@ -121,10 +108,9 @@ public class TransportShardDeleteByQueryAction extends TransportShardReplication @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - ShardDeleteByQueryRequest request = shardRequest.request; - IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); - IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); + protected void shardOperationOnReplica(ShardId shardId, ShardDeleteByQueryRequest request) { + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + IndexShard indexShard = indexService.shardSafe(shardId.id()); SearchContext.setCurrent(new DefaultSearchContext(0, new ShardSearchLocalRequest(request.types(), request.nowInMillis()), null, indexShard.acquireSearcher(DELETE_BY_QUERY_API, true), indexService, indexShard, scriptService, diff --git a/src/main/java/org/elasticsearch/action/exists/TransportExistsAction.java b/src/main/java/org/elasticsearch/action/exists/TransportExistsAction.java index 9c9dbdb8a8b..8d782012931 100644 --- a/src/main/java/org/elasticsearch/action/exists/TransportExistsAction.java +++ b/src/main/java/org/elasticsearch/action/exists/TransportExistsAction.java @@ -65,18 +65,16 @@ import static org.elasticsearch.action.exists.ExistsRequest.DEFAULT_MIN_SCORE; public class TransportExistsAction extends TransportBroadcastOperationAction { private final IndicesService indicesService; - private final ScriptService scriptService; - private final PageCacheRecycler pageCacheRecycler; - private final BigArrays bigArrays; @Inject public TransportExistsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters) { - super(settings, ExistsAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, ExistsAction.NAME, threadPool, clusterService, transportService, actionFilters, + ExistsRequest.class, ShardExistsRequest.class, ThreadPool.Names.SEARCH); this.indicesService = indicesService; this.scriptService = scriptService; this.pageCacheRecycler = pageCacheRecycler; @@ -89,21 +87,6 @@ public class TransportExistsAction extends TransportBroadcastOperationAction listener) { ClusterState clusterState = clusterService.state(); diff --git a/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index dc93cd935b1..246ba1f93d8 100644 --- a/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -48,7 +48,8 @@ public class TransportShardMultiGetAction extends TransportShardSingleOperationA @Inject public TransportShardMultiGetAction(Settings settings, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, + MultiGetShardRequest.class, ThreadPool.Names.GET); this.indicesService = indicesService; this.realtime = settings.getAsBoolean("action.get.realtime", true); @@ -59,16 +60,6 @@ public class TransportShardMultiGetAction extends TransportShardSingleOperationA return true; } - @Override - protected String executor() { - return ThreadPool.Names.GET; - } - - @Override - protected MultiGetShardRequest newRequest() { - return new MultiGetShardRequest(); - } - @Override protected MultiGetShardResponse newResponse() { return new MultiGetShardResponse(); diff --git a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 494f70708cb..5a8c96f352c 100644 --- a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.river.RiverIndexName; @@ -68,18 +69,16 @@ import java.io.IOException; public class TransportIndexAction extends TransportShardReplicationOperationAction { private final AutoCreateIndex autoCreateIndex; - private final boolean allowIdGeneration; - private final TransportCreateIndexAction createIndexAction; - private final MappingUpdatedAction mappingUpdatedAction; @Inject public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters) { - super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters); + super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + IndexRequest.class, IndexRequest.class, ThreadPool.Names.INDEX); this.createIndexAction = createIndexAction; this.mappingUpdatedAction = mappingUpdatedAction; this.autoCreateIndex = new AutoCreateIndex(settings); @@ -145,26 +144,11 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi return true; } - @Override - protected IndexRequest newRequestInstance() { - return new IndexRequest(); - } - - @Override - protected IndexRequest newReplicaRequestInstance() { - return newRequestInstance(); - } - @Override protected IndexResponse newResponseInstance() { return new IndexResponse(); } - @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - @Override protected ShardIterator shards(ClusterState clusterState, InternalRequest request) { return clusterService.operationRouting() @@ -260,10 +244,9 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi } @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) throws IOException { - IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); - IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); - IndexRequest request = shardRequest.request; + protected void shardOperationOnReplica(ShardId shardId, IndexRequest request) throws IOException { + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + IndexShard indexShard = indexService.shardSafe(shardId.id()); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).type(request.type()).id(request.id()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); if (request.opType() == IndexRequest.OpType.INDEX) { diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/delete/TransportDeleteIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/delete/TransportDeleteIndexedScriptAction.java index f8c14d6d97a..fc205916e43 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/delete/TransportDeleteIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/delete/TransportDeleteIndexedScriptAction.java @@ -40,15 +40,10 @@ public class TransportDeleteIndexedScriptAction extends HandledTransportAction listener) { scriptService.deleteScriptFromIndex(request, new DelegatingActionListener(listener) { diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/get/TransportGetIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/get/TransportGetIndexedScriptAction.java index 90a24968d94..1777258af59 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/get/TransportGetIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/get/TransportGetIndexedScriptAction.java @@ -39,15 +39,10 @@ public class TransportGetIndexedScriptAction extends HandledTransportAction listener){ // forward the handling to the script service we are running on a network thread here... diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/put/TransportPutIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/put/TransportPutIndexedScriptAction.java index 3fcb60992f5..44c6e3679eb 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/put/TransportPutIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/put/TransportPutIndexedScriptAction.java @@ -40,15 +40,10 @@ public class TransportPutIndexedScriptAction extends HandledTransportAction listener) { scriptService.putScriptToIndex(request, new DelegatingActionListener(listener) { diff --git a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java index eb656e96c1a..d78745e3667 100644 --- a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java +++ b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java @@ -69,19 +69,15 @@ import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; public class TransportMoreLikeThisAction extends HandledTransportAction { private final TransportSearchAction searchAction; - private final TransportGetAction getAction; - private final IndicesService indicesService; - private final ClusterService clusterService; - private final TransportService transportService; @Inject public TransportMoreLikeThisAction(Settings settings, ThreadPool threadPool, TransportSearchAction searchAction, TransportGetAction getAction, ClusterService clusterService, IndicesService indicesService, TransportService transportService, ActionFilters actionFilters) { - super(settings, MoreLikeThisAction.NAME, threadPool, transportService, actionFilters); + super(settings, MoreLikeThisAction.NAME, threadPool, transportService, actionFilters, MoreLikeThisRequest.class); this.searchAction = searchAction; this.getAction = getAction; this.indicesService = indicesService; @@ -89,11 +85,6 @@ public class TransportMoreLikeThisAction extends HandledTransportAction listener) { // update to actual index name diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java index 3992221b0f9..50476176c47 100644 --- a/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java +++ b/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java @@ -60,18 +60,13 @@ public class TransportMultiPercolateAction extends HandledTransportAction listener) { final ClusterState clusterState = clusterService.state(); diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java index 7733ccf5069..46005f30dc8 100644 --- a/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java +++ b/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java @@ -62,7 +62,8 @@ public class TransportPercolateAction extends TransportBroadcastOperationAction< public TransportPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, PercolatorService percolatorService, TransportGetAction getAction, ActionFilters actionFilters) { - super(settings, PercolateAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, PercolateAction.NAME, threadPool, clusterService, transportService, actionFilters, + PercolateRequest.class, PercolateShardRequest.class, ThreadPool.Names.PERCOLATE); this.percolatorService = percolatorService; this.getAction = getAction; } @@ -95,16 +96,6 @@ public class TransportPercolateAction extends TransportBroadcastOperationAction< } } - @Override - protected String executor() { - return ThreadPool.Names.PERCOLATE; - } - - @Override - protected PercolateRequest newRequestInstance() { - return new PercolateRequest(); - } - @Override protected ClusterBlockException checkGlobalBlock(ClusterState state, PercolateRequest request) { return state.blocks().globalBlockedException(ClusterBlockLevel.READ); @@ -165,11 +156,6 @@ public class TransportPercolateAction extends TransportBroadcastOperationAction< } } - @Override - protected PercolateShardRequest newShardRequest() { - return new PercolateShardRequest(); - } - @Override protected PercolateShardRequest newShardRequest(int numShards, ShardRouting shard, PercolateRequest request) { return new PercolateShardRequest(shard.shardId(), numShards, request); diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java index 6795a4ed549..f221f758233 100644 --- a/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java +++ b/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java @@ -57,7 +57,8 @@ public class TransportShardMultiPercolateAction extends TransportShardSingleOper @Inject public TransportShardMultiPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, PercolatorService percolatorService, ActionFilters actionFilters) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, + Request.class, ThreadPool.Names.PERCOLATE); this.percolatorService = percolatorService; } @@ -66,16 +67,6 @@ public class TransportShardMultiPercolateAction extends TransportShardSingleOper return true; } - @Override - protected String executor() { - return ThreadPool.Names.PERCOLATE; - } - - @Override - protected Request newRequest() { - return new Request(); - } - @Override protected Response newResponse() { return new Response(); diff --git a/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index f81b476a4b5..6312bc2bca2 100644 --- a/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -33,8 +32,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.search.action.SearchServiceTransportAction; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportRequestHandler; -import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; @@ -54,7 +51,7 @@ public class TransportClearScrollAction extends HandledTransportAction { private final ClusterService clusterService; - private final TransportSearchAction searchAction; @Inject public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, TransportSearchAction searchAction, ActionFilters actionFilters) { - super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters); + super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, MultiSearchRequest.class); this.clusterService = clusterService; this.searchAction = searchAction; } @@ -82,9 +81,4 @@ public class TransportMultiSearchAction extends HandledTransportAction { private final TransportSearchScrollQueryThenFetchAction queryThenFetchAction; - private final TransportSearchScrollQueryAndFetchAction queryAndFetchAction; - private final TransportSearchScrollScanAction scanAction; @Inject @@ -54,7 +49,7 @@ public class TransportSearchScrollAction extends HandledTransportAction { private final IndicesService indicesService; - private final SuggestPhase suggestPhase; @Inject public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, SuggestPhase suggestPhase, ActionFilters actionFilters) { - super(settings, SuggestAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, SuggestAction.NAME, threadPool, clusterService, transportService, actionFilters, + SuggestRequest.class, ShardSuggestRequest.class, ThreadPool.Names.SUGGEST); this.indicesService = indicesService; this.suggestPhase = suggestPhase; } - @Override - protected String executor() { - return ThreadPool.Names.SUGGEST; - } - - @Override - protected SuggestRequest newRequestInstance() { - return new SuggestRequest(); - } - - @Override - protected ShardSuggestRequest newShardRequest() { - return new ShardSuggestRequest(); - } - @Override protected ShardSuggestRequest newShardRequest(int numShards, ShardRouting shard, SuggestRequest request) { return new ShardSuggestRequest(shard.shardId(), request); diff --git a/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index b69032f3cdf..f939893a98e 100644 --- a/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportRequestHandler; import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; /** @@ -32,30 +32,12 @@ import org.elasticsearch.transport.TransportService; */ public abstract class HandledTransportAction extends TransportAction{ - /** - * Sub classes implement this call to get new instance of a Request object - * @return Request - */ - protected abstract Request newRequestInstance(); - - protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters){ + protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, Class request) { super(settings, actionName, threadPool, actionFilters); - transportService.registerHandler(actionName, new TransportHandler() { - @Override - public Request newInstance(){ - return newRequestInstance(); - } - }); + transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler()); } - abstract class TransportHandler extends BaseTransportRequestHandler{ - - /** - * Call to get an instance of type Request - * @return Request - */ - @Override - public abstract Request newInstance(); + class TransportHandler implements TransportRequestHandler { @Override public final void messageReceived(final Request request, final TransportChannel channel) throws Exception { @@ -82,12 +64,6 @@ public abstract class HandledTransportAction request, Class shardRequest, String shardExecutor) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterService = clusterService; this.transportService = transportService; this.threadPool = threadPool; this.transportShardAction = actionName + "[s]"; - this.executor = executor(); - transportService.registerHandler(transportShardAction, new ShardTransportHandler()); + transportService.registerRequestHandler(transportShardAction, shardRequest, shardExecutor, new ShardTransportHandler()); } @Override @@ -71,12 +69,8 @@ public abstract class TransportBroadcastOperationAction { - - @Override - public ShardRequest newInstance() { - return newShardRequest(); - } - - @Override - public String executor() { - return executor; - } + class ShardTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final ShardRequest request, final TransportChannel channel) throws Exception { diff --git a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java index 6a459e50d49..15e90c0784d 100644 --- a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; @@ -42,28 +43,23 @@ import org.elasticsearch.transport.*; /** * A base class for operations that needs to be performed on the master node. */ -public abstract class TransportMasterNodeOperationAction extends TransportAction { +public abstract class TransportMasterNodeOperationAction extends HandledTransportAction { protected final TransportService transportService; - protected final ClusterService clusterService; final String executor; - protected TransportMasterNodeOperationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); + protected TransportMasterNodeOperationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, + Class request) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.transportService = transportService; this.clusterService = clusterService; - this.executor = executor(); - - transportService.registerHandler(actionName, new TransportHandler()); } protected abstract String executor(); - protected abstract Request newRequest(); - protected abstract Response newResponse(); protected abstract void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception; @@ -225,42 +221,4 @@ public abstract class TransportMasterNodeOperationAction { - - @Override - public Request newInstance() { - return newRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public void messageReceived(final Request request, final TransportChannel channel) throws Exception { - // we just send back a response, no need to fork a listener - request.listenerThreaded(false); - execute(request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Throwable e) { - onFailure(e); - } - } - - @Override - public void onFailure(Throwable e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn("Failed to send response", e1); - } - } - }); - } - } } diff --git a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java index 7d7453f581d..383de7ceb53 100644 --- a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java @@ -36,8 +36,8 @@ public abstract class TransportMasterNodeReadOperationAction request) { + super(settings, actionName, transportService, clusterService, threadPool, actionFilters,request); this.forceLocal = settings.getAsBoolean(FORCE_LOCAL_SETTING, null); } diff --git a/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java b/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java index 00d3e566bc6..45a21a60ea3 100644 --- a/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java +++ b/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java @@ -33,8 +33,8 @@ import org.elasticsearch.transport.TransportService; */ public abstract class TransportClusterInfoAction extends TransportMasterNodeReadOperationAction { - public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, actionName, transportService, clusterService, threadPool, actionFilters); + public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, Class request) { + super(settings, actionName, transportService, clusterService, threadPool, actionFilters, request); } @Override diff --git a/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java b/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java index 358f7d0860f..65bdaeb7c26 100644 --- a/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.NoSuchNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -48,19 +47,18 @@ public abstract class TransportNodesOperationAction request, Class nodeRequest, String nodeExecutor) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterName = clusterName; this.clusterService = clusterService; this.transportService = transportService; this.transportNodeAction = actionName + "[n]"; - this.executor = executor(); - transportService.registerHandler(transportNodeAction, new NodeTransportHandler()); + transportService.registerRequestHandler(transportNodeAction, nodeRequest, nodeExecutor, new NodeTransportHandler()); } @Override @@ -72,12 +70,8 @@ public abstract class TransportNodesOperationAction { - - @Override - public NodeRequest newInstance() { - return newNodeRequest(); - } + class NodeTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final NodeRequest request, final TransportChannel channel) throws Exception { channel.sendResponse(nodeOperation(request)); } - - @Override - public String toString() { - return transportNodeAction; - } - - @Override - public String executor() { - return executor; - } } } diff --git a/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java index dc7461020be..0d9730c246a 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java +++ b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.concurrent.TimeUnit; @@ -40,8 +41,9 @@ public abstract class ShardReplicationOperationRequest - extends TransportAction { + extends HandledTransportAction { protected final ClusterService clusterService; protected final TransportIndexReplicationOperationAction indexAction; protected TransportIndicesReplicationOperationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - TransportIndexReplicationOperationAction indexAction, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); + TransportIndexReplicationOperationAction indexAction, ActionFilters actionFilters, + Class request) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterService = clusterService; this.indexAction = indexAction; - - transportService.registerHandler(actionName, new TransportHandler()); } @@ -116,8 +114,6 @@ public abstract class TransportIndicesReplicationOperationAction routing, long startTimeInMillis); @@ -127,42 +123,4 @@ public abstract class TransportIndicesReplicationOperationAction { - - @Override - public Request newInstance() { - return newRequestInstance(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public void messageReceived(final Request request, final TransportChannel channel) throws Exception { - // no need for a threaded listener, since we just send a response - request.listenerThreaded(false); - execute(request, new ActionListener() { - @Override - public void onResponse(Response result) { - try { - channel.sendResponse(result); - } catch (Throwable e) { - onFailure(e); - } - } - - @Override - public void onFailure(Throwable e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn("Failed to send error response for action [" + actionName + "] and request [" + request + "]", e1); - } - } - }); - } - } } diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index 529586927fd..bde8a67b098 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -78,7 +78,8 @@ public abstract class TransportShardReplicationOperationAction request, Class replicaRequest, String executor) { super(settings, actionName, threadPool, actionFilters); this.transportService = transportService; this.clusterService = clusterService; @@ -86,11 +87,12 @@ public abstract class TransportShardReplicationOperationAction shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable; - protected abstract void shardOperationOnReplica(ReplicaOperationRequest shardRequest) throws Exception; + protected abstract void shardOperationOnReplica(ShardId shardId, ReplicaRequest shardRequest) throws Exception; protected abstract ShardIterator shards(ClusterState clusterState, InternalRequest request) throws ElasticsearchException; @@ -175,18 +171,7 @@ public abstract class TransportShardReplicationOperationAction { - - @Override - public Request newInstance() { - return newRequestInstance(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - + class OperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { // no need to have a threaded listener since we just send back a response @@ -215,30 +200,13 @@ public abstract class TransportShardReplicationOperationAction { - + class ReplicaOperationTransportHandler implements TransportRequestHandler { @Override - public ReplicaOperationRequest newInstance() { - return new ReplicaOperationRequest(); - } - - @Override - public String executor() { - return executor; - } - - // we must never reject on because of thread pool capacity on replicas - @Override - public boolean isForceExecution() { - return true; - } - - @Override - public void messageReceived(final ReplicaOperationRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception { try { - shardOperationOnReplica(request); + shardOperationOnReplica(request.internalShardId, request); } catch (Throwable t) { - failReplicaIfNeeded(request.shardId.getIndex(), request.shardId.id(), t); + failReplicaIfNeeded(request.internalShardId.getIndex(), request.internalShardId.id(), t); throw t; } channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -255,46 +223,6 @@ public abstract class TransportShardReplicationOperationAction extends ActionRequest implements IndicesRequest { + ShardId internalShardId; + private boolean threadedOperation = true; private boolean preferLocal = true; private String index; @@ -113,6 +116,9 @@ public abstract class SingleCustomOperationRequest extends TransportAction { +public abstract class TransportSingleCustomOperationAction extends HandledTransportAction { protected final ClusterService clusterService; - protected final TransportService transportService; final String transportShardAction; final String executor; - protected TransportSingleCustomOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); + protected TransportSingleCustomOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, + Class request, String executor) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterService = clusterService; this.transportService = transportService; this.transportShardAction = actionName + "[s]"; - this.executor = executor(); + this.executor = executor; - transportService.registerHandler(transportShardAction, new ShardTransportHandler()); + transportService.registerRequestHandler(transportShardAction, request, executor, new ShardTransportHandler()); } @Override @@ -73,8 +66,6 @@ public abstract class TransportSingleCustomOperationAction() { + internalRequest.request().internalShardId = shard.shardId(); + transportService.sendRequest(node, transportShardAction, internalRequest.request(), new BaseTransportResponseHandler() { @Override public Response newInstance() { return newResponse(); @@ -290,73 +280,15 @@ public abstract class TransportSingleCustomOperationAction { + private class ShardTransportHandler implements TransportRequestHandler { @Override - public ShardSingleOperationRequest newInstance() { - return new ShardSingleOperationRequest(); - } - - @Override - public String executor() { - return executor; - } - - @Override - public void messageReceived(final ShardSingleOperationRequest request, final TransportChannel channel) throws Exception { - Response response = shardOperation(request.request(), request.shardId()); + public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + Response response = shardOperation(request, request.internalShardId); channel.sendResponse(response); } } - protected class ShardSingleOperationRequest extends TransportRequest implements IndicesRequest { - - private Request request; - private ShardId shardId; - - ShardSingleOperationRequest() { - } - - public ShardSingleOperationRequest(Request request, ShardId shardId) { - super(request); - this.request = request; - this.shardId = shardId; - } - - public Request request() { - return request; - } - - @Override - public String[] indices() { - return request.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - return request.indicesOptions(); - } - - public ShardId shardId() { - return shardId; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = newRequest(); - request.readFrom(in); - shardId = ShardId.readShardId(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); - shardId.writeTo(out); - } - } - /** * Internal request class that gets built on each node. Holds the original request plus additional info. */ diff --git a/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index 3e2ec365c37..2c54b3ef8e8 100644 --- a/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -47,22 +48,18 @@ import java.util.concurrent.atomic.AtomicBoolean; /** * */ -public abstract class TransportInstanceSingleOperationAction extends TransportAction { +public abstract class TransportInstanceSingleOperationAction extends HandledTransportAction { protected final ClusterService clusterService; - protected final TransportService transportService; final String executor; - protected TransportInstanceSingleOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); + protected TransportInstanceSingleOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, Class request) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterService = clusterService; this.transportService = transportService; - this.executor = executor(); - - transportService.registerHandler(actionName, new TransportHandler()); } @Override @@ -74,8 +71,6 @@ public abstract class TransportInstanceSingleOperationAction listener) throws ElasticsearchException; - protected abstract Request newRequest(); - protected abstract Response newResponse(); protected ClusterBlockException checkGlobalBlock(ClusterState state) { @@ -279,44 +274,6 @@ public abstract class TransportInstanceSingleOperationAction { - - @Override - public Request newInstance() { - return newRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public void messageReceived(Request request, final TransportChannel channel) throws Exception { - // no need to have a threaded listener since we just send back a response - request.listenerThreaded(false); - execute(request, new ActionListener() { - @Override - public void onResponse(Response result) { - try { - channel.sendResponse(result); - } catch (Throwable e) { - onFailure(e); - } - } - - @Override - public void onFailure(Throwable e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn("Failed to send response for get", e1); - } - } - }); - } - } - /** * Internal request class that gets built on each node. Holds the original request plus additional info. */ diff --git a/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java index 8a0263fd0d5..74db0435709 100644 --- a/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java +++ b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -34,10 +35,10 @@ import java.io.IOException; */ public abstract class SingleShardOperationRequest extends ActionRequest implements IndicesRequest { + ShardId internalShardId; + protected String index; - public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - private boolean threadedOperation = true; protected SingleShardOperationRequest() { @@ -107,6 +108,9 @@ public abstract class SingleShardOperationRequest request, String executor) { super(settings, actionName, threadPool, actionFilters); this.clusterService = clusterService; this.transportService = transportService; this.transportShardAction = actionName + "[s]"; - this.executor = executor(); + this.executor = executor; if (!isSubAction()) { - transportService.registerHandler(actionName, new TransportHandler()); + transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler()); } - transportService.registerHandler(transportShardAction, new ShardTransportHandler()); + transportService.registerRequestHandler(transportShardAction, request, executor, new ShardTransportHandler()); } /** @@ -89,12 +83,8 @@ public abstract class TransportShardSingleOperationAction() { + internalRequest.request().internalShardId = shardRouting.shardId(); + transportService.sendRequest(node, transportShardAction, internalRequest.request(), new BaseTransportResponseHandler() { @Override public Response newInstance() { @@ -237,17 +228,7 @@ public abstract class TransportShardSingleOperationAction { - - @Override - public Request newInstance() { - return newRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } + private class TransportHandler implements TransportRequestHandler { @Override public void messageReceived(Request request, final TransportChannel channel) throws Exception { @@ -277,77 +258,18 @@ public abstract class TransportShardSingleOperationAction { + private class ShardTransportHandler implements TransportRequestHandler { @Override - public ShardSingleOperationRequest newInstance() { - return new ShardSingleOperationRequest(); - } - - @Override - public String executor() { - return executor; - } - - @Override - public void messageReceived(final ShardSingleOperationRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final Request request, final TransportChannel channel) throws Exception { if (logger.isTraceEnabled()) { - logger.trace("executing [{}] on shard [{}]", request.request(), request.shardId()); + logger.trace("executing [{}] on shard [{}]", request, request.internalShardId); } - Response response = shardOperation(request.request(), request.shardId()); + Response response = shardOperation(request, request.internalShardId); channel.sendResponse(response); } } - class ShardSingleOperationRequest extends TransportRequest implements IndicesRequest { - - private Request request; - - private ShardId shardId; - - ShardSingleOperationRequest() { - } - - ShardSingleOperationRequest(Request request, ShardId shardId) { - super(request); - this.request = request; - this.shardId = shardId; - } - - public Request request() { - return request; - } - - public ShardId shardId() { - return shardId; - } - - @Override - public String[] indices() { - return request.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - return request.indicesOptions(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = newRequest(); - request.readFrom(in); - shardId = ShardId.readShardId(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); - shardId.writeTo(out); - } - } - /** * Internal request class that gets built on each node. Holds the original request plus additional info. */ diff --git a/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java b/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java index 96b3d8d04ed..91c924e6027 100644 --- a/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -46,7 +46,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction { - - @Override - public NodeIndexDeletedMessage newInstance() { - return new NodeIndexDeletedMessage(); - } + private class NodeIndexDeletedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(NodeIndexDeletedMessage message, TransportChannel channel) throws Exception { @@ -127,19 +122,9 @@ public class NodeIndexDeletedAction extends AbstractComponent { } channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } - private class NodeIndexStoreDeletedTransportHandler extends BaseTransportRequestHandler { - - @Override - public NodeIndexStoreDeletedMessage newInstance() { - return new NodeIndexStoreDeletedMessage(); - } + private class NodeIndexStoreDeletedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(NodeIndexStoreDeletedMessage message, TransportChannel channel) throws Exception { @@ -148,11 +133,6 @@ public class NodeIndexDeletedAction extends AbstractComponent { } channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } static class NodeIndexDeletedMessage extends TransportRequest { diff --git a/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index c14452a0fef..794e5775852 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -52,7 +52,7 @@ public class NodeMappingRefreshAction extends AbstractComponent { super(settings); this.transportService = transportService; this.metaDataMappingService = metaDataMappingService; - transportService.registerHandler(ACTION_NAME, new NodeMappingRefreshTransportHandler()); + transportService.registerRequestHandler(ACTION_NAME, NodeMappingRefreshRequest.class, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); } public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) throws ElasticsearchException { @@ -64,23 +64,13 @@ public class NodeMappingRefreshAction extends AbstractComponent { transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); } - private class NodeMappingRefreshTransportHandler extends BaseTransportRequestHandler { - - @Override - public NodeMappingRefreshRequest newInstance() { - return new NodeMappingRefreshRequest(); - } + private class NodeMappingRefreshTransportHandler implements TransportRequestHandler { @Override public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception { metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types()); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } public static class NodeMappingRefreshRequest extends TransportRequest implements IndicesRequest { diff --git a/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index feb72fc7078..7630306ebe1 100644 --- a/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -72,8 +72,8 @@ public class ShardStateAction extends AbstractComponent { this.allocationService = allocationService; this.routingService = routingService; - transportService.registerHandler(SHARD_STARTED_ACTION_NAME, new ShardStartedTransportHandler()); - transportService.registerHandler(SHARD_FAILED_ACTION_NAME, new ShardFailedTransportHandler()); + transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardRoutingEntry.class, ThreadPool.Names.SAME, new ShardStartedTransportHandler()); + transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry.class, ThreadPool.Names.SAME, new ShardFailedTransportHandler()); } public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason) throws ElasticsearchException { @@ -287,42 +287,22 @@ public class ShardStateAction extends AbstractComponent { }); } - private class ShardFailedTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardRoutingEntry newInstance() { - return new ShardRoutingEntry(); - } + private class ShardFailedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { handleShardFailureOnMaster(request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } - class ShardStartedTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardRoutingEntry newInstance() { - return new ShardRoutingEntry(); - } + class ShardStartedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { shardStartedOnMaster(request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } static class ShardRoutingEntry extends TransportRequest { @@ -335,7 +315,7 @@ public class ShardStateAction extends AbstractComponent { volatile boolean processed; // state field, no need to serialize - private ShardRoutingEntry() { + ShardRoutingEntry() { } private ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String reason) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index b9279d7e832..92354785856 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -195,7 +195,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen this.joinThreadControl = new JoinThreadControl(threadPool); - transportService.registerHandler(DISCOVERY_REJOIN_ACTION_NAME, new RejoinClusterRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest.class, ThreadPool.Names.SAME, new RejoinClusterRequestHandler()); dynamicSettings.addDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, new Validator() { @Override @@ -1242,13 +1242,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } - class RejoinClusterRequestHandler extends BaseTransportRequestHandler { - - @Override - public RejoinClusterRequest newInstance() { - return new RejoinClusterRequest(); - } - + class RejoinClusterRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception { clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.IMMEDIATE, new ClusterStateNonMasterUpdateTask() { @@ -1273,11 +1267,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } }); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } class ApplySettings implements NodeSettingsService.Listener { diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java index 867866b8b2b..d713b6d0b39 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java @@ -77,7 +77,7 @@ public class MasterFaultDetection extends FaultDetection { logger.debug("[master] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount); - transportService.registerHandler(MASTER_PING_ACTION_NAME, new MasterPingRequestHandler()); + transportService.registerRequestHandler(MASTER_PING_ACTION_NAME, MasterPingRequest.class, ThreadPool.Names.SAME, new MasterPingRequestHandler()); } public DiscoveryNode masterNode() { @@ -317,12 +317,7 @@ public class MasterFaultDetection extends FaultDetection { } } - private class MasterPingRequestHandler extends BaseTransportRequestHandler { - - @Override - public MasterPingRequest newInstance() { - return new MasterPingRequest(); - } + private class MasterPingRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final MasterPingRequest request, final TransportChannel channel) throws Exception { @@ -390,11 +385,6 @@ public class MasterFaultDetection extends FaultDetection { channel.sendResponse(new MasterPingResponseResponse()); } } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java index 8cbb863dd80..f367db8ca8c 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java @@ -65,7 +65,7 @@ public class NodesFaultDetection extends FaultDetection { logger.debug("[node ] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount); - transportService.registerHandler(PING_ACTION_NAME, new PingRequestHandler()); + transportService.registerRequestHandler(PING_ACTION_NAME, PingRequest.class, ThreadPool.Names.SAME, new PingRequestHandler()); } public void setLocalNode(DiscoveryNode localNode) { @@ -239,13 +239,7 @@ public class NodesFaultDetection extends FaultDetection { } } - class PingRequestHandler extends BaseTransportRequestHandler { - - @Override - public PingRequest newInstance() { - return new PingRequest(); - } - + class PingRequestHandler implements TransportRequestHandler { @Override public void messageReceived(PingRequest request, TransportChannel channel) throws Exception { // if we are not the node we are supposed to be pinged, send an exception @@ -264,11 +258,6 @@ public class NodesFaultDetection extends FaultDetection { channel.sendResponse(new PingResponse()); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java index 36fcde3ddcc..b245a6a0d35 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java @@ -72,9 +72,9 @@ public class MembershipAction extends AbstractComponent { this.listener = listener; this.clusterService = clusterService; - transportService.registerHandler(DISCOVERY_JOIN_ACTION_NAME, new JoinRequestRequestHandler()); - transportService.registerHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequestRequestHandler()); - transportService.registerHandler(DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest.class, ThreadPool.Names.GENERIC, new JoinRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, ValidateJoinRequest.class, ThreadPool.Names.GENERIC, new ValidateJoinRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest.class, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler()); } public void close() { @@ -133,12 +133,7 @@ public class MembershipAction extends AbstractComponent { } - private class JoinRequestRequestHandler extends BaseTransportRequestHandler { - - @Override - public JoinRequest newInstance() { - return new JoinRequest(); - } + private class JoinRequestRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final JoinRequest request, final TransportChannel channel) throws Exception { @@ -162,36 +157,21 @@ public class MembershipAction extends AbstractComponent { } }); } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } } - class ValidateJoinRequest extends TransportRequest { + static class ValidateJoinRequest extends TransportRequest { ValidateJoinRequest() { } } - private class ValidateJoinRequestRequestHandler extends BaseTransportRequestHandler { - - @Override - public ValidateJoinRequest newInstance() { - return new ValidateJoinRequest(); - } + class ValidateJoinRequestRequestHandler implements TransportRequestHandler { @Override public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception { // for now, the mere fact that we can serialize the cluster state acts as validation.... channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } } static class LeaveRequest extends TransportRequest { @@ -218,22 +198,12 @@ public class MembershipAction extends AbstractComponent { } } - private class LeaveRequestRequestHandler extends BaseTransportRequestHandler { - - @Override - public LeaveRequest newInstance() { - return new LeaveRequest(); - } + private class LeaveRequestRequestHandler implements TransportRequestHandler { @Override public void messageReceived(LeaveRequest request, TransportChannel channel) throws Exception { listener.onLeave(request.node); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java index de56bdc3730..e5f0f2ffad3 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java @@ -107,7 +107,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem logger.debug("using group [{}], with port [{}], ttl [{}], and address [{}]", group, port, ttl, address); - this.transportService.registerHandler(ACTION_NAME, new MulticastPingResponseRequestHandler()); + this.transportService.registerRequestHandler(ACTION_NAME, MulticastPingResponse.class, ThreadPool.Names.SAME, new MulticastPingResponseRequestHandler()); } @Override @@ -326,13 +326,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } } - class MulticastPingResponseRequestHandler extends BaseTransportRequestHandler { - - @Override - public MulticastPingResponse newInstance() { - return new MulticastPingResponse(); - } - + class MulticastPingResponseRequestHandler implements TransportRequestHandler { @Override public void messageReceived(MulticastPingResponse request, TransportChannel channel) throws Exception { if (logger.isTraceEnabled()) { @@ -346,11 +340,6 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } static class MulticastPingResponse extends TransportRequest { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index 5fcba119281..de7a5e309f0 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -135,7 +135,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen } this.configuredTargetNodes = configuredTargetNodes.toArray(new DiscoveryNode[configuredTargetNodes.size()]); - transportService.registerHandler(ACTION_NAME, new UnicastPingRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, UnicastPingRequest.class, ThreadPool.Names.SAME, new UnicastPingRequestHandler()); ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]"); unicastConnectExecutor = EsExecutors.newScaling(0, concurrentConnects, 60, TimeUnit.SECONDS, threadFactory); @@ -483,17 +483,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen return unicastPingResponse; } - class UnicastPingRequestHandler extends BaseTransportRequestHandler { - - @Override - public UnicastPingRequest newInstance() { - return new UnicastPingRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } + class UnicastPingRequestHandler implements TransportRequestHandler { @Override public void messageReceived(UnicastPingRequest request, TransportChannel channel) throws Exception { @@ -504,9 +494,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen static class UnicastPingRequest extends TransportRequest { int id; - TimeValue timeout; - PingResponse pingResponse; UnicastPingRequest() { diff --git a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index e8352f389c5..fd1ba85c25c 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -52,9 +52,9 @@ public class PublishClusterStateAction extends AbstractComponent { public static final String ACTION_NAME = "internal:discovery/zen/publish"; - public static interface NewClusterStateListener { + public interface NewClusterStateListener { - static interface NewStateProcessed { + interface NewStateProcessed { void onNewClusterStateProcessed(); @@ -76,7 +76,7 @@ public class PublishClusterStateAction extends AbstractComponent { this.nodesProvider = nodesProvider; this.listener = listener; this.discoverySettings = discoverySettings; - transportService.registerHandler(ACTION_NAME, new PublishClusterStateRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, BytesTransportRequest.class, ThreadPool.Names.SAME, new PublishClusterStateRequestHandler()); } public void close() { @@ -171,12 +171,7 @@ public class PublishClusterStateAction extends AbstractComponent { } } - private class PublishClusterStateRequestHandler extends BaseTransportRequestHandler { - - @Override - public BytesTransportRequest newInstance() { - return new BytesTransportRequest(); - } + private class PublishClusterStateRequestHandler implements TransportRequestHandler { @Override public void messageReceived(BytesTransportRequest request, final TransportChannel channel) throws Exception { @@ -220,10 +215,5 @@ public class PublishClusterStateAction extends AbstractComponent { } } } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } } diff --git a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 622652d1c56..43dec7edb51 100644 --- a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -60,7 +60,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { this.transportService = transportService; this.clusterService = clusterService; this.allocationService = allocationService; - transportService.registerHandler(ACTION_NAME, new AllocateDangledRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, AllocateDangledRequest.class, ThreadPool.Names.SAME, new AllocateDangledRequestHandler()); } public void allocateDangled(Collection indices, final Listener listener) { @@ -100,13 +100,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { void onFailure(Throwable e); } - class AllocateDangledRequestHandler extends BaseTransportRequestHandler { - - @Override - public AllocateDangledRequest newInstance() { - return new AllocateDangledRequest(); - } - + class AllocateDangledRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel) throws Exception { String[] indexNames = new String[request.indices.length]; @@ -173,11 +167,6 @@ public class LocalAllocateDangledIndices extends AbstractComponent { } }); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } static class AllocateDangledRequest extends TransportRequest { diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 5f5c73873e7..77ab900ce90 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -53,7 +53,8 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA @Inject public TransportNodesListGatewayMetaState(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { - super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters, + Request.class, NodeRequest.class, ThreadPool.Names.GENERIC); } TransportNodesListGatewayMetaState init(GatewayMetaState metaState) { @@ -65,26 +66,11 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA return execute(new Request(nodesIds).timeout(timeout)); } - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - @Override protected boolean transportCompress() { return true; // compress since the metadata can become large } - @Override - protected Request newRequestInstance() { - return new Request(); - } - - @Override - protected NodeRequest newNodeRequest() { - return new NodeRequest(); - } - @Override protected NodeRequest newNodeRequest(String nodeId, Request request) { return new NodeRequest(nodeId, request); diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 87edb9cc3c1..4a812784e20 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -55,7 +55,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesOperat @Inject public TransportNodesListGatewayStartedShards(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, NodeEnvironment env) { - super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters, + Request.class, NodeRequest.class, ThreadPool.Names.GENERIC); this.nodeEnv = env; } @@ -63,26 +64,11 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesOperat return execute(new Request(shardId, indexUUID, nodesIds).timeout(timeout)); } - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - @Override protected boolean transportCompress() { return true; // this can become big... } - @Override - protected Request newRequestInstance() { - return new Request(); - } - - @Override - protected NodeRequest newNodeRequest() { - return new NodeRequest(); - } - @Override protected NodeRequest newNodeRequest(String nodeId, Request request) { return new NodeRequest(nodeId, request); diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java index 6878ab863d9..ced38423e54 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java @@ -36,8 +36,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportRequestHandler; import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; import java.util.*; @@ -82,7 +82,7 @@ public class RecoverySource extends AbstractComponent { this.recoverySettings = recoverySettings; - transportService.registerHandler(Actions.START_RECOVERY, new StartRecoveryTransportRequestHandler()); + transportService.registerRequestHandler(Actions.START_RECOVERY, StartRecoveryRequest.class, ThreadPool.Names.GENERIC, new StartRecoveryTransportRequestHandler()); } private RecoveryResponse recover(final StartRecoveryRequest request) { @@ -129,18 +129,7 @@ public class RecoverySource extends AbstractComponent { return handler.getResponse(); } - class StartRecoveryTransportRequestHandler extends BaseTransportRequestHandler { - - @Override - public StartRecoveryRequest newInstance() { - return new StartRecoveryRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + class StartRecoveryTransportRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final StartRecoveryRequest request, final TransportChannel channel) throws Exception { RecoveryResponse response = recover(request); diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 4382f6bdb29..fcfc9722a03 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -95,12 +95,12 @@ public class RecoveryTarget extends AbstractComponent { this.clusterService = clusterService; this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool); - transportService.registerHandler(Actions.FILES_INFO, new FilesInfoRequestHandler()); - transportService.registerHandler(Actions.FILE_CHUNK, new FileChunkTransportRequestHandler()); - transportService.registerHandler(Actions.CLEAN_FILES, new CleanFilesRequestHandler()); - transportService.registerHandler(Actions.PREPARE_TRANSLOG, new PrepareForTranslogOperationsRequestHandler()); - transportService.registerHandler(Actions.TRANSLOG_OPS, new TranslogOperationsRequestHandler()); - transportService.registerHandler(Actions.FINALIZE, new FinalizeRecoveryRequestHandler()); + transportService.registerRequestHandler(Actions.FILES_INFO, RecoveryFilesInfoRequest.class, ThreadPool.Names.GENERIC, new FilesInfoRequestHandler()); + transportService.registerRequestHandler(Actions.FILE_CHUNK, RecoveryFileChunkRequest.class, ThreadPool.Names.GENERIC, new FileChunkTransportRequestHandler()); + transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest.class, ThreadPool.Names.GENERIC, new CleanFilesRequestHandler()); + transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest.class, ThreadPool.Names.GENERIC, new PrepareForTranslogOperationsRequestHandler()); + transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest.class, ThreadPool.Names.GENERIC, new TranslogOperationsRequestHandler()); + transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest.class, ThreadPool.Names.GENERIC, new FinalizeRecoveryRequestHandler()); indicesLifecycle.addListener(new IndicesLifecycle.Listener() { @Override @@ -267,17 +267,7 @@ public class RecoveryTarget extends AbstractComponent { void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure); } - class PrepareForTranslogOperationsRequestHandler extends BaseTransportRequestHandler { - - @Override - public RecoveryPrepareForTranslogOperationsRequest newInstance() { - return new RecoveryPrepareForTranslogOperationsRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } + class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler { @Override public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { @@ -290,17 +280,7 @@ public class RecoveryTarget extends AbstractComponent { } } - class FinalizeRecoveryRequestHandler extends BaseTransportRequestHandler { - - @Override - public RecoveryFinalizeRecoveryRequest newInstance() { - return new RecoveryFinalizeRecoveryRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } + class FinalizeRecoveryRequestHandler implements TransportRequestHandler { @Override public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception { @@ -312,18 +292,7 @@ public class RecoveryTarget extends AbstractComponent { } } - class TranslogOperationsRequestHandler extends BaseTransportRequestHandler { - - - @Override - public RecoveryTranslogOperationsRequest newInstance() { - return new RecoveryTranslogOperationsRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } + class TranslogOperationsRequestHandler implements TransportRequestHandler { @Override public void messageReceived(RecoveryTranslogOperationsRequest request, TransportChannel channel) throws Exception { @@ -339,17 +308,7 @@ public class RecoveryTarget extends AbstractComponent { } } - class FilesInfoRequestHandler extends BaseTransportRequestHandler { - - @Override - public RecoveryFilesInfoRequest newInstance() { - return new RecoveryFilesInfoRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } + class FilesInfoRequestHandler implements TransportRequestHandler { @Override public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception { @@ -370,17 +329,7 @@ public class RecoveryTarget extends AbstractComponent { } } - class CleanFilesRequestHandler extends BaseTransportRequestHandler { - - @Override - public RecoveryCleanFilesRequest newInstance() { - return new RecoveryCleanFilesRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } + class CleanFilesRequestHandler implements TransportRequestHandler { @Override public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception { @@ -419,21 +368,11 @@ public class RecoveryTarget extends AbstractComponent { } } - class FileChunkTransportRequestHandler extends BaseTransportRequestHandler { + class FileChunkTransportRequestHandler implements TransportRequestHandler { // How many bytes we've copied since we last called RateLimiter.pause final AtomicLong bytesSinceLastPause = new AtomicLong(); - @Override - public RecoveryFileChunkRequest newInstance() { - return new RecoveryFileChunkRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - @Override public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception { try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { diff --git a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 4737271c203..32e695a828c 100644 --- a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -112,7 +112,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe this.indicesService = indicesService; this.clusterService = clusterService; this.transportService = transportService; - transportService.registerHandler(ACTION_SHARD_EXISTS, new ShardActiveRequestHandler()); + transportService.registerRequestHandler(ACTION_SHARD_EXISTS, ShardActiveRequest.class, ThreadPool.Names.SAME, new ShardActiveRequestHandler()); // we don't limit by default (we default to CMS's auto throttle instead): this.rateLimitingType = settings.get("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name()); @@ -324,17 +324,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe } - private class ShardActiveRequestHandler extends BaseTransportRequestHandler { - - @Override - public ShardActiveRequest newInstance() { - return new ShardActiveRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } + private class ShardActiveRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final ShardActiveRequest request, final TransportChannel channel) throws Exception { diff --git a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index 26223aac2ea..ab1ba9232aa 100644 --- a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -71,7 +71,8 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio @Inject public TransportNodesListShardStoreMetaData(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, NodeEnvironment nodeEnv, ActionFilters actionFilters) { - super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters, + Request.class, NodeRequest.class, ThreadPool.Names.GENERIC); this.indicesService = indicesService; this.nodeEnv = nodeEnv; } @@ -80,21 +81,6 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio return execute(new Request(shardId, onlyUnallocated, nodesIds).timeout(timeout)); } - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - - @Override - protected Request newRequestInstance() { - return new Request(); - } - - @Override - protected NodeRequest newNodeRequest() { - return new NodeRequest(); - } - @Override protected NodeRequest newNodeRequest(String nodeId, Request request) { return new NodeRequest(nodeId, request); diff --git a/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 3150c8e6b29..11fb1cb3225 100644 --- a/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.repositories; import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -30,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.threadpool.ThreadPool; @@ -58,7 +56,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { this.transportService = transportService; this.clusterService = clusterService; this.repositoriesService = repositoriesService; - transportService.registerHandler(ACTION_NAME, new VerifyNodeRepositoryRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest.class, ThreadPool.Names.SAME, new VerifyNodeRepositoryRequestHandler()); } public void close() { @@ -117,16 +115,15 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { blobStoreIndexShardRepository.verify(verificationToken); } - private class VerifyNodeRepositoryRequest extends TransportRequest { + static class VerifyNodeRepositoryRequest extends TransportRequest { private String repository; - private String verificationToken; - private VerifyNodeRepositoryRequest() { + VerifyNodeRepositoryRequest() { } - private VerifyNodeRepositoryRequest(String repository, String verificationToken) { + VerifyNodeRepositoryRequest(String repository, String verificationToken) { this.repository = repository; this.verificationToken = verificationToken; } @@ -146,18 +143,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { } } - private class VerifyNodeRepositoryRequestHandler extends BaseTransportRequestHandler { - - @Override - public VerifyNodeRepositoryRequest newInstance() { - return new VerifyNodeRepositoryRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - + class VerifyNodeRepositoryRequestHandler implements TransportRequestHandler { @Override public void messageReceived(VerifyNodeRepositoryRequest request, TransportChannel channel) throws Exception { doVerify(request.repository, request.verificationToken); diff --git a/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java b/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java index 0b39d50d4bf..d783adee752 100644 --- a/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java +++ b/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java @@ -38,7 +38,7 @@ public class PublishRiverClusterStateAction extends AbstractComponent { public static final String ACTION_NAME = "internal:river/state/publish"; - public static interface NewClusterStateListener { + public interface NewClusterStateListener { void onNewClusterState(RiverClusterState clusterState); } @@ -54,7 +54,7 @@ public class PublishRiverClusterStateAction extends AbstractComponent { this.transportService = transportService; this.clusterService = clusterService; this.listener = listener; - transportService.registerHandler(ACTION_NAME, new PublishClusterStateRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, PublishClusterStateRequest.class, ThreadPool.Names.SAME, new PublishClusterStateRequestHandler()); } public void close() { @@ -87,11 +87,11 @@ public class PublishRiverClusterStateAction extends AbstractComponent { } } - private class PublishClusterStateRequest extends TransportRequest { + static class PublishClusterStateRequest extends TransportRequest { private RiverClusterState clusterState; - private PublishClusterStateRequest() { + PublishClusterStateRequest() { } private PublishClusterStateRequest(RiverClusterState clusterState) { @@ -111,18 +111,7 @@ public class PublishRiverClusterStateAction extends AbstractComponent { } } - private class PublishClusterStateRequestHandler extends BaseTransportRequestHandler { - - @Override - public PublishClusterStateRequest newInstance() { - return new PublishClusterStateRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - + private class PublishClusterStateRequestHandler implements TransportRequestHandler { @Override public void messageReceived(PublishClusterStateRequest request, TransportChannel channel) throws Exception { listener.onNewClusterState(request.clusterState); diff --git a/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java b/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java index 13fe4bd15fb..5730a023554 100644 --- a/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java +++ b/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java @@ -76,20 +76,20 @@ public class SearchServiceTransportAction extends AbstractComponent { this.transportService = transportService; this.searchService = searchService; - transportService.registerHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextTransportHandler()); - transportService.registerHandler(FREE_CONTEXT_ACTION_NAME, new SearchFreeContextTransportHandler()); - transportService.registerHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsTransportHandler()); - transportService.registerHandler(DFS_ACTION_NAME, new SearchDfsTransportHandler()); - transportService.registerHandler(QUERY_ACTION_NAME, new SearchQueryTransportHandler()); - transportService.registerHandler(QUERY_ID_ACTION_NAME, new SearchQueryByIdTransportHandler()); - transportService.registerHandler(QUERY_SCROLL_ACTION_NAME, new SearchQueryScrollTransportHandler()); - transportService.registerHandler(QUERY_FETCH_ACTION_NAME, new SearchQueryFetchTransportHandler()); - transportService.registerHandler(QUERY_QUERY_FETCH_ACTION_NAME, new SearchQueryQueryFetchTransportHandler()); - transportService.registerHandler(QUERY_FETCH_SCROLL_ACTION_NAME, new SearchQueryFetchScrollTransportHandler()); - transportService.registerHandler(FETCH_ID_SCROLL_ACTION_NAME, new ScrollFetchByIdTransportHandler()); - transportService.registerHandler(FETCH_ID_ACTION_NAME, new SearchFetchByIdTransportHandler()); - transportService.registerHandler(SCAN_ACTION_NAME, new SearchScanTransportHandler()); - transportService.registerHandler(SCAN_SCROLL_ACTION_NAME, new SearchScanScrollTransportHandler()); + transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest.class, ThreadPool.Names.SAME, new FreeContextTransportHandler<>()); + transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest.class, ThreadPool.Names.SAME, new FreeContextTransportHandler()); + transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest.class, ThreadPool.Names.SAME, new ClearScrollContextsTransportHandler()); + transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest.class, ThreadPool.Names.SEARCH, new SearchDfsTransportHandler()); + transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest.class, ThreadPool.Names.SEARCH, new SearchQueryTransportHandler()); + transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest.class, ThreadPool.Names.SEARCH, new SearchQueryByIdTransportHandler()); + transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest.class, ThreadPool.Names.SEARCH, new SearchQueryScrollTransportHandler()); + transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest.class, ThreadPool.Names.SEARCH, new SearchQueryFetchTransportHandler()); + transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest.class, ThreadPool.Names.SEARCH, new SearchQueryQueryFetchTransportHandler()); + transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest.class, ThreadPool.Names.SEARCH, new SearchQueryFetchScrollTransportHandler()); + transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest.class, ThreadPool.Names.SEARCH, new FetchByIdTransportHandler<>()); + transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest.class, ThreadPool.Names.SEARCH, new FetchByIdTransportHandler()); + transportService.registerRequestHandler(SCAN_ACTION_NAME, ShardSearchTransportRequest.class, ThreadPool.Names.SEARCH, new SearchScanTransportHandler()); + transportService.registerRequestHandler(SCAN_SCROLL_ACTION_NAME, InternalScrollSearchRequest.class, ThreadPool.Names.SEARCH, new SearchScanScrollTransportHandler()); } public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) { @@ -327,36 +327,12 @@ public class SearchServiceTransportAction extends AbstractComponent { } } - private abstract class BaseFreeContextTransportHandler extends BaseTransportRequestHandler { - @Override - public abstract FreeContextRequest newInstance(); - + class FreeContextTransportHandler implements TransportRequestHandler { @Override public void messageReceived(FreeContextRequest request, TransportChannel channel) throws Exception { boolean freed = searchService.freeContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); } - - @Override - public String executor() { - // freeing the context is cheap, - // no need for fork it to another thread - return ThreadPool.Names.SAME; - } - } - - class ScrollFreeContextTransportHandler extends BaseFreeContextTransportHandler { - @Override - public ScrollFreeContextRequest newInstance() { - return new ScrollFreeContextRequest(); - } - } - - class SearchFreeContextTransportHandler extends BaseFreeContextTransportHandler { - @Override - public SearchFreeContextRequest newInstance() { - return new SearchFreeContextRequest(); - } } static class ClearScrollContextsRequest extends TransportRequest { @@ -370,226 +346,91 @@ public class SearchServiceTransportAction extends AbstractComponent { } - class ClearScrollContextsTransportHandler extends BaseTransportRequestHandler { - - @Override - public ClearScrollContextsRequest newInstance() { - return new ClearScrollContextsRequest(); - } - + class ClearScrollContextsTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ClearScrollContextsRequest request, TransportChannel channel) throws Exception { searchService.freeAllScrollContexts(); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - // freeing the context is cheap, - // no need for fork it to another thread - return ThreadPool.Names.SAME; - } } - private class SearchDfsTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardSearchTransportRequest newInstance() { - return new ShardSearchTransportRequest(); - } - + class SearchDfsTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { DfsSearchResult result = searchService.executeDfsPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardSearchTransportRequest newInstance() { - return new ShardSearchTransportRequest(); - } - + class SearchQueryTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { QuerySearchResultProvider result = searchService.executeQueryPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryByIdTransportHandler extends BaseTransportRequestHandler { - - @Override - public QuerySearchRequest newInstance() { - return new QuerySearchRequest(); - } - + class SearchQueryByIdTransportHandler implements TransportRequestHandler { @Override public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception { QuerySearchResult result = searchService.executeQueryPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryScrollTransportHandler extends BaseTransportRequestHandler { - - @Override - public InternalScrollSearchRequest newInstance() { - return new InternalScrollSearchRequest(); - } - + class SearchQueryScrollTransportHandler implements TransportRequestHandler { @Override public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception { ScrollQuerySearchResult result = searchService.executeQueryPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryFetchTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardSearchTransportRequest newInstance() { - return new ShardSearchTransportRequest(); - } - + class SearchQueryFetchTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { QueryFetchSearchResult result = searchService.executeFetchPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryQueryFetchTransportHandler extends BaseTransportRequestHandler { - - @Override - public QuerySearchRequest newInstance() { - return new QuerySearchRequest(); - } - + class SearchQueryQueryFetchTransportHandler implements TransportRequestHandler { @Override public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception { QueryFetchSearchResult result = searchService.executeFetchPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private abstract class FetchByIdTransportHandler extends BaseTransportRequestHandler { - - @Override - public abstract Request newInstance(); - + class FetchByIdTransportHandler implements TransportRequestHandler { @Override public void messageReceived(Request request, TransportChannel channel) throws Exception { FetchSearchResult result = searchService.executeFetchPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class ScrollFetchByIdTransportHandler extends FetchByIdTransportHandler { - @Override - public ShardFetchRequest newInstance() { - return new ShardFetchRequest(); - } - } - - private class SearchFetchByIdTransportHandler extends FetchByIdTransportHandler { - @Override - public ShardFetchSearchRequest newInstance() { - return new ShardFetchSearchRequest(); - } - } - - private class SearchQueryFetchScrollTransportHandler extends BaseTransportRequestHandler { - - @Override - public InternalScrollSearchRequest newInstance() { - return new InternalScrollSearchRequest(); - } - + class SearchQueryFetchScrollTransportHandler implements TransportRequestHandler { @Override public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception { ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchScanTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardSearchTransportRequest newInstance() { - return new ShardSearchTransportRequest(); - } - + class SearchScanTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { QuerySearchResult result = searchService.executeScan(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchScanScrollTransportHandler extends BaseTransportRequestHandler { - - @Override - public InternalScrollSearchRequest newInstance() { - return new InternalScrollSearchRequest(); - } - + class SearchScanScrollTransportHandler implements TransportRequestHandler { @Override public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception { ScrollQueryFetchSearchResult result = searchService.executeScan(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } } diff --git a/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/src/main/java/org/elasticsearch/snapshots/RestoreService.java index d0f3a35bcfa..6790991c298 100644 --- a/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -128,7 +128,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis this.allocationService = allocationService; this.createIndexService = createIndexService; this.dynamicSettings = dynamicSettings; - transportService.registerHandler(UPDATE_RESTORE_ACTION_NAME, new UpdateRestoreStateRequestHandler()); + transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest.class, ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler()); clusterService.add(this); } @@ -937,7 +937,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis /** * Internal class that is used to send notifications about finished shard restore operations to master node */ - private static class UpdateIndexShardRestoreStatusRequest extends TransportRequest { + static class UpdateIndexShardRestoreStatusRequest extends TransportRequest { private SnapshotId snapshotId; private ShardId shardId; private ShardRestoreStatus status; @@ -984,22 +984,11 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis /** * Internal class that is used to send notifications about finished shard restore operations to master node */ - private class UpdateRestoreStateRequestHandler extends BaseTransportRequestHandler { - - @Override - public UpdateIndexShardRestoreStatusRequest newInstance() { - return new UpdateIndexShardRestoreStatusRequest(); - } - + class UpdateRestoreStateRequestHandler implements TransportRequestHandler { @Override public void messageReceived(UpdateIndexShardRestoreStatusRequest request, final TransportChannel channel) throws Exception { updateRestoreStateOnMaster(request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } } diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index ab7ec1e1755..54693726d6d 100644 --- a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -118,7 +118,7 @@ public class SnapshotsService extends AbstractLifecycleComponent { - - @Override - public UpdateIndexShardSnapshotStatusRequest newInstance() { - return new UpdateIndexShardSnapshotStatusRequest(); - } - + class UpdateSnapshotStateRequestHandler implements TransportRequestHandler { @Override public void messageReceived(UpdateIndexShardSnapshotStatusRequest request, final TransportChannel channel) throws Exception { innerUpdateSnapshotState(request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } - - } diff --git a/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java b/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java deleted file mode 100644 index 98c6f784d3b..00000000000 --- a/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport; - -/** - * A simple based class that always spawns. - */ -public abstract class BaseTransportRequestHandler implements TransportRequestHandler { - - /** - * Default force execution to false. - */ - @Override - public boolean isForceExecution() { - return false; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java new file mode 100644 index 00000000000..422cb6d27b6 --- /dev/null +++ b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.ElasticsearchIllegalStateException; + +import java.lang.reflect.Constructor; + +/** + * + */ +public class RequestHandlerRegistry { + + private final String action; + private final Constructor requestConstructor; + private final TransportRequestHandler handler; + private final boolean forceExecution; + private final String executor; + + RequestHandlerRegistry(String action, Class request, TransportRequestHandler handler, + String executor, boolean forceExecution) { + this.action = action; + try { + this.requestConstructor = request.getDeclaredConstructor(); + } catch (NoSuchMethodException e) { + throw new ElasticsearchIllegalStateException("failed to create constructor (does it have a default constructor?) for request " + request, e); + } + this.requestConstructor.setAccessible(true); + this.handler = handler; + this.forceExecution = forceExecution; + this.executor = executor; + } + + public String getAction() { + return action; + } + + public Request newRequest() { + try { + return requestConstructor.newInstance(); + } catch (Exception e) { + throw new ElasticsearchIllegalStateException("failed to instantiate request ", e); + } + } + + public TransportRequestHandler getHandler() { + return handler; + } + + public boolean isForceExecution() { + return forceExecution; + } + + public String getExecutor() { + return executor; + } +} diff --git a/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java b/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java index fd62f30418b..5b5e58de06d 100644 --- a/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java +++ b/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java @@ -24,14 +24,5 @@ package org.elasticsearch.transport; */ public interface TransportRequestHandler { - T newInstance(); - void messageReceived(T request, TransportChannel channel) throws Exception; - - String executor(); - - /** - * See {@link org.elasticsearch.common.util.concurrent.AbstractRunnable#isForceExecution()}. - */ - boolean isForceExecution(); } diff --git a/src/main/java/org/elasticsearch/transport/TransportService.java b/src/main/java/org/elasticsearch/transport/TransportService.java index 975f669a592..ad17baa908d 100644 --- a/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/src/main/java/org/elasticsearch/transport/TransportService.java @@ -61,8 +61,8 @@ public class TransportService extends AbstractLifecycleComponent serverHandlers = ImmutableMap.of(); - final Object serverHandlersMutex = new Object(); + volatile ImmutableMap requestHandlers = ImmutableMap.of(); + final Object requestHandlerMutex = new Object(); final ConcurrentMapLong clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); @@ -324,25 +324,25 @@ public class TransportService extends AbstractLifecycleComponent void registerRequestHandler(String action, Class request, String executor, TransportRequestHandler handler) { + registerRequestHandler(action, request, executor, false, handler); + } + + /** + * Registers a new request handler + * @param action The action the request handler is associated with + * @param request The request class that will be used to constrcut new instances for streaming + * @param executor The executor the request handling will be executed on + * @param forceExecution Force execution on the executor queue and never reject it + * @param handler The handler itself that implements the request handling + */ + public void registerRequestHandler(String action, Class request, String executor, boolean forceExecution, TransportRequestHandler handler) { + synchronized (requestHandlerMutex) { + RequestHandlerRegistry reg = new RequestHandlerRegistry<>(action, request, handler, executor, forceExecution); + RequestHandlerRegistry replaced = requestHandlers.get(reg.getAction()); + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + if (replaced != null) { + logger.warn("registered two transport handlers for action {}, handlers: {}, {}", reg.getAction(), reg.getHandler(), replaced.getHandler()); } } } public void removeHandler(String action) { - synchronized (serverHandlersMutex) { - serverHandlers = MapBuilder.newMapBuilder(serverHandlers).remove(action).immutableMap(); + synchronized (requestHandlerMutex) { + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).remove(action).immutableMap(); } } - protected TransportRequestHandler getHandler(String action) { - return serverHandlers.get(action); + protected RequestHandlerRegistry getRequestHandler(String action) { + return requestHandlers.get(action); } protected class Adapter implements TransportServiceAdapter { @@ -460,8 +480,8 @@ public class TransportService extends AbstractLifecycleComponent implem transportServiceAdapter.onRequestReceived(requestId, action); final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action, requestId, version); try { - final TransportRequestHandler handler = transportServiceAdapter.handler(action); - if (handler == null) { + final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); + if (reg == null) { throw new ActionNotFoundTransportException("Action [" + action + "] not found"); } - final TransportRequest request = handler.newInstance(); + final TransportRequest request = reg.newRequest(); request.remoteAddress(sourceTransport.boundAddress.publishAddress()); request.readFrom(stream); - if (ThreadPool.Names.SAME.equals(handler.executor())) { + if (ThreadPool.Names.SAME.equals(reg.getExecutor())) { //noinspection unchecked - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } else { - threadPool.executor(handler.executor()).execute(new AbstractRunnable() { + threadPool.executor(reg.getExecutor()).execute(new AbstractRunnable() { @Override protected void doRun() throws Exception { //noinspection unchecked - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } @Override public boolean isForceExecution() { - return handler.isForceExecution(); + return reg.isForceExecution(); } @Override diff --git a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index 426030aef2a..0201829fe08 100644 --- a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -209,18 +209,18 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { transportServiceAdapter.onRequestReceived(requestId, action); final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, requestId, version, profileName); try { - final TransportRequestHandler handler = transportServiceAdapter.handler(action); - if (handler == null) { + final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); + if (reg == null) { throw new ActionNotFoundTransportException(action); } - final TransportRequest request = handler.newInstance(); + final TransportRequest request = reg.newRequest(); request.remoteAddress(new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress())); request.readFrom(buffer); - if (ThreadPool.Names.SAME.equals(handler.executor())) { + if (ThreadPool.Names.SAME.equals(reg.getExecutor())) { //noinspection unchecked - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } else { - threadPool.executor(handler.executor()).execute(new RequestHandler(handler, request, transportChannel, action)); + threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); } } catch (Throwable e) { try { @@ -260,27 +260,25 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { } class RequestHandler extends AbstractRunnable { - private final TransportRequestHandler handler; + private final RequestHandlerRegistry reg; private final TransportRequest request; private final NettyTransportChannel transportChannel; - private final String action; - public RequestHandler(TransportRequestHandler handler, TransportRequest request, NettyTransportChannel transportChannel, String action) { - this.handler = handler; + public RequestHandler(RequestHandlerRegistry reg, TransportRequest request, NettyTransportChannel transportChannel) { + this.reg = reg; this.request = request; this.transportChannel = transportChannel; - this.action = action; } @SuppressWarnings({"unchecked"}) @Override protected void doRun() throws Exception { - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } @Override public boolean isForceExecution() { - return handler.isForceExecution(); + return reg.isForceExecution(); } @Override @@ -290,7 +288,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e1); + logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1); logger.warn("Actual Exception", e); } } diff --git a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java index 4cf46150904..90118aab371 100644 --- a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java +++ b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java @@ -914,8 +914,8 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest { } @Override - public void registerHandler(String action, TransportRequestHandler handler) { - super.registerHandler(action, new InterceptingRequestHandler(action, handler)); + public void registerRequestHandler(String action, Class request, String executor, boolean forceExecution, TransportRequestHandler handler) { + super.registerRequestHandler(action, request, executor, forceExecution, new InterceptingRequestHandler(action, handler)); } private class InterceptingRequestHandler implements TransportRequestHandler { @@ -928,11 +928,6 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest { this.action = action; } - @Override - public TransportRequest newInstance() { - return requestHandler.newInstance(); - } - @Override public void messageReceived(TransportRequest request, TransportChannel channel) throws Exception { synchronized (InterceptingTransportService.this) { @@ -949,16 +944,6 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest { } requestHandler.messageReceived(request, channel); } - - @Override - public String executor() { - return requestHandler.executor(); - } - - @Override - public boolean isForceExecution() { - return requestHandler.isForceExecution(); - } } } } diff --git a/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java b/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java index efbfdf395f2..9503e053331 100644 --- a/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java +++ b/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java @@ -515,17 +515,7 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { ThreadPool threadPool) { super(settings, actionName, transportService, clusterService, null, threadPool, new ShardStateAction(settings, clusterService, transportService, null, null), - new ActionFilters(new HashSet())); - } - - @Override - protected Request newRequestInstance() { - return new Request(); - } - - @Override - protected Request newReplicaRequestInstance() { - return new Request(); + new ActionFilters(new HashSet()), Request.class, Request.class, ThreadPool.Names.SAME); } @Override @@ -533,11 +523,6 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { return new Response(); } - @Override - protected String executor() { - return ThreadPool.Names.SAME; - } - @Override protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { boolean executedBefore = shardRequest.request.processedOnPrimary.getAndSet(true); @@ -546,8 +531,8 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { } @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - shardRequest.request.processedOnReplicas.incrementAndGet(); + protected void shardOperationOnReplica(ShardId shardId, Request request) { + request.processedOnReplicas.incrementAndGet(); } @Override diff --git a/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java index 2abd71a7662..5a127c44a60 100644 --- a/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java +++ b/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java @@ -73,17 +73,7 @@ public class BenchmarkNettyLargeMessages { transportServiceClient.connectToNode(bigNode); transportServiceClient.connectToNode(smallNode); - transportServiceServer.registerHandler("benchmark", new BaseTransportRequestHandler() { - @Override - public BenchmarkMessageRequest newInstance() { - return new BenchmarkMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + transportServiceServer.registerRequestHandler("benchmark", BenchmarkMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception { channel.sendResponse(new BenchmarkMessageResponse(request)); diff --git a/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java b/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java index be3717b65d3..4fb24db7f91 100644 --- a/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java +++ b/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java @@ -80,17 +80,7 @@ public class TransportBenchmark { final DiscoveryNode node = new DiscoveryNode("server", serverTransportService.boundAddress().publishAddress(), Version.CURRENT); - serverTransportService.registerHandler("benchmark", new BaseTransportRequestHandler() { - @Override - public BenchmarkMessageRequest newInstance() { - return new BenchmarkMessageRequest(); - } - - @Override - public String executor() { - return executor; - } - + serverTransportService.registerRequestHandler("benchmark", BenchmarkMessageRequest.class, executor, new TransportRequestHandler() { @Override public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception { channel.sendResponse(new BenchmarkMessageResponse(request)); diff --git a/src/test/java/org/elasticsearch/test/transport/MockTransportService.java b/src/test/java/org/elasticsearch/test/transport/MockTransportService.java index cfb7284f749..d7cc1ed1ca1 100644 --- a/src/test/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/src/test/java/org/elasticsearch/test/transport/MockTransportService.java @@ -228,10 +228,10 @@ public class MockTransportService extends TransportService { } // poor mans request cloning... - TransportRequestHandler handler = MockTransportService.this.getHandler(action); + RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); BytesStreamOutput bStream = new BytesStreamOutput(); request.writeTo(bStream); - final TransportRequest clonedRequest = handler.newInstance(); + final TransportRequest clonedRequest = reg.newRequest(); clonedRequest.readFrom(new BytesStreamInput(bStream.bytes())); threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() { diff --git a/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java b/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java index 38ef0285179..09bdd79c70a 100644 --- a/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java +++ b/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java @@ -125,17 +125,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testHelloWorld() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { assertThat("moshe", equalTo(request.message)); @@ -221,17 +211,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase serviceA.disconnectFromNode(nodeA); } final AtomicReference exception = new AtomicReference<>(); - serviceA.registerHandler("localNode", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("localNode", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { try { @@ -273,17 +253,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVoidMessageCompressed() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public TransportRequest.Empty newInstance() { - return TransportRequest.Empty.INSTANCE; - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", TransportRequest.Empty.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(TransportRequest.Empty request, TransportChannel channel) { try { @@ -330,17 +300,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testHelloWorldCompressed() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { assertThat("moshe", equalTo(request.message)); @@ -389,17 +349,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testErrorMessage() { - serviceA.registerHandler("sayHelloException", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHelloException", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { assertThat("moshe", equalTo(request.message)); @@ -463,20 +413,9 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase public void testNotifyOnShutdown() throws Exception { final CountDownLatch latch2 = new CountDownLatch(1); - serviceA.registerHandler("foobar", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("foobar", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { - try { latch2.await(); logger.info("Stop ServiceB now"); @@ -500,17 +439,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testTimeoutSendExceptionWithNeverSendingBackResponse() throws Exception { - serviceA.registerHandler("sayHelloTimeoutNoResponse", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHelloTimeoutNoResponse", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { assertThat("moshe", equalTo(request.message)); @@ -559,17 +488,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testTimeoutSendExceptionWithDelayedResponse() throws Exception { - serviceA.registerHandler("sayHelloTimeoutDelayedResponse", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHelloTimeoutDelayedResponse", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { TimeValue sleep = TimeValue.parseTimeValue(request.message, null); @@ -658,29 +577,14 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test @TestLogging(value = "test. transport.tracer:TRACE") public void testTracerLog() throws InterruptedException { - TransportRequestHandler handler = new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(""); - } - + TransportRequestHandler handler = new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { channel.sendResponse(new StringMessageResponse("")); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }; - TransportRequestHandler handlerWithError = new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(""); - } - + TransportRequestHandler handlerWithError = new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { if (request.timeout() > 0) { @@ -689,11 +593,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase channel.sendResponse(new RuntimeException("")); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }; final Semaphore requestCompleted = new Semaphore(0); @@ -720,10 +619,10 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase } }; - serviceA.registerHandler("test", handler); - serviceA.registerHandler("testError", handlerWithError); - serviceB.registerHandler("test", handler); - serviceB.registerHandler("testError", handlerWithError); + serviceA.registerRequestHandler("test", StringMessageRequest.class, ThreadPool.Names.SAME, handler); + serviceA.registerRequestHandler("testError", StringMessageRequest.class, ThreadPool.Names.SAME, handlerWithError); + serviceB.registerRequestHandler("test", StringMessageRequest.class, ThreadPool.Names.SAME, handler); + serviceB.registerRequestHandler("testError", StringMessageRequest.class, ThreadPool.Names.SAME, handlerWithError); final Tracer tracer = new Tracer(); serviceA.addTracer(tracer); @@ -983,12 +882,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVersion_from0to1() throws Exception { - serviceB.registerHandler("/version", new BaseTransportRequestHandler() { - @Override - public Version1Request newInstance() { - return new Version1Request(); - } - + serviceB.registerRequestHandler("/version", Version1Request.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(Version1Request request, TransportChannel channel) throws Exception { assertThat(request.value1, equalTo(1)); @@ -998,11 +892,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase response.value2 = 2; channel.sendResponse(response); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }); Version0Request version0Request = new Version0Request(); @@ -1035,12 +924,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVersion_from1to0() throws Exception { - serviceA.registerHandler("/version", new BaseTransportRequestHandler() { - @Override - public Version0Request newInstance() { - return new Version0Request(); - } - + serviceA.registerRequestHandler("/version", Version0Request.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(Version0Request request, TransportChannel channel) throws Exception { assertThat(request.value1, equalTo(1)); @@ -1048,11 +932,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase response.value1 = 1; channel.sendResponse(response); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }); Version1Request version1Request = new Version1Request(); @@ -1088,12 +967,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVersion_from1to1() throws Exception { - serviceB.registerHandler("/version", new BaseTransportRequestHandler() { - @Override - public Version1Request newInstance() { - return new Version1Request(); - } - + serviceB.registerRequestHandler("/version", Version1Request.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(Version1Request request, TransportChannel channel) throws Exception { assertThat(request.value1, equalTo(1)); @@ -1103,11 +977,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase response.value2 = 2; channel.sendResponse(response); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }); Version1Request version1Request = new Version1Request(); @@ -1143,12 +1012,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVersion_from0to0() throws Exception { - serviceA.registerHandler("/version", new BaseTransportRequestHandler() { - @Override - public Version0Request newInstance() { - return new Version0Request(); - } - + serviceA.registerRequestHandler("/version", Version0Request.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(Version0Request request, TransportChannel channel) throws Exception { assertThat(request.value1, equalTo(1)); @@ -1156,11 +1020,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase response.value1 = 1; channel.sendResponse(response); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }); Version0Request version0Request = new Version0Request(); @@ -1193,17 +1052,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testMockFailToSendNoConnectRule() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { assertThat("moshe", equalTo(request.message)); @@ -1262,17 +1111,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testMockUnresponsiveRule() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { assertThat("moshe", equalTo(request.message)); @@ -1335,28 +1174,13 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase final CountDownLatch latch = new CountDownLatch(2); final AtomicReference addressA = new AtomicReference<>(); final AtomicReference addressB = new AtomicReference<>(); - serviceB.registerHandler("action1", new TransportRequestHandler() { - @Override - public TestRequest newInstance() { - return new TestRequest(); - } - + serviceB.registerRequestHandler("action1", TestRequest.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(TestRequest request, TransportChannel channel) throws Exception { addressA.set(request.remoteAddress()); channel.sendResponse(new TestResponse()); latch.countDown(); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public boolean isForceExecution() { - return false; - } }); serviceA.sendRequest(nodeB, "action1", new TestRequest(), new TransportResponseHandler() { @Override diff --git a/src/test/java/org/elasticsearch/transport/ActionNamesTests.java b/src/test/java/org/elasticsearch/transport/ActionNamesTests.java index f1e98801d41..69be9f8fdf2 100644 --- a/src/test/java/org/elasticsearch/transport/ActionNamesTests.java +++ b/src/test/java/org/elasticsearch/transport/ActionNamesTests.java @@ -19,14 +19,6 @@ package org.elasticsearch.transport; -import com.google.common.collect.Lists; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.get.GetIndexAction; -import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryAction; -import org.elasticsearch.action.exists.ExistsAction; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; -import org.elasticsearch.search.action.SearchServiceTransportAction; -import org.elasticsearch.repositories.VerifyNodeRepositoryAction; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -55,7 +47,7 @@ public class ActionNamesTests extends ElasticsearchIntegrationTest { @SuppressWarnings("unchecked") public void testActionNamesCategories() throws NoSuchFieldException, IllegalAccessException { TransportService transportService = internalCluster().getInstance(TransportService.class); - for (String action : transportService.serverHandlers.keySet()) { + for (String action : transportService.requestHandlers.keySet()) { assertThat("action doesn't belong to known category", action, either(startsWith("indices:admin")).or(startsWith("indices:monitor")) .or(startsWith("indices:data/read")).or(startsWith("indices:data/write")) .or(startsWith("cluster:admin")).or(startsWith("cluster:monitor")) diff --git a/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java b/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java index 9e323f809ee..8fe32cfc27e 100644 --- a/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java @@ -30,8 +30,6 @@ import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; -import org.hamcrest.Matchers; -import org.junit.Before; import org.junit.Test; import java.io.IOException; @@ -75,17 +73,7 @@ public class NettyScheduledPingTests extends ElasticsearchTestCase { assertThat(nettyA.scheduledPing.failedPings.count(), equalTo(0l)); assertThat(nettyB.scheduledPing.failedPings.count(), equalTo(0l)); - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public TransportRequest.Empty newInstance() { - return TransportRequest.Empty.INSTANCE; - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", TransportRequest.Empty.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(TransportRequest.Empty request, TransportChannel channel) { try { diff --git a/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java b/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java index 785aaf40abc..5edc6b0dfa7 100644 --- a/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java @@ -34,10 +34,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.TransportModule; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.*; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelPipelineFactory; @@ -115,21 +112,21 @@ public class NettyTransportTests extends ElasticsearchIntegrationTest { final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, requestId, version, name); try { - final TransportRequestHandler handler = transportServiceAdapter.handler(action); - if (handler == null) { + final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); + if (reg == null) { throw new ActionNotFoundTransportException(action); } - final TransportRequest request = handler.newInstance(); + final TransportRequest request = reg.newRequest(); request.remoteAddress(new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress())); request.readFrom(buffer); if (request.hasHeader("ERROR")) { throw new ElasticsearchException((String) request.getHeader("ERROR")); } - if (handler.executor() == ThreadPool.Names.SAME) { + if (reg.getExecutor() == ThreadPool.Names.SAME) { //noinspection unchecked - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } else { - threadPool.executor(handler.executor()).execute(new RequestHandler(handler, request, transportChannel, action)); + threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); } } catch (Throwable e) { try { @@ -144,27 +141,25 @@ public class NettyTransportTests extends ElasticsearchIntegrationTest { } class RequestHandler extends AbstractRunnable { - private final TransportRequestHandler handler; + private final RequestHandlerRegistry reg; private final TransportRequest request; private final NettyTransportChannel transportChannel; - private final String action; - public RequestHandler(TransportRequestHandler handler, TransportRequest request, NettyTransportChannel transportChannel, String action) { - this.handler = handler; + public RequestHandler(RequestHandlerRegistry reg, TransportRequest request, NettyTransportChannel transportChannel) { + this.reg = reg; this.request = request; this.transportChannel = transportChannel; - this.action = action; } @SuppressWarnings({"unchecked"}) @Override protected void doRun() throws Exception { - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } @Override public boolean isForceExecution() { - return handler.isForceExecution(); + return reg.isForceExecution(); } @Override @@ -174,7 +169,7 @@ public class NettyTransportTests extends ElasticsearchIntegrationTest { try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e1); + logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1); logger.warn("Actual Exception", e); } } } From 209b8cf952226e8b5b099a0978c0f9b3c69e63f1 Mon Sep 17 00:00:00 2001 From: Shay Banon Date: Fri, 24 Apr 2015 15:04:25 +0200 Subject: [PATCH 108/236] TEST: assert we can create an instance of request --- .../java/org/elasticsearch/transport/RequestHandlerRegistry.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 422cb6d27b6..24cea195224 100644 --- a/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -43,6 +43,7 @@ public class RequestHandlerRegistry { throw new ElasticsearchIllegalStateException("failed to create constructor (does it have a default constructor?) for request " + request, e); } this.requestConstructor.setAccessible(true); + assert newRequest() != null; this.handler = handler; this.forceExecution = forceExecution; this.executor = executor; From fca05edbd4fdf19884bd087007163a74922012dd Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 24 Apr 2015 09:51:01 -0400 Subject: [PATCH 109/236] add constant only used once to make it harder to read the code --- src/main/java/org/elasticsearch/bootstrap/Bootstrap.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 6d1652f2525..56643105dd5 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -92,8 +92,14 @@ public class Bootstrap { } } + /** + * option for elasticsearch.yml etc to turn off our security manager completely, + * for example if you want to have your own configuration or just disable. + */ + static final String SECURITY_SETTING = "security.manager.enabled"; + private void setupSecurity(Settings settings, Environment environment) throws Exception { - if (settings.getAsBoolean("security.manager.enabled", true)) { + if (settings.getAsBoolean(SECURITY_SETTING, true)) { Security.configure(environment); } } From a3d03fdeb8333590b304a9b7d72d1e137e082511 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Fri, 24 Apr 2015 10:09:30 -0400 Subject: [PATCH 110/236] style changes to Bootstrap --- .../elasticsearch/bootstrap/Bootstrap.java | 33 +++++++++++-------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 56643105dd5..19bc81b9972 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -58,13 +58,13 @@ public class Bootstrap { private static volatile CountDownLatch keepAliveLatch; private static Bootstrap bootstrap; - private void setup(boolean addShutdownHook, Tuple tuple) throws Exception { - setupSecurity(tuple.v1(), tuple.v2()); - if (tuple.v1().getAsBoolean("bootstrap.mlockall", false)) { + private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception { + setupSecurity(settings, environment); + if (settings.getAsBoolean("bootstrap.mlockall", false)) { Natives.tryMlockall(); } - NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder().settings(tuple.v1()).loadConfigSettings(false); + NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder().settings(settings).loadConfigSettings(false); node = nodeBuilder.build(); if (addShutdownHook) { Runtime.getRuntime().addShutdownHook(new Thread() { @@ -75,7 +75,7 @@ public class Bootstrap { }); } - if (tuple.v1().getAsBoolean("bootstrap.ctrlhandler", true)) { + if (settings.getAsBoolean("bootstrap.ctrlhandler", true)) { Natives.addConsoleCtrlHandler(new ConsoleCtrlHandler() { @Override public boolean handle(int code) { @@ -105,10 +105,10 @@ public class Bootstrap { } @SuppressForbidden(reason = "Exception#printStackTrace()") - private static void setupLogging(Tuple tuple) { + private static void setupLogging(Settings settings, Environment environment) { try { - tuple.v1().getClassLoader().loadClass("org.apache.log4j.Logger"); - LogConfigurator.configure(tuple.v1()); + settings.getClassLoader().loadClass("org.apache.log4j.Logger"); + LogConfigurator.configure(settings); } catch (ClassNotFoundException e) { // no log4j } catch (NoClassDefFoundError e) { @@ -128,8 +128,10 @@ public class Bootstrap { */ public void init(String[] args) throws Exception { Tuple tuple = initialSettings(); - setupLogging(tuple); - setup(true, tuple); + Settings settings = tuple.v1(); + Environment environment = tuple.v2(); + setupLogging(settings, environment); + setup(true, settings, environment); } /** @@ -179,10 +181,13 @@ public class Bootstrap { foreground = false; } - Tuple tuple = null; + Settings settings = null; + Environment environment = null; try { - tuple = initialSettings(); - setupLogging(tuple); + Tuple tuple = initialSettings(); + settings = tuple.v1(); + environment = tuple.v2(); + setupLogging(settings, environment); } catch (Exception e) { String errorMessage = buildErrorMessage("Setup", e); sysError(errorMessage, true); @@ -210,7 +215,7 @@ public class Bootstrap { // fail if using broken version JVMCheck.check(); - bootstrap.setup(true, tuple); + bootstrap.setup(true, settings, environment); stage = "Startup"; bootstrap.start(); From 4d672b0369f311377f915784bdca2ed49b2b5374 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 23 Apr 2015 19:24:38 -0700 Subject: [PATCH 111/236] Mappings: Join MergeResults with MergeContext since they are almost the same MergeContext currently exists to store conflicts, and providing a mechanism to add dynamic fields. MergeResults store the same conflicts. This change merges the two classes together, as well as removes the MergeFlags construct. This is in preparation for simplifying the callback structures to dynamically add fields, which will require storing the mapping updates in the results, instead of having a sneaky callback to the DocumentMapper instance. It also just makes more sense that the "results" of a merge are conflicts that occurred, along with updates that may have occurred. For MergeFlags, any future needs for parameterizing the merge (which seems unlikely) can just be added directly to the MergeResults as simlulate is with this change. --- .../metadata/MetaDataMappingService.java | 7 +- .../index/mapper/DocumentFieldMappers.java | 2 +- .../index/mapper/DocumentMapper.java | 70 +++---------------- .../index/mapper/FieldMapperListener.java | 3 +- .../index/mapper/FieldMappersLookup.java | 2 +- .../elasticsearch/index/mapper/Mapper.java | 2 +- .../index/mapper/MapperService.java | 9 ++- .../index/mapper/MapperUtils.java | 7 +- .../elasticsearch/index/mapper/Mapping.java | 12 ++-- .../{MergeContext.java => MergeResult.java} | 19 +++-- .../mapper/core/AbstractFieldMapper.java | 51 +++++++------- .../index/mapper/core/BinaryFieldMapper.java | 8 +-- .../index/mapper/core/BooleanFieldMapper.java | 8 +-- .../index/mapper/core/ByteFieldMapper.java | 8 +-- .../mapper/core/CompletionFieldMapper.java | 16 ++--- .../index/mapper/core/DateFieldMapper.java | 8 +-- .../index/mapper/core/DoubleFieldMapper.java | 8 +-- .../index/mapper/core/FloatFieldMapper.java | 8 +-- .../index/mapper/core/IntegerFieldMapper.java | 8 +-- .../index/mapper/core/LongFieldMapper.java | 8 +-- .../index/mapper/core/NumberFieldMapper.java | 8 +-- .../index/mapper/core/ShortFieldMapper.java | 8 +-- .../index/mapper/core/StringFieldMapper.java | 8 +-- .../mapper/core/TokenCountFieldMapper.java | 8 +-- .../index/mapper/geo/GeoPointFieldMapper.java | 24 +++---- .../index/mapper/geo/GeoShapeFieldMapper.java | 16 ++--- .../index/mapper/internal/AllFieldMapper.java | 8 +-- .../internal/FieldNamesFieldMapper.java | 6 +- .../index/mapper/internal/IdFieldMapper.java | 4 +- .../mapper/internal/IndexFieldMapper.java | 6 +- .../mapper/internal/ParentFieldMapper.java | 8 +-- .../mapper/internal/RoutingFieldMapper.java | 4 +- .../mapper/internal/SizeFieldMapper.java | 6 +- .../mapper/internal/SourceFieldMapper.java | 4 +- .../index/mapper/internal/TTLFieldMapper.java | 10 +-- .../mapper/internal/TimestampFieldMapper.java | 18 ++--- .../mapper/internal/TypeFieldMapper.java | 4 +- .../index/mapper/internal/UidFieldMapper.java | 4 +- .../mapper/internal/VersionFieldMapper.java | 5 +- .../index/mapper/ip/IpFieldMapper.java | 8 +-- .../index/mapper/object/ObjectMapper.java | 24 +++---- .../index/mapper/object/RootObjectMapper.java | 4 +- .../index/mapper/FieldMappersLookupTests.java | 24 ++++--- .../mapper/copyto/CopyToMapperTests.java | 8 +-- .../core/TokenCountFieldMapperTests.java | 6 +- .../mapper/date/SimpleDateMappingTests.java | 5 +- .../mapper/externalvalues/ExternalMapper.java | 4 +- .../externalvalues/ExternalRootMapper.java | 4 +- .../mapper/geo/GeoPointFieldMapperTests.java | 10 +-- .../mapper/geo/GeoShapeFieldMapperTests.java | 10 +-- .../mapper/index/IndexTypeMapperTests.java | 4 +- .../internal/FieldNamesFieldMapperTests.java | 4 +- .../mapper/merge/TestMergeMapperTests.java | 24 +++---- .../merge/JavaMultiFieldMergeTests.java | 48 ++++++------- .../index/mapper/size/SizeMappingTests.java | 2 +- .../string/SimpleStringMappingTests.java | 13 ++-- .../timestamp/TimestampMappingTests.java | 22 +++--- .../index/mapper/ttl/TTLMappingTests.java | 27 ++++--- .../mapper/update/UpdateMappingTests.java | 9 +-- 59 files changed, 313 insertions(+), 370 deletions(-) rename src/main/java/org/elasticsearch/index/mapper/{MergeContext.java => MergeResult.java} (76%) diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 1749e6e271d..5bacae63d0c 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -40,6 +40,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; @@ -48,8 +49,6 @@ import org.elasticsearch.percolator.PercolatorService; import java.util.*; import static com.google.common.collect.Maps.newHashMap; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; - /** * Service responsible for submitting mapping changes */ @@ -382,10 +381,10 @@ public class MetaDataMappingService extends AbstractComponent { newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()), existingMapper == null); if (existingMapper != null) { // first, simulate - DocumentMapper.MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), mergeFlags().simulate(true)); + MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true); // if we have conflicts, and we are not supposed to ignore them, throw an exception if (!request.ignoreConflicts() && mergeResult.hasConflicts()) { - throw new MergeMappingException(mergeResult.conflicts()); + throw new MergeMappingException(mergeResult.buildConflicts()); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java b/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java index 9bfdce33a61..aebc4bcb5fc 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java @@ -55,7 +55,7 @@ public final class DocumentFieldMappers implements Iterable> { this.searchQuoteAnalyzer = searchQuoteAnalyzer; } - public DocumentFieldMappers copyAndAllAll(Collection> newMappers) { + public DocumentFieldMappers copyAndAllAll(Collection> newMappers) { FieldMappersLookup fieldMappers = this.fieldMappers.copyAndAddAll(newMappers); FieldNameAnalyzer indexAnalyzer = this.indexAnalyzer.copyAndAddAll(Collections2.transform(newMappers, new Function, Map.Entry>() { @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 380ce645a28..205633289aa 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -88,56 +88,6 @@ import java.util.concurrent.CopyOnWriteArrayList; */ public class DocumentMapper implements ToXContent { - /** - * A result of a merge. - */ - public static class MergeResult { - - private final String[] conflicts; - - public MergeResult(String[] conflicts) { - this.conflicts = conflicts; - } - - /** - * Does the merge have conflicts or not? - */ - public boolean hasConflicts() { - return conflicts.length > 0; - } - - /** - * The merge conflicts. - */ - public String[] conflicts() { - return this.conflicts; - } - } - - public static class MergeFlags { - - public static MergeFlags mergeFlags() { - return new MergeFlags(); - } - - private boolean simulate = true; - - public MergeFlags() { - } - - /** - * A simulation run, don't perform actual modifications to the mapping. - */ - public boolean simulate() { - return simulate; - } - - public MergeFlags simulate(boolean simulate) { - this.simulate = simulate; - return this; - } - } - /** * A listener to be called during the parse process. */ @@ -579,7 +529,7 @@ public class DocumentMapper implements ToXContent { return parser.contentType().xContent().createParser(builder.bytes()); } - public void addFieldMappers(List> fieldMappers) { + public void addFieldMappers(Collection> fieldMappers) { synchronized (mappersMutex) { this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers); } @@ -629,20 +579,20 @@ public class DocumentMapper implements ToXContent { mapping.root.traverse(listener); } - private MergeContext newMergeContext(MergeFlags mergeFlags) { - return new MergeContext(mergeFlags) { + private MergeResult newMergeContext(boolean simulate) { + return new MergeResult(simulate) { List conflicts = new ArrayList<>(); @Override - public void addFieldMappers(List> fieldMappers) { - assert mergeFlags().simulate() == false; + public void addFieldMappers(Collection> fieldMappers) { + assert simulate() == false; DocumentMapper.this.addFieldMappers(fieldMappers); } @Override public void addObjectMappers(Collection objectMappers) { - assert mergeFlags().simulate() == false; + assert simulate() == false; DocumentMapper.this.addObjectMappers(objectMappers); } @@ -664,10 +614,10 @@ public class DocumentMapper implements ToXContent { }; } - public synchronized MergeResult merge(Mapping mapping, MergeFlags mergeFlags) { - final MergeContext mergeContext = newMergeContext(mergeFlags); - final MergeResult mergeResult = this.mapping.merge(mapping, mergeContext); - if (mergeFlags.simulate() == false) { + public synchronized MergeResult merge(Mapping mapping, boolean simulate) { + final MergeResult mergeResult = newMergeContext(simulate); + this.mapping.merge(mapping, mergeResult); + if (simulate == false) { refreshSource(); } return mergeResult; diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java b/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java index 3b7da0eb8c6..3251ed5203f 100644 --- a/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java +++ b/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper; import java.util.ArrayList; +import java.util.Collection; import java.util.List; /** @@ -38,7 +39,7 @@ public abstract class FieldMapperListener { public abstract void fieldMapper(FieldMapper fieldMapper); - public void fieldMappers(List> fieldMappers) { + public void fieldMappers(Collection> fieldMappers) { for (FieldMapper mapper : fieldMappers) { fieldMapper(mapper); } diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java b/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java index 2e72f0c6fff..60c1d143ead 100644 --- a/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java +++ b/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java @@ -49,7 +49,7 @@ class FieldMappersLookup implements Iterable> { /** * Return a new instance that contains the union of this instance and the provided mappers. */ - public FieldMappersLookup copyAndAddAll(Collection> newMappers) { + public FieldMappersLookup copyAndAddAll(Collection> newMappers) { CopyOnWriteHashMap map = this.mappers; for (FieldMapper mapper : newMappers) { diff --git a/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/src/main/java/org/elasticsearch/index/mapper/Mapper.java index ae2f6acbf2d..e1aaffb4d39 100644 --- a/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -132,7 +132,7 @@ public interface Mapper extends ToXContent { */ Mapper parse(ParseContext context) throws IOException; - void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException; + void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException; void traverse(FieldMapperListener fieldMapperListener); diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 5e11580cea8..236083f1e77 100755 --- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -79,7 +79,6 @@ import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; /** * @@ -336,11 +335,11 @@ public class MapperService extends AbstractIndexComponent { DocumentMapper oldMapper = mappers.get(mapper.type()); if (oldMapper != null) { - DocumentMapper.MergeResult result = oldMapper.merge(mapper.mapping(), mergeFlags().simulate(false)); + MergeResult result = oldMapper.merge(mapper.mapping(), false); if (result.hasConflicts()) { // TODO: What should we do??? if (logger.isDebugEnabled()) { - logger.debug("merging mapping for type [{}] resulted in conflicts: [{}]", mapper.type(), Arrays.toString(result.conflicts())); + logger.debug("merging mapping for type [{}] resulted in conflicts: [{}]", mapper.type(), Arrays.toString(result.buildConflicts())); } } fieldDataService.onMappingUpdate(); @@ -385,7 +384,7 @@ public class MapperService extends AbstractIndexComponent { } } - private void addFieldMappers(List> fieldMappers) { + private void addFieldMappers(Collection> fieldMappers) { synchronized (mappersMutex) { this.fieldMappers = this.fieldMappers.copyAndAddAll(fieldMappers); } @@ -933,7 +932,7 @@ public class MapperService extends AbstractIndexComponent { } @Override - public void fieldMappers(List> fieldMappers) { + public void fieldMappers(Collection> fieldMappers) { addFieldMappers(fieldMappers); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java b/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java index df59743b0cf..09c061c60a9 100644 --- a/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java @@ -24,7 +24,6 @@ import org.elasticsearch.index.mapper.object.ObjectMapper; import java.io.IOException; import java.util.Collection; -import java.util.List; public enum MapperUtils { ; @@ -42,8 +41,8 @@ public enum MapperUtils { return mapper; } - private static MergeContext newStrictMergeContext() { - return new MergeContext(new DocumentMapper.MergeFlags().simulate(false)) { + private static MergeResult newStrictMergeContext() { + return new MergeResult(false) { @Override public boolean hasConflicts() { @@ -61,7 +60,7 @@ public enum MapperUtils { } @Override - public void addFieldMappers(List> fieldMappers) { + public void addFieldMappers(Collection> fieldMappers) { // no-op } diff --git a/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/src/main/java/org/elasticsearch/index/mapper/Mapping.java index 62e89bfe209..b8bbf05616b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.DocumentMapper.MergeResult; import org.elasticsearch.index.mapper.object.RootObjectMapper; import java.io.IOException; @@ -95,11 +94,11 @@ public final class Mapping implements ToXContent { return (T) rootMappersMap.get(clazz); } - /** @see DocumentMapper#merge(DocumentMapper, org.elasticsearch.index.mapper.DocumentMapper.MergeFlags) */ - public MergeResult merge(Mapping mergeWith, MergeContext mergeContext) { + /** @see DocumentMapper#merge(Mapping, boolean) */ + public void merge(Mapping mergeWith, MergeResult mergeResult) { assert rootMappers.length == mergeWith.rootMappers.length; - root.merge(mergeWith.root, mergeContext); + root.merge(mergeWith.root, mergeResult); for (RootMapper rootMapper : rootMappers) { // root mappers included in root object will get merge in the rootObjectMapper if (rootMapper.includeInObject()) { @@ -107,15 +106,14 @@ public final class Mapping implements ToXContent { } RootMapper mergeWithRootMapper = mergeWith.rootMapper(rootMapper.getClass()); if (mergeWithRootMapper != null) { - rootMapper.merge(mergeWithRootMapper, mergeContext); + rootMapper.merge(mergeWithRootMapper, mergeResult); } } - if (mergeContext.mergeFlags().simulate() == false) { + if (mergeResult.simulate() == false) { // let the merge with attributes to override the attributes meta = mergeWith.meta; } - return new MergeResult(mergeContext.buildConflicts()); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/MergeContext.java b/src/main/java/org/elasticsearch/index/mapper/MergeResult.java similarity index 76% rename from src/main/java/org/elasticsearch/index/mapper/MergeContext.java rename to src/main/java/org/elasticsearch/index/mapper/MergeResult.java index f8ddb837517..ab685f624ef 100644 --- a/src/main/java/org/elasticsearch/index/mapper/MergeContext.java +++ b/src/main/java/org/elasticsearch/index/mapper/MergeResult.java @@ -21,26 +21,25 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.index.mapper.object.ObjectMapper; +import java.util.ArrayList; import java.util.Collection; import java.util.List; -/** - * - */ -public abstract class MergeContext { +/** A container for tracking results of a mapping merge. */ +public abstract class MergeResult { - private final DocumentMapper.MergeFlags mergeFlags; + private final boolean simulate; - public MergeContext(DocumentMapper.MergeFlags mergeFlags) { - this.mergeFlags = mergeFlags; + public MergeResult(boolean simulate) { + this.simulate = simulate; } - public abstract void addFieldMappers(List> fieldMappers); + public abstract void addFieldMappers(Collection> fieldMappers); public abstract void addObjectMappers(Collection objectMappers); - public DocumentMapper.MergeFlags mergeFlags() { - return mergeFlags; + public boolean simulate() { + return simulate; } public abstract void addConflict(String mergeFailure); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index 7d8712ea3db..c18e7c06563 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -39,9 +39,6 @@ import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.index.Terms; -import org.apache.lucene.queries.TermFilter; -import org.apache.lucene.queries.TermsFilter; -import org.apache.lucene.search.*; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchIllegalStateException; @@ -582,13 +579,13 @@ public abstract class AbstractFieldMapper implements FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { if (!this.getClass().equals(mergeWith.getClass())) { String mergedType = mergeWith.getClass().getSimpleName(); if (mergeWith instanceof AbstractFieldMapper) { mergedType = ((AbstractFieldMapper) mergeWith).contentType(); } - mergeContext.addConflict("mapper [" + names.fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); + mergeResult.addConflict("mapper [" + names.fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); // different types, return return; } @@ -596,62 +593,62 @@ public abstract class AbstractFieldMapper implements FieldMapper { boolean indexed = fieldType.indexOptions() != IndexOptions.NONE; boolean mergeWithIndexed = fieldMergeWith.fieldType().indexOptions() != IndexOptions.NONE; if (indexed != mergeWithIndexed || this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different index values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different index values"); } if (this.fieldType().stored() != fieldMergeWith.fieldType().stored()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store values"); } if (!this.hasDocValues() && fieldMergeWith.hasDocValues()) { // don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitely set // when the doc_values field data format is configured - mergeContext.addConflict("mapper [" + names.fullName() + "] has different " + TypeParsers.DOC_VALUES + " values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different " + TypeParsers.DOC_VALUES + " values"); } if (this.fieldType().omitNorms() && !fieldMergeWith.fieldType.omitNorms()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] cannot enable norms (`norms.enabled`)"); + mergeResult.addConflict("mapper [" + names.fullName() + "] cannot enable norms (`norms.enabled`)"); } if (this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different tokenize values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different tokenize values"); } if (this.fieldType().storeTermVectors() != fieldMergeWith.fieldType().storeTermVectors()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store_term_vector values"); } if (this.fieldType().storeTermVectorOffsets() != fieldMergeWith.fieldType().storeTermVectorOffsets()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_offsets values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_offsets values"); } if (this.fieldType().storeTermVectorPositions() != fieldMergeWith.fieldType().storeTermVectorPositions()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_positions values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_positions values"); } if (this.fieldType().storeTermVectorPayloads() != fieldMergeWith.fieldType().storeTermVectorPayloads()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_payloads values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_payloads values"); } // null and "default"-named index analyzers both mean the default is used if (this.indexAnalyzer == null || "default".equals(this.indexAnalyzer.name())) { if (fieldMergeWith.indexAnalyzer != null && !"default".equals(fieldMergeWith.indexAnalyzer.name())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different analyzer"); } } else if (fieldMergeWith.indexAnalyzer == null || "default".equals(fieldMergeWith.indexAnalyzer.name())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different analyzer"); } else if (!this.indexAnalyzer.name().equals(fieldMergeWith.indexAnalyzer.name())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different analyzer"); } if (!this.names().equals(fieldMergeWith.names())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_name"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different index_name"); } if (this.similarity == null) { if (fieldMergeWith.similarity() != null) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different similarity"); } } else if (fieldMergeWith.similarity() == null) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different similarity"); } else if (!this.similarity().equals(fieldMergeWith.similarity())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different similarity"); } - multiFields.merge(mergeWith, mergeContext); + multiFields.merge(mergeWith, mergeResult); - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { // apply changeable values this.fieldType = new FieldType(this.fieldType); this.fieldType.setOmitNorms(fieldMergeWith.fieldType.omitNorms()); @@ -917,7 +914,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { } // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { AbstractFieldMapper mergeWithMultiField = (AbstractFieldMapper) mergeWith; List> newFieldMappers = null; @@ -928,7 +925,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { Mapper mergeIntoMapper = mappers.get(mergeWithMapper.name()); if (mergeIntoMapper == null) { // no mapping, simply add it if not simulating - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { // we disable the all in multi-field mappers if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) { ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll(); @@ -945,13 +942,13 @@ public abstract class AbstractFieldMapper implements FieldMapper { } } } else { - mergeIntoMapper.merge(mergeWithMapper, mergeContext); + mergeIntoMapper.merge(mergeWithMapper, mergeResult); } } // first add all field mappers if (newFieldMappers != null) { - mergeContext.addFieldMappers(newFieldMappers); + mergeResult.addFieldMappers(newFieldMappers); } // now publish mappers if (newMappersBuilder != null) { diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index 5154b2074e1..83dfb2c0782 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -45,7 +45,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; @@ -245,14 +245,14 @@ public class BinaryFieldMapper extends AbstractFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } BinaryFieldMapper sourceMergeWith = (BinaryFieldMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (sourceMergeWith.compress != null) { this.compress = sourceMergeWith.compress; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index 18344809168..d3c5eae809f 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -237,13 +237,13 @@ public class BooleanFieldMapper extends AbstractFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((BooleanFieldMapper) mergeWith).nullValue; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 58a419a6e42..5c54934ad5b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -46,7 +46,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryParseContext; @@ -328,12 +328,12 @@ public class ByteFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((ByteFieldMapper) mergeWith).nullValue; this.nullValueAsString = ((ByteFieldMapper) mergeWith).nullValueAsString; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 7e037bd533c..baba0f42f87 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -44,7 +44,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -523,22 +523,22 @@ public class CompletionFieldMapper extends AbstractFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith; if (payloads != fieldMergeWith.payloads) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different payload values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different payload values"); } if (preservePositionIncrements != fieldMergeWith.preservePositionIncrements) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different 'preserve_position_increments' values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different 'preserve_position_increments' values"); } if (preserveSeparators != fieldMergeWith.preserveSeparators) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different 'preserve_separators' values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different 'preserve_separators' values"); } if(!ContextMapping.mappingsAreEqual(getContextMapping(), fieldMergeWith.getContextMapping())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different 'context_mapping' values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different 'context_mapping' values"); } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.maxInputLength = fieldMergeWith.maxInputLength; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 3de8b803f41..b10c4aff1da 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -53,7 +53,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.LongFieldMapper.CustomLongNumericField; @@ -494,12 +494,12 @@ public class DateFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((DateFieldMapper) mergeWith).nullValue; this.dateTimeFormatter = ((DateFieldMapper) mergeWith).dateTimeFormatter; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 5ea9288c05a..7381855fb1a 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -51,7 +51,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryParseContext; @@ -333,12 +333,12 @@ public class DoubleFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((DoubleFieldMapper) mergeWith).nullValue; this.nullValueAsString = ((DoubleFieldMapper) mergeWith).nullValueAsString; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index 95dcddb52ec..b87da803a7e 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -52,7 +52,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryParseContext; @@ -339,12 +339,12 @@ public class FloatFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((FloatFieldMapper) mergeWith).nullValue; this.nullValueAsString = ((FloatFieldMapper) mergeWith).nullValueAsString; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index f670a863dea..c6ebc8ed640 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -47,7 +47,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryParseContext; @@ -330,12 +330,12 @@ public class IntegerFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((IntegerFieldMapper) mergeWith).nullValue; this.nullValueAsString = ((IntegerFieldMapper) mergeWith).nullValueAsString; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index 7a3f90b7d1f..e08f84c2192 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -47,7 +47,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryParseContext; @@ -312,12 +312,12 @@ public class LongFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((LongFieldMapper) mergeWith).nullValue; this.nullValueAsString = ((LongFieldMapper) mergeWith).nullValueAsString; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 8cccf0d6770..54dfe165171 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -53,7 +53,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.internal.AllFieldMapper; @@ -370,12 +370,12 @@ public abstract class NumberFieldMapper extends AbstractFieldM } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith; this.precisionStep = nfmMergeWith.precisionStep; this.includeInAll = nfmMergeWith.includeInAll; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index bcacc56773a..9d0b960c040 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -48,7 +48,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryParseContext; @@ -328,12 +328,12 @@ public class ShortFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((ShortFieldMapper) mergeWith).nullValue; this.nullValueAsString = ((ShortFieldMapper) mergeWith).nullValueAsString; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index 43da31336be..fdd994fa671 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -37,7 +37,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.internal.AllFieldMapper; @@ -354,12 +354,12 @@ public class StringFieldMapper extends AbstractFieldMapper implements Al } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll; this.nullValue = ((StringFieldMapper) mergeWith).nullValue; this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java index 1c3dfd617a3..1d4a727d5cf 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java @@ -32,7 +32,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost; @@ -189,12 +189,12 @@ public class TokenCountFieldMapper extends IntegerFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index f7a39b2c952..a17941802b4 100644 --- a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -50,7 +50,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.FieldMapperListener; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ObjectMapperListener; import org.elasticsearch.index.mapper.ParseContext; @@ -643,39 +643,39 @@ public class GeoPointFieldMapper extends AbstractFieldMapper implement } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } GeoPointFieldMapper fieldMergeWith = (GeoPointFieldMapper) mergeWith; if (this.enableLatLon != fieldMergeWith.enableLatLon) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different lat_lon"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different lat_lon"); } if (this.enableGeoHash != fieldMergeWith.enableGeoHash) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different geohash"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different geohash"); } if (this.geoHashPrecision != fieldMergeWith.geoHashPrecision) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different geohash_precision"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different geohash_precision"); } if (this.enableGeohashPrefix != fieldMergeWith.enableGeohashPrefix) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different geohash_prefix"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different geohash_prefix"); } if (this.normalizeLat != fieldMergeWith.normalizeLat) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different normalize_lat"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different normalize_lat"); } if (this.normalizeLon != fieldMergeWith.normalizeLon) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different normalize_lon"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different normalize_lon"); } if (!Objects.equal(this.precisionStep, fieldMergeWith.precisionStep)) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different precision_step"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different precision_step"); } if (this.validateLat != fieldMergeWith.validateLat) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different validate_lat"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different validate_lat"); } if (this.validateLon != fieldMergeWith.validateLon) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different validate_lon"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different validate_lon"); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 979346767db..d54cb4e2e6b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -43,7 +43,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.AbstractFieldMapper; @@ -281,10 +281,10 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different field type"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different field type"); return; } final GeoShapeFieldMapper fieldMergeWith = (GeoShapeFieldMapper) mergeWith; @@ -292,7 +292,7 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper { // prevent user from changing strategies if (!(this.defaultStrategy.getClass().equals(mergeWithStrategy.getClass()))) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different strategy"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different strategy"); } final SpatialPrefixTree grid = this.defaultStrategy.getGrid(); @@ -300,17 +300,17 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper { // prevent user from changing trees (changes encoding) if (!grid.getClass().equals(mergeGrid.getClass())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different tree"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different tree"); } // TODO we should allow this, but at the moment levels is used to build bookkeeping variables // in lucene's SpatialPrefixTree implementations, need a patch to correct that first if (grid.getMaxLevels() != mergeGrid.getMaxLevels()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different tree_levels or precision"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different tree_levels or precision"); } // bail if there were merge conflicts - if (mergeContext.hasConflicts() || mergeContext.mergeFlags().simulate()) { + if (mergeResult.hasConflicts() || mergeResult.simulate()) { return; } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 7fd2d5859ea..b412df7af30 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -39,7 +39,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -314,11 +314,11 @@ public class AllFieldMapper extends AbstractFieldMapper implements Inter } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) { - mergeContext.addConflict("mapper [" + names.fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled()); + mergeResult.addConflict("mapper [" + names.fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled()); } - super.merge(mergeWith, mergeContext); + super.merge(mergeWith, mergeResult); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 33633d3c06d..46cef4cfeb5 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -37,7 +37,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -278,9 +278,9 @@ public class FieldNamesFieldMapper extends AbstractFieldMapper implement } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { FieldNamesFieldMapper fieldNamesMapperMergeWith = (FieldNamesFieldMapper)mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (fieldNamesMapperMergeWith.enabledState != enabledState && !fieldNamesMapperMergeWith.enabledState.unset()) { this.enabledState = fieldNamesMapperMergeWith.enabledState; } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index 549023faa4d..efa7fe36c0b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -49,7 +49,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -361,7 +361,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // do nothing here, no merging, but also no exception } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java index a530102e673..7f5249958ab 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java @@ -34,7 +34,7 @@ import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -216,9 +216,9 @@ public class IndexFieldMapper extends AbstractFieldMapper implements Int } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) { this.enabledState = indexFieldMapperMergeWith.enabledState; } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index 963001cafb2..f1eca621076 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -44,7 +44,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -363,13 +363,13 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { ParentFieldMapper other = (ParentFieldMapper) mergeWith; if (!Objects.equal(type, other.type)) { - mergeContext.addConflict("The _parent field's type option can't be changed: [" + type + "]->[" + other.type + "]"); + mergeResult.addConflict("The _parent field's type option can't be changed: [" + type + "]->[" + other.type + "]"); } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; if (fieldMergeWith.customFieldDataSettings != null) { if (!Objects.equal(fieldMergeWith.customFieldDataSettings, this.customFieldDataSettings)) { diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java index 8aee69f8ba3..3631a3f68f9 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java @@ -33,7 +33,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -242,7 +242,7 @@ public class RoutingFieldMapper extends AbstractFieldMapper implements I } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // do nothing here, no merging, but also no exception } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java index bd954a8b756..7acc52f41cc 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java @@ -28,7 +28,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -175,9 +175,9 @@ public class SizeFieldMapper extends IntegerFieldMapper implements RootMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) { this.enabledState = sizeFieldMapperMergeWith.enabledState; } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index 3814ba41ee6..2ee13f20982 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -417,9 +417,9 @@ public class SourceFieldMapper extends AbstractFieldMapper implements In } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (sourceMergeWith.compress != null) { this.compress = sourceMergeWith.compress; } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java index 10a14169755..62c0beff862 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java @@ -33,7 +33,7 @@ import org.elasticsearch.index.AlreadyExpiredException; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -238,13 +238,13 @@ public class TTLFieldMapper extends LongFieldMapper implements InternalMapper, R } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith; if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) { - mergeContext.addConflict("_ttl cannot be disabled once it was enabled."); + mergeResult.addConflict("_ttl cannot be disabled once it was enabled."); } else { - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.enabledState = ttlMergeWith.enabledState; } } @@ -252,7 +252,7 @@ public class TTLFieldMapper extends LongFieldMapper implements InternalMapper, R if (ttlMergeWith.defaultTTL != -1) { // we never build the default when the field is disabled so we should also not set it // (it does not make a difference though as everything that is not build in toXContent will also not be set in the cluster) - if (!mergeContext.mergeFlags().simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) { + if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) { this.defaultTTL = ttlMergeWith.defaultTTL; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java index 18075ff3866..160c5c03699 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java @@ -35,7 +35,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -352,10 +352,10 @@ public class TimestampFieldMapper extends DateFieldMapper implements InternalMap } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith; - super.merge(mergeWith, mergeContext); - if (!mergeContext.mergeFlags().simulate()) { + super.merge(mergeWith, mergeResult); + if (!mergeResult.simulate()) { if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) { this.enabledState = timestampFieldMapperMergeWith.enabledState; } @@ -364,18 +364,18 @@ public class TimestampFieldMapper extends DateFieldMapper implements InternalMap return; } if (defaultTimestamp == null) { - mergeContext.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) { - mergeContext.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null"); + mergeResult.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null"); } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) { - mergeContext.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); } if (this.path != null) { if (path.equals(timestampFieldMapperMergeWith.path()) == false) { - mergeContext.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path())); + mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path())); } } else if (timestampFieldMapperMergeWith.path() != null) { - mergeContext.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing"); + mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index 206cc3a8c3c..83cdbf536b2 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -41,7 +41,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -210,7 +210,7 @@ public class TypeFieldMapper extends AbstractFieldMapper implements Inte } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // do nothing here, no merging, but also no exception } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java index 0ae2b497593..01744d37d3b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java @@ -35,7 +35,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -228,7 +228,7 @@ public class UidFieldMapper extends AbstractFieldMapper implements Internal } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // do nothing here, no merging, but also no exception } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java index 4ae3eaa415a..cb143e877a8 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java @@ -24,14 +24,13 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -163,7 +162,7 @@ public class VersionFieldMapper extends AbstractFieldMapper implements Int } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // nothing to do } diff --git a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 23d373c65ab..61188cd4bbf 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -48,7 +48,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.LongFieldMapper.CustomLongNumericField; @@ -320,12 +320,12 @@ public class IpFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((IpFieldMapper) mergeWith).nullValue; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index fab309081ab..d0b9ab72819 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -49,7 +49,7 @@ import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperUtils; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ObjectMapperListener; import org.elasticsearch.index.mapper.ParseContext; @@ -919,32 +919,32 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea } @Override - public void merge(final Mapper mergeWith, final MergeContext mergeContext) throws MergeMappingException { + public void merge(final Mapper mergeWith, final MergeResult mergeResult) throws MergeMappingException { if (!(mergeWith instanceof ObjectMapper)) { - mergeContext.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); + mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); return; } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; if (nested().isNested()) { if (!mergeWithObject.nested().isNested()) { - mergeContext.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested"); + mergeResult.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested"); return; } } else { if (mergeWithObject.nested().isNested()) { - mergeContext.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested"); + mergeResult.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested"); return; } } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (mergeWithObject.dynamic != null) { this.dynamic = mergeWithObject.dynamic; } } - doMerge(mergeWithObject, mergeContext); + doMerge(mergeWithObject, mergeResult); List mappersToPut = new ArrayList<>(); FieldMapperListener.Aggregator newFieldMappers = new FieldMapperListener.Aggregator(); @@ -954,20 +954,20 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea Mapper mergeIntoMapper = mappers.get(mergeWithMapper.name()); if (mergeIntoMapper == null) { // no mapping, simply add it if not simulating - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { mappersToPut.add(mergeWithMapper); mergeWithMapper.traverse(newFieldMappers); mergeWithMapper.traverse(newObjectMappers); } } else { - mergeIntoMapper.merge(mergeWithMapper, mergeContext); + mergeIntoMapper.merge(mergeWithMapper, mergeResult); } } if (!newFieldMappers.mappers.isEmpty()) { - mergeContext.addFieldMappers(newFieldMappers.mappers); + mergeResult.addFieldMappers(newFieldMappers.mappers); } if (!newObjectMappers.mappers.isEmpty()) { - mergeContext.addObjectMappers(newObjectMappers.mappers); + mergeResult.addObjectMappers(newObjectMappers.mappers); } // add the mappers only after the administration have been done, so it will not be visible to parser (which first try to read with no lock) for (Mapper mapper : mappersToPut) { @@ -975,7 +975,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea } } - protected void doMerge(ObjectMapper mergeWith, MergeContext mergeContext) { + protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { } diff --git a/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java index 56d2b96429c..740b38cc08b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java @@ -260,9 +260,9 @@ public class RootObjectMapper extends ObjectMapper { } @Override - protected void doMerge(ObjectMapper mergeWith, MergeContext mergeContext) { + protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { // merge them List mergedTemplates = Lists.newArrayList(Arrays.asList(this.dynamicTemplates)); for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { diff --git a/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java b/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java index c915a3cf8db..75c4bd51a87 100644 --- a/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java @@ -57,7 +57,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { public void testNewField() { FieldMappersLookup lookup = new FieldMappersLookup(); FakeFieldMapper f = new FakeFieldMapper("foo", "bar"); - FieldMappersLookup lookup2 = lookup.copyAndAddAll(Lists.newArrayList(f)); + FieldMappersLookup lookup2 = lookup.copyAndAddAll(newList(f)); assertNull(lookup.fullName("foo")); assertNull(lookup.indexName("bar")); @@ -76,9 +76,9 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { FieldMappersLookup lookup = new FieldMappersLookup(); FakeFieldMapper f = new FakeFieldMapper("foo", "bar"); FakeFieldMapper other = new FakeFieldMapper("blah", "blah"); - lookup = lookup.copyAndAddAll(Lists.newArrayList(f, other)); + lookup = lookup.copyAndAddAll(newList(f, other)); FakeFieldMapper f2 = new FakeFieldMapper("foo", "bar"); - FieldMappersLookup lookup2 = lookup.copyAndAddAll(Lists.newArrayList(f2)); + FieldMappersLookup lookup2 = lookup.copyAndAddAll(newList(f2)); FieldMappers mappers = lookup2.fullName("foo"); assertNotNull(mappers); @@ -93,7 +93,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { public void testIndexName() { FakeFieldMapper f1 = new FakeFieldMapper("foo", "foo"); FieldMappersLookup lookup = new FieldMappersLookup(); - lookup = lookup.copyAndAddAll(Lists.newArrayList(f1)); + lookup = lookup.copyAndAddAll(newList(f1)); FieldMappers mappers = lookup.indexName("foo"); assertNotNull(mappers); @@ -105,7 +105,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz"); FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo"); FieldMappersLookup lookup = new FieldMappersLookup(); - lookup = lookup.copyAndAddAll(Lists.newArrayList(f1, f2)); + lookup = lookup.copyAndAddAll(newList(f1, f2)); List names = lookup.simpleMatchToIndexNames("b*"); assertTrue(names.contains("baz")); assertTrue(names.contains("boo")); @@ -115,7 +115,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz"); FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo"); FieldMappersLookup lookup = new FieldMappersLookup(); - lookup = lookup.copyAndAddAll(Lists.newArrayList(f1, f2)); + lookup = lookup.copyAndAddAll(newList(f1, f2)); List names = lookup.simpleMatchToFullName("b*"); assertTrue(names.contains("foo")); assertTrue(names.contains("bar")); @@ -126,7 +126,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { FakeFieldMapper f2 = new FakeFieldMapper("foo", "realbar"); FakeFieldMapper f3 = new FakeFieldMapper("baz", "realfoo"); FieldMappersLookup lookup = new FieldMappersLookup(); - lookup = lookup.copyAndAddAll(Lists.newArrayList(f1, f2, f3)); + lookup = lookup.copyAndAddAll(newList(f1, f2, f3)); assertNotNull(lookup.smartName("foo")); assertEquals(2, lookup.smartName("foo").mappers().size()); @@ -138,7 +138,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { public void testIteratorImmutable() { FakeFieldMapper f1 = new FakeFieldMapper("foo", "bar"); FieldMappersLookup lookup = new FieldMappersLookup(); - lookup = lookup.copyAndAddAll(Lists.newArrayList(f1)); + lookup = lookup.copyAndAddAll(newList(f1)); try { Iterator> itr = lookup.iterator(); @@ -154,12 +154,12 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { public void testGetMapper() { FakeFieldMapper f1 = new FakeFieldMapper("foo", "bar"); FieldMappersLookup lookup = new FieldMappersLookup(); - lookup = lookup.copyAndAddAll(Lists.newArrayList(f1)); + lookup = lookup.copyAndAddAll(newList(f1)); assertEquals(f1, lookup.get("foo")); assertNull(lookup.get("bar")); // get is only by full name FakeFieldMapper f2 = new FakeFieldMapper("foo", "foo"); - lookup = lookup.copyAndAddAll(Lists.newArrayList(f2)); + lookup = lookup.copyAndAddAll(newList(f2)); try { lookup.get("foo"); fail("get should have enforced foo is unique"); @@ -168,6 +168,10 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase { } } + static List> newList(FieldMapper... mapper) { + return Lists.newArrayList(mapper); + } + // this sucks how much must be overriden just do get a dummy field mapper... static class FakeFieldMapper extends AbstractFieldMapper { static Settings dummySettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id).build(); diff --git a/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java index fb5918373cc..19c25e07450 100644 --- a/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; @@ -45,7 +46,6 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -229,11 +229,11 @@ public class CopyToMapperTests extends ElasticsearchSingleNodeTest { DocumentMapper docMapperAfter = parser.parse(mappingAfter); - DocumentMapper.MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), mergeFlags().simulate(true)); + MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), true); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapperBefore.merge(docMapperAfter.mapping(), mergeFlags().simulate(false)); + docMapperBefore.merge(docMapperAfter.mapping(), false); fields = docMapperBefore.mappers().getMapper("copy_test").copyTo().copyToFields(); diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java index 800a47d9869..ae1aeccae91 100644 --- a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; @@ -32,7 +33,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; import static org.hamcrest.Matchers.equalTo; /** @@ -64,12 +64,12 @@ public class TokenCountFieldMapperTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - DocumentMapper.MergeResult mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(true)); + MergeResult mergeResult = stage1.merge(stage2.mapping(), true); assertThat(mergeResult.hasConflicts(), equalTo(false)); // Just simulated so merge hasn't happened yet assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword")); - mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + mergeResult = stage1.merge(stage2.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); // Just simulated so merge hasn't happened yet assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard")); diff --git a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java index 3990ff86df3..9036ed97736 100644 --- a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; @@ -365,9 +366,9 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest { Map config = getConfigurationViaXContent(initialDateFieldMapper); assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy")); - DocumentMapper.MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), false); - assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.conflicts()), mergeResult.hasConflicts(), is(false)); + assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.buildConflicts()), mergeResult.hasConflicts(), is(false)); assertThat(defaultMapper.mappers().getMapper("field"), is(instanceOf(DateFieldMapper.class))); DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().getMapper("field"); diff --git a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index 8df9f1e7d2a..e51f4d4fc50 100755 --- a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -33,7 +33,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.FieldMapperListener; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ObjectMapperListener; import org.elasticsearch.index.mapper.ParseContext; @@ -219,7 +219,7 @@ public class ExternalMapper extends AbstractFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // ignore this for now } diff --git a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java index 4ec787accb8..dd2c78fd735 100644 --- a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java +++ b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java @@ -44,9 +44,9 @@ public class ExternalRootMapper implements RootMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { if (!(mergeWith instanceof ExternalRootMapper)) { - mergeContext.addConflict("Trying to merge " + mergeWith + " with " + this); + mergeResult.addConflict("Trying to merge " + mergeWith + " with " + this); } } diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index e4f12589dc5..084c6b7d3cd 100644 --- a/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; @@ -30,7 +31,6 @@ import org.junit.Test; import java.util.ArrayList; import java.util.Arrays; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; import static org.hamcrest.Matchers.*; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -486,11 +486,11 @@ public class GeoPointFieldMapperTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - DocumentMapper.MergeResult mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + MergeResult mergeResult = stage1.merge(stage2.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.conflicts().length, equalTo(2)); + assertThat(mergeResult.buildConflicts().length, equalTo(2)); // todo better way of checking conflict? - assertThat("mapper [point] has different validate_lat", isIn(new ArrayList<>(Arrays.asList(mergeResult.conflicts())))); + assertThat("mapper [point] has different validate_lat", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts())))); // correct mapping and ensure no failures stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") @@ -498,7 +498,7 @@ public class GeoPointFieldMapperTests extends ElasticsearchSingleNodeTest { .field("validate", true).field("normalize", true).endObject().endObject() .endObject().endObject().string(); stage2 = parser.parse(stage2Mapping); - mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + mergeResult = stage1.merge(stage2.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); } } diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java index f0aad36239b..b959bb41ab6 100644 --- a/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; @@ -35,7 +36,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.isIn; @@ -337,11 +337,11 @@ public class GeoShapeFieldMapperTests extends ElasticsearchSingleNodeTest { .field("orientation", "cw").endObject().endObject().endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - DocumentMapper.MergeResult mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + MergeResult mergeResult = stage1.merge(stage2.mapping(), false); // check correct conflicts assertThat(mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.conflicts().length, equalTo(3)); - ArrayList conflicts = new ArrayList<>(Arrays.asList(mergeResult.conflicts())); + assertThat(mergeResult.buildConflicts().length, equalTo(3)); + ArrayList conflicts = new ArrayList<>(Arrays.asList(mergeResult.buildConflicts())); assertThat("mapper [shape] has different strategy", isIn(conflicts)); assertThat("mapper [shape] has different tree", isIn(conflicts)); assertThat("mapper [shape] has different tree_levels or precision", isIn(conflicts)); @@ -364,7 +364,7 @@ public class GeoShapeFieldMapperTests extends ElasticsearchSingleNodeTest { .startObject("properties").startObject("shape").field("type", "geo_shape").field("precision", "1m") .field("distance_error_pct", 0.001).field("orientation", "cw").endObject().endObject().endObject().endObject().string(); stage2 = parser.parse(stage2Mapping); - mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + mergeResult = stage1.merge(stage2.mapping(), false); // verify mapping changes, and ensure no failures assertThat(mergeResult.hasConflicts(), equalTo(false)); diff --git a/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java index b18c678bf00..baa77ed7616 100644 --- a/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java @@ -102,7 +102,7 @@ public class IndexTypeMapperTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper mapperDisabled = parser.parse(mappingWithIndexDisabled); - mapperEnabled.merge(mapperDisabled.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mapperEnabled.merge(mapperDisabled.mapping(), false); assertThat(mapperEnabled.IndexFieldMapper().enabled(), is(false)); } @@ -118,7 +118,7 @@ public class IndexTypeMapperTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper disabledMapper = parser.parse(disabledMapping); - enabledMapper.merge(disabledMapper.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + enabledMapper.merge(disabledMapper.mapping(), false); assertThat(enabledMapper.indexMapper().enabled(), is(false)); } diff --git a/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java index ef8f0c1d259..dfd8705612e 100644 --- a/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java @@ -162,11 +162,11 @@ public class FieldNamesFieldMapperTests extends ElasticsearchSingleNodeTest { DocumentMapper mapperEnabled = parser.parse(enabledMapping); DocumentMapper mapperDisabled = parser.parse(disabledMapping); - mapperEnabled.merge(mapperDisabled.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mapperEnabled.merge(mapperDisabled.mapping(), false); assertFalse(mapperEnabled.rootMapper(FieldNamesFieldMapper.class).enabled()); mapperEnabled = parser.parse(enabledMapping); - mapperDisabled.merge(mapperEnabled.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mapperDisabled.merge(mapperEnabled.mapping(), false); assertTrue(mapperEnabled.rootMapper(FieldNamesFieldMapper.class).enabled()); } } diff --git a/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java index ad3556f7873..eb43416891b 100644 --- a/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java @@ -23,12 +23,12 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; import static org.hamcrest.Matchers.*; /** @@ -51,13 +51,13 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest { .endObject().endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - DocumentMapper.MergeResult mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(true)); + MergeResult mergeResult = stage1.merge(stage2.mapping(), true); assertThat(mergeResult.hasConflicts(), equalTo(false)); // since we are simulating, we should not have the age mapping assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue()); // now merge, don't simulate - mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + mergeResult = stage1.merge(stage2.mapping(), false); // there is still merge failures assertThat(mergeResult.hasConflicts(), equalTo(false)); // but we have the age in @@ -76,7 +76,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest { DocumentMapper withDynamicMapper = parser.parse(withDynamicMapping); assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); - DocumentMapper.MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), mergeFlags().simulate(false)); + MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); } @@ -93,14 +93,14 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest { .endObject().endObject().endObject().string(); DocumentMapper nestedMapper = parser.parse(nestedMapping); - DocumentMapper.MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), mergeFlags().simulate(true)); + MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), true); assertThat(mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.conflicts().length, equalTo(1)); - assertThat(mergeResult.conflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested")); + assertThat(mergeResult.buildConflicts().length, equalTo(1)); + assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested")); - mergeResult = nestedMapper.merge(objectMapper.mapping(), mergeFlags().simulate(true)); - assertThat(mergeResult.conflicts().length, equalTo(1)); - assertThat(mergeResult.conflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested")); + mergeResult = nestedMapper.merge(objectMapper.mapping(), true); + assertThat(mergeResult.buildConflicts().length, equalTo(1)); + assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested")); } @Test @@ -117,7 +117,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").searchAnalyzer()).name(), equalTo("whitespace")); - DocumentMapper.MergeResult mergeResult = existing.merge(changed.mapping(), mergeFlags().simulate(false)); + MergeResult mergeResult = existing.merge(changed.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").searchAnalyzer()).name(), equalTo("keyword")); @@ -137,7 +137,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").searchAnalyzer()).name(), equalTo("whitespace")); - DocumentMapper.MergeResult mergeResult = existing.merge(changed.mapping(), mergeFlags().simulate(false)); + MergeResult mergeResult = existing.merge(changed.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").searchAnalyzer()).name(), equalTo("standard")); diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java index 8cc6694013f..b68c49d7956 100644 --- a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; @@ -33,7 +34,6 @@ import java.util.Arrays; import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath; import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; import static org.hamcrest.Matchers.*; /** @@ -62,10 +62,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json"); DocumentMapper docMapper2 = parser.parse(mapping); - DocumentMapper.MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper2.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper2.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -85,10 +85,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json"); DocumentMapper docMapper3 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper3.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + mergeResult = docMapper.merge(docMapper3.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper3.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper3.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -103,10 +103,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { DocumentMapper docMapper4 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper4.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + mergeResult = docMapper.merge(docMapper4.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper4.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper4.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -138,10 +138,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json"); DocumentMapper docMapper2 = parser.parse(mapping); - DocumentMapper.MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper2.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper2.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -161,10 +161,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json"); DocumentMapper docMapper3 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper3.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + mergeResult = docMapper.merge(docMapper3.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper3.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper3.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -177,17 +177,17 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json"); DocumentMapper docMapper4 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper4.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.conflicts()[0], equalTo("mapper [name] has different index values")); - assertThat(mergeResult.conflicts()[1], equalTo("mapper [name] has different store values")); + mergeResult = docMapper.merge(docMapper4.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true)); + assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values")); + assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values")); - mergeResult = docMapper.merge(docMapper4.mapping(), mergeFlags().simulate(false)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(true)); + mergeResult = docMapper.merge(docMapper4.mapping(), false); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true)); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); - assertThat(mergeResult.conflicts()[0], equalTo("mapper [name] has different index values")); - assertThat(mergeResult.conflicts()[1], equalTo("mapper [name] has different store values")); + assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values")); + assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values")); // There are conflicts, but the `name.not_indexed3` has been added, b/c that field has no conflicts assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); diff --git a/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index 2012b0d7713..4b560ae011b 100644 --- a/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -114,7 +114,7 @@ public class SizeMappingTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper disabledMapper = parser.parse(disabledMapping); - enabledMapper.merge(disabledMapper.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + enabledMapper.merge(disabledMapper.mapping(), false); assertThat(enabledMapper.SizeFieldMapper().enabled(), is(false)); } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 57c98584442..f9547fbefe6 100644 --- a/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -41,8 +41,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapper.MergeFlags; -import org.elasticsearch.index.mapper.DocumentMapper.MergeResult; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.Mapper.BuilderContext; @@ -500,8 +499,8 @@ public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest { String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", false).endObject() .endObject().endObject().endObject().endObject().string(); - MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), MergeFlags.mergeFlags().simulate(false)); - assertFalse(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts()); + MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), false); + assertFalse(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts()); doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder() .startObject() @@ -515,10 +514,10 @@ public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest { updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", true).endObject() .endObject().endObject().endObject().endObject().string(); - mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), MergeFlags.mergeFlags()); + mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), true); assertTrue(mergeResult.hasConflicts()); - assertEquals(1, mergeResult.conflicts().length); - assertTrue(mergeResult.conflicts()[0].contains("cannot enable norms")); + assertEquals(1, mergeResult.buildConflicts().length); + assertTrue(mergeResult.buildConflicts()[0].contains("cannot enable norms")); } public void testTermsFilter() throws Exception { diff --git a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index 08d8af1afa4..c5adf8cb50e 100644 --- a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -141,7 +141,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper disabledMapper = parser.parse(disabledMapping); - enabledMapper.merge(disabledMapper.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + enabledMapper.merge(disabledMapper.mapping(), false); assertThat(enabledMapper.timestampFieldMapper().enabled(), is(false)); } @@ -502,8 +502,8 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "yes").endObject() .endObject().endObject().string(); - DocumentMapper.MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); - assertThat(mergeResult.conflicts().length, equalTo(0)); + MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), false); + assertThat(mergeResult.buildConflicts().length, equalTo(0)); assertThat(docMapper.timestampFieldMapper().fieldDataType().getLoading(), equalTo(FieldMapper.Loading.EAGER)); assertThat(docMapper.timestampFieldMapper().fieldDataType().getFormat(indexSettings), equalTo("array")); } @@ -576,13 +576,13 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { .endObject() .endObject().endObject().string(); - DocumentMapper.MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true); String[] expectedConflicts = {"mapper [_timestamp] has different index values", "mapper [_timestamp] has different store values", "Cannot update default in _timestamp value. Value is 1970-01-01 now encountering 1970-01-02", "Cannot update path in _timestamp value. Value is foo path in merged mapping is bar", "mapper [_timestamp] has different tokenize values"}; - for (String conflict : mergeResult.conflicts()) { + for (String conflict : mergeResult.buildConflicts()) { assertThat(conflict, isIn(expectedConflicts)); } - assertThat(mergeResult.conflicts().length, equalTo(expectedConflicts.length)); + assertThat(mergeResult.buildConflicts().length, equalTo(expectedConflicts.length)); assertThat(docMapper.timestampFieldMapper().fieldDataType().getLoading(), equalTo(FieldMapper.Loading.LAZY)); assertTrue(docMapper.timestampFieldMapper().enabled()); assertThat(docMapper.timestampFieldMapper().fieldDataType().getFormat(indexSettings), equalTo("doc_values")); @@ -610,7 +610,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { .endObject() .endObject().endObject().string(); - DocumentMapper.MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true); List expectedConflicts = new ArrayList<>(); expectedConflicts.add("mapper [_timestamp] has different index values"); expectedConflicts.add("mapper [_timestamp] has different tokenize values"); @@ -620,7 +620,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { expectedConflicts.add("mapper [_timestamp] has different doc_values values"); } - for (String conflict : mergeResult.conflicts()) { + for (String conflict : mergeResult.buildConflicts()) { assertThat(conflict, isIn(expectedConflicts)); } } @@ -671,10 +671,10 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { DocumentMapper docMapper = parser.parse(mapping1); docMapper.refreshSource(); docMapper = parser.parse(docMapper.mappingSource().string()); - DocumentMapper.MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); - assertThat(mergeResult.conflicts().length, equalTo(conflict == null ? 0:1)); + MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true); + assertThat(mergeResult.buildConflicts().length, equalTo(conflict == null ? 0:1)); if (conflict != null) { - assertThat(mergeResult.conflicts()[0], containsString(conflict)); + assertThat(mergeResult.buildConflicts()[0], containsString(conflict)); } } diff --git a/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java index 2c9868b4ced..92b9ba83859 100644 --- a/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java @@ -116,8 +116,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { DocumentMapper mapperWithoutTtl = parser.parse(mappingWithoutTtl); DocumentMapper mapperWithTtl = parser.parse(mappingWithTtl); - DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(false); - DocumentMapper.MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl.mapping(), mergeFlags); + MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(mapperWithoutTtl.TTLFieldMapper().enabled(), equalTo(true)); @@ -143,8 +142,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { DocumentMapper initialMapper = parser.parse(mappingWithTtl); DocumentMapper updatedMapper = parser.parse(updatedMapping); - DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(false); - DocumentMapper.MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), mergeFlags); + MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true)); @@ -158,8 +156,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { DocumentMapper initialMapper = parser.parse(mappingWithTtl); DocumentMapper updatedMapper = parser.parse(mappingWithTtlDisabled); - DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(true); - DocumentMapper.MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), mergeFlags); + MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true); assertThat(mergeResult.hasConflicts(), equalTo(true)); assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true)); @@ -197,7 +194,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { public void testNoConflictIfNothingSetAndDisabledLater() throws Exception { IndexService indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type"); XContentBuilder mappingWithTtlDisabled = getMappingWithTtlDisabled("7d"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDisabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(randomBoolean())); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean()); assertFalse(mergeResult.hasConflicts()); } @@ -205,7 +202,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { public void testNoConflictIfNothingSetAndEnabledLater() throws Exception { IndexService indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type"); XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(randomBoolean())); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean()); assertFalse(mergeResult.hasConflicts()); } @@ -214,7 +211,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); IndexService indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithTtlEnabled); XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), false); assertFalse(mergeResult.hasConflicts()); CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); assertThat(mappingAfterMerge, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); @@ -227,7 +224,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { CompressedString mappingAfterCreation = indexService.mapperService().documentMapper("type").refreshSource(); assertThat(mappingAfterCreation, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), false); assertFalse(mergeResult.hasConflicts()); CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); assertThat(mappingAfterMerge, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); @@ -241,7 +238,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { IndexService indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithTtl); CompressedString mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); XContentBuilder mappingWithTtlDifferentDefault = getMappingWithTtlEnabled("7d"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDifferentDefault.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDifferentDefault.string()), true).mapping(), true); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - no mappings applied CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); @@ -253,7 +250,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled(); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), true); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - no mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); @@ -265,7 +262,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), true); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - no mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); @@ -276,7 +273,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { mappingWithoutTtl = getMappingWithTtlDisabled("6d"); indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), false); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); @@ -286,7 +283,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { // check if switching simulate flag off works if nothing was applied in the beginning indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type"); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), false); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); diff --git a/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java index aa227fd7cce..7c12cd14c36 100644 --- a/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; @@ -79,9 +80,9 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest { private void testNoConflictWhileMergingAndMappingChanged(XContentBuilder mapping, XContentBuilder mappingUpdate, XContentBuilder expectedMapping) throws IOException { IndexService indexService = createIndex("test", ImmutableSettings.settingsBuilder().build(), "type", mapping); // simulate like in MetaDataMappingService#putMapping - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), false); // assure we have no conflicts - assertThat(mergeResult.conflicts().length, equalTo(0)); + assertThat(mergeResult.buildConflicts().length, equalTo(0)); // make sure mappings applied CompressedString mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate.toString(), equalTo(expectedMapping.string())); @@ -103,9 +104,9 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest { IndexService indexService = createIndex("test", ImmutableSettings.settingsBuilder().build(), "type", mapping); CompressedString mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource(); // simulate like in MetaDataMappingService#putMapping - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), true); // assure we have conflicts - assertThat(mergeResult.conflicts().length, equalTo(1)); + assertThat(mergeResult.buildConflicts().length, equalTo(1)); // make sure simulate flag actually worked - no mappings applied CompressedString mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate)); From 54cf885d148c02d42f1e96dc2d80e30b281fbab4 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Fri, 24 Apr 2015 11:00:54 -0600 Subject: [PATCH 112/236] Disable security manager when running with `mvn exec:exec` --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index a6c0d2f8517..28144f32a1f 100644 --- a/pom.xml +++ b/pom.xml @@ -438,7 +438,6 @@ 1.4.0 - exec exec @@ -447,6 +446,7 @@ ${jvm.executable} + -Des.security.manager.enabled=false -classpath org.elasticsearch.bootstrap.Bootstrap From 389ce39919f425b6c8ee395f9862ebd57f21ea18 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Fri, 24 Apr 2015 12:46:13 +0200 Subject: [PATCH 113/236] CommitStats doesn't need to allow for null values in commit user data Lucene forbids writing those and MapBuilder.immutableMap doesn't like them either, as discovered by @brwe Closes #10774 --- src/main/java/org/elasticsearch/index/engine/CommitStats.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/src/main/java/org/elasticsearch/index/engine/CommitStats.java index d1e4ed7a2b2..de0474c5d7e 100644 --- a/src/main/java/org/elasticsearch/index/engine/CommitStats.java +++ b/src/main/java/org/elasticsearch/index/engine/CommitStats.java @@ -78,7 +78,7 @@ public final class CommitStats implements Streamable, ToXContent { public void readFrom(StreamInput in) throws IOException { MapBuilder builder = MapBuilder.newMapBuilder(); for (int i = in.readVInt(); i > 0; i--) { - builder.put(in.readString(), in.readOptionalString()); + builder.put(in.readString(), in.readString()); } userData = builder.immutableMap(); generation = in.readLong(); @@ -90,7 +90,7 @@ public final class CommitStats implements Streamable, ToXContent { out.writeVInt(userData.size()); for (Map.Entry entry : userData.entrySet()) { out.writeString(entry.getKey()); - out.writeOptionalString(entry.getValue()); + out.writeString(entry.getValue()); } out.writeLong(generation); out.writeString(id); From 95e9b86505dae9f4b4aefc3bc2f444c4f133b91b Mon Sep 17 00:00:00 2001 From: Oliver Eilhard Date: Fri, 24 Apr 2015 09:41:12 +0200 Subject: [PATCH 114/236] Mustache tags syntax Hi there. I've been experimenting with the search templates recently and I'm a bit confused. Shouldn't the Mustache tags be written like `{{tagname}}` instead of `{tagname}`? Your using `{{...}}` [here](http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html) BTW. Using the first example in that page seems to indicate that something's wrong, or am I missing something? ``` $ curl 'localhost:9200/test/_search' -d '{"query":{"template":{"query":{"match":{"text":"{keywords}"}},"params":{"keywords":"value1_foo"}}}}' {"took":1,"timed_out":false,"_shards":{"total":1,"successful":1,"failed":0},"hits":{"total":0,"max_score":null,"hits":[]}} $ curl 'localhost:9200/test/_search' -d '{"query":{"template":{"query":{"match":{"text":"{{keywords}}"}},"params":{"keywords":"value1_foo"}}}}' {"took":1,"timed_out":false,"_shards":{"total":1,"successful":1,"failed":0},"hits":{"total":1,"max_score":1.0,"hits":[{"_index":"test","_type":"testtype","_id":"1","_score":1.0,"_source":{"text":"value1_foo"}}]}} ``` --- docs/reference/query-dsl/queries/template-query.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/query-dsl/queries/template-query.asciidoc b/docs/reference/query-dsl/queries/template-query.asciidoc index 6810b0663ea..1b22b90066e 100644 --- a/docs/reference/query-dsl/queries/template-query.asciidoc +++ b/docs/reference/query-dsl/queries/template-query.asciidoc @@ -12,7 +12,7 @@ GET /_search { "query": { "template": { - "query": { "match": { "text": "{query_string}" }}}, + "query": { "match": { "text": "{{query_string}}" }}}, "params" : { "query_string" : "all about search" } @@ -45,7 +45,7 @@ GET /_search { "query": { "template": { - "query": "{ \"match\": { \"text\": \"{query_string}\" }}}", <1> + "query": "{ \"match\": { \"text\": \"{{query_string}}\" }}}", <1> "params" : { "query_string" : "all about search" } @@ -85,7 +85,7 @@ Alternatively, you can register a query template in the special `.scripts` index ------------------------------------------ PUT /_search/template/my_template { - "template": { "match": { "text": "{query_string}" }}}, + "template": { "match": { "text": "{{query_string}}" }}}, } ------------------------------------------ From f27f5aaa2205b1a711cc0f12744807ea08eebae6 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 24 Apr 2015 12:30:26 -0700 Subject: [PATCH 115/236] Upgrade to lucene-5.2-snapshot-1675927 --- pom.xml | 2 +- .../org/elasticsearch/index/analysis/AnalysisFactoryTests.java | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 28144f32a1f..3fca8ccce01 100644 --- a/pom.xml +++ b/pom.xml @@ -32,7 +32,7 @@ 5.2.0 - 1675363 + 1675927 5.2.0-snapshot-${lucene.snapshot.revision} 2.1.14 auto diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java index 500834fbee1..eac199db7ed 100644 --- a/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java @@ -164,6 +164,8 @@ public class AnalysisFactoryTests extends ElasticsearchTestCase { put("hyphenatedwords", Void.class); // repeats anything marked as keyword put("keywordrepeat", Void.class); + // like limittokencount, but by offset + put("limittokenoffset", Void.class); // like limittokencount, but by position put("limittokenposition", Void.class); // ??? From 3e5b8a21b4d2e8a85c4037dd2296b7f6861b0a46 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 23 Apr 2015 19:05:31 +0200 Subject: [PATCH 116/236] Internal: Wait for required mappings to be available on the replica before indexing. Due to timing issues, mappings that are required to index a document might not be available on the replica at indexing time. In that case the replica starts listening to cluster state changes and re-parses the document until no dynamic mappings updates are generated. --- .../action/ActionWriteResponse.java | 8 ++ .../action/bulk/TransportShardBulkAction.java | 28 ++-- .../action/index/IndexResponse.java | 13 ++ .../action/index/TransportIndexAction.java | 35 ++--- ...nsportShardReplicationOperationAction.java | 103 ++++++++++++-- .../elasticsearch/cluster/ClusterService.java | 5 +- .../cluster/ClusterStateObserver.java | 43 +++--- .../service/InternalClusterService.java | 10 +- .../org/elasticsearch/common/Strings.java | 32 ++++- .../discovery/zen/ZenDiscovery.java | 6 +- .../discovery/zen/ZenDiscoveryUnitTest.java | 12 +- .../indices/state/RareClusterStateTests.java | 129 ++++++++++++++++++ 12 files changed, 338 insertions(+), 86 deletions(-) diff --git a/src/main/java/org/elasticsearch/action/ActionWriteResponse.java b/src/main/java/org/elasticsearch/action/ActionWriteResponse.java index 5ddefaf99b2..11240cc2cbc 100644 --- a/src/main/java/org/elasticsearch/action/ActionWriteResponse.java +++ b/src/main/java/org/elasticsearch/action/ActionWriteResponse.java @@ -23,15 +23,18 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.bootstrap.Elasticsearch; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.Collections; /** * Base class for write action responses. @@ -156,6 +159,11 @@ public abstract class ActionWriteResponse extends ActionResponse { return builder; } + @Override + public String toString() { + return Strings.toString(this); + } + public static ShardInfo readShardInfo(StreamInput in) throws IOException { ShardInfo shardInfo = new ShardInfo(); shardInfo.readFrom(in); diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 6fac03ac5c9..d5009544a47 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -291,6 +291,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } } + } else { + throw new ElasticsearchIllegalStateException("Unexpected index operation: " + item.request()); } assert item.getPrimaryResponse() != null; @@ -532,7 +534,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation @Override - protected void shardOperationOnReplica(ShardId shardId, BulkShardRequest request) throws Exception { + protected void shardOperationOnReplica(ShardId shardId, BulkShardRequest request) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.shardSafe(shardId.id()); for (int i = 0; i < request.items().length; i++) { @@ -548,28 +550,18 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation if (indexRequest.opType() == IndexRequest.OpType.INDEX) { Engine.Index index = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); - if (index.parsedDoc().dynamicMappingsUpdate() != null) { - if (indexService.index().name().equals(RiverIndexName.Conf.indexName(settings))) { - // mappings updates on the _river are not validated synchronously so we can't - // assume they are here when indexing on a replica - indexService.mapperService().merge(indexRequest.type(), new CompressedString(index.parsedDoc().dynamicMappingsUpdate().toBytes()), true); - } else { - throw new ElasticsearchIllegalStateException("Index operations on replicas should not trigger dynamic mappings updates: [" + index.parsedDoc().dynamicMappingsUpdate() + "]"); - } + Mapping update = index.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); } indexShard.index(index); } else { Engine.Create create = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId()); - if (create.parsedDoc().dynamicMappingsUpdate() != null) { - if (indexService.index().name().equals(RiverIndexName.Conf.indexName(settings))) { - // mappings updates on the _river are not validated synchronously so we can't - // assume they are here when indexing on a replica - indexService.mapperService().merge(indexRequest.type(), new CompressedString(create.parsedDoc().dynamicMappingsUpdate().toBytes()), true); - } else { - throw new ElasticsearchIllegalStateException("Index operations on replicas should not trigger dynamic mappings updates: [" + create.parsedDoc().dynamicMappingsUpdate() + "]"); - } + Mapping update = create.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); } indexShard.create(create); } @@ -592,6 +584,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation throw e; } } + } else { + throw new ElasticsearchIllegalStateException("Unexpected index operation: " + item.request()); } } diff --git a/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 0074d87b563..5727b2b673b 100644 --- a/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -105,4 +105,17 @@ public class IndexResponse extends ActionWriteResponse { out.writeLong(version); out.writeBoolean(created); } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("IndexResponse["); + builder.append("index=").append(index); + builder.append(",type=").append(type); + builder.append(",id=").append(id); + builder.append(",version=").append(version); + builder.append(",created=").append(created); + builder.append(",shards=").append(getShardInfo()); + return builder.append("]").toString(); + } } diff --git a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 5a8c96f352c..2fd801c025d 100644 --- a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.index; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.RoutingMissingException; @@ -54,8 +53,6 @@ import org.elasticsearch.river.RiverIndexName; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.io.IOException; - /** * Performs the index operation. *

    @@ -73,6 +70,8 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi private final TransportCreateIndexAction createIndexAction; private final MappingUpdatedAction mappingUpdatedAction; + private final ClusterService clusterService; + @Inject public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, @@ -83,6 +82,7 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi this.mappingUpdatedAction = mappingUpdatedAction; this.autoCreateIndex = new AutoCreateIndex(settings); this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); + this.clusterService = clusterService; } @Override @@ -201,6 +201,7 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi version = index.version(); created = index.created(); } else { + assert request.opType() == IndexRequest.OpType.CREATE : request.opType(); Engine.Create create = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates(), request.autoGeneratedId()); Mapping update = create.parsedDoc().dynamicMappingsUpdate(); @@ -244,34 +245,24 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi } @Override - protected void shardOperationOnReplica(ShardId shardId, IndexRequest request) throws IOException { + protected void shardOperationOnReplica(ShardId shardId, IndexRequest request) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.shardSafe(shardId.id()); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).type(request.type()).id(request.id()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); if (request.opType() == IndexRequest.OpType.INDEX) { Engine.Index index = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates()); - if (index.parsedDoc().dynamicMappingsUpdate() != null) { - if (indexService.index().name().equals(RiverIndexName.Conf.indexName(settings))) { - // mappings updates on the _river are not validated synchronously so we can't - // assume they are here when indexing on a replica - indexService.mapperService().merge(request.type(), new CompressedString(index.parsedDoc().dynamicMappingsUpdate().toBytes()), true); - } else { - throw new ElasticsearchIllegalStateException("Index operations on replicas should not trigger dynamic mappings updates: [" + index.parsedDoc().dynamicMappingsUpdate() + "]"); - } + Mapping update = index.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); } indexShard.index(index); } else { - Engine.Create create = indexShard.prepareCreate(sourceToParse, - request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates(), request.autoGeneratedId()); - if (create.parsedDoc().dynamicMappingsUpdate() != null) { - if (indexService.index().name().equals(RiverIndexName.Conf.indexName(settings))) { - // mappings updates on the _river are not validated synchronously so we can't - // assume they are here when indexing on a replica - indexService.mapperService().merge(request.type(), new CompressedString(create.parsedDoc().dynamicMappingsUpdate().toBytes()), true); - } else { - throw new ElasticsearchIllegalStateException("Index operations on replicas should not trigger dynamic mappings updates: [" + create.parsedDoc().dynamicMappingsUpdate() + "]"); - } + assert request.opType() == IndexRequest.OpType.CREATE : request.opType(); + Engine.Create create = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates(), request.autoGeneratedId()); + Mapping update = create.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); } indexShard.create(create); } diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index bde8a67b098..ce26311d5fa 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -21,10 +21,11 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; -import org.elasticsearch.action.*; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterService; @@ -35,11 +36,13 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -48,12 +51,21 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.DocumentAlreadyExistsException; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.Map; @@ -112,7 +124,7 @@ public abstract class TransportShardReplicationOperationAction shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable; - protected abstract void shardOperationOnReplica(ShardId shardId, ReplicaRequest shardRequest) throws Exception; + protected abstract void shardOperationOnReplica(ShardId shardId, ReplicaRequest shardRequest); protected abstract ShardIterator shards(ClusterState clusterState, InternalRequest request) throws ElasticsearchException; @@ -203,12 +215,77 @@ public abstract class TransportShardReplicationOperationAction { @Override public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception { - try { - shardOperationOnReplica(request.internalShardId, request); - } catch (Throwable t) { - failReplicaIfNeeded(request.internalShardId.getIndex(), request.internalShardId.id(), t); - throw t; + new AsyncReplicaAction(request, channel).run(); + } + } + + protected static class RetryOnReplicaException extends IndexShardException { + + public RetryOnReplicaException(ShardId shardId, String msg) { + super(shardId, msg); + } + + public RetryOnReplicaException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } + } + + private final class AsyncReplicaAction extends AbstractRunnable { + private final ReplicaRequest request; + private final TransportChannel channel; + // important: we pass null as a timeout as failing a replica is + // something we want to avoid at all costs + private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger); + + + AsyncReplicaAction(ReplicaRequest request, TransportChannel channel) { + this.request = request; + this.channel = channel; + } + + @Override + public void onFailure(Throwable t) { + if (t instanceof RetryOnReplicaException) { + logger.trace("Retrying operation on replica", t); + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + threadPool.executor(executor).execute(AsyncReplicaAction.this); + } + + @Override + public void onClusterServiceClose() { + responseWithFailure(new NodeClosedException(clusterService.localNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + throw new AssertionError("Cannot happen: there is not timeout"); + } + }); + } else { + try { + failReplicaIfNeeded(request.internalShardId.getIndex(), request.internalShardId.id(), t); + } catch (Throwable unexpected) { + logger.error("{} unexpected error while failing replica", request.internalShardId.id(), unexpected); + } finally { + responseWithFailure(t); + } } + } + + protected void responseWithFailure(Throwable t) { + try { + channel.sendResponse(t); + } catch (IOException responseException) { + logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException); + logger.warn("actual Exception", t); + } + } + + @Override + protected void doRun() throws Exception { + shardOperationOnReplica(request.internalShardId, request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } } diff --git a/src/main/java/org/elasticsearch/cluster/ClusterService.java b/src/main/java/org/elasticsearch/cluster/ClusterService.java index f456edd8ddb..805419ccc99 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterService.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.service.PendingClusterTask; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.unit.TimeValue; @@ -95,8 +96,10 @@ public interface ClusterService extends LifecycleComponent { * Adds a cluster state listener that will timeout after the provided timeout, * and is executed after the clusterstate has been successfully applied ie. is * in state {@link org.elasticsearch.cluster.ClusterState.ClusterStateStatus#APPLIED} + * NOTE: a {@code null} timeout means that the listener will never be removed + * automatically */ - void add(TimeValue timeout, TimeoutClusterStateListener listener); + void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener); /** * Submits a task that will update the cluster state. diff --git a/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index a8909636932..28df5aa8017 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -42,18 +42,18 @@ public class ClusterStateObserver { return changedEvent.previousState().version() != changedEvent.state().version(); } }; - private ClusterService clusterService; + + private final ClusterService clusterService; volatile TimeValue timeOutValue; final AtomicReference lastObservedState; + final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener(); // observingContext is not null when waiting on cluster state changes final AtomicReference observingContext = new AtomicReference(null); - volatile long startTime; + volatile Long startTime; volatile boolean timedOut; - volatile TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener(); - public ClusterStateObserver(ClusterService clusterService, ESLogger logger) { this(clusterService, new TimeValue(60000), logger); @@ -65,10 +65,12 @@ public class ClusterStateObserver { * will fail any existing or new #waitForNextChange calls. */ public ClusterStateObserver(ClusterService clusterService, TimeValue timeout, ESLogger logger) { - this.timeOutValue = timeout; this.clusterService = clusterService; this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state())); - this.startTime = System.currentTimeMillis(); + this.timeOutValue = timeout; + if (timeOutValue != null) { + this.startTime = System.currentTimeMillis(); + } this.logger = logger; } @@ -108,19 +110,24 @@ public class ClusterStateObserver { if (observingContext.get() != null) { throw new ElasticsearchException("already waiting for a cluster state change"); } - long timeoutTimeLeft; + + Long timeoutTimeLeft; if (timeOutValue == null) { timeOutValue = this.timeOutValue; - long timeSinceStart = System.currentTimeMillis() - startTime; - timeoutTimeLeft = timeOutValue.millis() - timeSinceStart; - if (timeoutTimeLeft <= 0l) { - // things have timeout while we were busy -> notify - logger.debug("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStart)); - // update to latest, in case people want to retry - timedOut = true; - lastObservedState.set(new ObservedState(clusterService.state())); - listener.onTimeout(timeOutValue); - return; + if (timeOutValue != null) { + long timeSinceStart = System.currentTimeMillis() - startTime; + timeoutTimeLeft = timeOutValue.millis() - timeSinceStart; + if (timeoutTimeLeft <= 0l) { + // things have timeout while we were busy -> notify + logger.debug("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStart)); + // update to latest, in case people want to retry + timedOut = true; + lastObservedState.set(new ObservedState(clusterService.state())); + listener.onTimeout(timeOutValue); + return; + } + } else { + timeoutTimeLeft = null; } } else { this.startTime = System.currentTimeMillis(); @@ -143,7 +150,7 @@ public class ClusterStateObserver { if (!observingContext.compareAndSet(null, context)) { throw new ElasticsearchException("already waiting for a cluster state change"); } - clusterService.add(new TimeValue(timeoutTimeLeft), clusterStateListener); + clusterService.add(timeoutTimeLeft == null ? null : new TimeValue(timeoutTimeLeft), clusterStateListener); } } diff --git a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index ea6866c420f..eb527e07fe4 100644 --- a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -230,7 +230,7 @@ public class InternalClusterService extends AbstractLifecycleComponent implemen static class ProcessClusterState { final ClusterState clusterState; - final PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed; volatile boolean processed; - ProcessClusterState(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + ProcessClusterState(ClusterState clusterState) { this.clusterState = clusterState; - this.newStateProcessed = newStateProcessed; } } @@ -738,7 +736,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen newStateProcessed.onNewClusterStateFailed(new ElasticsearchIllegalStateException("received state from a node that is not part of the cluster")); } else { - final ProcessClusterState processClusterState = new ProcessClusterState(newClusterState, newStateProcessed); + final ProcessClusterState processClusterState = new ProcessClusterState(newClusterState); processNewClusterStates.add(processClusterState); assert newClusterState.nodes().masterNode() != null : "received a cluster state without a master"; diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java index 4d7811fa023..b276f5787a9 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java @@ -105,7 +105,7 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { int numUpdates = scaledRandomIntBetween(50, 100); LinkedList queue = new LinkedList<>(); for (int i = 0; i < numUpdates; i++) { - queue.add(new ProcessClusterState(ClusterState.builder(clusterName).version(i).nodes(nodes).build(), null)); + queue.add(new ProcessClusterState(ClusterState.builder(clusterName).version(i).nodes(nodes).build())); } ProcessClusterState mostRecent = queue.get(numUpdates - 1); Collections.shuffle(queue, getRandom()); @@ -121,15 +121,15 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { DiscoveryNodes nodes2 = DiscoveryNodes.builder().masterNodeId("b").build(); LinkedList queue = new LinkedList<>(); - ProcessClusterState thirdMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(1).nodes(nodes1).build(), null); + ProcessClusterState thirdMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(1).nodes(nodes1).build()); queue.offer(thirdMostRecent); - ProcessClusterState secondMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(2).nodes(nodes1).build(), null); + ProcessClusterState secondMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(2).nodes(nodes1).build()); queue.offer(secondMostRecent); - ProcessClusterState mostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(3).nodes(nodes1).build(), null); + ProcessClusterState mostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(3).nodes(nodes1).build()); queue.offer(mostRecent); Collections.shuffle(queue, getRandom()); - queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(4).nodes(nodes2).build(), null)); - queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(5).nodes(nodes1).build(), null)); + queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(4).nodes(nodes2).build())); + queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(5).nodes(nodes1).build())); assertThat(ZenDiscovery.selectNextStateToProcess(queue), sameInstance(mostRecent.clusterState)); diff --git a/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java b/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java index 2875a90824d..a46a273b2ba 100644 --- a/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java +++ b/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java @@ -20,6 +20,10 @@ package org.elasticsearch.indices.state; import com.google.common.collect.ImmutableMap; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; @@ -27,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; @@ -35,6 +40,10 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -42,10 +51,16 @@ import org.junit.Test; import java.util.Arrays; import java.util.HashSet; +import java.util.List; import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; /** */ @@ -115,4 +130,118 @@ public class RareClusterStateTests extends ElasticsearchIntegrationTest { assertHitCount(client().prepareSearch("test").get(), 0); } + public void testDelayedMappingPropagationOnReplica() throws Exception { + // Here we want to test that everything goes well if the mappings that + // are needed for a document are not available on the replica at the + // time of indexing it + final List nodeNames = internalCluster().startNodesAsync(2).get(); + assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); + + final String master = internalCluster().getMasterName(); + assertThat(nodeNames, hasItem(master)); + String otherNode = null; + for (String node : nodeNames) { + if (node.equals(master) == false) { + otherNode = node; + break; + } + } + assertNotNull(otherNode); + + // Force allocation of the primary on the master node by first only allocating on the master + // and then allowing all nodes so that the replica gets allocated on the other node + assertAcked(prepareCreate("index").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.include._name", master)).get()); + assertAcked(client().admin().indices().prepareUpdateSettings("index").setSettings(ImmutableSettings.builder() + .put("index.routing.allocation.include._name", "")).get()); + ensureGreen(); + + // Check routing tables + ClusterState state = client().admin().cluster().prepareState().get().getState(); + assertEquals(master, state.nodes().masterNode().name()); + List shards = state.routingTable().allShards("index"); + assertThat(shards, hasSize(2)); + for (ShardRouting shard : shards) { + if (shard.primary()) { + // primary must be on the master + assertEquals(state.nodes().masterNodeId(), shard.currentNodeId()); + } else { + assertTrue(shard.active()); + } + } + + // Block cluster state processing on the replica + BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, getRandom()); + internalCluster().setDisruptionScheme(disruption); + disruption.startDisrupting(); + final AtomicReference putMappingResponse = new AtomicReference<>(); + client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute(new ActionListener() { + @Override + public void onResponse(PutMappingResponse response) { + putMappingResponse.set(response); + } + @Override + public void onFailure(Throwable e) { + putMappingResponse.set(e); + } + }); + // Wait for mappings to be available on master + assertBusy(new Runnable() { + @Override + public void run() { + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); + final IndexService indexService = indicesService.indexServiceSafe("index"); + assertNotNull(indexService); + final MapperService mapperService = indexService.mapperService(); + DocumentMapper mapper = mapperService.documentMapper("type"); + assertNotNull(mapper); + assertNotNull(mapper.mappers().getMapper("field")); + } + }); + + final AtomicReference docIndexResponse = new AtomicReference<>(); + client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener() { + @Override + public void onResponse(IndexResponse response) { + docIndexResponse.set(response); + } + @Override + public void onFailure(Throwable e) { + docIndexResponse.set(e); + } + }); + + // Wait for document to be indexed on primary + assertBusy(new Runnable() { + @Override + public void run() { + assertTrue(client().prepareGet("index", "type", "1").setPreference("_primary").get().isExists()); + } + }); + + // The mappings have not been propagated to the replica yet as a consequence the document count not be indexed + // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled + // and not just because it takes time to replicate the indexing request to the replica + Thread.sleep(100); + assertThat(putMappingResponse.get(), equalTo(null)); + assertThat(docIndexResponse.get(), equalTo(null)); + + // Now make sure the indexing request finishes successfully + disruption.stopDisrupting(); + assertBusy(new Runnable() { + @Override + public void run() { + assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class)); + PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get(); + assertTrue(resp.isAcknowledged()); + assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); + IndexResponse docResp = (IndexResponse) docIndexResponse.get(); + assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), + 2, docResp.getShardInfo().getTotal()); // both shards should have succeeded + } + }); + } + } From 9f20c1c64295241865fc71d81cb2932c81fd2dc8 Mon Sep 17 00:00:00 2001 From: Mathias Fussenegger Date: Fri, 24 Apr 2015 22:34:32 +0200 Subject: [PATCH 117/236] change BigArrays to not extend AbstractComponent In order to avoid the ``getLogger(getClass())`` calls in the AbstractComponent constructor. Seems like BigArrays used to be a Singleton but it actually no longer is one. Every time a SearchContext is created a new BigArrays instance is created via the ``withCircuitBreaking`` call. --- .../org/elasticsearch/common/util/BigArrays.java | 13 ++++++------- .../common/bytes/PagedBytesReferenceTest.java | 2 +- .../elasticsearch/common/util/BigArraysTests.java | 6 +++--- .../http/netty/NettyHttpServerPipeliningTest.java | 2 +- .../test/cache/recycler/MockBigArrays.java | 13 +++++-------- .../transport/NettySizeHeaderFrameDecoderTests.java | 2 +- .../netty/NettyTransportMultiPortTests.java | 2 +- 7 files changed, 18 insertions(+), 22 deletions(-) diff --git a/src/main/java/org/elasticsearch/common/util/BigArrays.java b/src/main/java/org/elasticsearch/common/util/BigArrays.java index 8467c3c5fcb..693b552d8ed 100644 --- a/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -39,9 +39,9 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.util.Arrays; /** Utility class to work with arrays. */ -public class BigArrays extends AbstractComponent { +public class BigArrays { - public static final BigArrays NON_RECYCLING_INSTANCE = new BigArrays(ImmutableSettings.EMPTY, null, null); + public static final BigArrays NON_RECYCLING_INSTANCE = new BigArrays(null, null); /** Page size in bytes: 16KB */ public static final int PAGE_SIZE_IN_BYTES = 1 << 14; @@ -366,13 +366,12 @@ public class BigArrays extends AbstractComponent { final boolean checkBreaker; @Inject - public BigArrays(Settings settings, PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService) { + public BigArrays(PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService) { // Checking the breaker is disabled if not specified - this(settings, recycler, breakerService, false); + this(recycler, breakerService, false); } - public BigArrays(Settings settings, PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService, boolean checkBreaker) { - super(settings); + public BigArrays(PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService, boolean checkBreaker) { this.checkBreaker = checkBreaker; this.recycler = recycler; this.breakerService = breakerService; @@ -415,7 +414,7 @@ public class BigArrays extends AbstractComponent { * explicitly enabled, instead of only accounting enabled */ public BigArrays withCircuitBreaking() { - return new BigArrays(this.settings, this.recycler, this.breakerService, true); + return new BigArrays(this.recycler, this.breakerService, true); } private T resizeInPlace(T array, long newSize) { diff --git a/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java b/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java index 586d78ce538..1ffbb8d9a0d 100644 --- a/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java +++ b/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java @@ -54,7 +54,7 @@ public class PagedBytesReferenceTest extends ElasticsearchTestCase { @Before public void setUp() throws Exception { super.setUp(); - bigarrays = new BigArrays(ImmutableSettings.EMPTY, null, new NoneCircuitBreakerService()); + bigarrays = new BigArrays(null, new NoneCircuitBreakerService()); } @Override diff --git a/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 2013edb53a4..2e9497e5fd9 100644 --- a/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -39,7 +39,7 @@ public class BigArraysTests extends ElasticsearchSingleNodeTest { public static BigArrays randombigArrays() { final PageCacheRecycler recycler = randomBoolean() ? null : ElasticsearchSingleNodeTest.getInstanceFromNode(PageCacheRecycler.class); - return new MockBigArrays(ImmutableSettings.EMPTY, recycler, new NoneCircuitBreakerService()); + return new MockBigArrays(recycler, new NoneCircuitBreakerService()); } private BigArrays bigArrays; @@ -339,7 +339,7 @@ public class BigArraysTests extends ElasticsearchSingleNodeTest { .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, size - 1) .build(), new NodeSettingsService(ImmutableSettings.EMPTY)); - BigArrays bigArrays = new BigArrays(ImmutableSettings.EMPTY, null, hcbs).withCircuitBreaking(); + BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); try { create.invoke(bigArrays, size); @@ -359,7 +359,7 @@ public class BigArraysTests extends ElasticsearchSingleNodeTest { .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, maxSize) .build(), new NodeSettingsService(ImmutableSettings.EMPTY)); - BigArrays bigArrays = new BigArrays(ImmutableSettings.EMPTY, null, hcbs).withCircuitBreaking(); + BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); final int size = scaledRandomIntBetween(1, 20); BigArray array = (BigArray) create.invoke(bigArrays, size); diff --git a/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java b/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java index 623ce887619..c626dc1c966 100644 --- a/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java +++ b/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java @@ -76,7 +76,7 @@ public class NettyHttpServerPipeliningTest extends ElasticsearchTestCase { networkService = new NetworkService(ImmutableSettings.EMPTY); threadPool = new ThreadPool("test"); mockPageCacheRecycler = new MockPageCacheRecycler(ImmutableSettings.EMPTY, threadPool); - bigArrays = new MockBigArrays(ImmutableSettings.EMPTY, mockPageCacheRecycler, new NoneCircuitBreakerService()); + bigArrays = new MockBigArrays(mockPageCacheRecycler, new NoneCircuitBreakerService()); } @After diff --git a/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java b/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java index 557792fc3cf..08f86a7004e 100644 --- a/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java +++ b/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java @@ -30,7 +30,6 @@ import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArray; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; @@ -87,18 +86,16 @@ public class MockBigArrays extends BigArrays { } private final Random random; - private final Settings settings; private final PageCacheRecycler recycler; private final CircuitBreakerService breakerService; @Inject - public MockBigArrays(Settings settings, PageCacheRecycler recycler, CircuitBreakerService breakerService) { - this(settings, recycler, breakerService, false); + public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService) { + this(recycler, breakerService, false); } - public MockBigArrays(Settings settings, PageCacheRecycler recycler, CircuitBreakerService breakerService, boolean checkBreaker) { - super(settings, recycler, breakerService, checkBreaker); - this.settings = settings; + public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService, boolean checkBreaker) { + super(recycler, breakerService, checkBreaker); this.recycler = recycler; this.breakerService = breakerService; long seed; @@ -114,7 +111,7 @@ public class MockBigArrays extends BigArrays { @Override public BigArrays withCircuitBreaking() { - return new MockBigArrays(this.settings, this.recycler, this.breakerService, true); + return new MockBigArrays(this.recycler, this.breakerService, true); } @Override diff --git a/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java index 928cd2e7955..5a0e64d992c 100644 --- a/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java +++ b/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -61,7 +61,7 @@ public class NettySizeHeaderFrameDecoderTests extends ElasticsearchTestCase { threadPool = new ThreadPool(settings, new NodeSettingsService(settings)); NetworkService networkService = new NetworkService(settings); - BigArrays bigArrays = new MockBigArrays(settings, new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); + BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT); nettyTransport.start(); TransportService transportService = new TransportService(nettyTransport, threadPool); diff --git a/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java b/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java index 20a1731e0c9..aeb085a07b3 100644 --- a/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java @@ -170,7 +170,7 @@ public class NettyTransportMultiPortTests extends ElasticsearchTestCase { } private NettyTransport startNettyTransport(Settings settings, ThreadPool threadPool) { - BigArrays bigArrays = new MockBigArrays(settings, new PageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); + BigArrays bigArrays = new MockBigArrays(new PageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); NettyTransport nettyTransport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays, Version.CURRENT); nettyTransport.start(); From 26189ee2e62afd819b793f5e6f3a6f7e0382775b Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Fri, 24 Apr 2015 22:38:43 -0400 Subject: [PATCH 118/236] Rename helpers to follow naming conventions --- ...stHelpers.java => ReducerHelperTests.java} | 2 +- .../reducers/moving/avg/MovAvgTests.java | 31 +++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) rename src/test/java/org/elasticsearch/search/aggregations/reducers/{ReducerTestHelpers.java => ReducerHelperTests.java} (98%) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerTestHelpers.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java similarity index 98% rename from src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerTestHelpers.java rename to src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java index 8496b93e7ea..0b0f720344f 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerTestHelpers.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java @@ -33,7 +33,7 @@ import java.util.ArrayList; * Provides helper methods and classes for use in Reducer tests, such as creating mock histograms or computing * simple metrics */ -public class ReducerTestHelpers extends ElasticsearchTestCase { +public class ReducerHelperTests extends ElasticsearchTestCase { /** * Generates a mock histogram to use for testing. Each MockBucket holds a doc count, key and document values diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index eaedfe4e597..cd6ac6cf490 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.reducers.moving.avg; -import com.carrotsearch.randomizedtesting.annotations.Seed; import com.google.common.collect.EvictingQueue; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -33,7 +32,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder; import org.elasticsearch.search.aggregations.reducers.BucketHelpers; -import org.elasticsearch.search.aggregations.reducers.ReducerTestHelpers; +import org.elasticsearch.search.aggregations.reducers.ReducerHelperTests; import org.elasticsearch.search.aggregations.reducers.SimpleValue; import org.elasticsearch.search.aggregations.reducers.movavg.models.DoubleExpModel; import org.elasticsearch.search.aggregations.reducers.movavg.models.LinearModel; @@ -75,7 +74,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { static double beta; static BucketHelpers.GapPolicy gapPolicy; static ValuesSourceMetricsAggregationBuilder metric; - static List mockHisto; + static List mockHisto; static Map> testValues; @@ -124,7 +123,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; metric = randomMetric("the_metric", VALUE_FIELD); - mockHisto = ReducerTestHelpers.generateHistogram(interval, numBuckets, randomDouble(), randomDouble()); + mockHisto = ReducerHelperTests.generateHistogram(interval, numBuckets, randomDouble(), randomDouble()); testValues = new HashMap<>(8); @@ -134,7 +133,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } } - for (ReducerTestHelpers.MockBucket mockBucket : mockHisto) { + for (ReducerHelperTests.MockBucket mockBucket : mockHisto) { for (double value : mockBucket.docValues) { builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject() .field(INTERVAL_FIELD, mockBucket.key) @@ -166,7 +165,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { ArrayList values = new ArrayList<>(numBuckets); EvictingQueue window = EvictingQueue.create(windowSize); - for (ReducerTestHelpers.MockBucket mockBucket : mockHisto) { + for (ReducerHelperTests.MockBucket mockBucket : mockHisto) { double metricValue; double[] docValues = mockBucket.docValues; @@ -180,12 +179,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { // otherwise insert a zero instead of the true value metricValue = 0.0; } else { - metricValue = ReducerTestHelpers.calculateMetric(docValues, metric); + metricValue = ReducerHelperTests.calculateMetric(docValues, metric); } } else { // If this isn't a gap, or is a _count, just insert the value - metricValue = target.equals(MetricTarget.VALUE) ? ReducerTestHelpers.calculateMetric(docValues, metric) : mockBucket.count; + metricValue = target.equals(MetricTarget.VALUE) ? ReducerHelperTests.calculateMetric(docValues, metric) : mockBucket.count; } window.offer(metricValue); @@ -336,7 +335,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { List expectedValues = testValues.get(MovAvgType.SIMPLE.toString() + "_" + MetricTarget.VALUE.toString()); Iterator actualIter = buckets.iterator(); - Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); Iterator expectedCountsIter = expectedCounts.iterator(); Iterator expectedValuesIter = expectedValues.iterator(); @@ -344,7 +343,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); Histogram.Bucket actual = actualIter.next(); - ReducerTestHelpers.MockBucket expected = expectedBucketIter.next(); + ReducerHelperTests.MockBucket expected = expectedBucketIter.next(); Double expectedCount = expectedCountsIter.next(); Double expectedValue = expectedValuesIter.next(); @@ -388,7 +387,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { List expectedValues = testValues.get(MovAvgType.LINEAR.toString() + "_" + MetricTarget.VALUE.toString()); Iterator actualIter = buckets.iterator(); - Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); Iterator expectedCountsIter = expectedCounts.iterator(); Iterator expectedValuesIter = expectedValues.iterator(); @@ -396,7 +395,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); Histogram.Bucket actual = actualIter.next(); - ReducerTestHelpers.MockBucket expected = expectedBucketIter.next(); + ReducerHelperTests.MockBucket expected = expectedBucketIter.next(); Double expectedCount = expectedCountsIter.next(); Double expectedValue = expectedValuesIter.next(); @@ -440,7 +439,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { List expectedValues = testValues.get(MovAvgType.SINGLE.toString() + "_" + MetricTarget.VALUE.toString()); Iterator actualIter = buckets.iterator(); - Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); Iterator expectedCountsIter = expectedCounts.iterator(); Iterator expectedValuesIter = expectedValues.iterator(); @@ -448,7 +447,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); Histogram.Bucket actual = actualIter.next(); - ReducerTestHelpers.MockBucket expected = expectedBucketIter.next(); + ReducerHelperTests.MockBucket expected = expectedBucketIter.next(); Double expectedCount = expectedCountsIter.next(); Double expectedValue = expectedValuesIter.next(); @@ -492,7 +491,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { List expectedValues = testValues.get(MovAvgType.DOUBLE.toString() + "_" + MetricTarget.VALUE.toString()); Iterator actualIter = buckets.iterator(); - Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); Iterator expectedCountsIter = expectedCounts.iterator(); Iterator expectedValuesIter = expectedValues.iterator(); @@ -500,7 +499,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); Histogram.Bucket actual = actualIter.next(); - ReducerTestHelpers.MockBucket expected = expectedBucketIter.next(); + ReducerHelperTests.MockBucket expected = expectedBucketIter.next(); Double expectedCount = expectedCountsIter.next(); Double expectedValue = expectedValuesIter.next(); From f7d4baacfb0dc571f2f215a8a41020cc2a4bde50 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 20 Apr 2015 11:31:01 +0200 Subject: [PATCH 119/236] Remove working directory This commit removes the working directory and its associated environment variable "WORK_DIR" --- bin/service.bat | 4 +-- config/elasticsearch.yml | 4 --- docs/reference/setup/as-a-service.asciidoc | 1 - pom.xml | 8 ----- .../org/elasticsearch/bootstrap/Security.java | 12 +------ .../org/elasticsearch/env/Environment.java | 31 ------------------- .../java/org/elasticsearch/node/Node.java | 5 ++- src/packaging/common/env/elasticsearch | 3 -- src/packaging/common/scripts/postrm | 7 ----- src/packaging/deb/init.d/elasticsearch | 7 ++--- .../deb/systemd/elasticsearch.service | 2 -- src/packaging/rpm/init.d/elasticsearch | 7 +---- .../rpm/systemd/elasticsearch.service | 3 +- .../packaging/scripts/30_deb_package.bats | 1 - .../packaging/scripts/40_rpm_package.bats | 1 - .../scripts/packaging_test_utils.bash | 2 -- 16 files changed, 8 insertions(+), 90 deletions(-) diff --git a/bin/service.bat b/bin/service.bat index 282483c3cb8..a20d6e252ad 100644 --- a/bin/service.bat +++ b/bin/service.bat @@ -137,13 +137,11 @@ set JVM_SS=256 if "%DATA_DIR%" == "" set DATA_DIR=%ES_HOME%\data -if "%WORK_DIR%" == "" set WORK_DIR=%ES_HOME% - if "%CONF_DIR%" == "" set CONF_DIR=%ES_HOME%\config if "%CONF_FILE%" == "" set CONF_FILE=%ES_HOME%\config\elasticsearch.yml -set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.config="%CONF_FILE%";-Des.default.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.work="%WORK_DIR%";-Des.default.path.conf="%CONF_DIR%" +set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.config="%CONF_FILE%";-Des.default.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.conf="%CONF_DIR%" set JVM_OPTS=%JAVA_OPTS: =;% diff --git a/config/elasticsearch.yml b/config/elasticsearch.yml index 35383a4c5ac..3384a5ee616 100644 --- a/config/elasticsearch.yml +++ b/config/elasticsearch.yml @@ -154,10 +154,6 @@ # #path.data: /path/to/data1,/path/to/data2 -# Path to temporary files: -# -#path.work: /path/to/work - # Path to log files: # #path.logs: /path/to/logs diff --git a/docs/reference/setup/as-a-service.asciidoc b/docs/reference/setup/as-a-service.asciidoc index c75e409c841..e325c1165f9 100644 --- a/docs/reference/setup/as-a-service.asciidoc +++ b/docs/reference/setup/as-a-service.asciidoc @@ -21,7 +21,6 @@ Each package features a configuration file, which allows you to set the followin `MAX_MAP_COUNT`:: Maximum number of memory map areas a process may have. If you use `mmapfs` as index store type, make sure this is set to a high value. For more information, check the https://github.com/torvalds/linux/blob/master/Documentation/sysctl/vm.txt[linux kernel documentation] about `max_map_count`. This is set via `sysctl` before starting elasticsearch. Defaults to `65535` `LOG_DIR`:: Log directory, defaults to `/var/log/elasticsearch` `DATA_DIR`:: Data directory, defaults to `/var/lib/elasticsearch` -`WORK_DIR`:: Work directory, defaults to `/tmp/elasticsearch` `CONF_DIR`:: Configuration file directory (which needs to include `elasticsearch.yml` and `logging.yml` files), defaults to `/etc/elasticsearch` `CONF_FILE`:: Path to configuration file, defaults to `/etc/elasticsearch/elasticsearch.yml` `ES_JAVA_OPTS`:: Any additional java options you may want to apply. This may be useful, if you need to set the `node.name` property, but do not want to change the `elasticsearch.yml` configuration file, because it is distributed via a provisioning system like puppet or chef. Example: `ES_JAVA_OPTS="-Des.node.name=search-01"` diff --git a/pom.xml b/pom.xml index 3fca8ccce01..1a0213d32d1 100644 --- a/pom.xml +++ b/pom.xml @@ -57,7 +57,6 @@ /var/lib/elasticsearch elasticsearch elasticsearch - /tmp/elasticsearch /var/log/elasticsearch ${packaging.elasticsearch.home.dir}/plugins /var/run/elasticsearch @@ -1210,7 +1209,6 @@ ${packaging.elasticsearch.data.dir} ${packaging.elasticsearch.log.dir} - ${packaging.elasticsearch.work.dir} ${packaging.elasticsearch.plugins.dir} ${packaging.elasticsearch.pid.dir} @@ -1408,12 +1406,6 @@ ${packaging.elasticsearch.user} ${packaging.elasticsearch.group} - - ${packaging.elasticsearch.work.dir} - 755 - ${packaging.elasticsearch.user} - ${packaging.elasticsearch.group} - ${packaging.elasticsearch.plugins.dir} 755 diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 7ac7e3b5e95..afa8362771e 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -20,19 +20,11 @@ package org.elasticsearch.bootstrap; import com.google.common.io.ByteStreams; - import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; import org.elasticsearch.env.Environment; -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; +import java.io.*; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.NoSuchFileException; @@ -83,8 +75,6 @@ class Security { paths.add(environment.configFile()); paths.add(environment.logsFile()); paths.add(environment.pluginsFile()); - paths.add(environment.workFile()); - paths.add(environment.workWithClusterFile()); for (Path path : environment.dataFiles()) { paths.add(path); } diff --git a/src/main/java/org/elasticsearch/env/Environment.java b/src/main/java/org/elasticsearch/env/Environment.java index b19407cf262..cab04792b5d 100644 --- a/src/main/java/org/elasticsearch/env/Environment.java +++ b/src/main/java/org/elasticsearch/env/Environment.java @@ -43,10 +43,6 @@ public class Environment { private final Path homeFile; - private final Path workFile; - - private final Path workWithClusterFile; - private final Path[] dataFiles; private final Path[] dataWithClusterFiles; @@ -97,13 +93,6 @@ public class Environment { pluginsFile = homeFile.resolve("plugins"); } - if (settings.get("path.work") != null) { - workFile = PathUtils.get(cleanPath(settings.get("path.work"))); - } else { - workFile = homeFile.resolve("work"); - } - workWithClusterFile = workFile.resolve(ClusterName.clusterNameFromSettings(settings).value()); - String[] dataPaths = settings.getAsArray("path.data"); if (dataPaths.length > 0) { dataFiles = new Path[dataPaths.length]; @@ -138,26 +127,6 @@ public class Environment { return homeFile; } - /** - * The work location, path to temp files. - * - * Note, currently, we don't use it in ES at all, we should strive to see if we can keep it like that, - * but if we do, we have the infra for it. - */ - public Path workFile() { - return workFile; - } - - /** - * The work location with the cluster name as a sub directory. - * - * Note, currently, we don't use it in ES at all, we should strive to see if we can keep it like that, - * but if we do, we have the infra for it. - */ - public Path workWithClusterFile() { - return workWithClusterFile; - } - /** * The data location. */ diff --git a/src/main/java/org/elasticsearch/node/Node.java b/src/main/java/org/elasticsearch/node/Node.java index bf3a81487b8..b3be8cd5a87 100644 --- a/src/main/java/org/elasticsearch/node/Node.java +++ b/src/main/java/org/elasticsearch/node/Node.java @@ -143,9 +143,8 @@ public class Node implements Releasable { if (logger.isDebugEnabled()) { Environment env = tuple.v2(); - logger.debug("using home [{}], config [{}], data [{}], logs [{}], work [{}], plugins [{}]", - env.homeFile(), env.configFile(), Arrays.toString(env.dataFiles()), env.logsFile(), - env.workFile(), env.pluginsFile()); + logger.debug("using home [{}], config [{}], data [{}], logs [{}], plugins [{}]", + env.homeFile(), env.configFile(), Arrays.toString(env.dataFiles()), env.logsFile(), env.pluginsFile()); } this.pluginsService = new PluginsService(tuple.v1(), tuple.v2()); diff --git a/src/packaging/common/env/elasticsearch b/src/packaging/common/env/elasticsearch index 9b3138bdd71..cdf05bb900a 100644 --- a/src/packaging/common/env/elasticsearch +++ b/src/packaging/common/env/elasticsearch @@ -17,9 +17,6 @@ # Elasticsearch logs directory #LOG_DIR=${packaging.elasticsearch.log.dir} -# Elasticsearch work directory -#WORK_DIR=${packaging.elasticsearch.work.dir} - # Elasticsearch PID directory #PID_DIR=${packaging.elasticsearch.pid.dir} diff --git a/src/packaging/common/scripts/postrm b/src/packaging/common/scripts/postrm index acff96cf8a2..1b44d3a8b38 100644 --- a/src/packaging/common/scripts/postrm +++ b/src/packaging/common/scripts/postrm @@ -54,7 +54,6 @@ esac ES_USER="${packaging.elasticsearch.user}" ES_GROUP="${packaging.elasticsearch.group}" LOG_DIR="${packaging.elasticsearch.log.dir}" -WORK_DIR="${packaging.elasticsearch.work.dir}" PLUGINS_DIR="${packaging.elasticsearch.plugins.dir}" PID_DIR="${packaging.elasticsearch.pid.dir}" DATA_DIR="${packaging.elasticsearch.data.dir}" @@ -89,12 +88,6 @@ if [ "$REMOVE_DIRS" = "true" ]; then echo " OK" fi - if [ -d "$WORK_DIR" ]; then - echo -n "Deleting work directory..." - rm -rf "$WORK_DIR" - echo " OK" - fi - if [ -d "$PLUGINS_DIR" ]; then echo -n "Deleting plugins directory..." rm -rf "$PLUGINS_DIR" diff --git a/src/packaging/deb/init.d/elasticsearch b/src/packaging/deb/init.d/elasticsearch index ce2c81dfb8a..336030310cc 100755 --- a/src/packaging/deb/init.d/elasticsearch +++ b/src/packaging/deb/init.d/elasticsearch @@ -82,9 +82,6 @@ LOG_DIR=/var/log/$NAME # Elasticsearch data directory DATA_DIR=/var/lib/$NAME -# Elasticsearch work directory -WORK_DIR=/tmp/$NAME - # Elasticsearch configuration directory CONF_DIR=/etc/$NAME @@ -107,7 +104,7 @@ fi # Define other required variables PID_FILE=/var/run/$NAME.pid DAEMON=$ES_HOME/bin/elasticsearch -DAEMON_OPTS="-d -p $PID_FILE --default.config=$CONF_FILE --default.path.home=$ES_HOME --default.path.logs=$LOG_DIR --default.path.data=$DATA_DIR --default.path.work=$WORK_DIR --default.path.conf=$CONF_DIR" +DAEMON_OPTS="-d -p $PID_FILE --default.config=$CONF_FILE --default.path.home=$ES_HOME --default.path.logs=$LOG_DIR --default.path.data=$DATA_DIR --default.path.conf=$CONF_DIR" export ES_HEAP_SIZE export ES_HEAP_NEWSIZE @@ -150,7 +147,7 @@ case "$1" in fi # Prepare environment - mkdir -p "$LOG_DIR" "$DATA_DIR" "$WORK_DIR" && chown "$ES_USER":"$ES_GROUP" "$LOG_DIR" "$DATA_DIR" "$WORK_DIR" + mkdir -p "$LOG_DIR" "$DATA_DIR" && chown "$ES_USER":"$ES_GROUP" "$LOG_DIR" "$DATA_DIR" touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE" if [ -n "$MAX_OPEN_FILES" ]; then diff --git a/src/packaging/deb/systemd/elasticsearch.service b/src/packaging/deb/systemd/elasticsearch.service index 3a529989244..1945f9fb12a 100644 --- a/src/packaging/deb/systemd/elasticsearch.service +++ b/src/packaging/deb/systemd/elasticsearch.service @@ -9,7 +9,6 @@ Environment=CONF_FILE=${packaging.elasticsearch.conf.dir}/elasticsearch.yml Environment=ES_HOME=${packaging.elasticsearch.home.dir} Environment=LOG_DIR=${packaging.elasticsearch.log.dir} Environment=DATA_DIR=${packaging.elasticsearch.data.dir} -Environment=WORK_DIR=${packaging.elasticsearch.work.dir} Environment=CONF_DIR=${packaging.elasticsearch.conf.dir} EnvironmentFile=-${packaging.env.file} User=elasticsearch @@ -19,7 +18,6 @@ ExecStart=/usr/share/elasticsearch/bin/elasticsearch \ -Des.default.path.home=$ES_HOME \ -Des.default.path.logs=$LOG_DIR \ -Des.default.path.data=$DATA_DIR \ - -Des.default.path.work=$WORK_DIR \ -Des.default.path.conf=$CONF_DIR # See MAX_OPEN_FILES in sysconfig LimitNOFILE=65535 diff --git a/src/packaging/rpm/init.d/elasticsearch b/src/packaging/rpm/init.d/elasticsearch index c90ec5a1ba3..1b666c64578 100644 --- a/src/packaging/rpm/init.d/elasticsearch +++ b/src/packaging/rpm/init.d/elasticsearch @@ -39,7 +39,6 @@ MAX_OPEN_FILES=${packaging.os.max.open.files} MAX_MAP_COUNT=${packaging.os.max.map.count} LOG_DIR="${packaging.elasticsearch.log.dir}" DATA_DIR="${packaging.elasticsearch.data.dir}" -WORK_DIR="${packaging.elasticsearch.work.dir}" CONF_DIR="${packaging.elasticsearch.conf.dir}" CONF_FILE="${packaging.elasticsearch.conf.dir}/elasticsearch.yml" @@ -97,15 +96,11 @@ start() { if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT fi - if [ -n "$WORK_DIR" ]; then - mkdir -p "$WORK_DIR" - chown "$ES_USER":"$ES_GROUP" "$WORK_DIR" - fi export ES_GC_LOG_FILE echo -n $"Starting $prog: " # if not running, start it up here, usually something like "daemon $exec" - daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.work=$WORK_DIR -Des.default.path.conf=$CONF_DIR + daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.conf=$CONF_DIR retval=$? echo [ $retval -eq 0 ] && touch $lockfile diff --git a/src/packaging/rpm/systemd/elasticsearch.service b/src/packaging/rpm/systemd/elasticsearch.service index 3d2226313bf..f5bb286b8fa 100644 --- a/src/packaging/rpm/systemd/elasticsearch.service +++ b/src/packaging/rpm/systemd/elasticsearch.service @@ -8,13 +8,12 @@ Environment=CONF_FILE=${packaging.elasticsearch.conf.dir}/elasticsearch.yml Environment=ES_HOME=${packaging.elasticsearch.home.dir} Environment=LOG_DIR=${packaging.elasticsearch.log.dir} Environment=DATA_DIR=${packaging.elasticsearch.data.dir} -Environment=WORK_DIR=${packaging.elasticsearch.work.dir} Environment=CONF_DIR=${packaging.elasticsearch.conf.dir} EnvironmentFile=-${packaging.env.file} User=elasticsearch Group=elasticsearch PIDFile=/var/run/elasticsearch/elasticsearch.pid -ExecStart=/usr/share/elasticsearch/bin/elasticsearch -d -p /var/run/elasticsearch/elasticsearch.pid -Des.default.config=$CONF_FILE -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.work=$WORK_DIR -Des.default.path.conf=$CONF_DIR +ExecStart=/usr/share/elasticsearch/bin/elasticsearch -d -p /var/run/elasticsearch/elasticsearch.pid -Des.default.config=$CONF_FILE -Des.default.path.home=$ES_HOME -Des.default.path.logs=$LOG_DIR -Des.default.path.data=$DATA_DIR -Des.default.path.conf=$CONF_DIR # See MAX_OPEN_FILES in sysconfig LimitNOFILE=65535 # See MAX_LOCKED_MEMORY in sysconfig, use "infinity" when MAX_LOCKED_MEMORY=unlimited and using bootstrap.mlockall: true diff --git a/src/test/resources/packaging/scripts/30_deb_package.bats b/src/test/resources/packaging/scripts/30_deb_package.bats index 7130d275d8c..13f2d3275ea 100644 --- a/src/test/resources/packaging/scripts/30_deb_package.bats +++ b/src/test/resources/packaging/scripts/30_deb_package.bats @@ -128,7 +128,6 @@ setup() { # Those directories are deleted when removing the package # see postrm file assert_file_not_exist "/var/log/elasticsearch" - assert_file_not_exist "/tmp/elasticsearch" assert_file_not_exist "/usr/share/elasticsearch/plugins" assert_file_not_exist "/var/run/elasticsearch" diff --git a/src/test/resources/packaging/scripts/40_rpm_package.bats b/src/test/resources/packaging/scripts/40_rpm_package.bats index 6be482867f9..af9692fa903 100644 --- a/src/test/resources/packaging/scripts/40_rpm_package.bats +++ b/src/test/resources/packaging/scripts/40_rpm_package.bats @@ -126,7 +126,6 @@ setup() { # Those directories are deleted when removing the package # see postrm file assert_file_not_exist "/var/log/elasticsearch" - assert_file_not_exist "/tmp/elasticsearch" assert_file_not_exist "/usr/share/elasticsearch/plugins" assert_file_not_exist "/var/run/elasticsearch" diff --git a/src/test/resources/packaging/scripts/packaging_test_utils.bash b/src/test/resources/packaging/scripts/packaging_test_utils.bash index 6ef1874c4ab..0033e240ee5 100644 --- a/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -192,8 +192,6 @@ verify_package_installation() { assert_file "/var/lib/elasticsearch" d elasticsearch 755 # Log dir assert_file "/var/log/elasticsearch" d elasticsearch 755 - # Work dir - assert_file "/tmp/elasticsearch" d elasticsearch 755 # Plugins dir assert_file "/usr/share/elasticsearch/plugins" d elasticsearch 755 # PID dir From 2579cc31b1c2432c748ac8333dac97c33a67c9d0 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Sat, 25 Apr 2015 16:49:49 +0200 Subject: [PATCH 120/236] Docs: Note that include_in_parent/root does not apply to geo-shape fields Closes #10653 --- docs/reference/mapping/types/nested-type.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/reference/mapping/types/nested-type.asciidoc b/docs/reference/mapping/types/nested-type.asciidoc index e2845bddf93..21b1110faf3 100644 --- a/docs/reference/mapping/types/nested-type.asciidoc +++ b/docs/reference/mapping/types/nested-type.asciidoc @@ -139,6 +139,10 @@ Nested fields may contain other nested fields. The `include_in_parent` object refers to the direct parent of the field, while the `include_in_root` parameter refers only to the topmost ``root'' object or document. +NOTE: The `include_in_parent` and `include_in_root` options do not apply +to <>, which are only ever +indexed inside the nested document. + Nested docs will automatically use the root doc `_all` field only. .Internal Implementation From 86c2c202fe558ec688e989e5507703a4b930ce35 Mon Sep 17 00:00:00 2001 From: Adam Mollenkopf Date: Mon, 20 Apr 2015 10:45:23 -0700 Subject: [PATCH 121/236] Docs: Update extendedstats-aggregation.asciidoc code snippet should show ExtendedStats, not Stats Closes #10683 --- .../aggregations/metrics/extendedstats-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc index f19b69ff89f..20d8db036d9 100644 --- a/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc +++ b/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc @@ -31,7 +31,7 @@ import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStat [source,java] -------------------------------------------------- // sr is here your SearchResponse object -Stats agg = sr.getAggregations().get("agg"); +ExtendedStats agg = sr.getAggregations().get("agg"); double min = agg.getMin(); double max = agg.getMax(); double avg = agg.getAvg(); From cf177c32d4e8ec8f984dd25da046b6bd1612a19f Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Sat, 25 Apr 2015 19:27:55 +0200 Subject: [PATCH 122/236] Docs: Fixed pattern-capture token filter example Closes #10690 --- .../tokenfilters/pattern-capture-tokenfilter.asciidoc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc index 4091296a76e..7c919b56b98 100644 --- a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc @@ -82,7 +82,7 @@ curl -XPUT localhost:9200/test/ -d ' "type" : "pattern_capture", "preserve_original" : 1, "patterns" : [ - "(\\w+)", + "([^@]+)", "(\\p{L}+)", "(\\d+)", "@(.+)" @@ -108,9 +108,10 @@ When the above analyzer is used on an email address like: john-smith_123@foo-bar.com -------------------------------------------------- -it would produce the following tokens: [ `john-smith_123`, -`foo-bar.com`, `john`, `smith_123`, `smith`, `123`, `foo`, -`foo-bar.com`, `bar`, `com` ] +it would produce the following tokens: + + john-smith_123@foo-bar.com, john-smith_123, + john, smith, 123, foo-bar.com, foo, bar, com Multiple patterns are required to allow overlapping captures, but also means that patterns are less dense and easier to understand. From dce920b75f523fbec3ffe729225c0e388b95809a Mon Sep 17 00:00:00 2001 From: Mehdi Mollaverdi Date: Tue, 21 Apr 2015 09:47:36 +1000 Subject: [PATCH 123/236] Docs: The name of scroll ID attribute in the response is "_scroll_id" rather than "scroll_id" Closes #10691 --- docs/reference/search/request/scroll.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 8f33d1a6bd8..b96033719d2 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -49,7 +49,7 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d ' ' -------------------------------------------------- -The result from the above request includes a `scroll_id`, which should +The result from the above request includes a `_scroll_id`, which should be passed to the `scroll` API in order to retrieve the next batch of results. @@ -84,7 +84,7 @@ curl -XGET 'localhost:9200/_search/scroll?scroll=1m' -d 'c2Nhbjs2OzM0NDg1ODpzRlB -------------------------------------------------- IMPORTANT: The initial search request and each subsequent scroll request -returns a new `scroll_id` -- only the most recent `scroll_id` should be +returns a new `_scroll_id` -- only the most recent `_scroll_id` should be used. NOTE: If the request specifies aggregations, only the initial search response From 768e1c20120b921090a7e9bc439aa42c49999bbb Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sun, 26 Apr 2015 13:44:29 +0200 Subject: [PATCH 124/236] [TEST] Fix test to take care of refresh failures after successful commit --- .../index/engine/InternalEngineTests.java | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 5323e36b8c3..3673b7f889d 100644 --- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.engine; +import com.carrotsearch.randomizedtesting.annotations.Repeat; +import com.carrotsearch.randomizedtesting.annotations.Seed; import com.google.common.collect.ImmutableMap; import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; @@ -1760,6 +1762,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { assertThat(topDocs.totalHits, equalTo(numDocs)); } engine.close(); + boolean recoveredButFailed = false; final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class); if (directory != null) { // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents @@ -1777,7 +1780,16 @@ public class InternalEngineTests extends ElasticsearchTestCase { started = true; break; } catch (EngineCreationFailureException ex) { - // skip + // sometimes we fail after we committed the recovered docs during the finaly refresh call + // that means hte index is consistent and recovered so we can't assert on the num recovered ops below. + try (IndexReader reader = DirectoryReader.open(directory.getDelegate())) { + if (reader.numDocs() == numDocs) { + recoveredButFailed = true; + break; + } else { + // skip - we just failed + } + } } } @@ -1796,8 +1808,10 @@ public class InternalEngineTests extends ElasticsearchTestCase { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); assertThat(topDocs.totalHits, equalTo(numDocs)); } - TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer(); - assertEquals(numDocs, parser.recoveredOps.get()); + if (recoveredButFailed == false) { + TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer(); + assertEquals(numDocs, parser.recoveredOps.get()); + } } @Test From 2373c2b43c2258cc22f753877c54c3ac97dcd3c3 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sun, 26 Apr 2015 14:03:44 +0200 Subject: [PATCH 125/236] [TRANSLOG] Fail #snapshot if translog is closed If the translog is closed while a snapshot opertion is in progress we must fail the snapshot operation otherwise we end up in an endless loop. Closes #10807 --- .../index/translog/fs/FsTranslog.java | 24 ++++++++++++------- .../translog/AbstractSimpleTranslogTests.java | 11 +++++++++ 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java index 2479c1a0de2..757655fa248 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java @@ -50,6 +50,7 @@ import java.nio.file.*; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; @@ -93,7 +94,7 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog private final ApplySettings applySettings = new ApplySettings(); - + private final AtomicBoolean closed = new AtomicBoolean(false); @Inject public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, @@ -140,14 +141,16 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog @Override public void close() throws IOException { - if (indexSettingsService != null) { - indexSettingsService.removeListener(applySettings); - } - rwl.writeLock().lock(); - try { - IOUtils.close(this.trans, this.current); - } finally { - rwl.writeLock().unlock(); + if (closed.compareAndSet(false, true)) { + if (indexSettingsService != null) { + indexSettingsService.removeListener(applySettings); + } + rwl.writeLock().lock(); + try { + IOUtils.close(this.trans, this.current); + } finally { + rwl.writeLock().unlock(); + } } } @@ -355,6 +358,9 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog @Override public FsChannelSnapshot snapshot() throws TranslogException { while (true) { + if (closed.get()) { + throw new TranslogException(shardId, "translog is already closed"); + } FsChannelSnapshot snapshot = current.snapshot(); if (snapshot != null) { return snapshot; diff --git a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java index e24e992c5a3..9c1f628a71b 100644 --- a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java @@ -332,6 +332,17 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase snapshot.close(); } + public void testSnapshotOnClosedTranslog() throws IOException { + assertTrue(Files.exists(translogDir.resolve("translog-1"))); + translog.add(new Translog.Create("test", "1", new byte[]{1})); + translog.close(); + try { + Translog.Snapshot snapshot = translog.snapshot(); + } catch (TranslogException ex) { + assertEquals(ex.getMessage(), "translog is already closed"); + } + } + @Test public void deleteOnRollover() throws IOException { translog.add(new Translog.Create("test", "1", new byte[]{1})); From f87fb958302ac54541b49461d65edbfc32c76b9b Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sun, 26 Apr 2015 14:26:22 +0200 Subject: [PATCH 126/236] [TEST] fail if exception is not thrown --- .../index/translog/AbstractSimpleTranslogTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java index 9c1f628a71b..9b45c4de10b 100644 --- a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java @@ -338,6 +338,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase translog.close(); try { Translog.Snapshot snapshot = translog.snapshot(); + fail("translog is closed"); } catch (TranslogException ex) { assertEquals(ex.getMessage(), "translog is already closed"); } From 2c510f0689b34e71db91f14fba3f03463c379e24 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sun, 26 Apr 2015 15:13:06 +0200 Subject: [PATCH 127/236] Allow double-closing of FSTranslog the translog might be reused across engines which is currently a problem in the design such that we have to allow calls to `close` more than once. This moves the closed check for snapshot on the actual file to exit the loop. Relates to #10807 --- .../translog/fs/BufferingFsTranslogFile.java | 5 ++++ .../index/translog/fs/FsTranslog.java | 28 +++++++++---------- .../index/translog/fs/FsTranslogFile.java | 2 ++ .../translog/fs/SimpleFsTranslogFile.java | 6 ++++ .../translog/AbstractSimpleTranslogTests.java | 2 +- 5 files changed, 27 insertions(+), 16 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java index 6fb3988b829..ebd5e125353 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java @@ -239,6 +239,11 @@ public class BufferingFsTranslogFile implements FsTranslogFile { return channelReference.file(); } + @Override + public boolean closed() { + return this.closed.get(); + } + class WrapperOutputStream extends OutputStream { @Override diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java index 757655fa248..58ce5ab5807 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java @@ -94,8 +94,6 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog private final ApplySettings applySettings = new ApplySettings(); - private final AtomicBoolean closed = new AtomicBoolean(false); - @Inject public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, BigArrays bigArrays, ShardPath shardPath) throws IOException { @@ -141,16 +139,14 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog @Override public void close() throws IOException { - if (closed.compareAndSet(false, true)) { - if (indexSettingsService != null) { - indexSettingsService.removeListener(applySettings); - } - rwl.writeLock().lock(); - try { - IOUtils.close(this.trans, this.current); - } finally { - rwl.writeLock().unlock(); - } + if (indexSettingsService != null) { + indexSettingsService.removeListener(applySettings); + } + rwl.writeLock().lock(); + try { + IOUtils.close(this.trans, this.current); + } finally { + rwl.writeLock().unlock(); } } @@ -358,13 +354,15 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog @Override public FsChannelSnapshot snapshot() throws TranslogException { while (true) { - if (closed.get()) { - throw new TranslogException(shardId, "translog is already closed"); - } + FsTranslogFile current = this.current; FsChannelSnapshot snapshot = current.snapshot(); if (snapshot != null) { return snapshot; } + if (current.closed() && this.current == current) { + // check if we are closed and if we are still current - then this translog is closed and we can exit + throw new TranslogException(shardId, "current translog is already closed"); + } Thread.yield(); } } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java index 7cfe8744660..a6539847c45 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java @@ -82,4 +82,6 @@ public interface FsTranslogFile extends Closeable { TranslogStream getStream(); public Path getPath(); + + public boolean closed(); } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java index d4d508b83e2..199847d0779 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java @@ -182,4 +182,10 @@ public class SimpleFsTranslogFile implements FsTranslogFile { public void updateBufferSize(int bufferSize) throws TranslogException { // nothing to do here... } + + @Override + public boolean closed() { + return this.closed.get(); + } + } diff --git a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java index 9b45c4de10b..1a5aa984455 100644 --- a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java @@ -340,7 +340,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase Translog.Snapshot snapshot = translog.snapshot(); fail("translog is closed"); } catch (TranslogException ex) { - assertEquals(ex.getMessage(), "translog is already closed"); + assertEquals(ex.getMessage(), "current translog is already closed"); } } From 7de8b7008e352f14056348988ac2f7f472d70dda Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Sun, 26 Apr 2015 15:51:52 +0200 Subject: [PATCH 128/236] Docs: Tidied docs for field-stats --- docs/reference/search/field-stats.asciidoc | 59 +++++++++++++++++----- 1 file changed, 46 insertions(+), 13 deletions(-) diff --git a/docs/reference/search/field-stats.asciidoc b/docs/reference/search/field-stats.asciidoc index 65dc2c2a6bf..7cac3e7e5c3 100644 --- a/docs/reference/search/field-stats.asciidoc +++ b/docs/reference/search/field-stats.asciidoc @@ -25,27 +25,60 @@ curl -XGET "http://localhost:9200/index1,index2/_field_stats?fields=rating" Supported request options: -* `fields` - A list of fields to compute stats for. -* `level` - Defines if field stats should be returned on a per index level or on a cluster wide level. Valid values are - `indices` and `cluster`. Defaults to `cluster`. +[horizontal] +`fields`:: + +A list of fields to compute stats for. + +`level`:: + +Defines if field stats should be returned on a per index level or on a cluster +wide level. Valid values are `indices` and `cluster`. Defaults to `cluster`. === Field statistics The field stats api is supported on string based, number based and date based fields and can return the following statistics per field: -* `max_doc` - The total number of documents. -* `doc_count` - The number of documents that have at least one term for this field, or -1 if this measurement isn't available on one or more shards. -* `density` - The percentage of documents that have at least one value for this field. This is a derived statistic and is based on the `max_doc` and `doc_count`. -* `sum_doc_freq` - The sum of each term's document frequency in this field, or -1 if this measurement isn't available on one or more shards. - Document frequency is the number of documents containing a particular term. -* `sum_total_term_freq` - The sum of the term frequencies of all terms in this field across all documents, or -1 if this measurement isn't available on one or more shards. - Term frequency is the total number of occurrences of a term in a particular document and field. -* `min_value` - The lowest value in the field represented in a displayable form. -* `max_value` - The highest value in the field represented in a displayable form. +[horizontal] +`max_doc`:: -Note that for all the mentioned statistics, documents marked as deleted aren't taken into account. The documents marked +The total number of documents. + +`doc_count`:: + +The number of documents that have at least one term for this field, or -1 if +this measurement isn't available on one or more shards. + +`density`:: + +The percentage of documents that have at least one value for this field. This +is a derived statistic and is based on the `max_doc` and `doc_count`. + +`sum_doc_freq`:: + +The sum of each term's document frequency in this field, or -1 if this +measurement isn't available on one or more shards. Document frequency is the +number of documents containing a particular term. + +`sum_total_term_freq`:: + +The sum of the term frequencies of all terms in this field across all +documents, or `-1` if this measurement isn't available on one or more shards. +Term frequency is the total number of occurrences of a term in a particular +document and field. + +`min_value`:: + +The lowest value in the field represented in a displayable form. + +`max_value`:: + +The highest value in the field represented in a displayable form. + +NOTE: For all the mentioned statistics, documents marked as deleted aren't taken into account. The documents marked as deleted are are only taken into account when the segments these documents reside on are merged away. +[float] === Example [source,js] From 690c16e81a0f688d72dfd76af329582a4a7d400e Mon Sep 17 00:00:00 2001 From: Mark Mulder Date: Thu, 23 Apr 2015 14:09:03 +0200 Subject: [PATCH 129/236] Docs: Fix minor spelling mistakes in Match Query doc Closes #10751 --- docs/reference/query-dsl/queries/match-query.asciidoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference/query-dsl/queries/match-query.asciidoc b/docs/reference/query-dsl/queries/match-query.asciidoc index e2a8178135c..2ebb8e934e2 100644 --- a/docs/reference/query-dsl/queries/match-query.asciidoc +++ b/docs/reference/query-dsl/queries/match-query.asciidoc @@ -87,10 +87,10 @@ if one of the low frequency (below the cutoff) terms in the case of an operator match. This query allows handling `stopwords` dynamically at runtime, is domain -independent and doesn't require on a stopword file. It prevent scoring / +independent and doesn't require a stopword file. It prevents scoring / iterating high frequency terms and only takes the terms into account if a -more significant / lower frequency terms match a document. Yet, if all of -the query terms are above the given `cutoff_frequency` the query is +more significant / lower frequency term matches a document. Yet, if all +of the query terms are above the given `cutoff_frequency` the query is automatically transformed into a pure conjunction (`and`) query to ensure fast execution. @@ -98,7 +98,7 @@ The `cutoff_frequency` can either be relative to the total number of documents if in the range `[0..1)` or absolute if greater or equal to `1.0`. -Here is an example showing a query composed of stopwords exclusivly: +Here is an example showing a query composed of stopwords exclusively: [source,js] -------------------------------------------------- From f1a0e2216ae2d52cf68a9a551cae8a37a93ef8c4 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Sun, 26 Apr 2015 17:30:38 +0200 Subject: [PATCH 130/236] Docs: Mentioned script_id and script_file parameters across all aggs Closes #10760 --- docs/reference/search/aggregations.asciidoc | 4 +++- .../aggregations/bucket/range-aggregation.asciidoc | 2 ++ .../aggregations/bucket/terms-aggregation.asciidoc | 3 +++ .../aggregations/metrics/avg-aggregation.asciidoc | 2 ++ .../metrics/cardinality-aggregation.asciidoc | 3 +++ .../metrics/extendedstats-aggregation.asciidoc | 2 ++ .../aggregations/metrics/max-aggregation.asciidoc | 1 + .../aggregations/metrics/min-aggregation.asciidoc | 1 + .../metrics/percentile-aggregation.asciidoc | 2 ++ .../metrics/percentile-rank-aggregation.asciidoc | 12 +++++++----- .../aggregations/metrics/stats-aggregation.asciidoc | 2 ++ .../aggregations/metrics/sum-aggregation.asciidoc | 2 ++ .../metrics/valuecount-aggregation.asciidoc | 3 +++ 13 files changed, 33 insertions(+), 6 deletions(-) diff --git a/docs/reference/search/aggregations.asciidoc b/docs/reference/search/aggregations.asciidoc index e7803a27e9c..6733177b3a6 100644 --- a/docs/reference/search/aggregations.asciidoc +++ b/docs/reference/search/aggregations.asciidoc @@ -68,6 +68,8 @@ Some aggregations work on values extracted from the aggregated documents. Typica a specific document field which is set using the `field` key for the aggregations. It is also possible to define a <> which will generate the values (per document). +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + When both `field` and `script` settings are configured for the aggregation, the script will be treated as a `value script`. While normal scripts are evaluated on a document level (i.e. the script has access to all the data associated with the document), value scripts are evaluated on the *value* level. In this mode, the values are extracted @@ -129,7 +131,7 @@ See <> for more details. [float] === Returning only aggregation results -There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by +There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by setting `size=0`. For example: [source,js] diff --git a/docs/reference/search/aggregations/bucket/range-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/range-aggregation.asciidoc index d2590579f87..f7bfcab0644 100644 --- a/docs/reference/search/aggregations/bucket/range-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/range-aggregation.asciidoc @@ -128,6 +128,8 @@ It is also possible to customize the key for each range: ==== Script +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + [source,js] -------------------------------------------------- { diff --git a/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc index d494c2ce271..12f04c0a2bb 100644 --- a/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc @@ -441,6 +441,9 @@ Generating the terms using a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + + ==== Value Script [source,js] diff --git a/docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc index 2e4d8747d03..3f029984ba8 100644 --- a/docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc @@ -47,6 +47,8 @@ Computing the average grade based on a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + ===== Value Script It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use value script to get the new average: diff --git a/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc index 645cb46e631..d4d3ee67015 100644 --- a/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc @@ -152,3 +152,6 @@ however since hashes need to be computed on the fly. } } -------------------------------------------------- + +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + diff --git a/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc index 73340af8e67..07d25fac65d 100644 --- a/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc @@ -91,6 +91,8 @@ Computing the grades stats based on a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + ===== Value Script It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use value script to get the new stats: diff --git a/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc index 0a2481e6d2d..facefc1201d 100644 --- a/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc @@ -44,6 +44,7 @@ Computing the max price value across all document, this time using a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. ==== Value Script diff --git a/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc index 57dbceaa585..1383cc08322 100644 --- a/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc @@ -44,6 +44,7 @@ Computing the min price value across all document, this time using a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. ==== Value Script diff --git a/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc index bc5d6b3b560..253663a9cfe 100644 --- a/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc @@ -113,6 +113,8 @@ a script to convert them on-the-fly: script to generate values which percentiles are calculated on <2> Scripting supports parameterized input just like any other script +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + [[search-aggregations-metrics-percentile-aggregation-approximation]] ==== Percentiles are (usually) approximate diff --git a/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc index 0fd51f723b8..d327fc66303 100644 --- a/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc @@ -8,16 +8,16 @@ be generated by a provided script. [NOTE] ================================================== -Please see <> -and <> for advice +Please see <> +and <> for advice regarding approximation and memory use of the percentile ranks aggregation ================================================== -Percentile rank show the percentage of observed values which are below certain +Percentile rank show the percentage of observed values which are below certain value. For example, if a value is greater than or equal to 95% of the observed values it is said to be at the 95th percentile rank. -Assume your data consists of website load times. You may have a service agreement that +Assume your data consists of website load times. You may have a service agreement that 95% of page loads completely within 15ms and 99% of page loads complete within 30ms. Let's look at a range of percentiles representing load time: @@ -55,7 +55,7 @@ The response will look like this: } -------------------------------------------------- -From this information you can determine you are hitting the 99% load time target but not quite +From this information you can determine you are hitting the 99% load time target but not quite hitting the 95% load time target @@ -84,3 +84,5 @@ a script to convert them on-the-fly: <1> The `field` parameter is replaced with a `script` parameter, which uses the script to generate values which percentile ranks are calculated on <2> Scripting supports parameterized input just like any other script + +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. diff --git a/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc index 486182c9ef6..7fbdecd6011 100644 --- a/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc @@ -53,6 +53,8 @@ Computing the grades stats based on a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + ===== Value Script It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new stats: diff --git a/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc index 0c136490662..8857ff306ee 100644 --- a/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc @@ -55,6 +55,8 @@ Computing the intraday return based on a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + ===== Value Script Computing the sum of squares over all stock tick changes: diff --git a/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc index 900b8bca83d..ed5e23ee339 100644 --- a/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc @@ -34,6 +34,7 @@ The name of the aggregation (`grades_count` above) also serves as the key by whi retrieved from the returned response. ==== Script + Counting the values generated by a script: [source,js] @@ -46,3 +47,5 @@ Counting the values generated by a script: } } -------------------------------------------------- + +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. From 37ed61807f5310ccd8a7e72ec7b5a117148f3acc Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Sun, 26 Apr 2015 18:49:15 +0200 Subject: [PATCH 131/236] Docs: Updated the experimental annotations in the docs as follows: * Removed the docs for `index.compound_format` and `index.compound_on_flush` - these are expert settings which should probably be removed (see https://github.com/elastic/elasticsearch/issues/10778) * Removed the docs for `index.index_concurrency` - another expert setting * Labelled the segments verbose output as experimental * Marked the `compression`, `precision_threshold` and `rehash` options as experimental in the cardinality and percentile aggs * Improved the experimental text on `significant_terms`, `execution_hint` in the terms agg, and `terminate_after` param on count and search * Removed the experimental flag on the `geobounds` agg * Marked the settings in the `merge` and `store` modules as experimental, rather than the modules themselves Closes #10782 --- docs/reference/index-modules.asciidoc | 25 +------------------ docs/reference/index-modules/merge.asciidoc | 8 +----- docs/reference/index-modules/store.asciidoc | 5 ++-- docs/reference/indices/segments.asciidoc | 4 +-- .../indices/update-settings.asciidoc | 15 ++--------- docs/reference/modules/scripting.asciidoc | 5 +--- .../significantterms-aggregation.asciidoc | 2 +- .../bucket/terms-aggregation.asciidoc | 2 +- .../metrics/cardinality-aggregation.asciidoc | 8 +++--- .../metrics/geobounds-aggregation.asciidoc | 2 -- .../metrics/percentile-aggregation.asciidoc | 2 +- docs/reference/search/count.asciidoc | 3 ++- docs/reference/search/request-body.asciidoc | 3 ++- docs/reference/search/uri-request.asciidoc | 3 ++- 14 files changed, 22 insertions(+), 65 deletions(-) diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index eb85f8d231e..f74eda35bed 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -15,29 +15,6 @@ all the relevant modules settings can be provided when creating an index There are specific index level settings that are not associated with any specific module. These include: -[[index-compound-format]]`index.compound_format`:: - - experimental[] - Should the compound file format be used (boolean setting). - The compound format was created to reduce the number of open - file handles when using file based storage. However, by default it is set - to `false` as the non-compound format gives better performance. It is important - that OS is configured to give Elasticsearch ``enough'' file handles. - See <>. -+ -Alternatively, `compound_format` can be set to a number between `0` and -`1`, where `0` means `false`, `1` means `true` and a number inbetween -represents a percentage: if the merged segment is less than this -percentage of the total index, then it is written in compound format, -otherwise it is written in non-compound format. - -[[index-compound-on-flush]]`index.compound_on_flush`:: - - experimental[] - Should a new segment (create by indexing, not by merging) be written - in compound format or non-compound format? Defaults to `true`. - This is a dynamic setting. - `index.refresh_interval`:: A time setting controlling how often the refresh operation will be executed. Defaults to `1s`. Can be set to `-1` @@ -59,7 +36,7 @@ otherwise it is written in non-compound format. When `checksum`, check for physical corruption. When `true`, check for both physical and logical corruption. This is much more expensive in terms of CPU and memory usage. - When `fix`, check for both physical and logical corruption, and segments + When `fix`, check for both physical and logical corruption, and segments that were reported as corrupted will be automatically removed. Default value is `false`, which performs no checks. diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc index 3ad2dd5c0a8..036d2b8d16f 100644 --- a/docs/reference/index-modules/merge.asciidoc +++ b/docs/reference/index-modules/merge.asciidoc @@ -1,7 +1,7 @@ [[index-modules-merge]] == Merge -experimental[] +experimental[All of the settings exposed in the `merge` module are expert only and may be removed in the future] A shard in elasticsearch is a Lucene index, and a Lucene index is broken down into segments. Segments are internal storage elements in the index @@ -72,12 +72,6 @@ This policy has the following settings: Higher values favor selecting merges that reclaim deletions. A value of `0.0` means deletions don't impact merge selection. Defaults to `2.0`. -`index.compound_format`:: - - Should the index be stored in compound format or not. Defaults to `false`. - See <> in - <>. - For normal merging, this policy first computes a "budget" of how many segments are allowed to be in the index. If the index is over-budget, then the policy sorts segments by decreasing size (proportionally considering percent diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index b34536db811..12fcf0c3509 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -1,8 +1,6 @@ [[index-modules-store]] == Store -experimental[] - The store module allows you to control how index data is stored. The index can either be stored in-memory (no persistence) or on-disk @@ -20,6 +18,7 @@ heap space* using the "Memory" (see below) storage type. It translates to the fact that there is no need for extra large JVM heaps (with their own consequences) for storing the index in memory. +experimental[All of the settings exposed in the `store` module are expert only and may be removed in the future] [float] [[file-system]] @@ -28,7 +27,7 @@ own consequences) for storing the index in memory. File system based storage is the default storage used. There are different implementations or _storage types_. The best one for the operating environment will be automatically chosen: `mmapfs` on -Windows 64bit, `simplefs` on Windows 32bit, and `default` +Windows 64bit, `simplefs` on Windows 32bit, and `default` (hybrid `niofs` and `mmapfs`) for the rest. This can be overridden for all indices by adding this to the diff --git a/docs/reference/indices/segments.asciidoc b/docs/reference/indices/segments.asciidoc index f432697f54d..f2d51068a64 100644 --- a/docs/reference/indices/segments.asciidoc +++ b/docs/reference/indices/segments.asciidoc @@ -78,7 +78,7 @@ compound:: Whether the segment is stored in a compound file. When true, this To add additional information that can be used for debugging, use the `verbose` flag. -NOTE: The format of additional verbose information is experimental and can change at any time. +experimental[The format of the additional verbose information is experimental and can change at any time] [source,js] -------------------------------------------------- @@ -108,7 +108,7 @@ Response: }, ... ] - + } ... } diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index 3810bfbdca0..d4888103eb2 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -12,7 +12,7 @@ of the request includes the updated settings, for example: { "index" : { "number_of_replicas" : 4 - } + } } -------------------------------------------------- @@ -25,7 +25,7 @@ curl -XPUT 'localhost:9200/my_index/_settings' -d ' { "index" : { "number_of_replicas" : 4 - } + } }' -------------------------------------------------- @@ -61,9 +61,6 @@ settings API: `index.refresh_interval`:: The async refresh interval of a shard. -`index.index_concurrency`:: - experimental[] Defaults to `8`. - `index.translog.flush_threshold_ops`:: When to flush based on operations. @@ -151,14 +148,6 @@ settings API: `index.translog.fs.type`:: experimental[] Either `simple` or `buffered` (default). -`index.compound_format`:: - experimental[] See <> in - <>. - -`index.compound_on_flush`:: - experimental[] See <> in - <>. - <>:: All the settings for slow log. diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index 26b50c97ac1..2725e05a6e0 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -424,10 +424,7 @@ automatically loaded. [float] === Lucene Expressions Scripts -[WARNING] -======================== -This feature is *experimental* and subject to change in future versions. -======================== +experimental[The Lucene expressions module is undergoing significant development and the exposed functionality is likely to change in the future] Lucene's expressions module provides a mechanism to compile a `javascript` expression to bytecode. This allows very fast execution, diff --git a/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc index 9cf729765f9..1e329db1df4 100644 --- a/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc @@ -3,7 +3,7 @@ An aggregation that returns interesting or unusual occurrences of terms in a set. -experimental[] +experimental[The `significant_terms` aggregation can be very heavy when run on large indices. Work is in progress to provide more lightweight sampling techniques. As a result, the API for this feature may change in non-backwards compatible ways] .Example use cases: * Suggesting "H5N1" when users search for "bird flu" in text diff --git a/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc index 12f04c0a2bb..58a6ca2449c 100644 --- a/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc @@ -613,7 +613,7 @@ this would typically be too costly in terms of RAM. [[search-aggregations-bucket-terms-aggregation-execution-hint]] ==== Execution hint -experimental[] +experimental[The automated execution optimization is experimental, so this parameter is provided temporarily as a way to override the default behaviour] There are different mechanisms by which terms aggregations can be executed: diff --git a/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc index d4d3ee67015..07943a06c2d 100644 --- a/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc @@ -23,10 +23,10 @@ match a query: ==== Precision control -experimental[] - This aggregation also supports the `precision_threshold` and `rehash` options: +experimental[The `precision_threshold` and `rehash` options are specific to the current internal implementation of the `cardinality` agg, which may change in the future] + [source,js] -------------------------------------------------- { @@ -42,14 +42,14 @@ This aggregation also supports the `precision_threshold` and `rehash` options: } -------------------------------------------------- -<1> experimental[] The `precision_threshold` options allows to trade memory for accuracy, and +<1> The `precision_threshold` options allows to trade memory for accuracy, and defines a unique count below which counts are expected to be close to accurate. Above this value, counts might become a bit more fuzzy. The maximum supported value is 40000, thresholds above this number will have the same effect as a threshold of 40000. Default value depends on the number of parent aggregations that multiple create buckets (such as terms or histograms). -<2> experimental[] If you computed a hash on client-side, stored it into your documents and want +<2> If you computed a hash on client-side, stored it into your documents and want Elasticsearch to use them to compute counts using this hash function without rehashing values, it is possible to specify `rehash: false`. Default value is `true`. Please note that the hash must be indexed as a long when `rehash` is diff --git a/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc index 548436b93b6..ade59477ee3 100644 --- a/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-metrics-geobounds-aggregation]] === Geo Bounds Aggregation -experimental[] - A metric aggregation that computes the bounding box containing all geo_point values for a field. diff --git a/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc index 253663a9cfe..6bd10110077 100644 --- a/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc @@ -155,7 +155,7 @@ it. It would not be the case on more skewed distributions. [[search-aggregations-metrics-percentile-aggregation-compression]] ==== Compression -experimental[] +experimental[The `compression` parameter is specific to the current internal implementation of percentiles, and may change in the future] Approximate algorithms must balance memory utilization with estimation accuracy. This balance can be controlled using a `compression` parameter: diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index a04c3950987..258e3c94b40 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -64,7 +64,8 @@ query. |default_operator |The default operator to be used, can be `AND` or `OR`. Defaults to `OR`. -|terminate_after |experimental[] The maximum count for each shard, upon +|terminate_after |experimental[The API for this feature may change in the future] +The maximum count for each shard, upon reaching which the query execution will terminate early. If set, the response will have a boolean field `terminated_early` to indicate whether the query execution has actually terminated_early. diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 4dbb7e006dc..fadfbb191f5 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -77,7 +77,8 @@ And here is a sample response: `terminate_after`:: - experimental[] The maximum number of documents to collect for each shard, + experimental[The API for this feature may change in the future] + The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. If set, the response will have a boolean field `terminated_early` to indicate whether the query execution has actually terminated_early. Defaults to no diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index b1509a3da68..a367dc679db 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -82,7 +82,8 @@ scores and return them as part of each hit. within the specified time value and bail with the hits accumulated up to that point when expired. Defaults to no timeout. -|`terminate_after` |experimental[] The maximum number of documents to collect for +|`terminate_after` |experimental[The API for this feature may change in the future] +The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. If set, the response will have a boolean field `terminated_early` to indicate whether the query execution has actually terminated_early. From f64739788baefb52d960cca0b6b8d26ec49966a5 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Sun, 26 Apr 2015 16:54:17 +0200 Subject: [PATCH 132/236] Build: Update package repositories when creating a release In order to automatically sign and and upload our debian and RPM packages, this commit incorporates signing into the build process and adds the necessary steps to the release process. In order to do this the pom.xml has been adapted and the RPM and jdeb maven plugins have been updated, so the packages are signed on build. However the repositories need to signed as well. Syncing the repos requires downloading the current repo, adding the new packages and syncing it back. The following environment variables are now required as part of the build * GPG_KEY_ID - the key ID of the key used for signing * GPG_PASSPHRASE - your GPG passphrase * S3_BUCKET_SYNC_TO: S3 bucket to sync new repo into The following environment variables are optional * S3_BUCKET_SYNC_FROM: S3 bucket to get existing packages from * GPG_KEYRING - home of gnupg, defaults to ~/.gnupg The following command line tools are needed * createrepo (creates RPM repositories) * expect (used by the maven rpm plugin) * apt-ftparchive (creates DEB repositories) * gpg (signs packages and repo files) * s3cmd (syncing between the different S3 buckets) The current approach would also work for users who want to run their own repositories, all they need to change are a couple of environment variables. Minor implementation detail: Right now the branch name is used as version for the repositories (like 1.4/1.5/1.6) - if we ever change our branch naming scheme, the script needs to be fixed. --- dev-tools/build_release.py | 38 ++++- dev-tools/build_repositories.sh | 247 ++++++++++++++++++++++++++++++++ pom.xml | 13 +- 3 files changed, 294 insertions(+), 4 deletions(-) create mode 100755 dev-tools/build_repositories.sh diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index 345aaa5d5dd..02620c5ed34 100644 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -57,7 +57,9 @@ Once it's done it will print all the remaining steps. - Python 3k for script execution - Boto for S3 Upload ($ apt-get install python-boto) - RPM for RPM building ($ apt-get install rpm) - - S3 keys exported via ENV Variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) + - S3 keys exported via ENV variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) + - GPG data exported via ENV variables (GPG_KEY_ID, GPG_PASSPHRASE, optionally GPG_KEYRING) + - S3 target repository via ENV variables (S3_BUCKET_SYNC_TO, optionally S3_BUCKET_SYNC_FROM) """ env = os.environ @@ -246,10 +248,13 @@ def build_release(run_tests=False, dry_run=True, cpus=1, bwc_version=None): print('Running Backwards compatibility tests against version [%s]' % (bwc_version)) run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version) run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"') - run_mvn('clean %s -DskipTests' % (target)) + gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true' % (target, env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE')) + if env.get('GPG_KEYRING'): + gpg_args += ' -Dgpg.keyring="%s"' % env.get('GPG_KEYRING') + run_mvn('clean %s -DskipTests %s' % (target, gpg_args)) success = False try: - run_mvn('-DskipTests rpm:rpm') + run_mvn('-DskipTests rpm:rpm %s' % (gpg_args)) success = True finally: if not success: @@ -502,6 +507,14 @@ def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=Tru # requires boto to be installed but it is not available on python3k yet so we use a dedicated tool run('python %s/upload-s3.py --file %s ' % (location, os.path.abspath(artifact))) +def publish_repositories(version, dry_run=True): + if dry_run: + print('Skipping package repository update') + else: + print('Triggering repository update - calling dev-tools/build_repositories.sh %s' % version) + # src_branch is a version like 1.5/1.6/2.0/etc.. so we can use this + run('dev-tools/build_repositories.sh %s', src_branch) + def print_sonatype_notice(): settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml') if os.path.isfile(settings): @@ -536,6 +549,16 @@ def check_s3_credentials(): if not env.get('AWS_ACCESS_KEY_ID', None) or not env.get('AWS_SECRET_ACCESS_KEY', None): raise RuntimeError('Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables please export in order to upload to S3') +def check_gpg_credentials(): + if not env.get('GPG_KEY_ID', None) or not env.get('GPG_PASSPHRASE', None): + raise RuntimeError('Could not find "GPG_KEY_ID" / "GPG_PASSPHRASE" in the env variables please export in order to sign the packages (also make sure that GPG_KEYRING is set when not in ~/.gnupg)') + +def check_command_exists(name, cmd): + try: + subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + raise RuntimeError('Could not run command %s - please make sure it is installed' % (name)) + VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java' POM_FILE = 'pom.xml' @@ -628,9 +651,16 @@ if __name__ == '__main__': if os.path.exists(LOG): raise RuntimeError('please remove old release log %s first' % LOG) + + check_gpg_credentials() + check_command_exists('gpg', 'gpg --version') + check_command_exists('expect', 'expect -v') if not dry_run: check_s3_credentials() + check_command_exists('createrepo', 'createrepo --version') + check_command_exists('s3cmd', 's3cmd --version') + check_command_exists('apt-ftparchive', 'apt-ftparchive --version') print('WARNING: dryrun is set to "false" - this will push and publish the release') input('Press Enter to continue...') @@ -687,6 +717,8 @@ if __name__ == '__main__': merge_tag_push(remote, src_branch, release_version, dry_run) print(' publish artifacts to S3 -- dry_run: %s' % dry_run) publish_artifacts(artifacts_and_checksum, dry_run=dry_run) + print(' Updating package repositories -- dry_run: %s' % dry_run) + publish_repositories(src_branch, dry_run=dry_run) cherry_pick_command = '.' if version_head_hash: cherry_pick_command = ' and cherry-pick the documentation changes: \'git cherry-pick %s\' to the development branch' % (version_head_hash) diff --git a/dev-tools/build_repositories.sh b/dev-tools/build_repositories.sh new file mode 100755 index 00000000000..a7c7dae9311 --- /dev/null +++ b/dev-tools/build_repositories.sh @@ -0,0 +1,247 @@ +#!/bin/bash + +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + + +# This tool uploads the debian and RPM packages to the specified S3 buckets +# The packages get signed as well +# A requirement is the sync of the existing repository + +set -e + +################### +## environment variables +## +## required +## +## GPG_PASSPHRASE: Passphrase of your GPG key +## GPG_KEY_ID: Key id of your GPG key +## AWS_ACCESS_KEY_ID: AWS access key id +## AWS_SECRET_ACCESS_KEY: AWS secret access key +## S3_BUCKET_SYNC_TO Bucket to write packages to, defaults to packages.elasticsearch.org/elasticsearch +## +## +## optional +## +## S3_BUCKET_SYNC_FROM Bucket to read packages from, defaults to packages.elasticsearch.org/elasticsearch +## KEEP_DIRECTORIES Allows to keep all the generated directory structures for debugging +## GPG_KEYRING Configure GPG keyring home, defaults to ~/.gnupg/ +## +################### + + + +################### +## configuration +################### + +# No trailing slashes! +if [ -z $S3_BUCKET_SYNC_FROM ] ; then + S3_BUCKET_SYNC_FROM="packages.elasticsearch.org/elasticsearch" +fi +if [ ! -z $GPG_KEYRING ] ; then + GPG_HOMEDIR="--homedir ${GPG_KEYRING}" +fi + +################### +## parameters +################### + +# Must be major and minor version, i.e. 1.5 instead of 1.5.0 +version=$1 + +################### +## prerequisites +################### + +if [ "$#" != "1" ] || [ "x$1" == "x-h" ] || [ "x$1" == "x--help" ] ; then + echo "Usage: $0 version" + echo + echo " version: The elasticsearch major and minor version, i.e. 1.5" + exit +fi + +echo "Checking for correct environment" + +error="" + +if [ -z "$GPG_PASSPHRASE" ] ; then + echo "Environment variable GPG_PASSPHRASE is not set" + error="true" +fi + +if [ -z "$S3_BUCKET_SYNC_TO" ] ; then + echo "Environment variable S3_BUCKET_SYNC_TO is not set" + error="true" +fi + +if [ -z "$GPG_KEY_ID" ] ; then + echo "Environment variable GPG_KEY_ID is not set" + error="true" +fi + +if [ -z "$AWS_ACCESS_KEY_ID" ] ; then + echo "Environment variable AWS_ACCESS_KEY_ID is not set" + error="true" +fi + +if [ -z "$AWS_SECRET_ACCESS_KEY" ] ; then + echo "Environment variable AWS_SECRET_ACCESS_KEY is not set" + error="true" +fi + +if [ "x$error" == "xtrue" ] ; then + echo "Please set all of the above environment variables first. Exiting..." + exit +fi + +echo "Checking for available command line tools:" + +check_for_command() { + echo -n " $1" + if [ -z "`which $1`" ]; then + echo "NO" + error="true" + else + echo "ok" + fi +} + +error="" +check_for_command "createrepo" +check_for_command "s3cmd" +check_for_command "apt-ftparchive" +check_for_command "gpg" +check_for_command "expect" # needed for the RPM plugin + +if [ "x$error" == "xtrue" ] ; then + echo "Please install all of the above tools first. Exiting..." + exit +fi + +################### +## setup +################### +tempdir=`mktemp -d /tmp/elasticsearch-repo.XXXX` +mkdir -p $tempdir + +# create custom s3cmd conf, in case s3cmd does not support --aws-secret-key like on ubuntu +( cat < $tempdir/.s3cmd +s3cmd="s3cmd -c $tempdir/.s3cmd" + +################### +## RPM +################### + +centosdir=$tempdir/repository/elasticsearch/$version/centos +mkdir -p $centosdir + +echo "RPM: Syncing repository for version $version into $centosdir" +$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/$version/centos/ $centosdir + +rpm=target/rpm/elasticsearch/RPMS/noarch/elasticsearch*.rpm +echo "RPM: Copying $rpm into $centosdor" +cp $rpm $centosdir + +echo "RPM: Running createrepo in $centosdir" +createrepo --update $centosdir + +echo "RPM: Resigning repomd.xml" +rm -f $centosdir/repodata/repomd.xml.asc +gpg $GPG_HOMEDIR --passphrase "$GPG_PASSPHRASE" -a -b -o $centosdir/repodata/repomd.xml.asc $centosdir/repodata/repomd.xml + +echo "RPM: Syncing back repository for $version into S3 bucket $S3_BUCKET_SYNC_TO" +$s3cmd sync -P $centosdir/ s3://$S3_BUCKET_SYNC_TO/elasticsearch/$version/centos/ + +################### +## DEB +################### + +deb=target/releases/elasticsearch*.deb + +echo "DEB: Creating repository directory structure" + +if [ -z $tempdir ] ; then + echo "DEB: Could not create tempdir directory name, exiting" + exit +fi + +debbasedir=$tempdir/repository/elasticsearch/$version/debian +mkdir -p $debbasedir + + +echo "DEB: Syncing debian repository of version $version to $debbasedir" +# sync all former versions into directory +$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/$version/debian/ $debbasedir + +# create directories in case of a new release so that syncing did not create this structure +mkdir -p $debbasedir/dists/stable/main/binary-all +mkdir -p $debbasedir/dists/stable/main/binary-i386 +mkdir -p $debbasedir/dists/stable/main/binary-amd64 +mkdir -p $debbasedir/.cache +mkdir -p $debbasedir/pool/main + +# create elasticsearch-1.4.conf +( cat < $tempdir/elasticsearch-$version-releases.conf + +# create packages file using apt-ftparchive +mkdir -p $debbasedir/dists/stable/main/binary-all +mkdir -p $debbasedir/pool/main/e/elasticsearch + +echo "DEB: Copying $deb to elasticsearch repo directory" +cp $deb $debbasedir/pool/main/e/elasticsearch + +echo "DEB: Creating new Packages and Release files" +cd $debbasedir +apt-ftparchive packages pool > dists/stable/main/binary-all/Packages +cat dists/stable/main/binary-all/Packages | gzip -9 > dists/stable/main/binary-all/Packages.gz +cp dists/stable/main/binary-all/Packages* dists/stable/main/binary-i386/ +cp dists/stable/main/binary-all/Packages* dists/stable/main/binary-amd64/ +apt-ftparchive -c $tempdir/elasticsearch-$version-releases.conf release $debbasedir/dists/stable/ > $debbasedir/dists/stable/Release + +echo "DEB: Signing newly created release file at $debbasedir/dists/stable/Release.gpg" +rm -f $debbasedir/dists/stable/Release.gpg +gpg $GPG_HOMEDIR --passphrase "$GPG_PASSPHRASE" -a -b -o $debbasedir/dists/stable/Release.gpg $debbasedir/dists/stable/Release + +# upload to S3 +echo "DEB: Uploading to S3 bucket to $S3_BUCKET_SYNC_TO" +$s3cmd sync -P $debbasedir/ s3://$S3_BUCKET_SYNC_TO/elasticsearch/$version/debian/ + +# back to original dir +cd - + +# delete directories unless configured otherwise +if [ -z $KEEP_DIRECTORIES ] ; then + echo "Done! Deleting repository directories at $tempdir" + rm -fr $tempdir +else + echo "Done! Keeping repository directories at $tempdir" +fi diff --git a/pom.xml b/pom.xml index 1a0213d32d1..86dcb460825 100644 --- a/pom.xml +++ b/pom.xml @@ -60,7 +60,8 @@ /var/log/elasticsearch ${packaging.elasticsearch.home.dir}/plugins /var/run/elasticsearch - + false + dpkg-sig @@ -1081,6 +1082,11 @@ jdeb + ${deb.sign} + ${gpg.keyring} + ${gpg.key} + ${gpg.passphrase} + ${deb.sign.method} @@ -1245,6 +1251,11 @@ 755 root root + ${gpg.key} + ${gpg.keyring} + + ${gpg.passphrase} + From 9e81e4c09bdf2162dce758c993638e96d28e3b7e Mon Sep 17 00:00:00 2001 From: Christine Date: Mon, 20 Apr 2015 12:41:21 -0400 Subject: [PATCH 133/236] Docs: Update bool-filter.asciidoc from, to deprecated in favour of gt, lt Closes #10682 --- docs/reference/query-dsl/filters/bool-filter.asciidoc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference/query-dsl/filters/bool-filter.asciidoc b/docs/reference/query-dsl/filters/bool-filter.asciidoc index c81350b032a..5f6b86021ab 100644 --- a/docs/reference/query-dsl/filters/bool-filter.asciidoc +++ b/docs/reference/query-dsl/filters/bool-filter.asciidoc @@ -12,8 +12,8 @@ accept a filter. { "filtered" : { "query" : { - "queryString" : { - "default_field" : "message", + "queryString" : { + "default_field" : "message", "query" : "elasticsearch" } }, @@ -24,7 +24,7 @@ accept a filter. }, "must_not" : { "range" : { - "age" : { "from" : 10, "to" : 20 } + "age" : { "gte" : 10, "lt" : 20 } } }, "should" : [ @@ -38,6 +38,6 @@ accept a filter. } } } -} +} -------------------------------------------------- From 84636557e14327448a80d70eb11662265d1e4eeb Mon Sep 17 00:00:00 2001 From: navins Date: Sun, 26 Apr 2015 00:14:57 +0800 Subject: [PATCH 134/236] Docs: correct three mis-match of brackets Closes #10806 --- docs/reference/query-dsl/queries/template-query.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/query-dsl/queries/template-query.asciidoc b/docs/reference/query-dsl/queries/template-query.asciidoc index 1b22b90066e..0eb43b3642c 100644 --- a/docs/reference/query-dsl/queries/template-query.asciidoc +++ b/docs/reference/query-dsl/queries/template-query.asciidoc @@ -12,7 +12,7 @@ GET /_search { "query": { "template": { - "query": { "match": { "text": "{{query_string}}" }}}, + "query": { "match": { "text": "{{query_string}}" }}, "params" : { "query_string" : "all about search" } @@ -45,7 +45,7 @@ GET /_search { "query": { "template": { - "query": "{ \"match\": { \"text\": \"{{query_string}}\" }}}", <1> + "query": "{ \"match\": { \"text\": \"{{query_string}}\" }}", <1> "params" : { "query_string" : "all about search" } @@ -85,7 +85,7 @@ Alternatively, you can register a query template in the special `.scripts` index ------------------------------------------ PUT /_search/template/my_template { - "template": { "match": { "text": "{{query_string}}" }}}, + "template": { "match": { "text": "{{query_string}}" }}, } ------------------------------------------ From fe331b57b795e287829ab1fe2a281622c459d62a Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Sun, 26 Apr 2015 21:08:57 +0200 Subject: [PATCH 135/236] [TEST] Don't use extraFS files as legacy files in tests --- src/test/java/org/elasticsearch/index/store/StoreTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/elasticsearch/index/store/StoreTest.java b/src/test/java/org/elasticsearch/index/store/StoreTest.java index 3a60b62ea3a..c4e7ae9a7ea 100644 --- a/src/test/java/org/elasticsearch/index/store/StoreTest.java +++ b/src/test/java/org/elasticsearch/index/store/StoreTest.java @@ -270,7 +270,7 @@ public class StoreTest extends ElasticsearchTestCase { Store.LegacyChecksums checksums = new Store.LegacyChecksums(); Map legacyMeta = new HashMap<>(); for (String file : store.directory().listAll()) { - if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { + if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) { continue; } BytesRef hash = new BytesRef(); From 91e2bb193ce0655ed4e80fede321abcba281caac Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Sun, 26 Apr 2015 21:31:36 +0200 Subject: [PATCH 136/236] TransportClient: Ensure netty I/O thread is not blocked Whenever a transport client executes a request, it uses a built-in RetryListener which tries to execute the request on another node. However, if a connection error occurs, the onFailure() callback of the listener is triggered, the netty I/O thread might still be used to whatever failure has been added. This commit offloads the onFailure handling to the generic thread pool. --- .../TransportClientNodesService.java | 40 +++++++++++++++---- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index cb237087886..ced572ecd15 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -39,9 +39,11 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -198,7 +200,7 @@ public class TransportClientNodesService extends AbstractComponent { ImmutableList nodes = this.nodes; ensureNodesAreAvailable(nodes); int index = getNodeNumber(); - RetryListener retryListener = new RetryListener<>(callback, listener, nodes, index); + RetryListener retryListener = new RetryListener<>(callback, listener, nodes, index, threadPool, logger); DiscoveryNode node = nodes.get((index) % nodes.size()); try { callback.doWithNode(node, retryListener); @@ -212,15 +214,20 @@ public class TransportClientNodesService extends AbstractComponent { private final NodeListenerCallback callback; private final ActionListener listener; private final ImmutableList nodes; + private final ESLogger logger; private final int index; + private ThreadPool threadPool; private volatile int i; - public RetryListener(NodeListenerCallback callback, ActionListener listener, ImmutableList nodes, int index) { + public RetryListener(NodeListenerCallback callback, ActionListener listener, ImmutableList nodes, + int index, ThreadPool threadPool, ESLogger logger) { this.callback = callback; this.listener = listener; this.nodes = nodes; this.index = index; + this.threadPool = threadPool; + this.logger = logger; } @Override @@ -233,19 +240,38 @@ public class TransportClientNodesService extends AbstractComponent { if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) { int i = ++this.i; if (i >= nodes.size()) { - listener.onFailure(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e)); + runFailureInListenerThreadPool(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e)); } else { try { callback.doWithNode(nodes.get((index + i) % nodes.size()), this); - } catch(Throwable t) { - //this exception can't come from the TransportService as it doesn't throw exceptions at all - listener.onFailure(t); + } catch(final Throwable t) { + // this exception can't come from the TransportService as it doesn't throw exceptions at all + runFailureInListenerThreadPool(t); } } } else { - listener.onFailure(e); + runFailureInListenerThreadPool(e); } } + + // need to ensure to not block the netty I/O thread, in case of retry due to the node sampling + private void runFailureInListenerThreadPool(final Throwable t) { + threadPool.executor(ThreadPool.Names.LISTENER).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + listener.onFailure(t); + } + + @Override + public void onFailure(Throwable t) { + if (logger.isDebugEnabled()) { + logger.debug("Could not execute failure listener: [{}]", t, t.getMessage()); + } else { + logger.error("Could not execute failure listener: [{}]", t.getMessage()); + } + } + }); + } } public void close() { From d746e14cf3f6d9b25a2c6018eedca249555cf44b Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Sun, 26 Apr 2015 21:04:41 -0400 Subject: [PATCH 137/236] Add support for cluster state diffs Adds support for calculating and sending diffs instead of full cluster state of the most frequently changing elements - cluster state, meta data and routing table. Closes #6295 --- .../reroute/ClusterRerouteResponse.java | 2 +- .../cluster/state/ClusterStateResponse.java | 2 +- .../state/TransportClusterStateAction.java | 15 +- .../indices/alias/get/GetAliasesResponse.java | 2 +- .../indices/create/CreateIndexRequest.java | 10 +- .../admin/indices/get/GetIndexResponse.java | 6 +- .../mapping/get/GetMappingsResponse.java | 4 +- .../get/GetIndexTemplatesResponse.java | 2 +- .../template/put/PutIndexTemplateRequest.java | 10 +- .../cluster/AbstractDiffable.java | 108 +++ .../elasticsearch/cluster/ClusterState.java | 272 ++++++-- .../java/org/elasticsearch/cluster/Diff.java | 42 ++ .../org/elasticsearch/cluster/Diffable.java | 42 ++ .../elasticsearch/cluster/DiffableUtils.java | 283 ++++++++ ...ompatibleClusterStateVersionException.java | 35 + .../cluster/block/ClusterBlocks.java | 75 ++- .../cluster/metadata/AliasMetaData.java | 85 ++- .../cluster/metadata/IndexMetaData.java | 234 ++++--- .../metadata/IndexTemplateMetaData.java | 105 +-- .../cluster/metadata/MappingMetaData.java | 48 +- .../cluster/metadata/MetaData.java | 266 +++++--- .../metadata/MetaDataCreateIndexService.java | 2 +- .../metadata/RepositoriesMetaData.java | 236 +++---- .../cluster/metadata/RepositoryMetaData.java | 21 + .../cluster/metadata/RestoreMetaData.java | 220 +++--- .../cluster/metadata/SnapshotMetaData.java | 223 ++++--- .../cluster/node/DiscoveryNodes.java | 73 +- .../cluster/routing/IndexRoutingTable.java | 72 +- .../routing/IndexShardRoutingTable.java | 22 + .../cluster/routing/RoutingTable.java | 90 ++- .../service/InternalClusterService.java | 8 +- .../ClusterDynamicSettingsModule.java | 1 + .../common/io/stream/StreamableReader.java | 30 + .../common/io/stream/Writeable.java | 30 + .../elasticsearch/discovery/Discovery.java | 3 +- .../discovery/DiscoveryService.java | 5 +- .../discovery/DiscoverySettings.java | 13 + .../discovery/local/LocalDiscovery.java | 46 +- .../discovery/zen/ZenDiscovery.java | 8 +- .../publish/PublishClusterStateAction.java | 194 ++++-- .../org/elasticsearch/gateway/Gateway.java | 2 +- .../gateway/LocalAllocateDangledIndices.java | 2 +- .../TransportNodesListGatewayMetaState.java | 2 +- .../get/RestGetRepositoriesAction.java | 2 +- .../indices/get/RestGetIndicesAction.java | 2 +- .../warmer/get/RestGetWarmerAction.java | 2 +- .../search/warmer/IndexWarmersMetaData.java | 318 +++++---- .../ClusterStateDiffPublishingTests.java | 625 ++++++++++++++++++ .../cluster/ClusterStateDiffTests.java | 534 +++++++++++++++ .../ClusterSerializationTests.java | 2 +- .../cluster/serialization/DiffableTests.java | 127 ++++ .../common/xcontent/XContentTestUtils.java | 100 +++ .../discovery/ZenUnicastDiscoveryTests.java | 1 + .../discovery/zen/ZenDiscoveryTests.java | 10 +- .../timestamp/TimestampMappingTests.java | 12 +- .../store/IndicesStoreIntegrationTests.java | 7 + .../template/SimpleIndexTemplateTests.java | 1 + .../DedicatedClusterSnapshotRestoreTests.java | 218 +++--- .../test/ElasticsearchIntegrationTest.java | 38 +- .../test/ElasticsearchTestCase.java | 14 + 60 files changed, 3827 insertions(+), 1137 deletions(-) create mode 100644 src/main/java/org/elasticsearch/cluster/AbstractDiffable.java create mode 100644 src/main/java/org/elasticsearch/cluster/Diff.java create mode 100644 src/main/java/org/elasticsearch/cluster/Diffable.java create mode 100644 src/main/java/org/elasticsearch/cluster/DiffableUtils.java create mode 100644 src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java create mode 100644 src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java create mode 100644 src/main/java/org/elasticsearch/common/io/stream/Writeable.java create mode 100644 src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java create mode 100644 src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java create mode 100644 src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java create mode 100644 src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 79b31f620d5..28f9cb1db90 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -68,7 +68,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - ClusterState.Builder.writeTo(state, out); + state.writeTo(out); writeAcknowledged(out); RoutingExplanations.writeTo(explanations, out); } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index 861a84a9e71..e9aa9b723fa 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -62,6 +62,6 @@ public class ClusterStateResponse extends ActionResponse { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); clusterName.writeTo(out); - ClusterState.Builder.writeTo(clusterState, out); + clusterState.writeTo(out); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index fc1db98c35e..150a15eacfd 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.state; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; @@ -29,7 +28,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData.Custom; @@ -39,11 +37,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.List; - -import static com.google.common.collect.Lists.newArrayList; -import static org.elasticsearch.cluster.metadata.MetaData.lookupFactorySafe; - /** * */ @@ -84,6 +77,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); builder.version(currentState.version()); + builder.uuid(currentState.uuid()); if (request.nodes()) { builder.nodes(currentState.nodes()); } @@ -122,10 +116,9 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio } // Filter our metadata that shouldn't be returned by API - for(ObjectCursor type : currentState.metaData().customs().keys()) { - Custom.Factory factory = lookupFactorySafe(type.value); - if(!factory.context().contains(MetaData.XContentContext.API)) { - mdBuilder.removeCustom(type.value); + for(ObjectObjectCursor custom : currentState.metaData().customs()) { + if(!custom.value.context().contains(MetaData.XContentContext.API)) { + mdBuilder.removeCustom(custom.key); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 765a9395afc..106e864a367 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -74,7 +74,7 @@ public class GetAliasesResponse extends ActionResponse { out.writeString(entry.key); out.writeVInt(entry.value.size()); for (AliasMetaData aliasMetaData : entry.value) { - AliasMetaData.Builder.writeTo(aliasMetaData, out); + aliasMetaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 67a23d9675b..c2d379d71d0 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -405,11 +405,11 @@ public class CreateIndexRequest extends AcknowledgedRequest aliases((Map) entry.getValue()); } else { // maybe custom? - IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name); - if (factory != null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); + if (proto != null) { found = true; try { - customs.put(name, factory.fromMap((Map) entry.getValue())); + customs.put(name, proto.fromMap((Map) entry.getValue())); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]"); } @@ -457,7 +457,7 @@ public class CreateIndexRequest extends AcknowledgedRequest int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in); + IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); customs.put(type, customIndexMetaData); } int aliasesSize = in.readVInt(); @@ -481,7 +481,7 @@ public class CreateIndexRequest extends AcknowledgedRequest out.writeVInt(customs.size()); for (Map.Entry entry : customs.entrySet()) { out.writeString(entry.getKey()); - IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out); + entry.getValue().writeTo(out); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 35e6cfa4804..7080a694a11 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -134,7 +134,7 @@ public class GetIndexResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder mappingEntryBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - mappingEntryBuilder.put(in.readString(), MappingMetaData.readFrom(in)); + mappingEntryBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); } mappingsMapBuilder.put(key, mappingEntryBuilder.build()); } @@ -181,7 +181,7 @@ public class GetIndexResponse extends ActionResponse { out.writeVInt(indexEntry.value.size()); for (ObjectObjectCursor mappingEntry : indexEntry.value) { out.writeString(mappingEntry.key); - MappingMetaData.writeTo(mappingEntry.value, out); + mappingEntry.value.writeTo(out); } } out.writeVInt(aliases.size()); @@ -189,7 +189,7 @@ public class GetIndexResponse extends ActionResponse { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (AliasMetaData aliasEntry : indexEntry.value) { - AliasMetaData.Builder.writeTo(aliasEntry, out); + aliasEntry.writeTo(out); } } out.writeVInt(settings.size()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index b27577f8da3..30e9e24c493 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -59,7 +59,7 @@ public class GetMappingsResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder typeMapBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - typeMapBuilder.put(in.readString(), MappingMetaData.readFrom(in)); + typeMapBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); } indexMapBuilder.put(key, typeMapBuilder.build()); } @@ -75,7 +75,7 @@ public class GetMappingsResponse extends ActionResponse { out.writeVInt(indexEntry.value.size()); for (ObjectObjectCursor typeEntry : indexEntry.value) { out.writeString(typeEntry.key); - MappingMetaData.writeTo(typeEntry.value, out); + typeEntry.value.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index 56de19872f2..2ce6d8d2c1a 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -60,7 +60,7 @@ public class GetIndexTemplatesResponse extends ActionResponse { super.writeTo(out); out.writeVInt(indexTemplates.size()); for (IndexTemplateMetaData indexTemplate : indexTemplates) { - IndexTemplateMetaData.Builder.writeTo(indexTemplate, out); + indexTemplate.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index b728abf934e..608b7fa82b2 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -293,10 +293,10 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest) entry.getValue()); } else { // maybe custom? - IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name); - if (factory != null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); + if (proto != null) { try { - customs.put(name, factory.fromMap((Map) entry.getValue())); + customs.put(name, proto.fromMap((Map) entry.getValue())); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]"); } @@ -441,7 +441,7 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest entry : customs.entrySet()) { out.writeString(entry.getKey()); - IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out); + entry.getValue().writeTo(out); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { diff --git a/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java b/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java new file mode 100644 index 00000000000..4e6da2bd569 --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamableReader; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Abstract diffable object with simple diffs implementation that sends the entire object if object has changed or + * nothing is object remained the same. + */ +public abstract class AbstractDiffable> implements Diffable { + + @Override + public Diff diff(T previousState) { + if (this.get().equals(previousState)) { + return new CompleteDiff<>(); + } else { + return new CompleteDiff<>(get()); + } + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new CompleteDiff<>(this, in); + } + + public static > Diff readDiffFrom(StreamableReader reader, StreamInput in) throws IOException { + return new CompleteDiff(reader, in); + } + + private static class CompleteDiff> implements Diff { + + @Nullable + private final T part; + + /** + * Creates simple diff with changes + */ + public CompleteDiff(T part) { + this.part = part; + } + + /** + * Creates simple diff without changes + */ + public CompleteDiff() { + this.part = null; + } + + /** + * Read simple diff from the stream + */ + public CompleteDiff(StreamableReader reader, StreamInput in) throws IOException { + if (in.readBoolean()) { + this.part = reader.readFrom(in); + } else { + this.part = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (part != null) { + out.writeBoolean(true); + part.writeTo(out); + } else { + out.writeBoolean(false); + } + } + + @Override + public T apply(T part) { + if (this.part != null) { + return this.part; + } else { + return part; + } + } + } + + @SuppressWarnings("unchecked") + public T get() { + return (T) this; + } +} + diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java index ef4d67740dc..b092a121c07 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -56,7 +57,9 @@ import java.util.Map; /** * */ -public class ClusterState implements ToXContent { +public class ClusterState implements ToXContent, Diffable { + + public static final ClusterState PROTO = builder(ClusterName.DEFAULT).build(); public static enum ClusterStateStatus { UNKNOWN((byte) 0), @@ -75,47 +78,43 @@ public class ClusterState implements ToXContent { } } - public interface Custom { + public interface Custom extends Diffable, ToXContent { - interface Factory { - - String type(); - - T readFrom(StreamInput in) throws IOException; - - void writeTo(T customState, StreamOutput out) throws IOException; - - void toXContent(T customState, XContentBuilder builder, ToXContent.Params params); - } + String type(); } - private final static Map customFactories = new HashMap<>(); + private final static Map customPrototypes = new HashMap<>(); /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static Custom.Factory lookupFactory(String type) { - return customFactories.get(type); + public static T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom state factory registered for type [" + type + "]"); + public static T lookupPrototypeSafe(String type) throws ElasticsearchIllegalArgumentException { + @SuppressWarnings("unchecked") + T proto = (T)customPrototypes.get(type); + if (proto == null) { + throw new ElasticsearchIllegalArgumentException("No custom state prototype registered for type [" + type + "]"); } - return factory; + return proto; } + public static final String UNKNOWN_UUID = "_na_"; public static final long UNKNOWN_VERSION = -1; private final long version; + private final String uuid; + private final RoutingTable routingTable; private final DiscoveryNodes nodes; @@ -128,17 +127,20 @@ public class ClusterState implements ToXContent { private final ClusterName clusterName; + private final boolean wasReadFromDiff; + // built on demand private volatile RoutingNodes routingNodes; private volatile ClusterStateStatus status; - public ClusterState(long version, ClusterState state) { - this(state.clusterName, version, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs()); + public ClusterState(long version, String uuid, ClusterState state) { + this(state.clusterName, version, uuid, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false); } - public ClusterState(ClusterName clusterName, long version, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs) { + public ClusterState(ClusterName clusterName, long version, String uuid, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs, boolean wasReadFromDiff) { this.version = version; + this.uuid = uuid; this.clusterName = clusterName; this.metaData = metaData; this.routingTable = routingTable; @@ -146,6 +148,7 @@ public class ClusterState implements ToXContent { this.blocks = blocks; this.customs = customs; this.status = ClusterStateStatus.UNKNOWN; + this.wasReadFromDiff = wasReadFromDiff; } public ClusterStateStatus status() { @@ -165,6 +168,14 @@ public class ClusterState implements ToXContent { return version(); } + /** + * This uuid is automatically generated for for each version of cluster state. It is used to make sure that + * we are applying diffs to the right previous state. + */ + public String uuid() { + return this.uuid; + } + public DiscoveryNodes nodes() { return this.nodes; } @@ -217,6 +228,11 @@ public class ClusterState implements ToXContent { return this.clusterName; } + // Used for testing and logging to determine how this cluster state was send over the wire + boolean wasReadFromDiff() { + return wasReadFromDiff; + } + /** * Returns a built (on demand) routing nodes view of the routing table. NOTE, the routing nodes * are mutable, use them just for read operations @@ -232,6 +248,8 @@ public class ClusterState implements ToXContent { public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("version: ").append(version).append("\n"); + sb.append("uuid: ").append(uuid).append("\n"); + sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); sb.append("meta data version: ").append(metaData.version()).append("\n"); sb.append(nodes().prettyPrint()); sb.append(routingTable().prettyPrint()); @@ -303,14 +321,13 @@ public class ClusterState implements ToXContent { } } - - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { EnumSet metrics = Metric.parseString(params.param("metric", "_all"), true); if (metrics.contains(Metric.VERSION)) { builder.field("version", version); + builder.field("uuid", uuid); } if (metrics.contains(Metric.MASTER_NODE)) { @@ -435,7 +452,7 @@ public class ClusterState implements ToXContent { for (ObjectObjectCursor cursor : metaData.customs()) { builder.startObject(cursor.key); - MetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -474,7 +491,7 @@ public class ClusterState implements ToXContent { builder.startObject("nodes"); for (RoutingNode routingNode : readOnlyRoutingNodes()) { - builder.startArray(routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE); + builder.startArray(routingNode.nodeId() == null ? "null" : routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE); for (ShardRouting shardRouting : routingNode) { shardRouting.toXContent(builder, params); } @@ -487,7 +504,7 @@ public class ClusterState implements ToXContent { if (metrics.contains(Metric.CUSTOMS)) { for (ObjectObjectCursor cursor : customs) { builder.startObject(cursor.key); - lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } } @@ -507,21 +524,25 @@ public class ClusterState implements ToXContent { private final ClusterName clusterName; private long version = 0; + private String uuid = UNKNOWN_UUID; private MetaData metaData = MetaData.EMPTY_META_DATA; private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE; private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES; private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; private final ImmutableOpenMap.Builder customs; + private boolean fromDiff; public Builder(ClusterState state) { this.clusterName = state.clusterName; this.version = state.version(); + this.uuid = state.uuid(); this.nodes = state.nodes(); this.routingTable = state.routingTable(); this.metaData = state.metaData(); this.blocks = state.blocks(); this.customs = ImmutableOpenMap.builder(state.customs()); + this.fromDiff = false; } public Builder(ClusterName clusterName) { @@ -575,6 +596,17 @@ public class ClusterState implements ToXContent { return this; } + public Builder incrementVersion() { + this.version = version + 1; + this.uuid = UNKNOWN_UUID; + return this; + } + + public Builder uuid(String uuid) { + this.uuid = uuid; + return this; + } + public Custom getCustom(String type) { return customs.get(type); } @@ -589,13 +621,26 @@ public class ClusterState implements ToXContent { return this; } + public Builder customs(ImmutableOpenMap customs) { + this.customs.putAll(customs); + return this; + } + + public Builder fromDiff(boolean fromDiff) { + this.fromDiff = fromDiff; + return this; + } + public ClusterState build() { - return new ClusterState(clusterName, version, metaData, routingTable, nodes, blocks, customs.build()); + if (UNKNOWN_UUID.equals(uuid)) { + uuid = Strings.randomBase64UUID(); + } + return new ClusterState(clusterName, version, uuid, metaData, routingTable, nodes, blocks, customs.build(), fromDiff); } public static byte[] toBytes(ClusterState state) throws IOException { BytesStreamOutput os = new BytesStreamOutput(); - writeTo(state, os); + state.writeTo(os); return os.bytes().toBytes(); } @@ -607,39 +652,152 @@ public class ClusterState implements ToXContent { return readFrom(new BytesStreamInput(data), localNode); } - public static void writeTo(ClusterState state, StreamOutput out) throws IOException { - state.clusterName.writeTo(out); - out.writeLong(state.version()); - MetaData.Builder.writeTo(state.metaData(), out); - RoutingTable.Builder.writeTo(state.routingTable(), out); - DiscoveryNodes.Builder.writeTo(state.nodes(), out); - ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out); - out.writeVInt(state.customs().size()); - for (ObjectObjectCursor cursor : state.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } - } - /** * @param in input stream * @param localNode used to set the local node in the cluster state. can be null. */ public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - ClusterName clusterName = ClusterName.readClusterName(in); + return PROTO.readFrom(in, localNode); + } + + } + + @Override + public Diff diff(ClusterState previousState) { + return new ClusterStateDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new ClusterStateDiff(in, this); + } + + public ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + ClusterName clusterName = ClusterName.readClusterName(in); + Builder builder = new Builder(clusterName); + builder.version = in.readLong(); + builder.uuid = in.readString(); + builder.metaData = MetaData.Builder.readFrom(in); + builder.routingTable = RoutingTable.Builder.readFrom(in); + builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); + builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public ClusterState readFrom(StreamInput in) throws IOException { + return readFrom(in, nodes.localNode()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterName.writeTo(out); + out.writeLong(version); + out.writeString(uuid); + metaData.writeTo(out); + routingTable.writeTo(out); + nodes.writeTo(out); + blocks.writeTo(out); + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + + private static class ClusterStateDiff implements Diff { + + private final long toVersion; + + private final String fromUuid; + + private final String toUuid; + + private final ClusterName clusterName; + + private final Diff routingTable; + + private final Diff nodes; + + private final Diff metaData; + + private final Diff blocks; + + private final Diff> customs; + + public ClusterStateDiff(ClusterState before, ClusterState after) { + fromUuid = before.uuid; + toUuid = after.uuid; + toVersion = after.version; + clusterName = after.clusterName; + routingTable = after.routingTable.diff(before.routingTable); + nodes = after.nodes.diff(before.nodes); + metaData = after.metaData.diff(before.metaData); + blocks = after.blocks.diff(before.blocks); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException { + clusterName = ClusterName.readClusterName(in); + fromUuid = in.readString(); + toUuid = in.readString(); + toVersion = in.readLong(); + routingTable = proto.routingTable.readDiffFrom(in); + nodes = proto.nodes.readDiffFrom(in); + metaData = proto.metaData.readDiffFrom(in); + blocks = proto.blocks.readDiffFrom(in); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterName.writeTo(out); + out.writeString(fromUuid); + out.writeString(toUuid); + out.writeLong(toVersion); + routingTable.writeTo(out); + nodes.writeTo(out); + metaData.writeTo(out); + blocks.writeTo(out); + customs.writeTo(out); + } + + @Override + public ClusterState apply(ClusterState state) { Builder builder = new Builder(clusterName); - builder.version = in.readLong(); - builder.metaData = MetaData.Builder.readFrom(in); - builder.routingTable = RoutingTable.Builder.readFrom(in); - builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); - builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + if (toUuid.equals(state.uuid)) { + // no need to read the rest - cluster state didn't change + return state; } + if (fromUuid.equals(state.uuid) == false) { + throw new IncompatibleClusterStateVersionException(state.version, state.uuid, toVersion, fromUuid); + } + builder.uuid(toUuid); + builder.version(toVersion); + builder.routingTable(routingTable.apply(state.routingTable)); + builder.nodes(nodes.apply(state.nodes)); + builder.metaData(metaData.apply(state.metaData)); + builder.blocks(blocks.apply(state.blocks)); + builder.customs(customs.apply(state.customs)); + builder.fromDiff(true); return builder.build(); } } + } diff --git a/src/main/java/org/elasticsearch/cluster/Diff.java b/src/main/java/org/elasticsearch/cluster/Diff.java new file mode 100644 index 00000000000..2e571f43bca --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/Diff.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Represents difference between states of cluster state parts + */ +public interface Diff { + + /** + * Applies difference to the specified part and retunrs the resulted part + */ + T apply(T part); + + /** + * Writes the differences into the output stream + * @param out + * @throws IOException + */ + void writeTo(StreamOutput out) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/cluster/Diffable.java b/src/main/java/org/elasticsearch/cluster/Diffable.java new file mode 100644 index 00000000000..7ce60047a2b --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/Diffable.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Cluster state part, changes in which can be serialized + */ +public interface Diffable extends Writeable { + + /** + * Returns serializable object representing differences between this and previousState + */ + Diff diff(T previousState); + + /** + * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput + */ + Diff readDiffFrom(StreamInput in) throws IOException; + +} diff --git a/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/src/main/java/org/elasticsearch/cluster/DiffableUtils.java new file mode 100644 index 00000000000..4e912a34f97 --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -0,0 +1,283 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static com.google.common.collect.Lists.newArrayList; +import static com.google.common.collect.Maps.newHashMap; + +public final class DiffableUtils { + private DiffableUtils() { + } + + /** + * Calculates diff between two ImmutableOpenMaps of Diffable objects + */ + public static > Diff> diff(ImmutableOpenMap before, ImmutableOpenMap after) { + assert after != null && before != null; + return new ImmutableOpenMapDiff<>(before, after); + } + + /** + * Calculates diff between two ImmutableMaps of Diffable objects + */ + public static > Diff> diff(ImmutableMap before, ImmutableMap after) { + assert after != null && before != null; + return new ImmutableMapDiff<>(before, after); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps + */ + public static > Diff> readImmutableOpenMapDiff(StreamInput in, KeyedReader keyedReader) throws IOException { + return new ImmutableOpenMapDiff<>(in, keyedReader); + } + + /** + * Loads an object that represents difference between two ImmutableMaps + */ + public static > Diff> readImmutableMapDiff(StreamInput in, KeyedReader keyedReader) throws IOException { + return new ImmutableMapDiff<>(in, keyedReader); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps + */ + public static > Diff> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException { + return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto)); + } + + /** + * Loads an object that represents difference between two ImmutableMaps + */ + public static > Diff> readImmutableMapDiff(StreamInput in, T proto) throws IOException { + return new ImmutableMapDiff<>(in, new PrototypeReader<>(proto)); + } + + /** + * A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's + * used in custom metadata deserialization. + */ + public interface KeyedReader { + + /** + * reads an object of the type T from the stream input + */ + T readFrom(StreamInput in, String key) throws IOException; + + /** + * reads an object that respresents differences between two objects with the type T from the stream input + */ + Diff readDiffFrom(StreamInput in, String key) throws IOException; + } + + /** + * Implementation of the KeyedReader that is using a prototype object for reading operations + * + * Note: this implementation is ignoring the key. + */ + public static class PrototypeReader> implements KeyedReader { + private T proto; + + public PrototypeReader(T proto) { + this.proto = proto; + } + + @Override + public T readFrom(StreamInput in, String key) throws IOException { + return proto.readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return proto.readDiffFrom(in); + } + } + + /** + * Represents differences between two ImmutableMaps of diffable objects + * + * @param the diffable object + */ + private static class ImmutableMapDiff> extends MapDiff> { + + protected ImmutableMapDiff(StreamInput in, KeyedReader reader) throws IOException { + super(in, reader); + } + + public ImmutableMapDiff(ImmutableMap before, ImmutableMap after) { + assert after != null && before != null; + for (String key : before.keySet()) { + if (!after.containsKey(key)) { + deletes.add(key); + } + } + for (ImmutableMap.Entry partIter : after.entrySet()) { + T beforePart = before.get(partIter.getKey()); + if (beforePart == null) { + adds.put(partIter.getKey(), partIter.getValue()); + } else if (partIter.getValue().equals(beforePart) == false) { + diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart)); + } + } + } + + @Override + public ImmutableMap apply(ImmutableMap map) { + HashMap builder = newHashMap(); + builder.putAll(map); + + for (String part : deletes) { + builder.remove(part); + } + + for (Map.Entry> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry additon : adds.entrySet()) { + builder.put(additon.getKey(), additon.getValue()); + } + return ImmutableMap.copyOf(builder); + } + } + + /** + * Represents differences between two ImmutableOpenMap of diffable objects + * + * @param the diffable object + */ + private static class ImmutableOpenMapDiff> extends MapDiff> { + + protected ImmutableOpenMapDiff(StreamInput in, KeyedReader reader) throws IOException { + super(in, reader); + } + + public ImmutableOpenMapDiff(ImmutableOpenMap before, ImmutableOpenMap after) { + assert after != null && before != null; + for (ObjectCursor key : before.keys()) { + if (!after.containsKey(key.value)) { + deletes.add(key.value); + } + } + for (ObjectObjectCursor partIter : after) { + T beforePart = before.get(partIter.key); + if (beforePart == null) { + adds.put(partIter.key, partIter.value); + } else if (partIter.value.equals(beforePart) == false) { + diffs.put(partIter.key, partIter.value.diff(beforePart)); + } + } + } + + @Override + public ImmutableOpenMap apply(ImmutableOpenMap map) { + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); + builder.putAll(map); + + for (String part : deletes) { + builder.remove(part); + } + + for (Map.Entry> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry additon : adds.entrySet()) { + builder.put(additon.getKey(), additon.getValue()); + } + return builder.build(); + } + } + + /** + * Represents differences between two maps of diffable objects + * + * This class is used as base class for different map implementations + * + * @param the diffable object + */ + private static abstract class MapDiff, M> implements Diff { + + protected final List deletes; + protected final Map> diffs; + protected final Map adds; + + protected MapDiff() { + deletes = newArrayList(); + diffs = newHashMap(); + adds = newHashMap(); + } + + protected MapDiff(StreamInput in, KeyedReader reader) throws IOException { + deletes = newArrayList(); + diffs = newHashMap(); + adds = newHashMap(); + int deletesCount = in.readVInt(); + for (int i = 0; i < deletesCount; i++) { + deletes.add(in.readString()); + } + + int diffsCount = in.readVInt(); + for (int i = 0; i < diffsCount; i++) { + String key = in.readString(); + Diff diff = reader.readDiffFrom(in, key); + diffs.put(key, diff); + } + + int addsCount = in.readVInt(); + for (int i = 0; i < addsCount; i++) { + String key = in.readString(); + T part = reader.readFrom(in, key); + adds.put(key, part); + } + } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletes.size()); + for (String delete : deletes) { + out.writeString(delete); + } + + out.writeVInt(diffs.size()); + for (Map.Entry> entry : diffs.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + + out.writeVInt(adds.size()); + for (Map.Entry entry : adds.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + } +} diff --git a/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java b/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java new file mode 100644 index 00000000000..92f5897bf2e --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.ElasticsearchException; + +/** + * Thrown by {@link Diffable#readDiffAndApply(org.elasticsearch.common.io.stream.StreamInput)} method + */ +public class IncompatibleClusterStateVersionException extends ElasticsearchException { + public IncompatibleClusterStateVersionException(String msg) { + super(msg); + } + + public IncompatibleClusterStateVersionException(long expectedVersion, String expectedUuid, long receivedVersion, String receivedUuid) { + super("Expected diff for version " + expectedVersion + " with uuid " + expectedUuid + " got version " + receivedVersion + " and uuid " + receivedUuid); + } +} diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index bb7d332de4f..95c0ba7127e 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,10 +37,12 @@ import java.util.Set; /** * Represents current cluster level blocks to block dirty operations done against the cluster. */ -public class ClusterBlocks { +public class ClusterBlocks extends AbstractDiffable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(ImmutableSet.of(), ImmutableMap.>of()); + public static final ClusterBlocks PROTO = EMPTY_CLUSTER_BLOCK; + private final ImmutableSet global; private final ImmutableMap> indicesBlocks; @@ -203,6 +206,43 @@ public class ClusterBlocks { return new ClusterBlockException(builder.build()); } + @Override + public void writeTo(StreamOutput out) throws IOException { + writeBlockSet(global, out); + out.writeVInt(indicesBlocks.size()); + for (Map.Entry> entry : indicesBlocks.entrySet()) { + out.writeString(entry.getKey()); + writeBlockSet(entry.getValue(), out); + } + } + + private static void writeBlockSet(ImmutableSet blocks, StreamOutput out) throws IOException { + out.writeVInt(blocks.size()); + for (ClusterBlock block : blocks) { + block.writeTo(out); + } + } + + @Override + public ClusterBlocks readFrom(StreamInput in) throws IOException { + ImmutableSet global = readBlockSet(in); + ImmutableMap.Builder> indicesBuilder = ImmutableMap.builder(); + int size = in.readVInt(); + for (int j = 0; j < size; j++) { + indicesBuilder.put(in.readString().intern(), readBlockSet(in)); + } + return new ClusterBlocks(global, indicesBuilder.build()); + } + + private static ImmutableSet readBlockSet(StreamInput in) throws IOException { + ImmutableSet.Builder builder = ImmutableSet.builder(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.add(ClusterBlock.readClusterBlock(in)); + } + return builder.build(); + } + static class ImmutableLevelHolder { static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(ImmutableSet.of(), ImmutableMap.>of()); @@ -313,38 +353,7 @@ public class ClusterBlocks { } public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException { - ImmutableSet global = readBlockSet(in); - ImmutableMap.Builder> indicesBuilder = ImmutableMap.builder(); - int size = in.readVInt(); - for (int j = 0; j < size; j++) { - indicesBuilder.put(in.readString().intern(), readBlockSet(in)); - } - return new ClusterBlocks(global, indicesBuilder.build()); - } - - public static void writeClusterBlocks(ClusterBlocks blocks, StreamOutput out) throws IOException { - writeBlockSet(blocks.global(), out); - out.writeVInt(blocks.indices().size()); - for (Map.Entry> entry : blocks.indices().entrySet()) { - out.writeString(entry.getKey()); - writeBlockSet(entry.getValue(), out); - } - } - - private static void writeBlockSet(ImmutableSet blocks, StreamOutput out) throws IOException { - out.writeVInt(blocks.size()); - for (ClusterBlock block : blocks) { - block.writeTo(out); - } - } - - private static ImmutableSet readBlockSet(StreamInput in) throws IOException { - ImmutableSet.Builder builder = ImmutableSet.builder(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.add(ClusterBlock.readClusterBlock(in)); - } - return builder.build(); + return PROTO.readFrom(in); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java index 008935ec026..0f7e55c8087 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.io.stream.StreamInput; @@ -38,7 +39,9 @@ import java.util.Set; /** * */ -public class AliasMetaData { +public class AliasMetaData extends AbstractDiffable { + + public static final AliasMetaData PROTO = new AliasMetaData("", null, null, null); private final String alias; @@ -146,6 +149,48 @@ public class AliasMetaData { return result; } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(alias()); + if (filter() != null) { + out.writeBoolean(true); + filter.writeTo(out); + } else { + out.writeBoolean(false); + } + if (indexRouting() != null) { + out.writeBoolean(true); + out.writeString(indexRouting()); + } else { + out.writeBoolean(false); + } + if (searchRouting() != null) { + out.writeBoolean(true); + out.writeString(searchRouting()); + } else { + out.writeBoolean(false); + } + + } + + @Override + public AliasMetaData readFrom(StreamInput in) throws IOException { + String alias = in.readString(); + CompressedString filter = null; + if (in.readBoolean()) { + filter = CompressedString.readCompressedString(in); + } + String indexRouting = null; + if (in.readBoolean()) { + indexRouting = in.readString(); + } + String searchRouting = null; + if (in.readBoolean()) { + searchRouting = in.readString(); + } + return new AliasMetaData(alias, filter, indexRouting, searchRouting); + } + public static class Builder { private final String alias; @@ -294,44 +339,12 @@ public class AliasMetaData { return builder.build(); } - public static void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException { - out.writeString(aliasMetaData.alias()); - if (aliasMetaData.filter() != null) { - out.writeBoolean(true); - aliasMetaData.filter.writeTo(out); - } else { - out.writeBoolean(false); - } - if (aliasMetaData.indexRouting() != null) { - out.writeBoolean(true); - out.writeString(aliasMetaData.indexRouting()); - } else { - out.writeBoolean(false); - } - if (aliasMetaData.searchRouting() != null) { - out.writeBoolean(true); - out.writeString(aliasMetaData.searchRouting()); - } else { - out.writeBoolean(false); - } - + public void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException { + aliasMetaData.writeTo(out); } public static AliasMetaData readFrom(StreamInput in) throws IOException { - String alias = in.readString(); - CompressedString filter = null; - if (in.readBoolean()) { - filter = CompressedString.readCompressedString(in); - } - String indexRouting = null; - if (in.readBoolean()) { - indexRouting = in.readString(); - } - String searchRouting = null; - if (in.readBoolean()) { - searchRouting = in.readString(); - } - return new AliasMetaData(alias, filter, indexRouting, searchRouting); + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 1543151fad0..2005de524bd 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -25,6 +25,9 @@ import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; @@ -61,60 +64,54 @@ import static org.elasticsearch.common.settings.ImmutableSettings.*; /** * */ -public class IndexMetaData { +public class IndexMetaData implements Diffable { + public static final IndexMetaData PROTO = IndexMetaData.builder("") + .settings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); - public interface Custom { + public interface Custom extends Diffable, ToXContent { String type(); - interface Factory { + Custom fromMap(Map map) throws IOException; - String type(); + Custom fromXContent(XContentParser parser) throws IOException; - T readFrom(StreamInput in) throws IOException; - - void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; - - T fromMap(Map map) throws IOException; - - T fromXContent(XContentParser parser) throws IOException; - - void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; - - /** - * Merges from first to second, with first being more important, i.e., if something exists in first and second, - * first will prevail. - */ - T merge(T first, T second); - } + /** + * Merges from this to another, with this being more important, i.e., if something exists in this and another, + * this will prevail. + */ + Custom mergeWith(Custom another); } - public static Map customFactories = new HashMap<>(); + public static Map customPrototypes = new HashMap<>(); static { // register non plugin custom metadata - registerFactory(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.FACTORY); + registerPrototype(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.PROTO); } /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static Custom.Factory lookupFactory(String type) { - return customFactories.get(type); + public static T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom index metadata factoy registered for type [" + type + "]"); + public static T lookupPrototypeSafe(String type) throws ElasticsearchIllegalArgumentException { + //noinspection unchecked + T proto = (T) customPrototypes.get(type); + if (proto == null) { + throw new ElasticsearchIllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); } - return factory; + return proto; } public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); @@ -453,7 +450,9 @@ public class IndexMetaData { if (state != that.state) { return false; } - + if (!customs.equals(that.customs)) { + return false; + } return true; } @@ -467,6 +466,126 @@ public class IndexMetaData { return result; } + @Override + public Diff diff(IndexMetaData previousState) { + return new IndexMetaDataDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new IndexMetaDataDiff(in); + } + + private static class IndexMetaDataDiff implements Diff { + + private final String index; + private final long version; + private final State state; + private final Settings settings; + private final Diff> mappings; + private final Diff> aliases; + private Diff> customs; + + public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) { + index = after.index; + version = after.version; + state = after.state; + settings = after.settings; + mappings = DiffableUtils.diff(before.mappings, after.mappings); + aliases = DiffableUtils.diff(before.aliases, after.aliases); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public IndexMetaDataDiff(StreamInput in) throws IOException { + index = in.readString(); + version = in.readLong(); + state = State.fromId(in.readByte()); + settings = ImmutableSettings.readSettingsFromStream(in); + mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO); + aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeLong(version); + out.writeByte(state.id); + ImmutableSettings.writeSettingsToStream(settings, out); + mappings.writeTo(out); + aliases.writeTo(out); + customs.writeTo(out); + } + + @Override + public IndexMetaData apply(IndexMetaData part) { + Builder builder = builder(index); + builder.version(version); + builder.state(state); + builder.settings(settings); + builder.mappings.putAll(mappings.apply(part.mappings)); + builder.aliases.putAll(aliases.apply(part.aliases)); + builder.customs.putAll(customs.apply(part.customs)); + return builder.build(); + } + } + + @Override + public IndexMetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(in.readString()); + builder.version(in.readLong()); + builder.state(State.fromId(in.readByte())); + builder.settings(readSettingsFromStream(in)); + int mappingsSize = in.readVInt(); + for (int i = 0; i < mappingsSize; i++) { + MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in); + builder.putMapping(mappingMd); + } + int aliasesSize = in.readVInt(); + for (int i = 0; i < aliasesSize; i++) { + AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); + builder.putAlias(aliasMd); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeLong(version); + out.writeByte(state.id()); + writeSettingsToStream(settings, out); + out.writeVInt(mappings.size()); + for (ObjectCursor cursor : mappings.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(aliases.size()); + for (ObjectCursor cursor : aliases.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static Builder builder(String index) { return new Builder(index); } @@ -662,7 +781,7 @@ public class IndexMetaData { for (ObjectObjectCursor cursor : indexMetaData.customs()) { builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE); - lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -709,12 +828,13 @@ public class IndexMetaData { } } else { // check if its a custom index metadata - Custom.Factory factory = lookupFactory(currentFieldName); - if (factory == null) { + Custom proto = lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), factory.fromXContent(parser)); + Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token == XContentParser.Token.START_ARRAY) { @@ -743,47 +863,7 @@ public class IndexMetaData { } public static IndexMetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(in.readString()); - builder.version(in.readLong()); - builder.state(State.fromId(in.readByte())); - builder.settings(readSettingsFromStream(in)); - int mappingsSize = in.readVInt(); - for (int i = 0; i < mappingsSize; i++) { - MappingMetaData mappingMd = MappingMetaData.readFrom(in); - builder.putMapping(mappingMd); - } - int aliasesSize = in.readVInt(); - for (int i = 0; i < aliasesSize; i++) { - AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); - builder.putAlias(aliasMd); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(IndexMetaData indexMetaData, StreamOutput out) throws IOException { - out.writeString(indexMetaData.index()); - out.writeLong(indexMetaData.version()); - out.writeByte(indexMetaData.state().id()); - writeSettingsToStream(indexMetaData.settings(), out); - out.writeVInt(indexMetaData.mappings().size()); - for (ObjectCursor cursor : indexMetaData.mappings().values()) { - MappingMetaData.writeTo(cursor.value, out); - } - out.writeVInt(indexMetaData.aliases().size()); - for (ObjectCursor cursor : indexMetaData.aliases().values()) { - AliasMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(indexMetaData.customs().size()); - for (ObjectObjectCursor cursor : indexMetaData.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 582e008550d..54150ee6a1e 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.Sets; -import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.compress.CompressedString; @@ -42,7 +42,9 @@ import java.util.Set; /** * */ -public class IndexTemplateMetaData { +public class IndexTemplateMetaData extends AbstractDiffable { + + public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build(); private final String name; @@ -161,11 +163,57 @@ public class IndexTemplateMetaData { return result; } + @Override + public IndexTemplateMetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(in.readString()); + builder.order(in.readInt()); + builder.template(in.readString()); + builder.settings(ImmutableSettings.readSettingsFromStream(in)); + int mappingsSize = in.readVInt(); + for (int i = 0; i < mappingsSize; i++) { + builder.putMapping(in.readString(), CompressedString.readCompressedString(in)); + } + int aliasesSize = in.readVInt(); + for (int i = 0; i < aliasesSize; i++) { + AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); + builder.putAlias(aliasMd); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeInt(order); + out.writeString(template); + ImmutableSettings.writeSettingsToStream(settings, out); + out.writeVInt(mappings.size()); + for (ObjectObjectCursor cursor : mappings) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + out.writeVInt(aliases.size()); + for (ObjectCursor cursor : aliases.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet("template", "order", "mappings", "settings"); static { - VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet()); + VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet()); } private String name; @@ -305,7 +353,7 @@ public class IndexTemplateMetaData { for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE); - IndexMetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -347,12 +395,13 @@ public class IndexTemplateMetaData { } } else { // check if its a custom index metadata - IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(currentFieldName); - if (factory == null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), factory.fromXContent(parser)); + IndexMetaData.Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token == XContentParser.Token.START_ARRAY) { @@ -401,47 +450,7 @@ public class IndexTemplateMetaData { } public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(in.readString()); - builder.order(in.readInt()); - builder.template(in.readString()); - builder.settings(ImmutableSettings.readSettingsFromStream(in)); - int mappingsSize = in.readVInt(); - for (int i = 0; i < mappingsSize; i++) { - builder.putMapping(in.readString(), CompressedString.readCompressedString(in)); - } - int aliasesSize = in.readVInt(); - for (int i = 0; i < aliasesSize; i++) { - AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); - builder.putAlias(aliasMd); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(IndexTemplateMetaData indexTemplateMetaData, StreamOutput out) throws IOException { - out.writeString(indexTemplateMetaData.name()); - out.writeInt(indexTemplateMetaData.order()); - out.writeString(indexTemplateMetaData.template()); - ImmutableSettings.writeSettingsToStream(indexTemplateMetaData.settings(), out); - out.writeVInt(indexTemplateMetaData.mappings().size()); - for (ObjectObjectCursor cursor : indexTemplateMetaData.mappings()) { - out.writeString(cursor.key); - cursor.value.writeTo(out); - } - out.writeVInt(indexTemplateMetaData.aliases().size()); - for (ObjectCursor cursor : indexTemplateMetaData.aliases().values()) { - AliasMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(indexTemplateMetaData.customs().size()); - for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { - out.writeString(cursor.key); - IndexMetaData.lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index f2ace98caeb..0959a4612c1 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -19,9 +19,11 @@ package org.elasticsearch.cluster.metadata; +import com.google.common.collect.Maps; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.TimestampParsingException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedString; @@ -39,14 +41,18 @@ import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.Map; +import static com.google.common.collect.Maps.newHashMap; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** * Mapping configuration for a type. */ -public class MappingMetaData { +public class MappingMetaData extends AbstractDiffable { + + public static final MappingMetaData PROTO = new MappingMetaData(); public static class Id { @@ -318,6 +324,15 @@ public class MappingMetaData { initMappers(withoutType); } + private MappingMetaData() { + this.type = ""; + try { + this.source = new CompressedString(""); + } catch (IOException ex) { + throw new ElasticsearchIllegalStateException("Cannot create MappingMetaData prototype", ex); + } + } + private void initMappers(Map withoutType) { if (withoutType.containsKey("_id")) { String path = null; @@ -533,34 +548,35 @@ public class MappingMetaData { } } - public static void writeTo(MappingMetaData mappingMd, StreamOutput out) throws IOException { - out.writeString(mappingMd.type()); - mappingMd.source().writeTo(out); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(type()); + source().writeTo(out); // id - if (mappingMd.id().hasPath()) { + if (id().hasPath()) { out.writeBoolean(true); - out.writeString(mappingMd.id().path()); + out.writeString(id().path()); } else { out.writeBoolean(false); } // routing - out.writeBoolean(mappingMd.routing().required()); - if (mappingMd.routing().hasPath()) { + out.writeBoolean(routing().required()); + if (routing().hasPath()) { out.writeBoolean(true); - out.writeString(mappingMd.routing().path()); + out.writeString(routing().path()); } else { out.writeBoolean(false); } // timestamp - out.writeBoolean(mappingMd.timestamp().enabled()); - out.writeOptionalString(mappingMd.timestamp().path()); - out.writeString(mappingMd.timestamp().format()); - out.writeOptionalString(mappingMd.timestamp().defaultTimestamp()); + out.writeBoolean(timestamp().enabled()); + out.writeOptionalString(timestamp().path()); + out.writeString(timestamp().format()); + out.writeOptionalString(timestamp().defaultTimestamp()); // TODO Remove the test in elasticsearch 2.0.0 if (out.getVersion().onOrAfter(Version.V_1_5_0)) { - out.writeOptionalBoolean(mappingMd.timestamp().ignoreMissing()); + out.writeOptionalBoolean(timestamp().ignoreMissing()); } - out.writeBoolean(mappingMd.hasParentField()); + out.writeBoolean(hasParentField()); } @Override @@ -589,7 +605,7 @@ public class MappingMetaData { return result; } - public static MappingMetaData readFrom(StreamInput in) throws IOException { + public MappingMetaData readFrom(StreamInput in) throws IOException { String type = in.readString(); CompressedString source = CompressedString.readCompressedString(in); // id diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 51793b1d27b..4f20e1212cc 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -26,7 +26,9 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; import com.google.common.collect.*; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.cluster.*; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.Nullable; @@ -56,7 +58,9 @@ import static org.elasticsearch.common.settings.ImmutableSettings.*; /** * */ -public class MetaData implements Iterable { +public class MetaData implements Iterable, Diffable { + + public static final MetaData PROTO = builder().build(); public static final String ALL = "_all"; @@ -68,60 +72,51 @@ public class MetaData implements Iterable { GATEWAY, /* Custom metadata should be stored as part of a snapshot */ - SNAPSHOT; + SNAPSHOT } public static EnumSet API_ONLY = EnumSet.of(XContentContext.API); public static EnumSet API_AND_GATEWAY = EnumSet.of(XContentContext.API, XContentContext.GATEWAY); public static EnumSet API_AND_SNAPSHOT = EnumSet.of(XContentContext.API, XContentContext.SNAPSHOT); - public interface Custom { + public interface Custom extends Diffable, ToXContent { - abstract class Factory { + String type(); - public abstract String type(); + Custom fromXContent(XContentParser parser) throws IOException; - public abstract T readFrom(StreamInput in) throws IOException; - - public abstract void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; - - public abstract T fromXContent(XContentParser parser) throws IOException; - - public abstract void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; - - public EnumSet context() { - return API_ONLY; - } - } + EnumSet context(); } - public static Map customFactories = new HashMap<>(); + public static Map customPrototypes = new HashMap<>(); static { // register non plugin custom metadata - registerFactory(RepositoriesMetaData.TYPE, RepositoriesMetaData.FACTORY); - registerFactory(SnapshotMetaData.TYPE, SnapshotMetaData.FACTORY); - registerFactory(RestoreMetaData.TYPE, RestoreMetaData.FACTORY); + registerPrototype(RepositoriesMetaData.TYPE, RepositoriesMetaData.PROTO); + registerPrototype(SnapshotMetaData.TYPE, SnapshotMetaData.PROTO); + registerPrototype(RestoreMetaData.TYPE, RestoreMetaData.PROTO); } /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static Custom.Factory lookupFactory(String type) { - return customFactories.get(type); + public static T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom index metadata factory registered for type [" + type + "]"); + public static T lookupPrototypeSafe(String type) throws ElasticsearchIllegalArgumentException { + //noinspection unchecked + T proto = (T) customPrototypes.get(type); + if (proto == null) { + throw new ElasticsearchIllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); } - return factory; + return proto; } @@ -645,14 +640,14 @@ public class MetaData implements Iterable { /** * Translates the provided indices or aliases, eventually containing wildcard expressions, into actual indices. * - * @param indicesOptions how the aliases or indices need to be resolved to concrete indices + * @param indicesOptions how the aliases or indices need to be resolved to concrete indices * @param aliasesOrIndices the aliases or indices to be resolved to concrete indices * @return the obtained concrete indices - * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options - * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options - * don't allow such a case. + * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options + * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options + * don't allow such a case. * @throws ElasticsearchIllegalArgumentException if one of the aliases resolve to multiple indices and the provided - * indices options don't allow such a case. + * indices options don't allow such a case. */ public String[] concreteIndices(IndicesOptions indicesOptions, String... aliasesOrIndices) throws IndexMissingException, ElasticsearchIllegalArgumentException { if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed()) { @@ -1140,14 +1135,14 @@ public class MetaData implements Iterable { // Check if any persistent metadata needs to be saved int customCount1 = 0; for (ObjectObjectCursor cursor : metaData1.customs) { - if (customFactories.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { if (!cursor.value.equals(metaData2.custom(cursor.key))) return false; customCount1++; } } int customCount2 = 0; for (ObjectObjectCursor cursor : metaData2.customs) { - if (customFactories.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { customCount2++; } } @@ -1155,6 +1150,129 @@ public class MetaData implements Iterable { return true; } + @Override + public Diff diff(MetaData previousState) { + return new MetaDataDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new MetaDataDiff(in); + } + + private static class MetaDataDiff implements Diff { + + private long version; + + private String uuid; + + private Settings transientSettings; + private Settings persistentSettings; + private Diff> indices; + private Diff> templates; + private Diff> customs; + + + public MetaDataDiff(MetaData before, MetaData after) { + uuid = after.uuid; + version = after.version; + transientSettings = after.transientSettings; + persistentSettings = after.persistentSettings; + indices = DiffableUtils.diff(before.indices, after.indices); + templates = DiffableUtils.diff(before.templates, after.templates); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public MetaDataDiff(StreamInput in) throws IOException { + uuid = in.readString(); + version = in.readLong(); + transientSettings = ImmutableSettings.readSettingsFromStream(in); + persistentSettings = ImmutableSettings.readSettingsFromStream(in); + indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO); + templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(uuid); + out.writeLong(version); + ImmutableSettings.writeSettingsToStream(transientSettings, out); + ImmutableSettings.writeSettingsToStream(persistentSettings, out); + indices.writeTo(out); + templates.writeTo(out); + customs.writeTo(out); + } + + @Override + public MetaData apply(MetaData part) { + Builder builder = builder(); + builder.uuid(uuid); + builder.version(version); + builder.transientSettings(transientSettings); + builder.persistentSettings(persistentSettings); + builder.indices(indices.apply(part.indices)); + builder.templates(templates.apply(part.templates)); + builder.customs(customs.apply(part.customs)); + return builder.build(); + } + } + + @Override + public MetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(); + builder.version = in.readLong(); + builder.uuid = in.readString(); + builder.transientSettings(readSettingsFromStream(in)); + builder.persistentSettings(readSettingsFromStream(in)); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.put(IndexMetaData.Builder.readFrom(in), false); + } + size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.put(IndexTemplateMetaData.Builder.readFrom(in)); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + out.writeString(uuid); + writeSettingsToStream(transientSettings, out); + writeSettingsToStream(persistentSettings, out); + out.writeVInt(indices.size()); + for (IndexMetaData indexMetaData : this) { + indexMetaData.writeTo(out); + } + out.writeVInt(templates.size()); + for (ObjectCursor cursor : templates.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static Builder builder() { return new Builder(); } @@ -1226,6 +1344,11 @@ public class MetaData implements Iterable { return this; } + public Builder indices(ImmutableOpenMap indices) { + this.indices.putAll(indices); + return this; + } + public Builder put(IndexTemplateMetaData.Builder template) { return put(template.build()); } @@ -1240,6 +1363,11 @@ public class MetaData implements Iterable { return this; } + public Builder templates(ImmutableOpenMap templates) { + this.templates.putAll(templates); + return this; + } + public Custom getCustom(String type) { return customs.get(type); } @@ -1254,6 +1382,11 @@ public class MetaData implements Iterable { return this; } + public Builder customs(ImmutableOpenMap customs) { + this.customs.putAll(customs); + return this; + } + public Builder updateSettings(Settings settings, String... indices) { if (indices == null || indices.length == 0) { indices = this.indices.keys().toArray(String.class); @@ -1306,6 +1439,11 @@ public class MetaData implements Iterable { return this; } + public Builder uuid(String uuid) { + this.uuid = uuid; + return this; + } + public Builder generateUuidIfNeeded() { if (uuid.equals("_na_")) { uuid = Strings.randomBase64UUID(); @@ -1364,10 +1502,10 @@ public class MetaData implements Iterable { } for (ObjectObjectCursor cursor : metaData.customs()) { - Custom.Factory factory = lookupFactorySafe(cursor.key); - if (factory.context().contains(context)) { + Custom proto = lookupPrototypeSafe(cursor.key); + if (proto.context().contains(context)) { builder.startObject(cursor.key); - factory.toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } } @@ -1411,12 +1549,13 @@ public class MetaData implements Iterable { } } else { // check if its a custom index metadata - Custom.Factory factory = lookupFactory(currentFieldName); - if (factory == null) { + Custom proto = lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), factory.fromXContent(parser)); + Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token.isValue()) { @@ -1431,46 +1570,7 @@ public class MetaData implements Iterable { } public static MetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(); - builder.version = in.readLong(); - builder.uuid = in.readString(); - builder.transientSettings(readSettingsFromStream(in)); - builder.persistentSettings(readSettingsFromStream(in)); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(IndexMetaData.Builder.readFrom(in), false); - } - size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(IndexTemplateMetaData.Builder.readFrom(in)); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(MetaData metaData, StreamOutput out) throws IOException { - out.writeLong(metaData.version); - out.writeString(metaData.uuid); - writeSettingsToStream(metaData.transientSettings(), out); - writeSettingsToStream(metaData.persistentSettings(), out); - out.writeVInt(metaData.indices.size()); - for (IndexMetaData indexMetaData : metaData) { - IndexMetaData.Builder.writeTo(indexMetaData, out); - } - out.writeVInt(metaData.templates.size()); - for (ObjectCursor cursor : metaData.templates.values()) { - IndexTemplateMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(metaData.customs().size()); - for (ObjectObjectCursor cursor : metaData.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 09864fce908..d96377f8226 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -273,7 +273,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (existing == null) { customs.put(type, custom); } else { - IndexMetaData.Custom merged = IndexMetaData.lookupFactorySafe(type).merge(existing, custom); + IndexMetaData.Custom merged = existing.mergeWith(custom); customs.put(type, merged); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 81b11fc14b1..51cd5db086b 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; @@ -39,11 +41,11 @@ import java.util.Map; /** * Contains metadata about registered snapshot repositories */ -public class RepositoriesMetaData implements MetaData.Custom { +public class RepositoriesMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "repositories"; - public static final Factory FACTORY = new Factory(); + public static final RepositoriesMetaData PROTO = new RepositoriesMetaData(); private final ImmutableList repositories; @@ -80,122 +82,132 @@ public class RepositoriesMetaData implements MetaData.Custom { return null; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RepositoriesMetaData that = (RepositoriesMetaData) o; + + return repositories.equals(that.repositories); + + } + + @Override + public int hashCode() { + return repositories.hashCode(); + } + /** - * Repository metadata factory + * {@inheritDoc} */ - public static class Factory extends MetaData.Custom.Factory { + @Override + public String type() { + return TYPE; + } - /** - * {@inheritDoc} - */ - @Override - public String type() { - return TYPE; + /** + * {@inheritDoc} + */ + @Override + public Custom readFrom(StreamInput in) throws IOException { + RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; + for (int i = 0; i < repository.length; i++) { + repository[i] = RepositoryMetaData.readFrom(in); } + return new RepositoriesMetaData(repository); + } - /** - * {@inheritDoc} - */ - @Override - public RepositoriesMetaData readFrom(StreamInput in) throws IOException { - RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; - for (int i = 0; i < repository.length; i++) { - repository[i] = RepositoryMetaData.readFrom(in); - } - return new RepositoriesMetaData(repository); - } - - /** - * {@inheritDoc} - */ - @Override - public void writeTo(RepositoriesMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.repositories().size()); - for (RepositoryMetaData repository : repositories.repositories()) { - repository.writeTo(out); - } - } - - /** - * {@inheritDoc} - */ - @Override - public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - List repository = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String name = parser.currentName(); - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); - } - String type = null; - Settings settings = ImmutableSettings.EMPTY; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("type".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); - } - type = parser.text(); - } else if ("settings".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); - } - settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); - } else { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); - } - } else { - throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); - } - } - if (type == null) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); - } - repository.add(new RepositoryMetaData(name, type, settings)); - } else { - throw new ElasticsearchParseException("failed to parse repositories"); - } - } - return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); - } - - /** - * {@inheritDoc} - */ - @Override - public void toXContent(RepositoriesMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - for (RepositoryMetaData repository : customIndexMetaData.repositories()) { - toXContent(repository, builder, params); - } - } - - @Override - public EnumSet context() { - return MetaData.API_AND_GATEWAY; - } - - /** - * Serializes information about a single repository - * - * @param repository repository metadata - * @param builder XContent builder - * @param params serialization parameters - * @throws IOException - */ - public void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("type", repository.type()); - builder.startObject("settings"); - for (Map.Entry settingEntry : repository.settings().getAsMap().entrySet()) { - builder.field(settingEntry.getKey(), settingEntry.getValue()); - } - builder.endObject(); - - builder.endObject(); + /** + * {@inheritDoc} + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(repositories.size()); + for (RepositoryMetaData repository : repositories) { + repository.writeTo(out); } } + /** + * {@inheritDoc} + */ + @Override + public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + List repository = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String name = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); + } + String type = null; + Settings settings = ImmutableSettings.EMPTY; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("type".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); + } + type = parser.text(); + } else if ("settings".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); + } + settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); + } else { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); + } + } else { + throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); + } + } + if (type == null) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); + } + repository.add(new RepositoryMetaData(name, type, settings)); + } else { + throw new ElasticsearchParseException("failed to parse repositories"); + } + } + return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); + } + + /** + * {@inheritDoc} + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + for (RepositoryMetaData repository : repositories) { + toXContent(repository, builder, params); + } + return builder; + } + + @Override + public EnumSet context() { + return MetaData.API_AND_GATEWAY; + } + + /** + * Serializes information about a single repository + * + * @param repository repository metadata + * @param builder XContent builder + * @param params serialization parameters + * @throws IOException + */ + public static void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("type", repository.type()); + builder.startObject("settings"); + for (Map.Entry settingEntry : repository.settings().getAsMap().entrySet()) { + builder.field(settingEntry.getKey(), settingEntry.getValue()); + } + builder.endObject(); + + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java index ea50b30ba88..a283f1f43c1 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java @@ -99,4 +99,25 @@ public class RepositoryMetaData { out.writeString(type); ImmutableSettings.writeSettingsToStream(settings, out); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RepositoryMetaData that = (RepositoryMetaData) o; + + if (!name.equals(that.name)) return false; + if (!type.equals(that.type)) return false; + return settings.equals(that.settings); + + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + type.hashCode(); + result = 31 * result + settings.hashCode(); + return result; + } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java index 373d5ff858c..6dbde85c158 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -30,16 +31,17 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.EnumSet; import java.util.Map; /** * Meta data about restore processes that are currently executing */ -public class RestoreMetaData implements MetaData.Custom { +public class RestoreMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "restore"; - public static final Factory FACTORY = new Factory(); + public static final RestoreMetaData PROTO = new RestoreMetaData(); private final ImmutableList entries; @@ -395,124 +397,122 @@ public class RestoreMetaData implements MetaData.Custom { } /** - * Restore metadata factory + * {@inheritDoc} */ - public static class Factory extends MetaData.Custom.Factory { + @Override + public String type() { + return TYPE; + } - /** - * {@inheritDoc} - */ - @Override - public String type() { - return TYPE; - } - - /** - * {@inheritDoc} - */ - @Override - public RestoreMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); - State state = State.fromValue(in.readByte()); - int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); - for (int j = 0; j < indices; j++) { - indexBuilder.add(in.readString()); - } - ImmutableMap.Builder builder = ImmutableMap.builder(); - int shards = in.readVInt(); - for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); - ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); - builder.put(shardId, shardState); - } - entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); + /** + * {@inheritDoc} + */ + @Override + public RestoreMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + State state = State.fromValue(in.readByte()); + int indices = in.readVInt(); + ImmutableList.Builder indexBuilder = ImmutableList.builder(); + for (int j = 0; j < indices; j++) { + indexBuilder.add(in.readString()); } - return new RestoreMetaData(entries); - } - - /** - * {@inheritDoc} - */ - @Override - public void writeTo(RestoreMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.entries().size()); - for (Entry entry : repositories.entries()) { - entry.snapshotId().writeTo(out); - out.writeByte(entry.state().value()); - out.writeVInt(entry.indices().size()); - for (String index : entry.indices()) { - out.writeString(index); - } - out.writeVInt(entry.shards().size()); - for (Map.Entry shardEntry : entry.shards().entrySet()) { - shardEntry.getKey().writeTo(out); - shardEntry.getValue().writeTo(out); - } + ImmutableMap.Builder builder = ImmutableMap.builder(); + int shards = in.readVInt(); + for (int j = 0; j < shards; j++) { + ShardId shardId = ShardId.readShardId(in); + ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); + builder.put(shardId, shardState); } + entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); } + return new RestoreMetaData(entries); + } - /** - * {@inheritDoc} - */ - @Override - public RestoreMetaData fromXContent(XContentParser parser) throws IOException { - throw new UnsupportedOperationException(); - } - - /** - * {@inheritDoc} - */ - @Override - public void toXContent(RestoreMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray("snapshots"); - for (Entry entry : customIndexMetaData.entries()) { - toXContent(entry, builder, params); + /** + * {@inheritDoc} + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries.size()); + for (Entry entry : entries) { + entry.snapshotId().writeTo(out); + out.writeByte(entry.state().value()); + out.writeVInt(entry.indices().size()); + for (String index : entry.indices()) { + out.writeString(index); } - builder.endArray(); - } - - /** - * Serializes single restore operation - * - * @param entry restore operation metadata - * @param builder XContent builder - * @param params serialization parameters - * @throws IOException - */ - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field("snapshot", entry.snapshotId().getSnapshot()); - builder.field("repository", entry.snapshotId().getRepository()); - builder.field("state", entry.state()); - builder.startArray("indices"); - { - for (String index : entry.indices()) { - builder.value(index); - } + out.writeVInt(entry.shards().size()); + for (Map.Entry shardEntry : entry.shards().entrySet()) { + shardEntry.getKey().writeTo(out); + shardEntry.getValue().writeTo(out); } - builder.endArray(); - builder.startArray("shards"); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardRestoreStatus status = shardEntry.getValue(); - builder.startObject(); - { - builder.field("index", shardId.getIndex()); - builder.field("shard", shardId.getId()); - builder.field("state", status.state()); - } - builder.endObject(); - } - } - - builder.endArray(); - builder.endObject(); } } + /** + * {@inheritDoc} + */ + @Override + public RestoreMetaData fromXContent(XContentParser parser) throws IOException { + throw new UnsupportedOperationException(); + } + @Override + public EnumSet context() { + return MetaData.API_ONLY; + } + + /** + * {@inheritDoc} + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray("snapshots"); + for (Entry entry : entries) { + toXContent(entry, builder, params); + } + builder.endArray(); + return builder; + } + + /** + * Serializes single restore operation + * + * @param entry restore operation metadata + * @param builder XContent builder + * @param params serialization parameters + * @throws IOException + */ + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field("snapshot", entry.snapshotId().getSnapshot()); + builder.field("repository", entry.snapshotId().getRepository()); + builder.field("state", entry.state()); + builder.startArray("indices"); + { + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.startArray("shards"); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardRestoreStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field("index", shardId.getIndex()); + builder.field("shard", shardId.getId()); + builder.field("state", status.state()); + } + builder.endObject(); + } + } + + builder.endArray(); + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java index b759fe5daeb..5010fcab5ac 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java @@ -22,6 +22,8 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -31,6 +33,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.EnumSet; import java.util.Map; import static com.google.common.collect.Maps.newHashMap; @@ -38,10 +41,10 @@ import static com.google.common.collect.Maps.newHashMap; /** * Meta data about snapshots that are currently executing */ -public class SnapshotMetaData implements MetaData.Custom { +public class SnapshotMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "snapshots"; - public static final Factory FACTORY = new Factory(); + public static final SnapshotMetaData PROTO = new SnapshotMetaData(); @Override public boolean equals(Object o) { @@ -330,123 +333,123 @@ public class SnapshotMetaData implements MetaData.Custom { return null; } + @Override + public String type() { + return TYPE; + } - public static class Factory extends MetaData.Custom.Factory { - - @Override - public String type() { - return TYPE; //To change body of implemented methods use File | Settings | File Templates. - } - - @Override - public SnapshotMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); - boolean includeGlobalState = in.readBoolean(); - State state = State.fromValue(in.readByte()); - int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); - for (int j = 0; j < indices; j++) { - indexBuilder.add(in.readString()); - } - long startTime = in.readLong(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - int shards = in.readVInt(); - for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); - String nodeId = in.readOptionalString(); - State shardState = State.fromValue(in.readByte()); - builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); - } - entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); + @Override + public SnapshotMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + boolean includeGlobalState = in.readBoolean(); + State state = State.fromValue(in.readByte()); + int indices = in.readVInt(); + ImmutableList.Builder indexBuilder = ImmutableList.builder(); + for (int j = 0; j < indices; j++) { + indexBuilder.add(in.readString()); } - return new SnapshotMetaData(entries); - } - - @Override - public void writeTo(SnapshotMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.entries().size()); - for (Entry entry : repositories.entries()) { - entry.snapshotId().writeTo(out); - out.writeBoolean(entry.includeGlobalState()); - out.writeByte(entry.state().value()); - out.writeVInt(entry.indices().size()); - for (String index : entry.indices()) { - out.writeString(index); - } - out.writeLong(entry.startTime()); - out.writeVInt(entry.shards().size()); - for (Map.Entry shardEntry : entry.shards().entrySet()) { - shardEntry.getKey().writeTo(out); - out.writeOptionalString(shardEntry.getValue().nodeId()); - out.writeByte(shardEntry.getValue().state().value()); - } + long startTime = in.readLong(); + ImmutableMap.Builder builder = ImmutableMap.builder(); + int shards = in.readVInt(); + for (int j = 0; j < shards; j++) { + ShardId shardId = ShardId.readShardId(in); + String nodeId = in.readOptionalString(); + State shardState = State.fromValue(in.readByte()); + builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } + entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); } + return new SnapshotMetaData(entries); + } - @Override - public SnapshotMetaData fromXContent(XContentParser parser) throws IOException { - throw new UnsupportedOperationException(); - } - - static final class Fields { - static final XContentBuilderString REPOSITORY = new XContentBuilderString("repository"); - static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); - static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); - static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); - static final XContentBuilderString STATE = new XContentBuilderString("state"); - static final XContentBuilderString INDICES = new XContentBuilderString("indices"); - static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); - static final XContentBuilderString START_TIME = new XContentBuilderString("start_time"); - static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); - static final XContentBuilderString INDEX = new XContentBuilderString("index"); - static final XContentBuilderString SHARD = new XContentBuilderString("shard"); - static final XContentBuilderString NODE = new XContentBuilderString("node"); - } - - @Override - public void toXContent(SnapshotMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray(Fields.SNAPSHOTS); - for (Entry entry : customIndexMetaData.entries()) { - toXContent(entry, builder, params); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries.size()); + for (Entry entry : entries) { + entry.snapshotId().writeTo(out); + out.writeBoolean(entry.includeGlobalState()); + out.writeByte(entry.state().value()); + out.writeVInt(entry.indices().size()); + for (String index : entry.indices()) { + out.writeString(index); } - builder.endArray(); - } - - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); - builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); - builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); - builder.field(Fields.STATE, entry.state()); - builder.startArray(Fields.INDICES); - { - for (String index : entry.indices()) { - builder.value(index); - } + out.writeLong(entry.startTime()); + out.writeVInt(entry.shards().size()); + for (Map.Entry shardEntry : entry.shards().entrySet()) { + shardEntry.getKey().writeTo(out); + out.writeOptionalString(shardEntry.getValue().nodeId()); + out.writeByte(shardEntry.getValue().state().value()); } - builder.endArray(); - builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); - builder.startArray(Fields.SHARDS); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardSnapshotStatus status = shardEntry.getValue(); - builder.startObject(); - { - builder.field(Fields.INDEX, shardId.getIndex()); - builder.field(Fields.SHARD, shardId.getId()); - builder.field(Fields.STATE, status.state()); - builder.field(Fields.NODE, status.nodeId()); - } - builder.endObject(); - } - } - builder.endArray(); - builder.endObject(); } } + @Override + public SnapshotMetaData fromXContent(XContentParser parser) throws IOException { + throw new UnsupportedOperationException(); + } + @Override + public EnumSet context() { + return MetaData.API_ONLY; + } + + static final class Fields { + static final XContentBuilderString REPOSITORY = new XContentBuilderString("repository"); + static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); + static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); + static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); + static final XContentBuilderString STATE = new XContentBuilderString("state"); + static final XContentBuilderString INDICES = new XContentBuilderString("indices"); + static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); + static final XContentBuilderString START_TIME = new XContentBuilderString("start_time"); + static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); + static final XContentBuilderString INDEX = new XContentBuilderString("index"); + static final XContentBuilderString SHARD = new XContentBuilderString("shard"); + static final XContentBuilderString NODE = new XContentBuilderString("node"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray(Fields.SNAPSHOTS); + for (Entry entry : entries) { + toXContent(entry, builder, params); + } + builder.endArray(); + return builder; + } + + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); + builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); + builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); + builder.field(Fields.STATE, entry.state()); + builder.startArray(Fields.INDICES); + { + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); + builder.startArray(Fields.SHARDS); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardSnapshotStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field(Fields.INDEX, shardId.getIndex()); + builder.field(Fields.SHARD, shardId.getId()); + builder.field(Fields.STATE, status.state()); + builder.field(Fields.NODE, status.nodeId()); + } + builder.endObject(); + } + } + builder.endArray(); + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 0a4986476e5..61b5b876536 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -26,6 +26,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.UnmodifiableIterator; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -45,9 +46,10 @@ import static com.google.common.collect.Lists.newArrayList; * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to * access, modify merge / diff discovery nodes. */ -public class DiscoveryNodes implements Iterable { +public class DiscoveryNodes extends AbstractDiffable implements Iterable { public static final DiscoveryNodes EMPTY_NODES = builder().build(); + public static final DiscoveryNodes PROTO = EMPTY_NODES; private final ImmutableOpenMap nodes; private final ImmutableOpenMap dataNodes; @@ -568,6 +570,44 @@ public class DiscoveryNodes implements Iterable { } } + public void writeTo(StreamOutput out) throws IOException { + if (masterNodeId == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeString(masterNodeId); + } + out.writeVInt(nodes.size()); + for (DiscoveryNode node : this) { + node.writeTo(out); + } + } + + public DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + Builder builder = new Builder(); + if (in.readBoolean()) { + builder.masterNodeId(in.readString()); + } + if (localNode != null) { + builder.localNodeId(localNode.id()); + } + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + DiscoveryNode node = DiscoveryNode.readNode(in); + if (localNode != null && node.id().equals(localNode.id())) { + // reuse the same instance of our address and local node id for faster equality + node = localNode; + } + builder.put(node); + } + return builder.build(); + } + + @Override + public DiscoveryNodes readFrom(StreamInput in) throws IOException { + return readFrom(in, localNode()); + } + public static Builder builder() { return new Builder(); } @@ -632,37 +672,8 @@ public class DiscoveryNodes implements Iterable { return new DiscoveryNodes(nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), masterNodeId, localNodeId, minNodeVersion, minNonClientNodeVersion); } - public static void writeTo(DiscoveryNodes nodes, StreamOutput out) throws IOException { - if (nodes.masterNodeId() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeString(nodes.masterNodeId); - } - out.writeVInt(nodes.size()); - for (DiscoveryNode node : nodes) { - node.writeTo(out); - } - } - public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - Builder builder = new Builder(); - if (in.readBoolean()) { - builder.masterNodeId(in.readString()); - } - if (localNode != null) { - builder.localNodeId(localNode.id()); - } - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - DiscoveryNode node = DiscoveryNode.readNode(in); - if (localNode != null && node.id().equals(localNode.id())) { - // reuse the same instance of our address and local node id for faster equality - node = localNode; - } - builder.put(node); - } - return builder.build(); + return PROTO.readFrom(in, localNode); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 5f0356d3572..239f5113781 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -26,6 +26,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenIntMap; @@ -56,7 +57,9 @@ import static com.google.common.collect.Lists.newArrayList; * represented as {@link ShardRouting}. *

    */ -public class IndexRoutingTable implements Iterable { +public class IndexRoutingTable extends AbstractDiffable implements Iterable { + + public static final IndexRoutingTable PROTO = builder("").build(); private final String index; private final ShardShuffler shuffler; @@ -315,9 +318,51 @@ public class IndexRoutingTable implements Iterable { return new GroupShardsIterator(set); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexRoutingTable that = (IndexRoutingTable) o; + + if (!index.equals(that.index)) return false; + if (!shards.equals(that.shards)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = index.hashCode(); + result = 31 * result + shards.hashCode(); + return result; + } + public void validate() throws RoutingValidationException { } + @Override + public IndexRoutingTable readFrom(StreamInput in) throws IOException { + String index = in.readString(); + Builder builder = new Builder(index); + + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); + } + + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeVInt(shards.size()); + for (IndexShardRoutingTable indexShard : this) { + IndexShardRoutingTable.Builder.writeToThin(indexShard, out); + } + } + public static Builder builder(String index) { return new Builder(index); } @@ -339,30 +384,7 @@ public class IndexRoutingTable implements Iterable { * @throws IOException if something happens during read */ public static IndexRoutingTable readFrom(StreamInput in) throws IOException { - String index = in.readString(); - Builder builder = new Builder(index); - - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); - } - - return builder.build(); - } - - /** - * Writes an {@link IndexRoutingTable} to a {@link StreamOutput}. - * - * @param index {@link IndexRoutingTable} to write - * @param out {@link StreamOutput} to write to - * @throws IOException if something happens during write - */ - public static void writeTo(IndexRoutingTable index, StreamOutput out) throws IOException { - out.writeString(index.index()); - out.writeVInt(index.shards.size()); - for (IndexShardRoutingTable indexShard : index) { - IndexShardRoutingTable.Builder.writeToThin(indexShard, out); - } + return PROTO.readFrom(in); } /** diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 00e50b76129..2371b96f5b0 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -347,6 +347,28 @@ public class IndexShardRoutingTable implements Iterable { return new PlainShardIterator(shardId, ordered); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexShardRoutingTable that = (IndexShardRoutingTable) o; + + if (primaryAllocatedPostApi != that.primaryAllocatedPostApi) return false; + if (!shardId.equals(that.shardId)) return false; + if (!shards.equals(that.shards)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = shardId.hashCode(); + result = 31 * result + shards.hashCode(); + result = 31 * result + (primaryAllocatedPostApi ? 1 : 0); + return result; + } + /** * Returns true iff all shards in the routing table are started otherwise false */ diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 9f1b5db6c6b..25a8bac2f88 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntSet; import com.google.common.collect.*; -import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.StreamInput; @@ -44,7 +44,9 @@ import static com.google.common.collect.Maps.newHashMap; * * @see IndexRoutingTable */ -public class RoutingTable implements Iterable { +public class RoutingTable implements Iterable, Diffable { + + public static RoutingTable PROTO = builder().build(); public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build(); @@ -254,6 +256,66 @@ public class RoutingTable implements Iterable { return new GroupShardsIterator(set); } + @Override + public Diff diff(RoutingTable previousState) { + return new RoutingTableDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new RoutingTableDiff(in); + } + + @Override + public RoutingTable readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(); + builder.version = in.readLong(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in); + builder.add(index); + } + + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + out.writeVInt(indicesRouting.size()); + for (IndexRoutingTable index : indicesRouting.values()) { + index.writeTo(out); + } + } + + private static class RoutingTableDiff implements Diff { + + private final long version; + + private final Diff> indicesRouting; + + public RoutingTableDiff(RoutingTable before, RoutingTable after) { + version = after.version; + indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting); + } + + public RoutingTableDiff(StreamInput in) throws IOException { + version = in.readLong(); + indicesRouting = DiffableUtils.readImmutableMapDiff(in, IndexRoutingTable.PROTO); + } + + @Override + public RoutingTable apply(RoutingTable part) { + return new RoutingTable(version, indicesRouting.apply(part.indicesRouting)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + indicesRouting.writeTo(out); + } + } + public static Builder builder() { return new Builder(); } @@ -403,6 +465,11 @@ public class RoutingTable implements Iterable { return this; } + public Builder indicesRouting(ImmutableMap indicesRouting) { + this.indicesRouting.putAll(indicesRouting); + return this; + } + public Builder remove(String index) { indicesRouting.remove(index); return this; @@ -422,23 +489,7 @@ public class RoutingTable implements Iterable { } public static RoutingTable readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(); - builder.version = in.readLong(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in); - builder.add(index); - } - - return builder.build(); - } - - public static void writeTo(RoutingTable table, StreamOutput out) throws IOException { - out.writeLong(table.version); - out.writeVInt(table.indicesRouting.size()); - for (IndexRoutingTable index : table.indicesRouting.values()) { - IndexRoutingTable.Builder.writeTo(index, out); - } + return PROTO.readFrom(in); } } @@ -450,5 +501,4 @@ public class RoutingTable implements Iterable { return sb.toString(); } - } diff --git a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index eb527e07fe4..615c03003d4 100644 --- a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -402,7 +402,7 @@ public class InternalClusterService extends AbstractLifecycleComponent { + /** + * Reads a copy of an object with the same type form the stream input + * + * The caller object remains unchanged. + */ + T readFrom(StreamInput in) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/src/main/java/org/elasticsearch/common/io/stream/Writeable.java new file mode 100644 index 00000000000..9025315dc43 --- /dev/null +++ b/src/main/java/org/elasticsearch/common/io/stream/Writeable.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.io.stream; + +import java.io.IOException; + +public interface Writeable extends StreamableReader { + + /** + * Writes the current object into the output stream out + */ + void writeTo(StreamOutput out) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/discovery/Discovery.java b/src/main/java/org/elasticsearch/discovery/Discovery.java index dfd51e6348f..36b8e5da6f5 100644 --- a/src/main/java/org/elasticsearch/discovery/Discovery.java +++ b/src/main/java/org/elasticsearch/discovery/Discovery.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -59,7 +60,7 @@ public interface Discovery extends LifecycleComponent { * The {@link AckListener} allows to keep track of the ack received from nodes, and verify whether * they updated their own cluster state or not. */ - void publish(ClusterState clusterState, AckListener ackListener); + void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener); public static interface AckListener { void onNodeAck(DiscoveryNode node, @Nullable Throwable t); diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index f73f2bbb593..e6a3668921b 100644 --- a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -21,6 +21,7 @@ package org.elasticsearch.discovery; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -132,9 +133,9 @@ public class DiscoveryService extends AbstractLifecycleComponent implem private static final ConcurrentMap clusterGroups = ConcurrentCollections.newConcurrentMap(); + private volatile ClusterState lastProcessedClusterState; + @Inject public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService, DiscoveryNodeService discoveryNodeService, Version version, DiscoverySettings discoverySettings) { @@ -274,7 +280,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { + public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { if (!master) { throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); } @@ -287,7 +293,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } nodesToPublishTo.add(localDiscovery.localNode); } - publish(members, clusterState, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); + publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); } } @@ -300,17 +306,47 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem return members.toArray(new LocalDiscovery[members.size()]); } - private void publish(LocalDiscovery[] members, ClusterState clusterState, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { + private void publish(LocalDiscovery[] members, ClusterChangedEvent clusterChangedEvent, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { try { // we do the marshaling intentionally, to check it works well... - final byte[] clusterStateBytes = Builder.toBytes(clusterState); + byte[] clusterStateBytes = null; + byte[] clusterStateDiffBytes = null; + ClusterState clusterState = clusterChangedEvent.state(); for (final LocalDiscovery discovery : members) { if (discovery.master) { continue; } - final ClusterState nodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); + ClusterState newNodeSpecificClusterState = null; + synchronized (this) { + // we do the marshaling intentionally, to check it works well... + // check if we publsihed cluster state at least once and node was in the cluster when we published cluster state the last time + if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode.id())) { + // both conditions are true - which means we can try sending cluster state as diffs + if (clusterStateDiffBytes == null) { + Diff diff = clusterState.diff(clusterChangedEvent.previousState()); + BytesStreamOutput os = new BytesStreamOutput(); + diff.writeTo(os); + clusterStateDiffBytes = os.bytes().toBytes(); + } + try { + newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(new BytesStreamInput(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState); + logger.debug("sending diff cluster state version with size {} to [{}]", clusterStateDiffBytes.length, discovery.localNode.getName()); + } catch (IncompatibleClusterStateVersionException ex) { + logger.warn("incompatible cluster state version - resending complete cluster state", ex); + } + } + if (newNodeSpecificClusterState == null) { + if (clusterStateBytes == null) { + clusterStateBytes = Builder.toBytes(clusterState); + } + newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); + } + discovery.lastProcessedClusterState = newNodeSpecificClusterState; + } + final ClusterState nodeSpecificClusterState = newNodeSpecificClusterState; + nodeSpecificClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); // ignore cluster state messages that do not include "me", not in the game yet... if (nodeSpecificClusterState.nodes().localNode() != null) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 9ad4945844a..8a6382dcf94 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -331,12 +331,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Override - public void publish(ClusterState clusterState, AckListener ackListener) { - if (!clusterState.getNodes().localNodeMaster()) { + public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) { + if (!clusterChangedEvent.state().getNodes().localNodeMaster()) { throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); } - nodesFD.updateNodesAndPing(clusterState); - publishClusterState.publish(clusterState, ackListener); + nodesFD.updateNodesAndPing(clusterChangedEvent.state()); + publishClusterState.publish(clusterChangedEvent, ackListener); } /** diff --git a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index fd1ba85c25c..c4ad8895e79 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -21,8 +21,12 @@ package org.elasticsearch.discovery.zen.publish; import com.google.common.collect.Maps; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.IncompatibleClusterStateVersionException; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; @@ -40,10 +44,13 @@ import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; +import java.io.IOException; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; /** * @@ -83,73 +90,43 @@ public class PublishClusterStateAction extends AbstractComponent { transportService.removeHandler(ACTION_NAME); } - public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { - Set nodesToPublishTo = new HashSet<>(clusterState.nodes().size()); + public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { + Set nodesToPublishTo = new HashSet<>(clusterChangedEvent.state().nodes().size()); DiscoveryNode localNode = nodesProvider.nodes().localNode(); - for (final DiscoveryNode node : clusterState.nodes()) { + for (final DiscoveryNode node : clusterChangedEvent.state().nodes()) { if (node.equals(localNode)) { continue; } nodesToPublishTo.add(node); } - publish(clusterState, nodesToPublishTo, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); + publish(clusterChangedEvent, nodesToPublishTo, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); } - private void publish(final ClusterState clusterState, final Set nodesToPublishTo, + private void publish(final ClusterChangedEvent clusterChangedEvent, final Set nodesToPublishTo, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { Map serializedStates = Maps.newHashMap(); + Map serializedDiffs = Maps.newHashMap(); + final ClusterState clusterState = clusterChangedEvent.state(); + final ClusterState previousState = clusterChangedEvent.previousState(); final AtomicBoolean timedOutWaitingForNodes = new AtomicBoolean(false); final TimeValue publishTimeout = discoverySettings.getPublishTimeout(); + final boolean sendFullVersion = !discoverySettings.getPublishDiff() || previousState == null; + Diff diff = null; for (final DiscoveryNode node : nodesToPublishTo) { // try and serialize the cluster state once (or per version), so we don't serialize it // per node when we send it over the wire, compress it while we are at it... - BytesReference bytes = serializedStates.get(node.version()); - if (bytes == null) { - try { - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); - stream.setVersion(node.version()); - ClusterState.Builder.writeTo(clusterState, stream); - stream.close(); - bytes = bStream.bytes(); - serializedStates.put(node.version(), bytes); - } catch (Throwable e) { - logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); - publishResponseHandler.onFailure(node, e); - continue; + // we don't send full version if node didn't exist in the previous version of cluster state + if (sendFullVersion || !previousState.nodes().nodeExists(node.id())) { + sendFullClusterState(clusterState, serializedStates, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); + } else { + if (diff == null) { + diff = clusterState.diff(previousState); } - } - try { - TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false); - // no need to put a timeout on the options here, because we want the response to eventually be received - // and not log an error if it arrives after the timeout - transportService.sendRequest(node, ACTION_NAME, - new BytesTransportRequest(bytes, node.version()), - options, // no need to compress, we already compressed the bytes - - new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - - @Override - public void handleResponse(TransportResponse.Empty response) { - if (timedOutWaitingForNodes.get()) { - logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); - } - publishResponseHandler.onResponse(node); - } - - @Override - public void handleException(TransportException exp) { - logger.debug("failed to send cluster state to {}", exp, node); - publishResponseHandler.onFailure(node, exp); - } - }); - } catch (Throwable t) { - logger.debug("error sending cluster state to {}", t, node); - publishResponseHandler.onFailure(node, t); + sendClusterStateDiff(clusterState, diff, serializedDiffs, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); } } @@ -171,7 +148,107 @@ public class PublishClusterStateAction extends AbstractComponent { } } + private void sendFullClusterState(ClusterState clusterState, @Nullable Map serializedStates, + DiscoveryNode node, AtomicBoolean timedOutWaitingForNodes, TimeValue publishTimeout, + BlockingClusterStatePublishResponseHandler publishResponseHandler) { + BytesReference bytes = null; + if (serializedStates != null) { + bytes = serializedStates.get(node.version()); + } + if (bytes == null) { + try { + bytes = serializeFullClusterState(clusterState, node.version()); + if (serializedStates != null) { + serializedStates.put(node.version(), bytes); + } + } catch (Throwable e) { + logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); + publishResponseHandler.onFailure(node, e); + return; + } + } + publishClusterStateToNode(clusterState, bytes, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler, false); + } + + private void sendClusterStateDiff(ClusterState clusterState, Diff diff, Map serializedDiffs, DiscoveryNode node, + AtomicBoolean timedOutWaitingForNodes, TimeValue publishTimeout, + BlockingClusterStatePublishResponseHandler publishResponseHandler) { + BytesReference bytes = serializedDiffs.get(node.version()); + if (bytes == null) { + try { + bytes = serializeDiffClusterState(diff, node.version()); + serializedDiffs.put(node.version(), bytes); + } catch (Throwable e) { + logger.warn("failed to serialize diff of cluster_state before publishing it to node {}", e, node); + publishResponseHandler.onFailure(node, e); + return; + } + } + publishClusterStateToNode(clusterState, bytes, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler, true); + } + + private void publishClusterStateToNode(final ClusterState clusterState, BytesReference bytes, + final DiscoveryNode node, final AtomicBoolean timedOutWaitingForNodes, + final TimeValue publishTimeout, + final BlockingClusterStatePublishResponseHandler publishResponseHandler, + final boolean sendDiffs) { + try { + TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false); + // no need to put a timeout on the options here, because we want the response to eventually be received + // and not log an error if it arrives after the timeout + transportService.sendRequest(node, ACTION_NAME, + new BytesTransportRequest(bytes, node.version()), + options, // no need to compress, we already compressed the bytes + + new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + + @Override + public void handleResponse(TransportResponse.Empty response) { + if (timedOutWaitingForNodes.get()) { + logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); + } + publishResponseHandler.onResponse(node); + } + + @Override + public void handleException(TransportException exp) { + if (sendDiffs && exp.unwrapCause() instanceof IncompatibleClusterStateVersionException) { + logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); + sendFullClusterState(clusterState, null, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); + } else { + logger.debug("failed to send cluster state to {}", exp, node); + publishResponseHandler.onFailure(node, exp); + } + } + }); + } catch (Throwable t) { + logger.warn("error sending cluster state to {}", t, node); + publishResponseHandler.onFailure(node, t); + } + } + + public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException { + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(nodeVersion); + stream.writeBoolean(true); + clusterState.writeTo(stream); + stream.close(); + return bStream.bytes(); + } + + public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException { + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(nodeVersion); + stream.writeBoolean(false); + diff.writeTo(stream); + stream.close(); + return bStream.bytes(); + } + private class PublishClusterStateRequestHandler implements TransportRequestHandler { + private ClusterState lastSeenClusterState; @Override public void messageReceived(BytesTransportRequest request, final TransportChannel channel) throws Exception { @@ -183,11 +260,24 @@ public class PublishClusterStateAction extends AbstractComponent { in = request.bytes().streamInput(); } in.setVersion(request.version()); - ClusterState clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); - clusterState.status(ClusterState.ClusterStateStatus.RECEIVED); - logger.debug("received cluster state version {}", clusterState.version()); + synchronized (this) { + // If true we received full cluster state - otherwise diffs + if (in.readBoolean()) { + lastSeenClusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); + logger.debug("received full cluster state version {} with size {}", lastSeenClusterState.version(), request.bytes().length()); + } else if (lastSeenClusterState != null) { + Diff diff = lastSeenClusterState.readDiffFrom(in); + lastSeenClusterState = diff.apply(lastSeenClusterState); + logger.debug("received diff cluster state version {} with uuid {}, diff size {}", lastSeenClusterState.version(), lastSeenClusterState.uuid(), request.bytes().length()); + } else { + logger.debug("received diff for but don't have any local cluster state - requesting full state"); + throw new IncompatibleClusterStateVersionException("have no local cluster state"); + } + lastSeenClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); + } + try { - listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() { + listener.onNewClusterState(lastSeenClusterState, new NewClusterStateListener.NewStateProcessed() { @Override public void onNewClusterStateProcessed() { try { @@ -207,7 +297,7 @@ public class PublishClusterStateAction extends AbstractComponent { } }); } catch (Exception e) { - logger.warn("unexpected error while processing cluster state version [{}]", e, clusterState.version()); + logger.warn("unexpected error while processing cluster state version [{}]", e, lastSeenClusterState.version()); try { channel.sendResponse(e); } catch (Throwable e1) { diff --git a/src/main/java/org/elasticsearch/gateway/Gateway.java b/src/main/java/org/elasticsearch/gateway/Gateway.java index cd15bccdc4a..139b5763489 100644 --- a/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.indices.IndicesService; + import java.nio.file.Path; diff --git a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 43dec7edb51..5538ef6d043 100644 --- a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -198,7 +198,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { fromNode.writeTo(out); out.writeVInt(indices.length); for (IndexMetaData indexMetaData : indices) { - IndexMetaData.Builder.writeTo(indexMetaData, out); + indexMetaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 77ab900ce90..d7334046de0 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -221,7 +221,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA out.writeBoolean(false); } else { out.writeBoolean(true); - MetaData.Builder.writeTo(metaData, out); + metaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java index be4e1b4e3f3..85b46925b5f 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java @@ -58,7 +58,7 @@ public class RestGetRepositoriesAction extends BaseRestHandler { public RestResponse buildResponse(GetRepositoriesResponse response, XContentBuilder builder) throws Exception { builder.startObject(); for (RepositoryMetaData repositoryMetaData : response.repositories()) { - RepositoriesMetaData.FACTORY.toXContent(repositoryMetaData, builder, request); + RepositoriesMetaData.toXContent(repositoryMetaData, builder, request); } builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java index dc800f37062..d2653bc1745 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java @@ -147,7 +147,7 @@ public class RestGetIndicesAction extends BaseRestHandler { builder.startObject(Fields.WARMERS); if (warmers != null) { for (IndexWarmersMetaData.Entry warmer : warmers) { - IndexWarmersMetaData.FACTORY.toXContent(warmer, builder, params); + IndexWarmersMetaData.toXContent(warmer, builder, params); } } builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java index 7023eecedd4..be83ccbe4b5 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java @@ -72,7 +72,7 @@ public class RestGetWarmerAction extends BaseRestHandler { builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE); builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE); for (IndexWarmersMetaData.Entry warmerEntry : entry.value) { - IndexWarmersMetaData.FACTORY.toXContent(warmerEntry, builder, request); + IndexWarmersMetaData.toXContent(warmerEntry, builder, request); } builder.endObject(); builder.endObject(); diff --git a/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java b/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java index de56f823eac..ef1ef44ffb9 100644 --- a/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java +++ b/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java @@ -22,7 +22,9 @@ package org.elasticsearch.search.warmer; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -33,16 +35,33 @@ import org.elasticsearch.common.xcontent.*; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; /** */ -public class IndexWarmersMetaData implements IndexMetaData.Custom { +public class IndexWarmersMetaData extends AbstractDiffable implements IndexMetaData.Custom { public static final String TYPE = "warmers"; - public static final Factory FACTORY = new Factory(); + public static final IndexWarmersMetaData PROTO = new IndexWarmersMetaData(); + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexWarmersMetaData that = (IndexWarmersMetaData) o; + + return entries.equals(that.entries); + + } + + @Override + public int hashCode() { + return entries.hashCode(); + } public static class Entry { private final String name; @@ -74,6 +93,29 @@ public class IndexWarmersMetaData implements IndexMetaData.Custom { public Boolean queryCache() { return this.queryCache; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Entry entry = (Entry) o; + + if (!name.equals(entry.name)) return false; + if (!Arrays.equals(types, entry.types)) return false; + if (!source.equals(entry.source)) return false; + return !(queryCache != null ? !queryCache.equals(entry.queryCache) : entry.queryCache != null); + + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + Arrays.hashCode(types); + result = 31 * result + source.hashCode(); + result = 31 * result + (queryCache != null ? queryCache.hashCode() : 0); + return result; + } } private final ImmutableList entries; @@ -92,149 +134,143 @@ public class IndexWarmersMetaData implements IndexMetaData.Custom { return TYPE; } - public static class Factory implements IndexMetaData.Custom.Factory { - - @Override - public String type() { - return TYPE; + @Override + public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + String name = in.readString(); + String[] types = in.readStringArray(); + BytesReference source = null; + if (in.readBoolean()) { + source = in.readBytesReference(); + } + Boolean queryCache; + queryCache = in.readOptionalBoolean(); + entries[i] = new Entry(name, types, queryCache, source); } + return new IndexWarmersMetaData(entries); + } - @Override - public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - String name = in.readString(); - String[] types = in.readStringArray(); - BytesReference source = null; - if (in.readBoolean()) { - source = in.readBytesReference(); - } - Boolean queryCache = null; - queryCache = in.readOptionalBoolean(); - entries[i] = new Entry(name, types, queryCache, source); - } - return new IndexWarmersMetaData(entries); - } - - @Override - public void writeTo(IndexWarmersMetaData warmers, StreamOutput out) throws IOException { - out.writeVInt(warmers.entries().size()); - for (Entry entry : warmers.entries()) { - out.writeString(entry.name()); - out.writeStringArray(entry.types()); - if (entry.source() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeBytesReference(entry.source()); - } - out.writeOptionalBoolean(entry.queryCache()); - } - } - - @Override - public IndexWarmersMetaData fromMap(Map map) throws IOException { - // if it starts with the type, remove it - if (map.size() == 1 && map.containsKey(TYPE)) { - map = (Map) map.values().iterator().next(); - } - XContentBuilder builder = XContentFactory.smileBuilder().map(map); - try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { - // move to START_OBJECT - parser.nextToken(); - return fromXContent(parser); - } - } - - @Override - public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { - // we get here after we are at warmers token - String currentFieldName = null; - XContentParser.Token token; - List entries = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - String name = currentFieldName; - List types = new ArrayList<>(2); - BytesReference source = null; - Boolean queryCache = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_ARRAY) { - if ("types".equals(currentFieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - types.add(parser.text()); - } - } - } else if (token == XContentParser.Token.START_OBJECT) { - if ("source".equals(currentFieldName)) { - XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered()); - source = builder.bytes(); - } - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - if ("source".equals(currentFieldName)) { - source = new BytesArray(parser.binaryValue()); - } - } else if (token.isValue()) { - if ("queryCache".equals(currentFieldName) || "query_cache".equals(currentFieldName)) { - queryCache = parser.booleanValue(); - } - } - } - entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); - } - - @Override - public void toXContent(IndexWarmersMetaData warmers, XContentBuilder builder, ToXContent.Params params) throws IOException { - //No need, IndexMetaData already writes it - //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); - for (Entry entry : warmers.entries()) { - toXContent(entry, builder, params); - } - //No need, IndexMetaData already writes it - //builder.endObject(); - } - - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - boolean binary = params.paramAsBoolean("binary", false); - builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("types", entry.types()); - if (entry.queryCache() != null) { - builder.field("queryCache", entry.queryCache()); - } - builder.field("source"); - if (binary) { - builder.value(entry.source()); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries().size()); + for (Entry entry : entries()) { + out.writeString(entry.name()); + out.writeStringArray(entry.types()); + if (entry.source() == null) { + out.writeBoolean(false); } else { - Map mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose(); - builder.map(mapping); + out.writeBoolean(true); + out.writeBytesReference(entry.source()); } - builder.endObject(); - } - - @Override - public IndexWarmersMetaData merge(IndexWarmersMetaData first, IndexWarmersMetaData second) { - List entries = Lists.newArrayList(); - entries.addAll(first.entries()); - for (Entry secondEntry : second.entries()) { - boolean found = false; - for (Entry firstEntry : first.entries()) { - if (firstEntry.name().equals(secondEntry.name())) { - found = true; - break; - } - } - if (!found) { - entries.add(secondEntry); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + out.writeOptionalBoolean(entry.queryCache()); } } + + @Override + public IndexWarmersMetaData fromMap(Map map) throws IOException { + // if it starts with the type, remove it + if (map.size() == 1 && map.containsKey(TYPE)) { + map = (Map) map.values().iterator().next(); + } + XContentBuilder builder = XContentFactory.smileBuilder().map(map); + try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { + // move to START_OBJECT + parser.nextToken(); + return fromXContent(parser); + } + } + + @Override + public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { + // we get here after we are at warmers token + String currentFieldName = null; + XContentParser.Token token; + List entries = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + String name = currentFieldName; + List types = new ArrayList<>(2); + BytesReference source = null; + Boolean queryCache = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_ARRAY) { + if ("types".equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + types.add(parser.text()); + } + } + } else if (token == XContentParser.Token.START_OBJECT) { + if ("source".equals(currentFieldName)) { + XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered()); + source = builder.bytes(); + } + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + if ("source".equals(currentFieldName)) { + source = new BytesArray(parser.binaryValue()); + } + } else if (token.isValue()) { + if ("queryCache".equals(currentFieldName) || "query_cache".equals(currentFieldName)) { + queryCache = parser.booleanValue(); + } + } + } + entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); + } + } + return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + //No need, IndexMetaData already writes it + //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); + for (Entry entry : entries()) { + toXContent(entry, builder, params); + } + //No need, IndexMetaData already writes it + //builder.endObject(); + return builder; + } + + public static void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + boolean binary = params.paramAsBoolean("binary", false); + builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("types", entry.types()); + if (entry.queryCache() != null) { + builder.field("queryCache", entry.queryCache()); + } + builder.field("source"); + if (binary) { + builder.value(entry.source()); + } else { + Map mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose(); + builder.map(mapping); + } + builder.endObject(); + } + + @Override + public IndexMetaData.Custom mergeWith(IndexMetaData.Custom other) { + IndexWarmersMetaData second = (IndexWarmersMetaData) other; + List entries = Lists.newArrayList(); + entries.addAll(entries()); + for (Entry secondEntry : second.entries()) { + boolean found = false; + for (Entry firstEntry : entries()) { + if (firstEntry.name().equals(secondEntry.name())) { + found = true; + break; + } + } + if (!found) { + entries.add(secondEntry); + } + } + return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + } } diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java new file mode 100644 index 00000000000..33008fd63d2 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java @@ -0,0 +1,625 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; +import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; +import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportConnectionListener; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.local.LocalTransport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.google.common.collect.Maps.newHashMap; +import static org.hamcrest.Matchers.*; + +public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase { + + protected ThreadPool threadPool; + protected Map nodes = newHashMap(); + + public static class MockNode { + public final DiscoveryNode discoveryNode; + public final MockTransportService service; + public final PublishClusterStateAction action; + public final MockDiscoveryNodesProvider nodesProvider; + + public MockNode(DiscoveryNode discoveryNode, MockTransportService service, PublishClusterStateAction action, MockDiscoveryNodesProvider nodesProvider) { + this.discoveryNode = discoveryNode; + this.service = service; + this.action = action; + this.nodesProvider = nodesProvider; + } + + public void connectTo(DiscoveryNode node) { + service.connectToNode(node); + nodesProvider.addNode(node); + } + } + + public MockNode createMockNode(final String name, Settings settings, Version version) throws Exception { + return createMockNode(name, settings, version, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.uuid()); + newStateProcessed.onNewClusterStateProcessed(); + } + }); + } + + public MockNode createMockNode(String name, Settings settings, Version version, PublishClusterStateAction.NewClusterStateListener listener) throws Exception { + MockTransportService service = buildTransportService( + ImmutableSettings.builder().put(settings).put("name", name, TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), + version + ); + DiscoveryNode discoveryNode = new DiscoveryNode(name, name, service.boundAddress().publishAddress(), ImmutableMap.of(), version); + MockDiscoveryNodesProvider nodesProvider = new MockDiscoveryNodesProvider(discoveryNode); + PublishClusterStateAction action = buildPublishClusterStateAction(settings, service, nodesProvider, listener); + MockNode node = new MockNode(discoveryNode, service, action, nodesProvider); + nodesProvider.addNode(discoveryNode); + final CountDownLatch latch = new CountDownLatch(nodes.size() * 2 + 1); + TransportConnectionListener waitForConnection = new TransportConnectionListener() { + @Override + public void onNodeConnected(DiscoveryNode node) { + latch.countDown(); + } + + @Override + public void onNodeDisconnected(DiscoveryNode node) { + fail("disconnect should not be called " + node); + } + }; + node.service.addConnectionListener(waitForConnection); + for (MockNode curNode : nodes.values()) { + curNode.service.addConnectionListener(waitForConnection); + curNode.connectTo(node.discoveryNode); + node.connectTo(curNode.discoveryNode); + } + node.connectTo(node.discoveryNode); + assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true)); + for (MockNode curNode : nodes.values()) { + curNode.service.removeConnectionListener(waitForConnection); + } + node.service.removeConnectionListener(waitForConnection); + if (nodes.put(name, node) != null) { + fail("Node with the name " + name + " already exist"); + } + return node; + } + + public MockTransportService service(String name) { + MockNode node = nodes.get(name); + if (node != null) { + return node.service; + } + return null; + } + + public PublishClusterStateAction action(String name) { + MockNode node = nodes.get(name); + if (node != null) { + return node.action; + } + return null; + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = new ThreadPool(getClass().getName()); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + for (MockNode curNode : nodes.values()) { + curNode.action.close(); + curNode.service.close(); + } + terminate(threadPool); + } + + protected MockTransportService buildTransportService(Settings settings, Version version) { + MockTransportService transportService = new MockTransportService(settings, new LocalTransport(settings, threadPool, version), threadPool); + transportService.start(); + return transportService; + } + + protected PublishClusterStateAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, MockDiscoveryNodesProvider nodesProvider, + PublishClusterStateAction.NewClusterStateListener listener) { + DiscoverySettings discoverySettings = new DiscoverySettings(settings, new NodeSettingsService(settings)); + return new PublishClusterStateAction(settings, transportService, nodesProvider, listener, discoverySettings); + } + + + static class MockDiscoveryNodesProvider implements DiscoveryNodesProvider { + + private DiscoveryNodes discoveryNodes = DiscoveryNodes.EMPTY_NODES; + + public MockDiscoveryNodesProvider(DiscoveryNode localNode) { + discoveryNodes = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()).build(); + } + + public void addNode(DiscoveryNode node) { + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(node).build(); + } + + @Override + public DiscoveryNodes nodes() { + return discoveryNodes; + } + + @Override + public NodeService nodeService() { + assert false; + throw new UnsupportedOperationException("Shouldn't be here"); + } + } + + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testSimpleClusterStatePublishing() throws Exception { + MockNewClusterStateListener mockListenerA = new MockNewClusterStateListener(); + MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerA); + + MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); + MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); + + // Initial cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + + // cluster state update - add nodeB + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(1)); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - remove block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // Adding new node - this node should get full cluster state while nodeB should still be getting diffs + + MockNewClusterStateListener mockListenerC = new MockNewClusterStateListener(); + MockNode nodeC = createMockNode("nodeC", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerC); + + // cluster state update 3 - register node C + previousClusterState = clusterState; + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build(); + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }); + mockListenerC.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + // First state + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update 4 - update settings + previousClusterState = clusterState; + MetaData metaData = MetaData.builder(clusterState.metaData()).transientSettings(ImmutableSettings.settingsBuilder().put("foo", "bar").build()).build(); + clusterState = ClusterState.builder(clusterState).metaData(metaData).incrementVersion().build(); + NewClusterStateExpectation expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - skipping one version change - should request full cluster state + previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - skipping one version change - should request full cluster state + previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // node B becomes the master and sends a version of the cluster state that goes back + discoveryNodes = DiscoveryNodes.builder(discoveryNodes) + .put(nodeA.discoveryNode) + .put(nodeB.discoveryNode) + .put(nodeC.discoveryNode) + .build(); + previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerA.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeB.action, clusterState, previousClusterState); + } + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testUnexpectedDiffPublishing() throws Exception { + + MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + fail("Shouldn't send cluster state to myself"); + } + }); + + MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); + MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); + + // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + } + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testDisablingDiffPublishing() throws Exception { + Settings noDiffPublishingSettings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, false).build(); + + MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + fail("Shouldn't send cluster state to myself"); + } + }); + + MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.uuid(), clusterState.wasReadFromDiff()); + assertFalse(clusterState.wasReadFromDiff()); + newStateProcessed.onNewClusterStateProcessed(); + } + }); + + // Initial cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + + // cluster state update - add nodeB + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + } + + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testSimultaneousClusterStatePublishing() throws Exception { + int numberOfNodes = randomIntBetween(2, 10); + int numberOfIterations = randomIntBetween(50, 200); + Settings settings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "100ms").put(DiscoverySettings.PUBLISH_DIFF_ENABLE, true).build(); + MockNode[] nodes = new MockNode[numberOfNodes]; + DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < nodes.length; i++) { + final String name = "node" + i; + nodes[i] = createMockNode(name, settings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public synchronized void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + assertProperMetaDataForVersion(clusterState.metaData(), clusterState.version()); + if (randomInt(10) < 2) { + // Cause timeouts from time to time + try { + Thread.sleep(randomInt(110)); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + newStateProcessed.onNewClusterStateProcessed(); + } + }); + discoveryNodesBuilder.put(nodes[i].discoveryNode); + } + + AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations]; + DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); + MetaData metaData = MetaData.EMPTY_META_DATA; + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(metaData).build(); + ClusterState previousState; + for (int i = 0; i < numberOfIterations; i++) { + previousState = clusterState; + metaData = buildMetaDataForVersion(metaData, i + 1); + clusterState = ClusterState.builder(clusterState).incrementVersion().metaData(metaData).nodes(discoveryNodes).build(); + listeners[i] = publishStateDiff(nodes[0].action, clusterState, previousState); + } + + for (int i = 0; i < numberOfIterations; i++) { + listeners[i].await(1, TimeUnit.SECONDS); + } + } + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testSerializationFailureDuringDiffPublishing() throws Exception { + + MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + fail("Shouldn't send cluster state to myself"); + } + }); + + MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); + MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); + + // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + } + }); + + ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.uuid(), clusterState) { + @Override + public Diff diff(ClusterState previousState) { + return new Diff() { + @Override + public ClusterState apply(ClusterState part) { + fail("this diff shouldn't be applied"); + return part; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IOException("Simulated failure of diff serialization"); + } + }; + } + }; + List> errors = publishStateDiff(nodeA.action, unserializableClusterState, previousClusterState).awaitErrors(1, TimeUnit.SECONDS); + assertThat(errors.size(), equalTo(1)); + assertThat(errors.get(0).v2().getMessage(), containsString("Simulated failure of diff serialization")); + } + + private MetaData buildMetaDataForVersion(MetaData metaData, long version) { + ImmutableOpenMap.Builder indices = ImmutableOpenMap.builder(metaData.indices()); + indices.put("test" + version, IndexMetaData.builder("test" + version).settings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards((int) version).numberOfReplicas(0).build()); + return MetaData.builder(metaData) + .transientSettings(ImmutableSettings.builder().put("test", version).build()) + .indices(indices.build()) + .build(); + } + + private void assertProperMetaDataForVersion(MetaData metaData, long version) { + for (long i = 1; i <= version; i++) { + assertThat(metaData.index("test" + i), notNullValue()); + assertThat(metaData.index("test" + i).numberOfShards(), equalTo((int) i)); + } + assertThat(metaData.index("test" + (version + 1)), nullValue()); + assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version))); + } + + public void publishStateDiffAndWait(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { + publishStateDiff(action, state, previousState).await(1, TimeUnit.SECONDS); + } + + public AssertingAckListener publishStateDiff(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { + AssertingAckListener assertingAckListener = new AssertingAckListener(state.nodes().getSize() - 1); + ClusterChangedEvent changedEvent = new ClusterChangedEvent("test update", state, previousState); + action.publish(changedEvent, assertingAckListener); + return assertingAckListener; + } + + public static class AssertingAckListener implements Discovery.AckListener { + private final List> errors = new CopyOnWriteArrayList<>(); + private final AtomicBoolean timeoutOccured = new AtomicBoolean(); + private final CountDownLatch countDown; + + public AssertingAckListener(int nodeCount) { + countDown = new CountDownLatch(nodeCount); + } + + @Override + public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { + if (t != null) { + errors.add(new Tuple<>(node, t)); + } + countDown.countDown(); + } + + @Override + public void onTimeout() { + timeoutOccured.set(true); + // Fast forward the counter - no reason to wait here + long currentCount = countDown.getCount(); + for (long i = 0; i < currentCount; i++) { + countDown.countDown(); + } + } + + public void await(long timeout, TimeUnit unit) throws InterruptedException { + assertThat(awaitErrors(timeout, unit), emptyIterable()); + } + + public List> awaitErrors(long timeout, TimeUnit unit) throws InterruptedException { + countDown.await(timeout, unit); + assertFalse(timeoutOccured.get()); + return errors; + } + + } + + public interface NewClusterStateExpectation { + void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed); + } + + public static class MockNewClusterStateListener implements PublishClusterStateAction.NewClusterStateListener { + CopyOnWriteArrayList expectations = new CopyOnWriteArrayList(); + + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + final NewClusterStateExpectation expectation; + try { + expectation = expectations.remove(0); + } catch (ArrayIndexOutOfBoundsException ex) { + fail("Unexpected cluster state update " + clusterState.prettyPrint()); + return; + } + expectation.check(clusterState, newStateProcessed); + newStateProcessed.onNewClusterStateProcessed(); + } + + public void add(NewClusterStateExpectation expectation) { + expectations.add(expectation); + } + } + + public static class DelegatingClusterState extends ClusterState { + + public DelegatingClusterState(ClusterState clusterState) { + super(clusterState.version(), clusterState.uuid(), clusterState); + } + + + } + +} diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java new file mode 100644 index 00000000000..84df1eaf209 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java @@ -0,0 +1,534 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.*; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.query.FilterBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.warmer.IndexWarmersMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.List; + +import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; +import static org.elasticsearch.common.xcontent.XContentTestUtils.convertToMap; +import static org.elasticsearch.common.xcontent.XContentTestUtils.mapsEqualIgnoringArrayOrder; +import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.hamcrest.Matchers.equalTo; + + +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 0, numClientNodes = 0) +public class ClusterStateDiffTests extends ElasticsearchIntegrationTest { + + @Test + public void testClusterStateDiffSerialization() throws Exception { + DiscoveryNode masterNode = new DiscoveryNode("master", new LocalTransportAddress("master"), Version.CURRENT); + DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"), Version.CURRENT); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.id()).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode); + + int iterationCount = randomIntBetween(10, 300); + for (int iteration = 0; iteration < iterationCount; iteration++) { + ClusterState previousClusterState = clusterState; + ClusterState previousClusterStateFromDiffs = clusterStateFromDiffs; + int changesCount = randomIntBetween(1, 4); + ClusterState.Builder builder = null; + for (int i = 0; i < changesCount; i++) { + if (i > 0) { + clusterState = builder.build(); + } + switch (randomInt(4)) { + case 0: + builder = randomNodes(clusterState); + break; + case 1: + builder = randomRoutingTable(clusterState); + break; + case 2: + builder = randomBlocks(clusterState); + break; + case 3: + case 4: + builder = randomMetaDataChanges(clusterState); + break; + default: + throw new IllegalArgumentException("Shouldn't be here"); + } + } + clusterState = builder.incrementVersion().build(); + + if (randomIntBetween(0, 10) < 1) { + // Update cluster state via full serialization from time to time + clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), previousClusterStateFromDiffs.nodes().localNode()); + } else { + // Update cluster states using diffs + Diff diffBeforeSerialization = clusterState.diff(previousClusterState); + BytesStreamOutput os = new BytesStreamOutput(); + diffBeforeSerialization.writeTo(os); + byte[] diffBytes = os.bytes().toBytes(); + Diff diff; + try (BytesStreamInput input = new BytesStreamInput(diffBytes)) { + diff = previousClusterStateFromDiffs.readDiffFrom(input); + clusterStateFromDiffs = diff.apply(previousClusterStateFromDiffs); + } + } + + + try { + // Check non-diffable elements + assertThat(clusterStateFromDiffs.version(), equalTo(clusterState.version())); + assertThat(clusterStateFromDiffs.uuid(), equalTo(clusterState.uuid())); + + // Check nodes + assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes())); + assertThat(clusterStateFromDiffs.nodes().localNodeId(), equalTo(previousClusterStateFromDiffs.nodes().localNodeId())); + assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes())); + for (ObjectCursor node : clusterStateFromDiffs.nodes().nodes().keys()) { + DiscoveryNode node1 = clusterState.nodes().get(node.value); + DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.value); + assertThat(node1.version(), equalTo(node2.version())); + assertThat(node1.address(), equalTo(node2.address())); + assertThat(node1.attributes(), equalTo(node2.attributes())); + } + + // Check routing table + assertThat(clusterStateFromDiffs.routingTable().version(), equalTo(clusterState.routingTable().version())); + assertThat(clusterStateFromDiffs.routingTable().indicesRouting(), equalTo(clusterState.routingTable().indicesRouting())); + + // Check cluster blocks + assertThat(clusterStateFromDiffs.blocks().global(), equalTo(clusterStateFromDiffs.blocks().global())); + assertThat(clusterStateFromDiffs.blocks().indices(), equalTo(clusterStateFromDiffs.blocks().indices())); + assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), equalTo(clusterStateFromDiffs.blocks().disableStatePersistence())); + + // Check metadata + assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version())); + assertThat(clusterStateFromDiffs.metaData().uuid(), equalTo(clusterState.metaData().uuid())); + assertThat(clusterStateFromDiffs.metaData().transientSettings(), equalTo(clusterState.metaData().transientSettings())); + assertThat(clusterStateFromDiffs.metaData().persistentSettings(), equalTo(clusterState.metaData().persistentSettings())); + assertThat(clusterStateFromDiffs.metaData().indices(), equalTo(clusterState.metaData().indices())); + assertThat(clusterStateFromDiffs.metaData().templates(), equalTo(clusterState.metaData().templates())); + assertThat(clusterStateFromDiffs.metaData().customs(), equalTo(clusterState.metaData().customs())); + assertThat(clusterStateFromDiffs.metaData().aliases(), equalTo(clusterState.metaData().aliases())); + + // JSON Serialization test - make sure that both states produce similar JSON + assertThat(mapsEqualIgnoringArrayOrder(convertToMap(clusterStateFromDiffs), convertToMap(clusterState)), equalTo(true)); + + // Smoke test - we cannot compare bytes to bytes because some elements might get serialized in different order + // however, serialized size should remain the same + assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, equalTo(ClusterState.Builder.toBytes(clusterState).length)); + } catch (AssertionError error) { + logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", clusterState.toString(), clusterStateFromDiffs.toString()); + throw error; + } + } + + logger.info("Final cluster state:[{}]", clusterState.toString()); + + } + + private ClusterState.Builder randomNodes(ClusterState clusterState) { + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); + List nodeIds = randomSubsetOf(randomInt(clusterState.nodes().nodes().size() - 1), clusterState.nodes().nodes().keys().toArray(String.class)); + for (String nodeId : nodeIds) { + if (nodeId.startsWith("node-")) { + if (randomBoolean()) { + nodes.remove(nodeId); + } else { + nodes.put(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random()))); + } + } + } + int additionalNodeCount = randomIntBetween(1, 20); + for (int i = 0; i < additionalNodeCount; i++) { + nodes.put(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random()))); + } + return ClusterState.builder(clusterState).nodes(nodes); + } + + private ClusterState.Builder randomRoutingTable(ClusterState clusterState) { + RoutingTable.Builder builder = RoutingTable.builder(clusterState.routingTable()); + int numberOfIndices = clusterState.routingTable().indicesRouting().size(); + if (numberOfIndices > 0) { + List randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), clusterState.routingTable().indicesRouting().keySet().toArray(new String[numberOfIndices])); + for (String index : randomIndices) { + if (randomBoolean()) { + builder.remove(index); + } else { + builder.add(randomIndexRoutingTable(index, clusterState.nodes().nodes().keys().toArray(String.class))); + } + } + } + int additionalIndexCount = randomIntBetween(1, 20); + for (int i = 0; i < additionalIndexCount; i++) { + builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().nodes().keys().toArray(String.class))); + } + return ClusterState.builder(clusterState).routingTable(builder.build()); + } + + private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds) { + IndexRoutingTable.Builder builder = IndexRoutingTable.builder(index); + int shardCount = randomInt(10); + + for (int i = 0; i < shardCount; i++) { + IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(new ShardId(index, i), randomBoolean()); + int replicaCount = randomIntBetween(1, 10); + for (int j = 0; j < replicaCount; j++) { + indexShard.addShard( + new MutableShardRouting(index, i, randomFrom(nodeIds), j == 0, ShardRoutingState.fromValue((byte) randomIntBetween(1, 4)), 1)); + } + builder.addIndexShard(indexShard.build()); + } + return builder.build(); + } + + private ClusterState.Builder randomBlocks(ClusterState clusterState) { + ClusterBlocks.Builder builder = ClusterBlocks.builder().blocks(clusterState.blocks()); + int globalBlocksCount = clusterState.blocks().global().size(); + if (globalBlocksCount > 0) { + List blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount])); + for (ClusterBlock block : blocks) { + builder.removeGlobalBlock(block); + } + } + int additionalGlobalBlocksCount = randomIntBetween(1, 3); + for (int i = 0; i < additionalGlobalBlocksCount; i++) { + builder.addGlobalBlock(randomGlobalBlock()); + } + return ClusterState.builder(clusterState).blocks(builder); + } + + private ClusterBlock randomGlobalBlock() { + switch (randomInt(2)) { + case 0: + return DiscoverySettings.NO_MASTER_BLOCK_ALL; + case 1: + return DiscoverySettings.NO_MASTER_BLOCK_WRITES; + default: + return GatewayService.STATE_NOT_RECOVERED_BLOCK; + } + } + + private ClusterState.Builder randomMetaDataChanges(ClusterState clusterState) { + MetaData metaData = clusterState.metaData(); + int changesCount = randomIntBetween(1, 10); + for (int i = 0; i < changesCount; i++) { + switch (randomInt(3)) { + case 0: + metaData = randomMetaDataSettings(metaData); + break; + case 1: + metaData = randomIndices(metaData); + break; + case 2: + metaData = randomTemplates(metaData); + break; + case 3: + metaData = randomMetaDataCustoms(metaData); + break; + default: + throw new IllegalArgumentException("Shouldn't be here"); + } + } + return ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).version(metaData.version() + 1).build()); + } + + private Settings randomSettings(Settings settings) { + ImmutableSettings.Builder builder = ImmutableSettings.builder(); + if (randomBoolean()) { + builder.put(settings); + } + int settingsCount = randomInt(10); + for (int i = 0; i < settingsCount; i++) { + builder.put(randomAsciiOfLength(10), randomAsciiOfLength(10)); + } + return builder.build(); + + } + + private MetaData randomMetaDataSettings(MetaData metaData) { + if (randomBoolean()) { + return MetaData.builder(metaData).persistentSettings(randomSettings(metaData.persistentSettings())).build(); + } else { + return MetaData.builder(metaData).transientSettings(randomSettings(metaData.transientSettings())).build(); + } + } + + private interface RandomPart { + /** + * Returns list of parts from metadata + */ + ImmutableOpenMap parts(MetaData metaData); + + /** + * Puts the part back into metadata + */ + MetaData.Builder put(MetaData.Builder builder, T part); + + /** + * Remove the part from metadata + */ + MetaData.Builder remove(MetaData.Builder builder, String name); + + /** + * Returns a random part with the specified name + */ + T randomCreate(String name); + + /** + * Makes random modifications to the part + */ + T randomChange(T part); + + } + + private MetaData randomParts(MetaData metaData, String prefix, RandomPart randomPart) { + MetaData.Builder builder = MetaData.builder(metaData); + ImmutableOpenMap parts = randomPart.parts(metaData); + int partCount = parts.size(); + if (partCount > 0) { + List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metaData).keys().toArray(String.class)); + for (String part : randomParts) { + if (randomBoolean()) { + randomPart.remove(builder, part); + } else { + randomPart.put(builder, randomPart.randomChange(parts.get(part))); + } + } + } + int additionalPartCount = randomIntBetween(1, 20); + for (int i = 0; i < additionalPartCount; i++) { + String name = randomName(prefix); + randomPart.put(builder, randomPart.randomCreate(name)); + } + return builder.build(); + } + + private MetaData randomIndices(MetaData metaData) { + return randomParts(metaData, "index", new RandomPart() { + + @Override + public ImmutableOpenMap parts(MetaData metaData) { + return metaData.indices(); + } + + @Override + public MetaData.Builder put(MetaData.Builder builder, IndexMetaData part) { + return builder.put(part, true); + } + + @Override + public MetaData.Builder remove(MetaData.Builder builder, String name) { + return builder.remove(name); + } + + @Override + public IndexMetaData randomCreate(String name) { + IndexMetaData.Builder builder = IndexMetaData.builder(name); + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder(); + setRandomSettings(getRandom(), settingsBuilder); + settingsBuilder.put(randomSettings(ImmutableSettings.EMPTY)).put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion(random())); + builder.settings(settingsBuilder); + builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10)); + int aliasCount = randomInt(10); + if (randomBoolean()) { + builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); + } + for (int i = 0; i < aliasCount; i++) { + builder.putAlias(randomAlias()); + } + return builder.build(); + } + + @Override + public IndexMetaData randomChange(IndexMetaData part) { + IndexMetaData.Builder builder = IndexMetaData.builder(part); + switch (randomIntBetween(0, 3)) { + case 0: + builder.settings(ImmutableSettings.builder().put(part.settings()).put(randomSettings(ImmutableSettings.EMPTY))); + break; + case 1: + if (randomBoolean() && part.aliases().isEmpty() == false) { + builder.removeAlias(randomFrom(part.aliases().keys().toArray(String.class))); + } else { + builder.putAlias(AliasMetaData.builder(randomAsciiOfLength(10))); + } + break; + case 2: + builder.settings(ImmutableSettings.builder().put(part.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID())); + break; + case 3: + builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); + break; + default: + throw new IllegalArgumentException("Shouldn't be here"); + } + return builder.build(); + } + }); + } + + private IndexWarmersMetaData randomWarmers() { + if (randomBoolean()) { + return new IndexWarmersMetaData( + new IndexWarmersMetaData.Entry( + randomName("warm"), + new String[]{randomName("type")}, + randomBoolean(), + new BytesArray(randomAsciiOfLength(1000))) + ); + } else { + return new IndexWarmersMetaData(); + } + } + + private MetaData randomTemplates(MetaData metaData) { + return randomParts(metaData, "template", new RandomPart() { + @Override + public ImmutableOpenMap parts(MetaData metaData) { + return metaData.templates(); + } + + @Override + public MetaData.Builder put(MetaData.Builder builder, IndexTemplateMetaData part) { + return builder.put(part); + } + + @Override + public MetaData.Builder remove(MetaData.Builder builder, String name) { + return builder.removeTemplate(name); + } + + @Override + public IndexTemplateMetaData randomCreate(String name) { + IndexTemplateMetaData.Builder builder = IndexTemplateMetaData.builder(name); + builder.order(randomInt(1000)) + .template(randomName("temp")) + .settings(randomSettings(ImmutableSettings.EMPTY)); + int aliasCount = randomIntBetween(0, 10); + for (int i = 0; i < aliasCount; i++) { + builder.putAlias(randomAlias()); + } + if (randomBoolean()) { + builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); + } + return builder.build(); + } + + @Override + public IndexTemplateMetaData randomChange(IndexTemplateMetaData part) { + IndexTemplateMetaData.Builder builder = new IndexTemplateMetaData.Builder(part); + builder.order(randomInt(1000)); + return builder.build(); + } + }); + } + + private AliasMetaData randomAlias() { + AliasMetaData.Builder builder = newAliasMetaDataBuilder(randomName("alias")); + if (randomBoolean()) { + builder.filter(FilterBuilders.termFilter("test", randomRealisticUnicodeOfCodepointLength(10)).toString()); + } + if (randomBoolean()) { + builder.routing(randomAsciiOfLength(10)); + } + return builder.build(); + } + + private MetaData randomMetaDataCustoms(final MetaData metaData) { + return randomParts(metaData, "custom", new RandomPart() { + + @Override + public ImmutableOpenMap parts(MetaData metaData) { + return metaData.customs(); + } + + @Override + public MetaData.Builder put(MetaData.Builder builder, MetaData.Custom part) { + if (part instanceof SnapshotMetaData) { + return builder.putCustom(SnapshotMetaData.TYPE, part); + } else if (part instanceof RepositoriesMetaData) { + return builder.putCustom(RepositoriesMetaData.TYPE, part); + } else if (part instanceof RestoreMetaData) { + return builder.putCustom(RestoreMetaData.TYPE, part); + } + throw new IllegalArgumentException("Unknown custom part " + part); + } + + @Override + public MetaData.Builder remove(MetaData.Builder builder, String name) { + return builder.removeCustom(name); + } + + @Override + public MetaData.Custom randomCreate(String name) { + switch (randomIntBetween(0, 2)) { + case 0: + return new SnapshotMetaData(new SnapshotMetaData.Entry( + new SnapshotId(randomName("repo"), randomName("snap")), + randomBoolean(), + SnapshotMetaData.State.fromValue((byte) randomIntBetween(0, 6)), + ImmutableList.of(), + Math.abs(randomLong()), + ImmutableMap.of())); + case 1: + return new RepositoriesMetaData(); + case 2: + return new RestoreMetaData(new RestoreMetaData.Entry( + new SnapshotId(randomName("repo"), randomName("snap")), + RestoreMetaData.State.fromValue((byte) randomIntBetween(0, 3)), + ImmutableList.of(), + ImmutableMap.of())); + default: + throw new IllegalArgumentException("Shouldn't be here"); + } + } + + @Override + public MetaData.Custom randomChange(MetaData.Custom part) { + return part; + } + }); + } + + private String randomName(String prefix) { + return prefix + Strings.randomBase64UUID(getRandom()); + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index cbbff463f20..83a27850591 100644 --- a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -81,7 +81,7 @@ public class ClusterSerializationTests extends ElasticsearchAllocationTestCase { RoutingTable source = strategy.reroute(clusterState).routingTable(); BytesStreamOutput outStream = new BytesStreamOutput(); - RoutingTable.Builder.writeTo(source, outStream); + source.writeTo(outStream); BytesStreamInput inStream = new BytesStreamInput(outStream.bytes().toBytes()); RoutingTable target = RoutingTable.Builder.readFrom(inStream); diff --git a/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java b/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java new file mode 100644 index 00000000000..d87d900a0e8 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.serialization; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.*; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.io.IOException; +import java.util.Map; + +import static com.google.common.collect.Maps.newHashMap; +import static org.hamcrest.CoreMatchers.equalTo; + +public class DiffableTests extends ElasticsearchTestCase { + + @Test + public void testImmutableMapDiff() throws IOException { + ImmutableMap.Builder builder = ImmutableMap.builder(); + builder.put("foo", new TestDiffable("1")); + builder.put("bar", new TestDiffable("2")); + builder.put("baz", new TestDiffable("3")); + ImmutableMap before = builder.build(); + Map map = newHashMap(); + map.putAll(before); + map.remove("bar"); + map.put("baz", new TestDiffable("4")); + map.put("new", new TestDiffable("5")); + ImmutableMap after = ImmutableMap.copyOf(map); + Diff diff = DiffableUtils.diff(before, after); + BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + BytesStreamInput in = new BytesStreamInput(out.bytes()); + ImmutableMap serialized = DiffableUtils.readImmutableMapDiff(in, TestDiffable.PROTO).apply(before); + assertThat(serialized.size(), equalTo(3)); + assertThat(serialized.get("foo").value(), equalTo("1")); + assertThat(serialized.get("baz").value(), equalTo("4")); + assertThat(serialized.get("new").value(), equalTo("5")); + } + + @Test + public void testImmutableOpenMapDiff() throws IOException { + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); + builder.put("foo", new TestDiffable("1")); + builder.put("bar", new TestDiffable("2")); + builder.put("baz", new TestDiffable("3")); + ImmutableOpenMap before = builder.build(); + builder = ImmutableOpenMap.builder(before); + builder.remove("bar"); + builder.put("baz", new TestDiffable("4")); + builder.put("new", new TestDiffable("5")); + ImmutableOpenMap after = builder.build(); + Diff diff = DiffableUtils.diff(before, after); + BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + BytesStreamInput in = new BytesStreamInput(out.bytes()); + ImmutableOpenMap serialized = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { + @Override + public TestDiffable readFrom(StreamInput in, String key) throws IOException { + return new TestDiffable(in.readString()); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return AbstractDiffable.readDiffFrom(new StreamableReader() { + @Override + public TestDiffable readFrom(StreamInput in) throws IOException { + return new TestDiffable(in.readString()); + } + }, in); + } + }).apply(before); + assertThat(serialized.size(), equalTo(3)); + assertThat(serialized.get("foo").value(), equalTo("1")); + assertThat(serialized.get("baz").value(), equalTo("4")); + assertThat(serialized.get("new").value(), equalTo("5")); + + } + public static class TestDiffable extends AbstractDiffable { + + public static final TestDiffable PROTO = new TestDiffable(""); + + private final String value; + + public TestDiffable(String value) { + this.value = value; + } + + public String value() { + return value; + } + + @Override + public TestDiffable readFrom(StreamInput in) throws IOException { + return new TestDiffable(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + } + +} diff --git a/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java b/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java new file mode 100644 index 00000000000..9ebffe58783 --- /dev/null +++ b/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; + +public final class XContentTestUtils { + private XContentTestUtils() { + + } + + public static Map convertToMap(ToXContent part) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + part.toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return XContentHelper.convertToMap(builder.bytes(), false).v2(); + } + + + /** + * Compares to maps generated from XContentObjects. The order of elements in arrays is ignored + */ + public static boolean mapsEqualIgnoringArrayOrder(Map first, Map second) { + if (first.size() != second.size()) { + return false; + } + + for (String key : first.keySet()) { + if (objectsEqualIgnoringArrayOrder(first.get(key), second.get(key)) == false) { + return false; + } + } + return true; + } + + @SuppressWarnings("unchecked") + private static boolean objectsEqualIgnoringArrayOrder(Object first, Object second) { + if (first == null ) { + return second == null; + } else if (first instanceof List) { + if (second instanceof List) { + List secondList = Lists.newArrayList((List) second); + List firstList = (List) first; + if (firstList.size() == secondList.size()) { + for (Object firstObj : firstList) { + boolean found = false; + for (Object secondObj : secondList) { + if (objectsEqualIgnoringArrayOrder(firstObj, secondObj)) { + secondList.remove(secondObj); + found = true; + break; + } + } + if (found == false) { + return false; + } + } + return secondList.isEmpty(); + } else { + return false; + } + } else { + return false; + } + } else if (first instanceof Map) { + if (second instanceof Map) { + return mapsEqualIgnoringArrayOrder((Map) first, (Map) second); + } else { + return false; + } + } else { + return first.equals(second); + } + } + +} diff --git a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java index 430690ae146..0850cd5e095 100644 --- a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Before; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java index 58e177b1115..228faa8cf4d 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java @@ -32,9 +32,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -196,12 +193,7 @@ public class ZenDiscoveryTests extends ElasticsearchIntegrationTest { .put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), Version.CURRENT)).masterNodeId("abc"); ClusterState.Builder builder = ClusterState.builder(state); builder.nodes(nodes); - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); - stream.setVersion(node.version()); - ClusterState.Builder.writeTo(builder.build(), stream); - stream.close(); - BytesReference bytes = bStream.bytes(); + BytesReference bytes = PublishClusterStateAction.serializeFullClusterState(builder.build(), node.version()); final CountDownLatch latch = new CountDownLatch(1); final AtomicReference reference = new AtomicReference<>(); diff --git a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index c5adf8cb50e..c97fa5b789d 100644 --- a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -443,11 +443,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -460,11 +460,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -477,11 +477,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } diff --git a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java index e1efe59776d..3cc8a0cfe20 100644 --- a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.SlowClusterStateProcessing; import org.junit.Test; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; @@ -61,6 +62,12 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("path.data", "").build(); } + @Override + protected void ensureClusterStateConsistency() throws IOException { + // testShardActiveElseWhere might change the state of a non-master node + // so we cannot check state consistency of this cluster + } + @Test public void indexCleanup() throws Exception { final String masterNode = internalCluster().startNode(ImmutableSettings.builder().put("node.data", false)); diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index 1c3f8f8c9ca..230c6cae3ce 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -285,6 +285,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/8802") public void testBrokenMapping() throws Exception { // clean all templates setup by the framework. client().admin().indices().prepareDeleteTemplate("*").get(); diff --git a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java index ff8264fdc03..8d569275aea 100644 --- a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java @@ -38,7 +38,9 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.stream.StreamInput; @@ -748,7 +750,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests )); } - public static abstract class TestCustomMetaData implements MetaData.Custom { + public static abstract class TestCustomMetaData extends AbstractDiffable implements MetaData.Custom { private final String data; protected TestCustomMetaData(String data) { @@ -776,194 +778,182 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests return data.hashCode(); } - public static abstract class TestCustomMetaDataFactory extends MetaData.Custom.Factory { + protected abstract TestCustomMetaData newTestCustomMetaData(String data); - protected abstract TestCustomMetaData newTestCustomMetaData(String data); + @Override + public Custom readFrom(StreamInput in) throws IOException { + return newTestCustomMetaData(in.readString()); + } - @Override - public T readFrom(StreamInput in) throws IOException { - return (T) newTestCustomMetaData(in.readString()); - } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(getData()); + } - @Override - public void writeTo(T metadata, StreamOutput out) throws IOException { - out.writeString(metadata.getData()); - } - - @Override - public T fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - String data = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("data".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); - } - data = parser.text(); - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]"); + @Override + public Custom fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + String data = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("data".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); } + data = parser.text(); } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata"); + throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]"); } + } else { + throw new ElasticsearchParseException("failed to parse snapshottable metadata"); } - if (data == null) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); - } - return (T) newTestCustomMetaData(data); } + if (data == null) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); + } + return newTestCustomMetaData(data); + } - @Override - public void toXContent(T metadata, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.field("data", metadata.getData()); - } + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.field("data", getData()); + return builder; } } + static { - MetaData.registerFactory(SnapshottableMetadata.TYPE, SnapshottableMetadata.FACTORY); - MetaData.registerFactory(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.FACTORY); - MetaData.registerFactory(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.FACTORY); - MetaData.registerFactory(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.FACTORY); - MetaData.registerFactory(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.FACTORY); + MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO); + MetaData.registerPrototype(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.PROTO); + MetaData.registerPrototype(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.PROTO); + MetaData.registerPrototype(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.PROTO); + MetaData.registerPrototype(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.PROTO); } public static class SnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable"; - public static final Factory FACTORY = new Factory(); + public static final SnapshottableMetadata PROTO = new SnapshottableMetadata(""); public SnapshottableMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new SnapshottableMetadata(data); + } - @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new SnapshottableMetadata(data); - } - - @Override - public EnumSet context() { - return MetaData.API_AND_SNAPSHOT; - } + @Override + public EnumSet context() { + return MetaData.API_AND_SNAPSHOT; } } public static class NonSnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_non_snapshottable"; - public static final Factory FACTORY = new Factory(); + public static final NonSnapshottableMetadata PROTO = new NonSnapshottableMetadata(""); public NonSnapshottableMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected NonSnapshottableMetadata newTestCustomMetaData(String data) { + return new NonSnapshottableMetadata(data); + } - @Override - protected NonSnapshottableMetadata newTestCustomMetaData(String data) { - return new NonSnapshottableMetadata(data); - } + @Override + public EnumSet context() { + return MetaData.API_ONLY; } } public static class SnapshottableGatewayMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable_gateway"; - public static final Factory FACTORY = new Factory(); + public static final SnapshottableGatewayMetadata PROTO = new SnapshottableGatewayMetadata(""); public SnapshottableGatewayMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new SnapshottableGatewayMetadata(data); + } - @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new SnapshottableGatewayMetadata(data); - } - - @Override - public EnumSet context() { - return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY); - } + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY); } } public static class NonSnapshottableGatewayMetadata extends TestCustomMetaData { public static final String TYPE = "test_non_snapshottable_gateway"; - public static final Factory FACTORY = new Factory(); + public static final NonSnapshottableGatewayMetadata PROTO = new NonSnapshottableGatewayMetadata(""); public NonSnapshottableGatewayMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { - - @Override - public String type() { - return TYPE; - } - - @Override - protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) { - return new NonSnapshottableGatewayMetadata(data); - } - - @Override - public EnumSet context() { - return MetaData.API_AND_GATEWAY; - } - + @Override + public String type() { + return TYPE; } + + @Override + protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) { + return new NonSnapshottableGatewayMetadata(data); + } + + @Override + public EnumSet context() { + return MetaData.API_AND_GATEWAY; + } + } public static class SnapshotableGatewayNoApiMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable_gateway_no_api"; - public static final Factory FACTORY = new Factory(); + public static final SnapshotableGatewayNoApiMetadata PROTO = new SnapshotableGatewayNoApiMetadata(""); public SnapshotableGatewayNoApiMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } - - @Override - protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) { - return new SnapshotableGatewayNoApiMetadata(data); - } - - @Override - public EnumSet context() { - return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); - } + @Override + protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) { + return new SnapshotableGatewayNoApiMetadata(data); + } + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); } } diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 1c0c11bb5cd..be318020fc0 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -166,6 +166,8 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; +import static org.elasticsearch.common.xcontent.XContentTestUtils.convertToMap; +import static org.elasticsearch.common.xcontent.XContentTestUtils.mapsEqualIgnoringArrayOrder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -357,7 +359,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase * Creates a randomized index template. This template is used to pass in randomized settings on a * per index basis. Allows to enable/disable the randomization for number of shards and replicas */ - private void randomIndexTemplate() throws IOException { + public void randomIndexTemplate() throws IOException { // TODO move settings for random directory etc here into the index based randomized settings. if (cluster().size() > 0) { @@ -650,6 +652,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase .transientSettings().getAsMap().size(), equalTo(0)); } ensureClusterSizeConsistency(); + ensureClusterStateConsistency(); cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete if (afterClass || currentClusterScope == Scope.TEST) { cluster().close(); @@ -1088,8 +1091,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase */ public void setMinimumMasterNodes(int n) { assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings( - settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) - .get().isAcknowledged()); + settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) + .get().isAcknowledged()); } /** @@ -1136,6 +1139,35 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } } + /** + * Verifies that all nodes that have the same version of the cluster state as master have same cluster state + */ + protected void ensureClusterStateConsistency() throws IOException { + if (cluster() != null) { + ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + Map masterStateMap = convertToMap(masterClusterState); + int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; + for (Client client : cluster()) { + ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); + if (masterClusterState.version() == localClusterState.version()) { + try { + assertThat(masterClusterState.uuid(), equalTo(localClusterState.uuid())); + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can compare serialization sizes - they should be the same + int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; + assertThat(masterClusterStateSize, equalTo(localClusterStateSize)); + + // Compare JSON serialization + assertThat(mapsEqualIgnoringArrayOrder(masterStateMap, convertToMap(localClusterState)), equalTo(true)); + } catch (AssertionError error) { + logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); + throw error; + } + } + } + } + } + /** * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each * shard is available on the cluster. diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 55b4b15af01..8c7ae1955f3 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -71,6 +71,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllFilesClosed; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSearchersClosed; @@ -617,4 +618,17 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { return threadGroup.getName(); } } + + /** + * Returns size random values + */ + public static List randomSubsetOf(int size, T... values) { + if (size > values.length) { + throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects"); + } + List list = newArrayList(values); + Collections.shuffle(list); + return list.subList(0, size); + } + } From 9745808c3f199218a42a7e37d5c3120ab5d700aa Mon Sep 17 00:00:00 2001 From: Jun Ohtani Date: Tue, 21 Apr 2015 23:54:22 +0900 Subject: [PATCH 138/236] Settings: validate number_of_shards/number_of_replicas without index setting prefix Move the validation logic to MetaDataCreateIndexService Add ShardClusterSnapshotRestoreTests Add the validation to RestoreService Closes #10693 --- .../indices/create/CreateIndexRequest.java | 8 --- .../metadata/MetaDataCreateIndexService.java | 45 ++++++++++---- .../snapshots/RestoreService.java | 1 + .../indices/create/CreateIndexTests.java | 60 +++++++++++++++---- .../settings/UpdateNumberOfReplicasTests.java | 19 ++++++ .../SharedClusterSnapshotRestoreTests.java | 12 ++++ 6 files changed, 116 insertions(+), 29 deletions(-) diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index c2d379d71d0..7c795546258 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -106,14 +106,6 @@ public class CreateIndexRequest extends AcknowledgedRequest if (index == null) { validationException = addValidationError("index is missing", validationException); } - Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); - Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); - if (number_of_primaries != null && number_of_primaries <= 0) { - validationException = addValidationError("index must have 1 or more primary shards", validationException); - } - if (number_of_replicas != null && number_of_replicas < 0) { - validationException = addValidationError("index must have 0 or more replica shards", validationException); - } return validationException; } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index d96377f8226..f4077966485 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -338,8 +338,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (request.index().equals(ScriptService.SCRIPT_INDEX)) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 0)); indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, "0-all"); - } - else { + } else { if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { if (request.index().equals(riverIndexName)) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1)); @@ -426,7 +425,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { } for (Alias alias : request.aliases()) { AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()) - .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); + .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); indexMetaDataBuilder.putAlias(aliasMetaData); } @@ -445,11 +444,11 @@ public class MetaDataCreateIndexService extends AbstractComponent { } indexService.indicesLifecycle().beforeIndexAddedToCluster(new Index(request.index()), - indexMetaData.settings()); + indexMetaData.settings()); MetaData newMetaData = MetaData.builder(currentState.metaData()) - .put(indexMetaData, false) - .build(); + .put(indexMetaData, false) + .build(); logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}], mappings {}", request.index(), request.cause(), templateNames, indexMetaData.numberOfShards(), indexMetaData.numberOfReplicas(), mappings.keySet()); @@ -467,7 +466,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (request.state() == State.OPEN) { RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) - .addAsNew(updatedState.metaData().index(request.index())); + .addAsNew(updatedState.metaData().index(request.index())); RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build()); updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build(); } @@ -554,11 +553,37 @@ public class MetaDataCreateIndexService extends AbstractComponent { private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) throws ElasticsearchException { validateIndexName(request.index(), state); - String customPath = request.settings().get(IndexMetaData.SETTING_DATA_PATH, null); + validateIndexSettings(request.index(), request.settings()); + } + + public void validateIndexSettings(String indexName, Settings settings) throws IndexCreationException { + String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null); + List validationErrors = Lists.newArrayList(); if (customPath != null && nodeEnv.isCustomPathsEnabled() == false) { - throw new IndexCreationException(new Index(request.index()), - new ElasticsearchIllegalArgumentException("custom data_paths for indices is disabled")); + validationErrors.add("custom data_paths for indices is disabled"); } + Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); + Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); + if (number_of_primaries != null && number_of_primaries <= 0) { + validationErrors.add("index must have 1 or more primary shards"); + } + if (number_of_replicas != null && number_of_replicas < 0) { + validationErrors.add("index must have 0 or more replica shards"); + } + if (validationErrors.isEmpty() == false) { + throw new IndexCreationException(new Index(indexName), + new ElasticsearchIllegalArgumentException(getMessage(validationErrors))); + } + } + + private String getMessage(List validationErrors) { + StringBuilder sb = new StringBuilder(); + sb.append("Validation Failed: "); + int index = 0; + for (String error : validationErrors) { + sb.append(++index).append(": ").append(error).append(";"); + } + return sb.toString(); } private static class DefaultIndexTemplateFilter implements IndexTemplateFilter { diff --git a/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 6790991c298..b544922e0f8 100644 --- a/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -190,6 +190,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis // Index doesn't exist - create it and start recovery // Make sure that the index we are about to create has a validate name createIndexService.validateIndexName(renamedIndex, currentState); + createIndexService.validateIndexSettings(renamedIndex, snapshotIndexMetaData.settings()); IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndex); indexMdBuilder.settings(ImmutableSettings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID())); if (!request.includeAliases() && !snapshotIndexMetaData.aliases().isEmpty()) { diff --git a/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java b/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java index 1b0102f0071..a7e31e0c52b 100644 --- a/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java +++ b/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.create; -import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -107,34 +107,34 @@ public class CreateIndexTests extends ElasticsearchIntegrationTest{ public void testInvalidShardCountSettings() throws Exception { try { prepareCreate("test").setSettings(ImmutableSettings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0)) - .build()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0)) + .build()) .get(); fail("should have thrown an exception about the primary shard count"); - } catch (ActionRequestValidationException e) { + } catch (ElasticsearchIllegalArgumentException e) { assertThat("message contains error about shard count: " + e.getMessage(), e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true)); } try { prepareCreate("test").setSettings(ImmutableSettings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) - .build()) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) + .build()) .get(); fail("should have thrown an exception about the replica shard count"); - } catch (ActionRequestValidationException e) { + } catch (ElasticsearchIllegalArgumentException e) { assertThat("message contains error about shard count: " + e.getMessage(), e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true)); } try { prepareCreate("test").setSettings(ImmutableSettings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0)) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) - .build()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) + .build()) .get(); fail("should have thrown an exception about the shard count"); - } catch (ActionRequestValidationException e) { + } catch (ElasticsearchIllegalArgumentException e) { assertThat("message contains error about shard count: " + e.getMessage(), e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true)); assertThat("message contains error about shard count: " + e.getMessage(), @@ -151,4 +151,42 @@ public class CreateIndexTests extends ElasticsearchIntegrationTest{ setClusterReadOnly(false); } } + + @Test + public void testInvalidShardCountSettingsWithoutPrefix() throws Exception { + try { + prepareCreate("test").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, 0)) + .build()) + .get(); + fail("should have thrown an exception about the shard count"); + } catch (ElasticsearchIllegalArgumentException e) { + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true)); + } + try { + prepareCreate("test").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1)) + .build()) + .get(); + fail("should have thrown an exception about the shard count"); + } catch (ElasticsearchIllegalArgumentException e) { + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true)); + } + try { + prepareCreate("test").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, 0)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1)) + .build()) + .get(); + fail("should have thrown an exception about the shard count"); + } catch (ElasticsearchIllegalArgumentException e) { + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true)); + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true)); + } + } + } diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java index 74776657270..f98a3c3a5d3 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java @@ -19,10 +19,13 @@ package org.elasticsearch.indices.settings; +import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -263,4 +266,20 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest { assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(3)); assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 4)); } + + @Test + public void testUpdateWithInvalidNumberOfReplicas() { + createIndex("test"); + try { + client().admin().indices().prepareUpdateSettings("test") + .setSettings(ImmutableSettings.settingsBuilder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) + ) + .execute().actionGet(); + fail("should have thrown an exception about the replica shard count"); + } catch (ElasticsearchIllegalArgumentException e) { + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("the value of the setting index.number_of_replicas must be a non negative integer"), equalTo(true)); + } + } } diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index be3c44e9cc6..b24d74b84b2 100644 --- a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -25,6 +25,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase.Slow; +import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; @@ -1636,6 +1637,17 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { .setIndexSettings(newIncorrectIndexSettings) .setWaitForCompletion(true), SnapshotRestoreException.class); + logger.info("--> try restoring while changing the number of replicas to a negative number - should fail"); + Settings newIncorrectReplicasIndexSettings = ImmutableSettings.builder() + .put(newIndexSettings) + .put(SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1)) + .build(); + assertThrows(client.admin().cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setIgnoreIndexSettings("index.analysis.*") + .setIndexSettings(newIncorrectReplicasIndexSettings) + .setWaitForCompletion(true), ElasticsearchIllegalArgumentException.class); + logger.info("--> restore index with correct settings from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster() .prepareRestoreSnapshot("test-repo", "test-snap") From 5d8e9e24c917b5f2c0958ba68be34a42efaeadbc Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Mon, 27 Apr 2015 07:47:14 +0200 Subject: [PATCH 139/236] HTTP: Ensure url path expansion only works inside of plugins This prevents reading of files that are not part of the plugin directory by specifically crafted paths. --- .../org/elasticsearch/http/HttpServer.java | 11 +++---- .../plugins/SitePluginTests.java | 31 +++++++++++++++++++ .../plugins/dummy/_site/dir1/.empty | 0 3 files changed, 36 insertions(+), 6 deletions(-) create mode 100644 src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty diff --git a/src/main/java/org/elasticsearch/http/HttpServer.java b/src/main/java/org/elasticsearch/http/HttpServer.java index 6d43053e408..6a84b0c04dc 100644 --- a/src/main/java/org/elasticsearch/http/HttpServer.java +++ b/src/main/java/org/elasticsearch/http/HttpServer.java @@ -177,11 +177,13 @@ public class HttpServer extends AbstractLifecycleComponent { sitePath = sitePath.replace("/", separator); // this is a plugin provided site, serve it as static files from the plugin location Path file = FileSystemUtils.append(siteFile, PathUtils.get(sitePath), 0); - if (!Files.exists(file) || Files.isHidden(file)) { + + // return not found instead of forbidden to prevent malicious requests to find out if files exist or dont exist + if (!Files.exists(file) || Files.isHidden(file) || !file.toAbsolutePath().normalize().startsWith(siteFile.toAbsolutePath())) { channel.sendResponse(new BytesRestResponse(NOT_FOUND)); return; } - + BasicFileAttributes attributes = Files.readAttributes(file, BasicFileAttributes.class); if (!attributes.isRegularFile()) { // If it's not a dir, we send a 403 @@ -196,10 +198,7 @@ public class HttpServer extends AbstractLifecycleComponent { return; } } - if (!file.toAbsolutePath().startsWith(siteFile.toAbsolutePath())) { - channel.sendResponse(new BytesRestResponse(FORBIDDEN)); - return; - } + try { byte[] data = Files.readAllBytes(file); channel.sendResponse(new BytesRestResponse(OK, guessMimeType(sitePath), data)); diff --git a/src/test/java/org/elasticsearch/plugins/SitePluginTests.java b/src/test/java/org/elasticsearch/plugins/SitePluginTests.java index 7889ec9ead9..8106c6f60dd 100644 --- a/src/test/java/org/elasticsearch/plugins/SitePluginTests.java +++ b/src/test/java/org/elasticsearch/plugins/SitePluginTests.java @@ -34,6 +34,9 @@ import org.junit.Test; import java.net.URISyntaxException; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; @@ -86,6 +89,34 @@ public class SitePluginTests extends ElasticsearchIntegrationTest { assertThat(response.getBody(), containsString("Dummy Site Plugin")); } + /** + * Test normalizing of path + */ + @Test + public void testThatPathsAreNormalized() throws Exception { + // more info: https://www.owasp.org/index.php/Path_Traversal + List notFoundUris = new ArrayList<>(); + notFoundUris.add("/_plugin/dummy/../../../../../log4j.properties"); + notFoundUris.add("/_plugin/dummy/../../../../../%00log4j.properties"); + notFoundUris.add("/_plugin/dummy/..%c0%af..%c0%af..%c0%af..%c0%af..%c0%aflog4j.properties"); + notFoundUris.add("/_plugin/dummy/%2E%2E/%2E%2E/%2E%2E/%2E%2E/index.html"); + notFoundUris.add("/_plugin/dummy/%2e%2e/%2e%2e/%2e%2e/%2e%2e/index.html"); + notFoundUris.add("/_plugin/dummy/%2e%2e%2f%2e%2e%2f%2e%2e%2f%2e%2e%2findex.html"); + notFoundUris.add("/_plugin/dummy/%2E%2E/%2E%2E/%2E%2E/%2E%2E/index.html"); + notFoundUris.add("/_plugin/dummy/..\\..\\..\\..\\..\\log4j.properties"); + + for (String uri : notFoundUris) { + HttpResponse response = httpClient().path(uri).execute(); + String message = String.format(Locale.ROOT, "URI [%s] expected to be not found", uri); + assertThat(message, response.getStatusCode(), equalTo(RestStatus.NOT_FOUND.getStatus())); + } + + // using relative path inside of the plugin should work + HttpResponse response = httpClient().path("/_plugin/dummy/dir1/../dir1/../index.html").execute(); + assertThat(response.getStatusCode(), equalTo(RestStatus.OK.getStatus())); + assertThat(response.getBody(), containsString("Dummy Site Plugin")); + } + /** * Test case for #4845: https://github.com/elasticsearch/elasticsearch/issues/4845 * Serving _site plugins do not pick up on index.html for sub directories diff --git a/src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty b/src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty new file mode 100644 index 00000000000..e69de29bb2d From f529eddbcd72d6a12bab180c49beedaa6a57723f Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Mon, 27 Apr 2015 08:52:00 +0200 Subject: [PATCH 140/236] Docs: Updated docs/README.asciidoc to point to the new docs repo Closes #10817 --- docs/{README.md => README.asciidoc} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/{README.md => README.asciidoc} (100%) diff --git a/docs/README.md b/docs/README.asciidoc similarity index 100% rename from docs/README.md rename to docs/README.asciidoc From 061a010453551858cec8f657115f0e2a69629fb2 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 27 Apr 2015 09:07:02 +0200 Subject: [PATCH 141/236] Revert "Add support for cluster state diffs" This reverts commit d746e14cf3f6d9b25a2c6018eedca249555cf44b. --- .../reroute/ClusterRerouteResponse.java | 2 +- .../cluster/state/ClusterStateResponse.java | 2 +- .../state/TransportClusterStateAction.java | 15 +- .../indices/alias/get/GetAliasesResponse.java | 2 +- .../indices/create/CreateIndexRequest.java | 10 +- .../admin/indices/get/GetIndexResponse.java | 6 +- .../mapping/get/GetMappingsResponse.java | 4 +- .../get/GetIndexTemplatesResponse.java | 2 +- .../template/put/PutIndexTemplateRequest.java | 10 +- .../cluster/AbstractDiffable.java | 108 --- .../elasticsearch/cluster/ClusterState.java | 272 ++------ .../java/org/elasticsearch/cluster/Diff.java | 42 -- .../org/elasticsearch/cluster/Diffable.java | 42 -- .../elasticsearch/cluster/DiffableUtils.java | 283 -------- ...ompatibleClusterStateVersionException.java | 35 - .../cluster/block/ClusterBlocks.java | 75 +-- .../cluster/metadata/AliasMetaData.java | 85 +-- .../cluster/metadata/IndexMetaData.java | 234 +++---- .../metadata/IndexTemplateMetaData.java | 105 ++- .../cluster/metadata/MappingMetaData.java | 48 +- .../cluster/metadata/MetaData.java | 266 +++----- .../metadata/MetaDataCreateIndexService.java | 2 +- .../metadata/RepositoriesMetaData.java | 222 +++---- .../cluster/metadata/RepositoryMetaData.java | 21 - .../cluster/metadata/RestoreMetaData.java | 226 +++---- .../cluster/metadata/SnapshotMetaData.java | 231 ++++--- .../cluster/node/DiscoveryNodes.java | 73 +- .../cluster/routing/IndexRoutingTable.java | 72 +- .../routing/IndexShardRoutingTable.java | 22 - .../cluster/routing/RoutingTable.java | 90 +-- .../service/InternalClusterService.java | 8 +- .../ClusterDynamicSettingsModule.java | 1 - .../common/io/stream/StreamableReader.java | 30 - .../common/io/stream/Writeable.java | 30 - .../elasticsearch/discovery/Discovery.java | 3 +- .../discovery/DiscoveryService.java | 5 +- .../discovery/DiscoverySettings.java | 13 - .../discovery/local/LocalDiscovery.java | 46 +- .../discovery/zen/ZenDiscovery.java | 8 +- .../publish/PublishClusterStateAction.java | 194 ++---- .../org/elasticsearch/gateway/Gateway.java | 2 +- .../gateway/LocalAllocateDangledIndices.java | 2 +- .../TransportNodesListGatewayMetaState.java | 2 +- .../get/RestGetRepositoriesAction.java | 2 +- .../indices/get/RestGetIndicesAction.java | 2 +- .../warmer/get/RestGetWarmerAction.java | 2 +- .../search/warmer/IndexWarmersMetaData.java | 304 ++++----- .../ClusterStateDiffPublishingTests.java | 625 ------------------ .../cluster/ClusterStateDiffTests.java | 534 --------------- .../ClusterSerializationTests.java | 2 +- .../cluster/serialization/DiffableTests.java | 127 ---- .../common/xcontent/XContentTestUtils.java | 100 --- .../discovery/ZenUnicastDiscoveryTests.java | 1 - .../discovery/zen/ZenDiscoveryTests.java | 10 +- .../timestamp/TimestampMappingTests.java | 12 +- .../store/IndicesStoreIntegrationTests.java | 7 - .../template/SimpleIndexTemplateTests.java | 1 - .../DedicatedClusterSnapshotRestoreTests.java | 214 +++--- .../test/ElasticsearchIntegrationTest.java | 38 +- .../test/ElasticsearchTestCase.java | 14 - 60 files changed, 1128 insertions(+), 3818 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/cluster/AbstractDiffable.java delete mode 100644 src/main/java/org/elasticsearch/cluster/Diff.java delete mode 100644 src/main/java/org/elasticsearch/cluster/Diffable.java delete mode 100644 src/main/java/org/elasticsearch/cluster/DiffableUtils.java delete mode 100644 src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java delete mode 100644 src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java delete mode 100644 src/main/java/org/elasticsearch/common/io/stream/Writeable.java delete mode 100644 src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java delete mode 100644 src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java delete mode 100644 src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java delete mode 100644 src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 28f9cb1db90..79b31f620d5 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -68,7 +68,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - state.writeTo(out); + ClusterState.Builder.writeTo(state, out); writeAcknowledged(out); RoutingExplanations.writeTo(explanations, out); } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index e9aa9b723fa..861a84a9e71 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -62,6 +62,6 @@ public class ClusterStateResponse extends ActionResponse { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); clusterName.writeTo(out); - clusterState.writeTo(out); + ClusterState.Builder.writeTo(clusterState, out); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 150a15eacfd..fc1db98c35e 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.cluster.state; +import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; @@ -28,6 +29,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData.Custom; @@ -37,6 +39,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; + +import static com.google.common.collect.Lists.newArrayList; +import static org.elasticsearch.cluster.metadata.MetaData.lookupFactorySafe; + /** * */ @@ -77,7 +84,6 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); builder.version(currentState.version()); - builder.uuid(currentState.uuid()); if (request.nodes()) { builder.nodes(currentState.nodes()); } @@ -116,9 +122,10 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio } // Filter our metadata that shouldn't be returned by API - for(ObjectObjectCursor custom : currentState.metaData().customs()) { - if(!custom.value.context().contains(MetaData.XContentContext.API)) { - mdBuilder.removeCustom(custom.key); + for(ObjectCursor type : currentState.metaData().customs().keys()) { + Custom.Factory factory = lookupFactorySafe(type.value); + if(!factory.context().contains(MetaData.XContentContext.API)) { + mdBuilder.removeCustom(type.value); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 106e864a367..765a9395afc 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -74,7 +74,7 @@ public class GetAliasesResponse extends ActionResponse { out.writeString(entry.key); out.writeVInt(entry.value.size()); for (AliasMetaData aliasMetaData : entry.value) { - aliasMetaData.writeTo(out); + AliasMetaData.Builder.writeTo(aliasMetaData, out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 7c795546258..0be9e2767c1 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -397,11 +397,11 @@ public class CreateIndexRequest extends AcknowledgedRequest aliases((Map) entry.getValue()); } else { // maybe custom? - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { + IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name); + if (factory != null) { found = true; try { - customs.put(name, proto.fromMap((Map) entry.getValue())); + customs.put(name, factory.fromMap((Map) entry.getValue())); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]"); } @@ -449,7 +449,7 @@ public class CreateIndexRequest extends AcknowledgedRequest int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); + IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in); customs.put(type, customIndexMetaData); } int aliasesSize = in.readVInt(); @@ -473,7 +473,7 @@ public class CreateIndexRequest extends AcknowledgedRequest out.writeVInt(customs.size()); for (Map.Entry entry : customs.entrySet()) { out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 7080a694a11..35e6cfa4804 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -134,7 +134,7 @@ public class GetIndexResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder mappingEntryBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - mappingEntryBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); + mappingEntryBuilder.put(in.readString(), MappingMetaData.readFrom(in)); } mappingsMapBuilder.put(key, mappingEntryBuilder.build()); } @@ -181,7 +181,7 @@ public class GetIndexResponse extends ActionResponse { out.writeVInt(indexEntry.value.size()); for (ObjectObjectCursor mappingEntry : indexEntry.value) { out.writeString(mappingEntry.key); - mappingEntry.value.writeTo(out); + MappingMetaData.writeTo(mappingEntry.value, out); } } out.writeVInt(aliases.size()); @@ -189,7 +189,7 @@ public class GetIndexResponse extends ActionResponse { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (AliasMetaData aliasEntry : indexEntry.value) { - aliasEntry.writeTo(out); + AliasMetaData.Builder.writeTo(aliasEntry, out); } } out.writeVInt(settings.size()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index 30e9e24c493..b27577f8da3 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -59,7 +59,7 @@ public class GetMappingsResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder typeMapBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - typeMapBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); + typeMapBuilder.put(in.readString(), MappingMetaData.readFrom(in)); } indexMapBuilder.put(key, typeMapBuilder.build()); } @@ -75,7 +75,7 @@ public class GetMappingsResponse extends ActionResponse { out.writeVInt(indexEntry.value.size()); for (ObjectObjectCursor typeEntry : indexEntry.value) { out.writeString(typeEntry.key); - typeEntry.value.writeTo(out); + MappingMetaData.writeTo(typeEntry.value, out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index 2ce6d8d2c1a..56de19872f2 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -60,7 +60,7 @@ public class GetIndexTemplatesResponse extends ActionResponse { super.writeTo(out); out.writeVInt(indexTemplates.size()); for (IndexTemplateMetaData indexTemplate : indexTemplates) { - indexTemplate.writeTo(out); + IndexTemplateMetaData.Builder.writeTo(indexTemplate, out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 608b7fa82b2..b728abf934e 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -293,10 +293,10 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest) entry.getValue()); } else { // maybe custom? - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { + IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name); + if (factory != null) { try { - customs.put(name, proto.fromMap((Map) entry.getValue())); + customs.put(name, factory.fromMap((Map) entry.getValue())); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]"); } @@ -441,7 +441,7 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest entry : customs.entrySet()) { out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { diff --git a/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java b/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java deleted file mode 100644 index 4e6da2bd569..00000000000 --- a/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamableReader; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Abstract diffable object with simple diffs implementation that sends the entire object if object has changed or - * nothing is object remained the same. - */ -public abstract class AbstractDiffable> implements Diffable { - - @Override - public Diff diff(T previousState) { - if (this.get().equals(previousState)) { - return new CompleteDiff<>(); - } else { - return new CompleteDiff<>(get()); - } - } - - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { - return new CompleteDiff<>(this, in); - } - - public static > Diff readDiffFrom(StreamableReader reader, StreamInput in) throws IOException { - return new CompleteDiff(reader, in); - } - - private static class CompleteDiff> implements Diff { - - @Nullable - private final T part; - - /** - * Creates simple diff with changes - */ - public CompleteDiff(T part) { - this.part = part; - } - - /** - * Creates simple diff without changes - */ - public CompleteDiff() { - this.part = null; - } - - /** - * Read simple diff from the stream - */ - public CompleteDiff(StreamableReader reader, StreamInput in) throws IOException { - if (in.readBoolean()) { - this.part = reader.readFrom(in); - } else { - this.part = null; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - if (part != null) { - out.writeBoolean(true); - part.writeTo(out); - } else { - out.writeBoolean(false); - } - } - - @Override - public T apply(T part) { - if (this.part != null) { - return this.part; - } else { - return part; - } - } - } - - @SuppressWarnings("unchecked") - public T get() { - return (T) this; - } -} - diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java index b092a121c07..ef4d67740dc 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.DiffableUtils.KeyedReader; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -57,9 +56,7 @@ import java.util.Map; /** * */ -public class ClusterState implements ToXContent, Diffable { - - public static final ClusterState PROTO = builder(ClusterName.DEFAULT).build(); +public class ClusterState implements ToXContent { public static enum ClusterStateStatus { UNKNOWN((byte) 0), @@ -78,43 +75,47 @@ public class ClusterState implements ToXContent, Diffable { } } - public interface Custom extends Diffable, ToXContent { + public interface Custom { - String type(); + interface Factory { + + String type(); + + T readFrom(StreamInput in) throws IOException; + + void writeTo(T customState, StreamOutput out) throws IOException; + + void toXContent(T customState, XContentBuilder builder, ToXContent.Params params); + } } - private final static Map customPrototypes = new HashMap<>(); + private final static Map customFactories = new HashMap<>(); /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); + public static void registerFactory(String type, Custom.Factory factory) { + customFactories.put(type, factory); } @Nullable - public static T lookupPrototype(String type) { - //noinspection unchecked - return (T) customPrototypes.get(type); + public static Custom.Factory lookupFactory(String type) { + return customFactories.get(type); } - public static T lookupPrototypeSafe(String type) throws ElasticsearchIllegalArgumentException { - @SuppressWarnings("unchecked") - T proto = (T)customPrototypes.get(type); - if (proto == null) { - throw new ElasticsearchIllegalArgumentException("No custom state prototype registered for type [" + type + "]"); + public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { + Custom.Factory factory = customFactories.get(type); + if (factory == null) { + throw new ElasticsearchIllegalArgumentException("No custom state factory registered for type [" + type + "]"); } - return proto; + return factory; } - public static final String UNKNOWN_UUID = "_na_"; public static final long UNKNOWN_VERSION = -1; private final long version; - private final String uuid; - private final RoutingTable routingTable; private final DiscoveryNodes nodes; @@ -127,20 +128,17 @@ public class ClusterState implements ToXContent, Diffable { private final ClusterName clusterName; - private final boolean wasReadFromDiff; - // built on demand private volatile RoutingNodes routingNodes; private volatile ClusterStateStatus status; - public ClusterState(long version, String uuid, ClusterState state) { - this(state.clusterName, version, uuid, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false); + public ClusterState(long version, ClusterState state) { + this(state.clusterName, version, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs()); } - public ClusterState(ClusterName clusterName, long version, String uuid, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs, boolean wasReadFromDiff) { + public ClusterState(ClusterName clusterName, long version, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs) { this.version = version; - this.uuid = uuid; this.clusterName = clusterName; this.metaData = metaData; this.routingTable = routingTable; @@ -148,7 +146,6 @@ public class ClusterState implements ToXContent, Diffable { this.blocks = blocks; this.customs = customs; this.status = ClusterStateStatus.UNKNOWN; - this.wasReadFromDiff = wasReadFromDiff; } public ClusterStateStatus status() { @@ -168,14 +165,6 @@ public class ClusterState implements ToXContent, Diffable { return version(); } - /** - * This uuid is automatically generated for for each version of cluster state. It is used to make sure that - * we are applying diffs to the right previous state. - */ - public String uuid() { - return this.uuid; - } - public DiscoveryNodes nodes() { return this.nodes; } @@ -228,11 +217,6 @@ public class ClusterState implements ToXContent, Diffable { return this.clusterName; } - // Used for testing and logging to determine how this cluster state was send over the wire - boolean wasReadFromDiff() { - return wasReadFromDiff; - } - /** * Returns a built (on demand) routing nodes view of the routing table. NOTE, the routing nodes * are mutable, use them just for read operations @@ -248,8 +232,6 @@ public class ClusterState implements ToXContent, Diffable { public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("version: ").append(version).append("\n"); - sb.append("uuid: ").append(uuid).append("\n"); - sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); sb.append("meta data version: ").append(metaData.version()).append("\n"); sb.append(nodes().prettyPrint()); sb.append(routingTable().prettyPrint()); @@ -321,13 +303,14 @@ public class ClusterState implements ToXContent, Diffable { } } + + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { EnumSet metrics = Metric.parseString(params.param("metric", "_all"), true); if (metrics.contains(Metric.VERSION)) { builder.field("version", version); - builder.field("uuid", uuid); } if (metrics.contains(Metric.MASTER_NODE)) { @@ -452,7 +435,7 @@ public class ClusterState implements ToXContent, Diffable { for (ObjectObjectCursor cursor : metaData.customs()) { builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); + MetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); builder.endObject(); } @@ -491,7 +474,7 @@ public class ClusterState implements ToXContent, Diffable { builder.startObject("nodes"); for (RoutingNode routingNode : readOnlyRoutingNodes()) { - builder.startArray(routingNode.nodeId() == null ? "null" : routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE); + builder.startArray(routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE); for (ShardRouting shardRouting : routingNode) { shardRouting.toXContent(builder, params); } @@ -504,7 +487,7 @@ public class ClusterState implements ToXContent, Diffable { if (metrics.contains(Metric.CUSTOMS)) { for (ObjectObjectCursor cursor : customs) { builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); + lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); builder.endObject(); } } @@ -524,25 +507,21 @@ public class ClusterState implements ToXContent, Diffable { private final ClusterName clusterName; private long version = 0; - private String uuid = UNKNOWN_UUID; private MetaData metaData = MetaData.EMPTY_META_DATA; private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE; private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES; private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; private final ImmutableOpenMap.Builder customs; - private boolean fromDiff; public Builder(ClusterState state) { this.clusterName = state.clusterName; this.version = state.version(); - this.uuid = state.uuid(); this.nodes = state.nodes(); this.routingTable = state.routingTable(); this.metaData = state.metaData(); this.blocks = state.blocks(); this.customs = ImmutableOpenMap.builder(state.customs()); - this.fromDiff = false; } public Builder(ClusterName clusterName) { @@ -596,17 +575,6 @@ public class ClusterState implements ToXContent, Diffable { return this; } - public Builder incrementVersion() { - this.version = version + 1; - this.uuid = UNKNOWN_UUID; - return this; - } - - public Builder uuid(String uuid) { - this.uuid = uuid; - return this; - } - public Custom getCustom(String type) { return customs.get(type); } @@ -621,26 +589,13 @@ public class ClusterState implements ToXContent, Diffable { return this; } - public Builder customs(ImmutableOpenMap customs) { - this.customs.putAll(customs); - return this; - } - - public Builder fromDiff(boolean fromDiff) { - this.fromDiff = fromDiff; - return this; - } - public ClusterState build() { - if (UNKNOWN_UUID.equals(uuid)) { - uuid = Strings.randomBase64UUID(); - } - return new ClusterState(clusterName, version, uuid, metaData, routingTable, nodes, blocks, customs.build(), fromDiff); + return new ClusterState(clusterName, version, metaData, routingTable, nodes, blocks, customs.build()); } public static byte[] toBytes(ClusterState state) throws IOException { BytesStreamOutput os = new BytesStreamOutput(); - state.writeTo(os); + writeTo(state, os); return os.bytes().toBytes(); } @@ -652,152 +607,39 @@ public class ClusterState implements ToXContent, Diffable { return readFrom(new BytesStreamInput(data), localNode); } + public static void writeTo(ClusterState state, StreamOutput out) throws IOException { + state.clusterName.writeTo(out); + out.writeLong(state.version()); + MetaData.Builder.writeTo(state.metaData(), out); + RoutingTable.Builder.writeTo(state.routingTable(), out); + DiscoveryNodes.Builder.writeTo(state.nodes(), out); + ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out); + out.writeVInt(state.customs().size()); + for (ObjectObjectCursor cursor : state.customs()) { + out.writeString(cursor.key); + lookupFactorySafe(cursor.key).writeTo(cursor.value, out); + } + } + /** * @param in input stream * @param localNode used to set the local node in the cluster state. can be null. */ public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - return PROTO.readFrom(in, localNode); - } - - } - - @Override - public Diff diff(ClusterState previousState) { - return new ClusterStateDiff(previousState, this); - } - - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { - return new ClusterStateDiff(in, this); - } - - public ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { - ClusterName clusterName = ClusterName.readClusterName(in); - Builder builder = new Builder(clusterName); - builder.version = in.readLong(); - builder.uuid = in.readString(); - builder.metaData = MetaData.Builder.readFrom(in); - builder.routingTable = RoutingTable.Builder.readFrom(in); - builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); - builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - @Override - public ClusterState readFrom(StreamInput in) throws IOException { - return readFrom(in, nodes.localNode()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - clusterName.writeTo(out); - out.writeLong(version); - out.writeString(uuid); - metaData.writeTo(out); - routingTable.writeTo(out); - nodes.writeTo(out); - blocks.writeTo(out); - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); - } - } - - private static class ClusterStateDiff implements Diff { - - private final long toVersion; - - private final String fromUuid; - - private final String toUuid; - - private final ClusterName clusterName; - - private final Diff routingTable; - - private final Diff nodes; - - private final Diff metaData; - - private final Diff blocks; - - private final Diff> customs; - - public ClusterStateDiff(ClusterState before, ClusterState after) { - fromUuid = before.uuid; - toUuid = after.uuid; - toVersion = after.version; - clusterName = after.clusterName; - routingTable = after.routingTable.diff(before.routingTable); - nodes = after.nodes.diff(before.nodes); - metaData = after.metaData.diff(before.metaData); - blocks = after.blocks.diff(before.blocks); - customs = DiffableUtils.diff(before.customs, after.customs); - } - - public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException { - clusterName = ClusterName.readClusterName(in); - fromUuid = in.readString(); - toUuid = in.readString(); - toVersion = in.readLong(); - routingTable = proto.routingTable.readDiffFrom(in); - nodes = proto.nodes.readDiffFrom(in); - metaData = proto.metaData.readDiffFrom(in); - blocks = proto.blocks.readDiffFrom(in); - customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { - @Override - public Custom readFrom(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); - } - - @Override - public Diff readDiffFrom(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); - } - }); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - clusterName.writeTo(out); - out.writeString(fromUuid); - out.writeString(toUuid); - out.writeLong(toVersion); - routingTable.writeTo(out); - nodes.writeTo(out); - metaData.writeTo(out); - blocks.writeTo(out); - customs.writeTo(out); - } - - @Override - public ClusterState apply(ClusterState state) { + ClusterName clusterName = ClusterName.readClusterName(in); Builder builder = new Builder(clusterName); - if (toUuid.equals(state.uuid)) { - // no need to read the rest - cluster state didn't change - return state; + builder.version = in.readLong(); + builder.metaData = MetaData.Builder.readFrom(in); + builder.routingTable = RoutingTable.Builder.readFrom(in); + builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); + builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); } - if (fromUuid.equals(state.uuid) == false) { - throw new IncompatibleClusterStateVersionException(state.version, state.uuid, toVersion, fromUuid); - } - builder.uuid(toUuid); - builder.version(toVersion); - builder.routingTable(routingTable.apply(state.routingTable)); - builder.nodes(nodes.apply(state.nodes)); - builder.metaData(metaData.apply(state.metaData)); - builder.blocks(blocks.apply(state.blocks)); - builder.customs(customs.apply(state.customs)); - builder.fromDiff(true); return builder.build(); } } - } diff --git a/src/main/java/org/elasticsearch/cluster/Diff.java b/src/main/java/org/elasticsearch/cluster/Diff.java deleted file mode 100644 index 2e571f43bca..00000000000 --- a/src/main/java/org/elasticsearch/cluster/Diff.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Represents difference between states of cluster state parts - */ -public interface Diff { - - /** - * Applies difference to the specified part and retunrs the resulted part - */ - T apply(T part); - - /** - * Writes the differences into the output stream - * @param out - * @throws IOException - */ - void writeTo(StreamOutput out) throws IOException; -} diff --git a/src/main/java/org/elasticsearch/cluster/Diffable.java b/src/main/java/org/elasticsearch/cluster/Diffable.java deleted file mode 100644 index 7ce60047a2b..00000000000 --- a/src/main/java/org/elasticsearch/cluster/Diffable.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; - -/** - * Cluster state part, changes in which can be serialized - */ -public interface Diffable extends Writeable { - - /** - * Returns serializable object representing differences between this and previousState - */ - Diff diff(T previousState); - - /** - * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput - */ - Diff readDiffFrom(StreamInput in) throws IOException; - -} diff --git a/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/src/main/java/org/elasticsearch/cluster/DiffableUtils.java deleted file mode 100644 index 4e912a34f97..00000000000 --- a/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Maps.newHashMap; - -public final class DiffableUtils { - private DiffableUtils() { - } - - /** - * Calculates diff between two ImmutableOpenMaps of Diffable objects - */ - public static > Diff> diff(ImmutableOpenMap before, ImmutableOpenMap after) { - assert after != null && before != null; - return new ImmutableOpenMapDiff<>(before, after); - } - - /** - * Calculates diff between two ImmutableMaps of Diffable objects - */ - public static > Diff> diff(ImmutableMap before, ImmutableMap after) { - assert after != null && before != null; - return new ImmutableMapDiff<>(before, after); - } - - /** - * Loads an object that represents difference between two ImmutableOpenMaps - */ - public static > Diff> readImmutableOpenMapDiff(StreamInput in, KeyedReader keyedReader) throws IOException { - return new ImmutableOpenMapDiff<>(in, keyedReader); - } - - /** - * Loads an object that represents difference between two ImmutableMaps - */ - public static > Diff> readImmutableMapDiff(StreamInput in, KeyedReader keyedReader) throws IOException { - return new ImmutableMapDiff<>(in, keyedReader); - } - - /** - * Loads an object that represents difference between two ImmutableOpenMaps - */ - public static > Diff> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException { - return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto)); - } - - /** - * Loads an object that represents difference between two ImmutableMaps - */ - public static > Diff> readImmutableMapDiff(StreamInput in, T proto) throws IOException { - return new ImmutableMapDiff<>(in, new PrototypeReader<>(proto)); - } - - /** - * A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's - * used in custom metadata deserialization. - */ - public interface KeyedReader { - - /** - * reads an object of the type T from the stream input - */ - T readFrom(StreamInput in, String key) throws IOException; - - /** - * reads an object that respresents differences between two objects with the type T from the stream input - */ - Diff readDiffFrom(StreamInput in, String key) throws IOException; - } - - /** - * Implementation of the KeyedReader that is using a prototype object for reading operations - * - * Note: this implementation is ignoring the key. - */ - public static class PrototypeReader> implements KeyedReader { - private T proto; - - public PrototypeReader(T proto) { - this.proto = proto; - } - - @Override - public T readFrom(StreamInput in, String key) throws IOException { - return proto.readFrom(in); - } - - @Override - public Diff readDiffFrom(StreamInput in, String key) throws IOException { - return proto.readDiffFrom(in); - } - } - - /** - * Represents differences between two ImmutableMaps of diffable objects - * - * @param the diffable object - */ - private static class ImmutableMapDiff> extends MapDiff> { - - protected ImmutableMapDiff(StreamInput in, KeyedReader reader) throws IOException { - super(in, reader); - } - - public ImmutableMapDiff(ImmutableMap before, ImmutableMap after) { - assert after != null && before != null; - for (String key : before.keySet()) { - if (!after.containsKey(key)) { - deletes.add(key); - } - } - for (ImmutableMap.Entry partIter : after.entrySet()) { - T beforePart = before.get(partIter.getKey()); - if (beforePart == null) { - adds.put(partIter.getKey(), partIter.getValue()); - } else if (partIter.getValue().equals(beforePart) == false) { - diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart)); - } - } - } - - @Override - public ImmutableMap apply(ImmutableMap map) { - HashMap builder = newHashMap(); - builder.putAll(map); - - for (String part : deletes) { - builder.remove(part); - } - - for (Map.Entry> diff : diffs.entrySet()) { - builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); - } - - for (Map.Entry additon : adds.entrySet()) { - builder.put(additon.getKey(), additon.getValue()); - } - return ImmutableMap.copyOf(builder); - } - } - - /** - * Represents differences between two ImmutableOpenMap of diffable objects - * - * @param the diffable object - */ - private static class ImmutableOpenMapDiff> extends MapDiff> { - - protected ImmutableOpenMapDiff(StreamInput in, KeyedReader reader) throws IOException { - super(in, reader); - } - - public ImmutableOpenMapDiff(ImmutableOpenMap before, ImmutableOpenMap after) { - assert after != null && before != null; - for (ObjectCursor key : before.keys()) { - if (!after.containsKey(key.value)) { - deletes.add(key.value); - } - } - for (ObjectObjectCursor partIter : after) { - T beforePart = before.get(partIter.key); - if (beforePart == null) { - adds.put(partIter.key, partIter.value); - } else if (partIter.value.equals(beforePart) == false) { - diffs.put(partIter.key, partIter.value.diff(beforePart)); - } - } - } - - @Override - public ImmutableOpenMap apply(ImmutableOpenMap map) { - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); - builder.putAll(map); - - for (String part : deletes) { - builder.remove(part); - } - - for (Map.Entry> diff : diffs.entrySet()) { - builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); - } - - for (Map.Entry additon : adds.entrySet()) { - builder.put(additon.getKey(), additon.getValue()); - } - return builder.build(); - } - } - - /** - * Represents differences between two maps of diffable objects - * - * This class is used as base class for different map implementations - * - * @param the diffable object - */ - private static abstract class MapDiff, M> implements Diff { - - protected final List deletes; - protected final Map> diffs; - protected final Map adds; - - protected MapDiff() { - deletes = newArrayList(); - diffs = newHashMap(); - adds = newHashMap(); - } - - protected MapDiff(StreamInput in, KeyedReader reader) throws IOException { - deletes = newArrayList(); - diffs = newHashMap(); - adds = newHashMap(); - int deletesCount = in.readVInt(); - for (int i = 0; i < deletesCount; i++) { - deletes.add(in.readString()); - } - - int diffsCount = in.readVInt(); - for (int i = 0; i < diffsCount; i++) { - String key = in.readString(); - Diff diff = reader.readDiffFrom(in, key); - diffs.put(key, diff); - } - - int addsCount = in.readVInt(); - for (int i = 0; i < addsCount; i++) { - String key = in.readString(); - T part = reader.readFrom(in, key); - adds.put(key, part); - } - } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(deletes.size()); - for (String delete : deletes) { - out.writeString(delete); - } - - out.writeVInt(diffs.size()); - for (Map.Entry> entry : diffs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } - - out.writeVInt(adds.size()); - for (Map.Entry entry : adds.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } - } - } -} diff --git a/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java b/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java deleted file mode 100644 index 92f5897bf2e..00000000000 --- a/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.ElasticsearchException; - -/** - * Thrown by {@link Diffable#readDiffAndApply(org.elasticsearch.common.io.stream.StreamInput)} method - */ -public class IncompatibleClusterStateVersionException extends ElasticsearchException { - public IncompatibleClusterStateVersionException(String msg) { - super(msg); - } - - public IncompatibleClusterStateVersionException(long expectedVersion, String expectedUuid, long receivedVersion, String receivedUuid) { - super("Expected diff for version " + expectedVersion + " with uuid " + expectedUuid + " got version " + receivedVersion + " and uuid " + receivedUuid); - } -} diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index 95c0ba7127e..bb7d332de4f 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -23,7 +23,6 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.google.common.collect.Sets; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.io.stream.StreamInput; @@ -37,12 +36,10 @@ import java.util.Set; /** * Represents current cluster level blocks to block dirty operations done against the cluster. */ -public class ClusterBlocks extends AbstractDiffable { +public class ClusterBlocks { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(ImmutableSet.of(), ImmutableMap.>of()); - public static final ClusterBlocks PROTO = EMPTY_CLUSTER_BLOCK; - private final ImmutableSet global; private final ImmutableMap> indicesBlocks; @@ -206,43 +203,6 @@ public class ClusterBlocks extends AbstractDiffable { return new ClusterBlockException(builder.build()); } - @Override - public void writeTo(StreamOutput out) throws IOException { - writeBlockSet(global, out); - out.writeVInt(indicesBlocks.size()); - for (Map.Entry> entry : indicesBlocks.entrySet()) { - out.writeString(entry.getKey()); - writeBlockSet(entry.getValue(), out); - } - } - - private static void writeBlockSet(ImmutableSet blocks, StreamOutput out) throws IOException { - out.writeVInt(blocks.size()); - for (ClusterBlock block : blocks) { - block.writeTo(out); - } - } - - @Override - public ClusterBlocks readFrom(StreamInput in) throws IOException { - ImmutableSet global = readBlockSet(in); - ImmutableMap.Builder> indicesBuilder = ImmutableMap.builder(); - int size = in.readVInt(); - for (int j = 0; j < size; j++) { - indicesBuilder.put(in.readString().intern(), readBlockSet(in)); - } - return new ClusterBlocks(global, indicesBuilder.build()); - } - - private static ImmutableSet readBlockSet(StreamInput in) throws IOException { - ImmutableSet.Builder builder = ImmutableSet.builder(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.add(ClusterBlock.readClusterBlock(in)); - } - return builder.build(); - } - static class ImmutableLevelHolder { static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(ImmutableSet.of(), ImmutableMap.>of()); @@ -353,7 +313,38 @@ public class ClusterBlocks extends AbstractDiffable { } public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException { - return PROTO.readFrom(in); + ImmutableSet global = readBlockSet(in); + ImmutableMap.Builder> indicesBuilder = ImmutableMap.builder(); + int size = in.readVInt(); + for (int j = 0; j < size; j++) { + indicesBuilder.put(in.readString().intern(), readBlockSet(in)); + } + return new ClusterBlocks(global, indicesBuilder.build()); + } + + public static void writeClusterBlocks(ClusterBlocks blocks, StreamOutput out) throws IOException { + writeBlockSet(blocks.global(), out); + out.writeVInt(blocks.indices().size()); + for (Map.Entry> entry : blocks.indices().entrySet()) { + out.writeString(entry.getKey()); + writeBlockSet(entry.getValue(), out); + } + } + + private static void writeBlockSet(ImmutableSet blocks, StreamOutput out) throws IOException { + out.writeVInt(blocks.size()); + for (ClusterBlock block : blocks) { + block.writeTo(out); + } + } + + private static ImmutableSet readBlockSet(StreamInput in) throws IOException { + ImmutableSet.Builder builder = ImmutableSet.builder(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.add(ClusterBlock.readClusterBlock(in)); + } + return builder.build(); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java index 0f7e55c8087..008935ec026 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.io.stream.StreamInput; @@ -39,9 +38,7 @@ import java.util.Set; /** * */ -public class AliasMetaData extends AbstractDiffable { - - public static final AliasMetaData PROTO = new AliasMetaData("", null, null, null); +public class AliasMetaData { private final String alias; @@ -149,48 +146,6 @@ public class AliasMetaData extends AbstractDiffable { return result; } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(alias()); - if (filter() != null) { - out.writeBoolean(true); - filter.writeTo(out); - } else { - out.writeBoolean(false); - } - if (indexRouting() != null) { - out.writeBoolean(true); - out.writeString(indexRouting()); - } else { - out.writeBoolean(false); - } - if (searchRouting() != null) { - out.writeBoolean(true); - out.writeString(searchRouting()); - } else { - out.writeBoolean(false); - } - - } - - @Override - public AliasMetaData readFrom(StreamInput in) throws IOException { - String alias = in.readString(); - CompressedString filter = null; - if (in.readBoolean()) { - filter = CompressedString.readCompressedString(in); - } - String indexRouting = null; - if (in.readBoolean()) { - indexRouting = in.readString(); - } - String searchRouting = null; - if (in.readBoolean()) { - searchRouting = in.readString(); - } - return new AliasMetaData(alias, filter, indexRouting, searchRouting); - } - public static class Builder { private final String alias; @@ -339,12 +294,44 @@ public class AliasMetaData extends AbstractDiffable { return builder.build(); } - public void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException { - aliasMetaData.writeTo(out); + public static void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException { + out.writeString(aliasMetaData.alias()); + if (aliasMetaData.filter() != null) { + out.writeBoolean(true); + aliasMetaData.filter.writeTo(out); + } else { + out.writeBoolean(false); + } + if (aliasMetaData.indexRouting() != null) { + out.writeBoolean(true); + out.writeString(aliasMetaData.indexRouting()); + } else { + out.writeBoolean(false); + } + if (aliasMetaData.searchRouting() != null) { + out.writeBoolean(true); + out.writeString(aliasMetaData.searchRouting()); + } else { + out.writeBoolean(false); + } + } public static AliasMetaData readFrom(StreamInput in) throws IOException { - return PROTO.readFrom(in); + String alias = in.readString(); + CompressedString filter = null; + if (in.readBoolean()) { + filter = CompressedString.readCompressedString(in); + } + String indexRouting = null; + if (in.readBoolean()) { + indexRouting = in.readString(); + } + String searchRouting = null; + if (in.readBoolean()) { + searchRouting = in.readString(); + } + return new AliasMetaData(alias, filter, indexRouting, searchRouting); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 2005de524bd..1543151fad0 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -25,9 +25,6 @@ import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.Diff; -import org.elasticsearch.cluster.Diffable; -import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; @@ -64,54 +61,60 @@ import static org.elasticsearch.common.settings.ImmutableSettings.*; /** * */ -public class IndexMetaData implements Diffable { +public class IndexMetaData { - public static final IndexMetaData PROTO = IndexMetaData.builder("") - .settings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1).numberOfReplicas(0).build(); - public interface Custom extends Diffable, ToXContent { + public interface Custom { String type(); - Custom fromMap(Map map) throws IOException; + interface Factory { - Custom fromXContent(XContentParser parser) throws IOException; + String type(); - /** - * Merges from this to another, with this being more important, i.e., if something exists in this and another, - * this will prevail. - */ - Custom mergeWith(Custom another); + T readFrom(StreamInput in) throws IOException; + + void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; + + T fromMap(Map map) throws IOException; + + T fromXContent(XContentParser parser) throws IOException; + + void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; + + /** + * Merges from first to second, with first being more important, i.e., if something exists in first and second, + * first will prevail. + */ + T merge(T first, T second); + } } - public static Map customPrototypes = new HashMap<>(); + public static Map customFactories = new HashMap<>(); static { // register non plugin custom metadata - registerPrototype(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.PROTO); + registerFactory(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.FACTORY); } /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); + public static void registerFactory(String type, Custom.Factory factory) { + customFactories.put(type, factory); } @Nullable - public static T lookupPrototype(String type) { - //noinspection unchecked - return (T) customPrototypes.get(type); + public static Custom.Factory lookupFactory(String type) { + return customFactories.get(type); } - public static T lookupPrototypeSafe(String type) throws ElasticsearchIllegalArgumentException { - //noinspection unchecked - T proto = (T) customPrototypes.get(type); - if (proto == null) { - throw new ElasticsearchIllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); + public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { + Custom.Factory factory = customFactories.get(type); + if (factory == null) { + throw new ElasticsearchIllegalArgumentException("No custom index metadata factoy registered for type [" + type + "]"); } - return proto; + return factory; } public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); @@ -450,9 +453,7 @@ public class IndexMetaData implements Diffable { if (state != that.state) { return false; } - if (!customs.equals(that.customs)) { - return false; - } + return true; } @@ -466,126 +467,6 @@ public class IndexMetaData implements Diffable { return result; } - @Override - public Diff diff(IndexMetaData previousState) { - return new IndexMetaDataDiff(previousState, this); - } - - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { - return new IndexMetaDataDiff(in); - } - - private static class IndexMetaDataDiff implements Diff { - - private final String index; - private final long version; - private final State state; - private final Settings settings; - private final Diff> mappings; - private final Diff> aliases; - private Diff> customs; - - public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) { - index = after.index; - version = after.version; - state = after.state; - settings = after.settings; - mappings = DiffableUtils.diff(before.mappings, after.mappings); - aliases = DiffableUtils.diff(before.aliases, after.aliases); - customs = DiffableUtils.diff(before.customs, after.customs); - } - - public IndexMetaDataDiff(StreamInput in) throws IOException { - index = in.readString(); - version = in.readLong(); - state = State.fromId(in.readByte()); - settings = ImmutableSettings.readSettingsFromStream(in); - mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO); - aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO); - customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader() { - @Override - public Custom readFrom(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); - } - - @Override - public Diff readDiffFrom(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); - } - }); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(index); - out.writeLong(version); - out.writeByte(state.id); - ImmutableSettings.writeSettingsToStream(settings, out); - mappings.writeTo(out); - aliases.writeTo(out); - customs.writeTo(out); - } - - @Override - public IndexMetaData apply(IndexMetaData part) { - Builder builder = builder(index); - builder.version(version); - builder.state(state); - builder.settings(settings); - builder.mappings.putAll(mappings.apply(part.mappings)); - builder.aliases.putAll(aliases.apply(part.aliases)); - builder.customs.putAll(customs.apply(part.customs)); - return builder.build(); - } - } - - @Override - public IndexMetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(in.readString()); - builder.version(in.readLong()); - builder.state(State.fromId(in.readByte())); - builder.settings(readSettingsFromStream(in)); - int mappingsSize = in.readVInt(); - for (int i = 0; i < mappingsSize; i++) { - MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in); - builder.putMapping(mappingMd); - } - int aliasesSize = in.readVInt(); - for (int i = 0; i < aliasesSize; i++) { - AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); - builder.putAlias(aliasMd); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(index); - out.writeLong(version); - out.writeByte(state.id()); - writeSettingsToStream(settings, out); - out.writeVInt(mappings.size()); - for (ObjectCursor cursor : mappings.values()) { - cursor.value.writeTo(out); - } - out.writeVInt(aliases.size()); - for (ObjectCursor cursor : aliases.values()) { - cursor.value.writeTo(out); - } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); - } - } - public static Builder builder(String index) { return new Builder(index); } @@ -781,7 +662,7 @@ public class IndexMetaData implements Diffable { for (ObjectObjectCursor cursor : indexMetaData.customs()) { builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE); - cursor.value.toXContent(builder, params); + lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); builder.endObject(); } @@ -828,13 +709,12 @@ public class IndexMetaData implements Diffable { } } else { // check if its a custom index metadata - Custom proto = lookupPrototype(currentFieldName); - if (proto == null) { + Custom.Factory factory = lookupFactory(currentFieldName); + if (factory == null) { //TODO warn parser.skipChildren(); } else { - Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); + builder.putCustom(factory.type(), factory.fromXContent(parser)); } } } else if (token == XContentParser.Token.START_ARRAY) { @@ -863,7 +743,47 @@ public class IndexMetaData implements Diffable { } public static IndexMetaData readFrom(StreamInput in) throws IOException { - return PROTO.readFrom(in); + Builder builder = new Builder(in.readString()); + builder.version(in.readLong()); + builder.state(State.fromId(in.readByte())); + builder.settings(readSettingsFromStream(in)); + int mappingsSize = in.readVInt(); + for (int i = 0; i < mappingsSize; i++) { + MappingMetaData mappingMd = MappingMetaData.readFrom(in); + builder.putMapping(mappingMd); + } + int aliasesSize = in.readVInt(); + for (int i = 0; i < aliasesSize; i++) { + AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); + builder.putAlias(aliasMd); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + public static void writeTo(IndexMetaData indexMetaData, StreamOutput out) throws IOException { + out.writeString(indexMetaData.index()); + out.writeLong(indexMetaData.version()); + out.writeByte(indexMetaData.state().id()); + writeSettingsToStream(indexMetaData.settings(), out); + out.writeVInt(indexMetaData.mappings().size()); + for (ObjectCursor cursor : indexMetaData.mappings().values()) { + MappingMetaData.writeTo(cursor.value, out); + } + out.writeVInt(indexMetaData.aliases().size()); + for (ObjectCursor cursor : indexMetaData.aliases().values()) { + AliasMetaData.Builder.writeTo(cursor.value, out); + } + out.writeVInt(indexMetaData.customs().size()); + for (ObjectObjectCursor cursor : indexMetaData.customs()) { + out.writeString(cursor.key); + lookupFactorySafe(cursor.key).writeTo(cursor.value, out); + } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 54150ee6a1e..582e008550d 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.Sets; -import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.Version; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.compress.CompressedString; @@ -42,9 +42,7 @@ import java.util.Set; /** * */ -public class IndexTemplateMetaData extends AbstractDiffable { - - public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build(); +public class IndexTemplateMetaData { private final String name; @@ -163,57 +161,11 @@ public class IndexTemplateMetaData extends AbstractDiffable cursor : mappings) { - out.writeString(cursor.key); - cursor.value.writeTo(out); - } - out.writeVInt(aliases.size()); - for (ObjectCursor cursor : aliases.values()) { - cursor.value.writeTo(out); - } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); - } - } - public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet("template", "order", "mappings", "settings"); static { - VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet()); + VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet()); } private String name; @@ -353,7 +305,7 @@ public class IndexTemplateMetaData extends AbstractDiffable cursor : indexTemplateMetaData.customs()) { builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE); - cursor.value.toXContent(builder, params); + IndexMetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); builder.endObject(); } @@ -395,13 +347,12 @@ public class IndexTemplateMetaData extends AbstractDiffable factory = IndexMetaData.lookupFactory(currentFieldName); + if (factory == null) { //TODO warn parser.skipChildren(); } else { - IndexMetaData.Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); + builder.putCustom(factory.type(), factory.fromXContent(parser)); } } } else if (token == XContentParser.Token.START_ARRAY) { @@ -450,7 +401,47 @@ public class IndexTemplateMetaData extends AbstractDiffable cursor : indexTemplateMetaData.mappings()) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + out.writeVInt(indexTemplateMetaData.aliases().size()); + for (ObjectCursor cursor : indexTemplateMetaData.aliases().values()) { + AliasMetaData.Builder.writeTo(cursor.value, out); + } + out.writeVInt(indexTemplateMetaData.customs().size()); + for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { + out.writeString(cursor.key); + IndexMetaData.lookupFactorySafe(cursor.key).writeTo(cursor.value, out); + } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index 0959a4612c1..f2ace98caeb 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -19,11 +19,9 @@ package org.elasticsearch.cluster.metadata; -import com.google.common.collect.Maps; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.TimestampParsingException; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedString; @@ -41,18 +39,14 @@ import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import java.io.IOException; import java.util.Arrays; -import java.util.HashMap; import java.util.Map; -import static com.google.common.collect.Maps.newHashMap; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** * Mapping configuration for a type. */ -public class MappingMetaData extends AbstractDiffable { - - public static final MappingMetaData PROTO = new MappingMetaData(); +public class MappingMetaData { public static class Id { @@ -324,15 +318,6 @@ public class MappingMetaData extends AbstractDiffable { initMappers(withoutType); } - private MappingMetaData() { - this.type = ""; - try { - this.source = new CompressedString(""); - } catch (IOException ex) { - throw new ElasticsearchIllegalStateException("Cannot create MappingMetaData prototype", ex); - } - } - private void initMappers(Map withoutType) { if (withoutType.containsKey("_id")) { String path = null; @@ -548,35 +533,34 @@ public class MappingMetaData extends AbstractDiffable { } } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(type()); - source().writeTo(out); + public static void writeTo(MappingMetaData mappingMd, StreamOutput out) throws IOException { + out.writeString(mappingMd.type()); + mappingMd.source().writeTo(out); // id - if (id().hasPath()) { + if (mappingMd.id().hasPath()) { out.writeBoolean(true); - out.writeString(id().path()); + out.writeString(mappingMd.id().path()); } else { out.writeBoolean(false); } // routing - out.writeBoolean(routing().required()); - if (routing().hasPath()) { + out.writeBoolean(mappingMd.routing().required()); + if (mappingMd.routing().hasPath()) { out.writeBoolean(true); - out.writeString(routing().path()); + out.writeString(mappingMd.routing().path()); } else { out.writeBoolean(false); } // timestamp - out.writeBoolean(timestamp().enabled()); - out.writeOptionalString(timestamp().path()); - out.writeString(timestamp().format()); - out.writeOptionalString(timestamp().defaultTimestamp()); + out.writeBoolean(mappingMd.timestamp().enabled()); + out.writeOptionalString(mappingMd.timestamp().path()); + out.writeString(mappingMd.timestamp().format()); + out.writeOptionalString(mappingMd.timestamp().defaultTimestamp()); // TODO Remove the test in elasticsearch 2.0.0 if (out.getVersion().onOrAfter(Version.V_1_5_0)) { - out.writeOptionalBoolean(timestamp().ignoreMissing()); + out.writeOptionalBoolean(mappingMd.timestamp().ignoreMissing()); } - out.writeBoolean(hasParentField()); + out.writeBoolean(mappingMd.hasParentField()); } @Override @@ -605,7 +589,7 @@ public class MappingMetaData extends AbstractDiffable { return result; } - public MappingMetaData readFrom(StreamInput in) throws IOException { + public static MappingMetaData readFrom(StreamInput in) throws IOException { String type = in.readString(); CompressedString source = CompressedString.readCompressedString(in); // id diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 4f20e1212cc..51793b1d27b 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -26,9 +26,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; import com.google.common.collect.*; import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.*; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.DiffableUtils.KeyedReader; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.Nullable; @@ -58,9 +56,7 @@ import static org.elasticsearch.common.settings.ImmutableSettings.*; /** * */ -public class MetaData implements Iterable, Diffable { - - public static final MetaData PROTO = builder().build(); +public class MetaData implements Iterable { public static final String ALL = "_all"; @@ -72,51 +68,60 @@ public class MetaData implements Iterable, Diffable { GATEWAY, /* Custom metadata should be stored as part of a snapshot */ - SNAPSHOT + SNAPSHOT; } public static EnumSet API_ONLY = EnumSet.of(XContentContext.API); public static EnumSet API_AND_GATEWAY = EnumSet.of(XContentContext.API, XContentContext.GATEWAY); public static EnumSet API_AND_SNAPSHOT = EnumSet.of(XContentContext.API, XContentContext.SNAPSHOT); - public interface Custom extends Diffable, ToXContent { + public interface Custom { - String type(); + abstract class Factory { - Custom fromXContent(XContentParser parser) throws IOException; + public abstract String type(); - EnumSet context(); + public abstract T readFrom(StreamInput in) throws IOException; + + public abstract void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; + + public abstract T fromXContent(XContentParser parser) throws IOException; + + public abstract void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; + + public EnumSet context() { + return API_ONLY; + } + } } - public static Map customPrototypes = new HashMap<>(); + public static Map customFactories = new HashMap<>(); static { // register non plugin custom metadata - registerPrototype(RepositoriesMetaData.TYPE, RepositoriesMetaData.PROTO); - registerPrototype(SnapshotMetaData.TYPE, SnapshotMetaData.PROTO); - registerPrototype(RestoreMetaData.TYPE, RestoreMetaData.PROTO); + registerFactory(RepositoriesMetaData.TYPE, RepositoriesMetaData.FACTORY); + registerFactory(SnapshotMetaData.TYPE, SnapshotMetaData.FACTORY); + registerFactory(RestoreMetaData.TYPE, RestoreMetaData.FACTORY); } /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); + public static void registerFactory(String type, Custom.Factory factory) { + customFactories.put(type, factory); } @Nullable - public static T lookupPrototype(String type) { - //noinspection unchecked - return (T) customPrototypes.get(type); + public static Custom.Factory lookupFactory(String type) { + return customFactories.get(type); } - public static T lookupPrototypeSafe(String type) throws ElasticsearchIllegalArgumentException { - //noinspection unchecked - T proto = (T) customPrototypes.get(type); - if (proto == null) { - throw new ElasticsearchIllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); + public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { + Custom.Factory factory = customFactories.get(type); + if (factory == null) { + throw new ElasticsearchIllegalArgumentException("No custom index metadata factory registered for type [" + type + "]"); } - return proto; + return factory; } @@ -640,14 +645,14 @@ public class MetaData implements Iterable, Diffable { /** * Translates the provided indices or aliases, eventually containing wildcard expressions, into actual indices. * - * @param indicesOptions how the aliases or indices need to be resolved to concrete indices + * @param indicesOptions how the aliases or indices need to be resolved to concrete indices * @param aliasesOrIndices the aliases or indices to be resolved to concrete indices * @return the obtained concrete indices - * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options - * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options - * don't allow such a case. + * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options + * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options + * don't allow such a case. * @throws ElasticsearchIllegalArgumentException if one of the aliases resolve to multiple indices and the provided - * indices options don't allow such a case. + * indices options don't allow such a case. */ public String[] concreteIndices(IndicesOptions indicesOptions, String... aliasesOrIndices) throws IndexMissingException, ElasticsearchIllegalArgumentException { if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed()) { @@ -1135,14 +1140,14 @@ public class MetaData implements Iterable, Diffable { // Check if any persistent metadata needs to be saved int customCount1 = 0; for (ObjectObjectCursor cursor : metaData1.customs) { - if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (customFactories.get(cursor.key).context().contains(XContentContext.GATEWAY)) { if (!cursor.value.equals(metaData2.custom(cursor.key))) return false; customCount1++; } } int customCount2 = 0; for (ObjectObjectCursor cursor : metaData2.customs) { - if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (customFactories.get(cursor.key).context().contains(XContentContext.GATEWAY)) { customCount2++; } } @@ -1150,129 +1155,6 @@ public class MetaData implements Iterable, Diffable { return true; } - @Override - public Diff diff(MetaData previousState) { - return new MetaDataDiff(previousState, this); - } - - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { - return new MetaDataDiff(in); - } - - private static class MetaDataDiff implements Diff { - - private long version; - - private String uuid; - - private Settings transientSettings; - private Settings persistentSettings; - private Diff> indices; - private Diff> templates; - private Diff> customs; - - - public MetaDataDiff(MetaData before, MetaData after) { - uuid = after.uuid; - version = after.version; - transientSettings = after.transientSettings; - persistentSettings = after.persistentSettings; - indices = DiffableUtils.diff(before.indices, after.indices); - templates = DiffableUtils.diff(before.templates, after.templates); - customs = DiffableUtils.diff(before.customs, after.customs); - } - - public MetaDataDiff(StreamInput in) throws IOException { - uuid = in.readString(); - version = in.readLong(); - transientSettings = ImmutableSettings.readSettingsFromStream(in); - persistentSettings = ImmutableSettings.readSettingsFromStream(in); - indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO); - templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO); - customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { - @Override - public Custom readFrom(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); - } - - @Override - public Diff readDiffFrom(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); - } - }); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(uuid); - out.writeLong(version); - ImmutableSettings.writeSettingsToStream(transientSettings, out); - ImmutableSettings.writeSettingsToStream(persistentSettings, out); - indices.writeTo(out); - templates.writeTo(out); - customs.writeTo(out); - } - - @Override - public MetaData apply(MetaData part) { - Builder builder = builder(); - builder.uuid(uuid); - builder.version(version); - builder.transientSettings(transientSettings); - builder.persistentSettings(persistentSettings); - builder.indices(indices.apply(part.indices)); - builder.templates(templates.apply(part.templates)); - builder.customs(customs.apply(part.customs)); - return builder.build(); - } - } - - @Override - public MetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(); - builder.version = in.readLong(); - builder.uuid = in.readString(); - builder.transientSettings(readSettingsFromStream(in)); - builder.persistentSettings(readSettingsFromStream(in)); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(IndexMetaData.Builder.readFrom(in), false); - } - size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(IndexTemplateMetaData.Builder.readFrom(in)); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(version); - out.writeString(uuid); - writeSettingsToStream(transientSettings, out); - writeSettingsToStream(persistentSettings, out); - out.writeVInt(indices.size()); - for (IndexMetaData indexMetaData : this) { - indexMetaData.writeTo(out); - } - out.writeVInt(templates.size()); - for (ObjectCursor cursor : templates.values()) { - cursor.value.writeTo(out); - } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); - } - } - public static Builder builder() { return new Builder(); } @@ -1344,11 +1226,6 @@ public class MetaData implements Iterable, Diffable { return this; } - public Builder indices(ImmutableOpenMap indices) { - this.indices.putAll(indices); - return this; - } - public Builder put(IndexTemplateMetaData.Builder template) { return put(template.build()); } @@ -1363,11 +1240,6 @@ public class MetaData implements Iterable, Diffable { return this; } - public Builder templates(ImmutableOpenMap templates) { - this.templates.putAll(templates); - return this; - } - public Custom getCustom(String type) { return customs.get(type); } @@ -1382,11 +1254,6 @@ public class MetaData implements Iterable, Diffable { return this; } - public Builder customs(ImmutableOpenMap customs) { - this.customs.putAll(customs); - return this; - } - public Builder updateSettings(Settings settings, String... indices) { if (indices == null || indices.length == 0) { indices = this.indices.keys().toArray(String.class); @@ -1439,11 +1306,6 @@ public class MetaData implements Iterable, Diffable { return this; } - public Builder uuid(String uuid) { - this.uuid = uuid; - return this; - } - public Builder generateUuidIfNeeded() { if (uuid.equals("_na_")) { uuid = Strings.randomBase64UUID(); @@ -1502,10 +1364,10 @@ public class MetaData implements Iterable, Diffable { } for (ObjectObjectCursor cursor : metaData.customs()) { - Custom proto = lookupPrototypeSafe(cursor.key); - if (proto.context().contains(context)) { + Custom.Factory factory = lookupFactorySafe(cursor.key); + if (factory.context().contains(context)) { builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); + factory.toXContent(cursor.value, builder, params); builder.endObject(); } } @@ -1549,13 +1411,12 @@ public class MetaData implements Iterable, Diffable { } } else { // check if its a custom index metadata - Custom proto = lookupPrototype(currentFieldName); - if (proto == null) { + Custom.Factory factory = lookupFactory(currentFieldName); + if (factory == null) { //TODO warn parser.skipChildren(); } else { - Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); + builder.putCustom(factory.type(), factory.fromXContent(parser)); } } } else if (token.isValue()) { @@ -1570,7 +1431,46 @@ public class MetaData implements Iterable, Diffable { } public static MetaData readFrom(StreamInput in) throws IOException { - return PROTO.readFrom(in); + Builder builder = new Builder(); + builder.version = in.readLong(); + builder.uuid = in.readString(); + builder.transientSettings(readSettingsFromStream(in)); + builder.persistentSettings(readSettingsFromStream(in)); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.put(IndexMetaData.Builder.readFrom(in), false); + } + size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.put(IndexTemplateMetaData.Builder.readFrom(in)); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + public static void writeTo(MetaData metaData, StreamOutput out) throws IOException { + out.writeLong(metaData.version); + out.writeString(metaData.uuid); + writeSettingsToStream(metaData.transientSettings(), out); + writeSettingsToStream(metaData.persistentSettings(), out); + out.writeVInt(metaData.indices.size()); + for (IndexMetaData indexMetaData : metaData) { + IndexMetaData.Builder.writeTo(indexMetaData, out); + } + out.writeVInt(metaData.templates.size()); + for (ObjectCursor cursor : metaData.templates.values()) { + IndexTemplateMetaData.Builder.writeTo(cursor.value, out); + } + out.writeVInt(metaData.customs().size()); + for (ObjectObjectCursor cursor : metaData.customs()) { + out.writeString(cursor.key); + lookupFactorySafe(cursor.key).writeTo(cursor.value, out); + } } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index f4077966485..244c598f0a3 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -273,7 +273,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (existing == null) { customs.put(type, custom); } else { - IndexMetaData.Custom merged = existing.mergeWith(custom); + IndexMetaData.Custom merged = IndexMetaData.lookupFactorySafe(type).merge(existing, custom); customs.put(type, merged); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 51cd5db086b..81b11fc14b1 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -21,8 +21,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.cluster.AbstractDiffable; -import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; @@ -41,11 +39,11 @@ import java.util.Map; /** * Contains metadata about registered snapshot repositories */ -public class RepositoriesMetaData extends AbstractDiffable implements MetaData.Custom { +public class RepositoriesMetaData implements MetaData.Custom { public static final String TYPE = "repositories"; - public static final RepositoriesMetaData PROTO = new RepositoriesMetaData(); + public static final Factory FACTORY = new Factory(); private final ImmutableList repositories; @@ -82,132 +80,122 @@ public class RepositoriesMetaData extends AbstractDiffable implements Me return null; } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - RepositoriesMetaData that = (RepositoriesMetaData) o; - - return repositories.equals(that.repositories); - - } - - @Override - public int hashCode() { - return repositories.hashCode(); - } - /** - * {@inheritDoc} + * Repository metadata factory */ - @Override - public String type() { - return TYPE; - } + public static class Factory extends MetaData.Custom.Factory { - /** - * {@inheritDoc} - */ - @Override - public Custom readFrom(StreamInput in) throws IOException { - RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; - for (int i = 0; i < repository.length; i++) { - repository[i] = RepositoryMetaData.readFrom(in); + /** + * {@inheritDoc} + */ + @Override + public String type() { + return TYPE; } - return new RepositoriesMetaData(repository); - } - /** - * {@inheritDoc} - */ - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(repositories.size()); - for (RepositoryMetaData repository : repositories) { - repository.writeTo(out); + /** + * {@inheritDoc} + */ + @Override + public RepositoriesMetaData readFrom(StreamInput in) throws IOException { + RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; + for (int i = 0; i < repository.length; i++) { + repository[i] = RepositoryMetaData.readFrom(in); + } + return new RepositoriesMetaData(repository); } - } - /** - * {@inheritDoc} - */ - @Override - public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - List repository = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String name = parser.currentName(); - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); - } - String type = null; - Settings settings = ImmutableSettings.EMPTY; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("type".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); - } - type = parser.text(); - } else if ("settings".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); - } - settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); - } else { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); - } - } else { - throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); - } - } - if (type == null) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); - } - repository.add(new RepositoryMetaData(name, type, settings)); - } else { - throw new ElasticsearchParseException("failed to parse repositories"); + /** + * {@inheritDoc} + */ + @Override + public void writeTo(RepositoriesMetaData repositories, StreamOutput out) throws IOException { + out.writeVInt(repositories.repositories().size()); + for (RepositoryMetaData repository : repositories.repositories()) { + repository.writeTo(out); } } - return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); - } - /** - * {@inheritDoc} - */ - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - for (RepositoryMetaData repository : repositories) { - toXContent(repository, builder, params); + /** + * {@inheritDoc} + */ + @Override + public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + List repository = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String name = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); + } + String type = null; + Settings settings = ImmutableSettings.EMPTY; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("type".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); + } + type = parser.text(); + } else if ("settings".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); + } + settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); + } else { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); + } + } else { + throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); + } + } + if (type == null) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); + } + repository.add(new RepositoryMetaData(name, type, settings)); + } else { + throw new ElasticsearchParseException("failed to parse repositories"); + } + } + return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); } - return builder; - } - @Override - public EnumSet context() { - return MetaData.API_AND_GATEWAY; - } - - /** - * Serializes information about a single repository - * - * @param repository repository metadata - * @param builder XContent builder - * @param params serialization parameters - * @throws IOException - */ - public static void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("type", repository.type()); - builder.startObject("settings"); - for (Map.Entry settingEntry : repository.settings().getAsMap().entrySet()) { - builder.field(settingEntry.getKey(), settingEntry.getValue()); + /** + * {@inheritDoc} + */ + @Override + public void toXContent(RepositoriesMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { + for (RepositoryMetaData repository : customIndexMetaData.repositories()) { + toXContent(repository, builder, params); + } } - builder.endObject(); - builder.endObject(); + @Override + public EnumSet context() { + return MetaData.API_AND_GATEWAY; + } + + /** + * Serializes information about a single repository + * + * @param repository repository metadata + * @param builder XContent builder + * @param params serialization parameters + * @throws IOException + */ + public void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("type", repository.type()); + builder.startObject("settings"); + for (Map.Entry settingEntry : repository.settings().getAsMap().entrySet()) { + builder.field(settingEntry.getKey(), settingEntry.getValue()); + } + builder.endObject(); + + builder.endObject(); + } } + } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java index a283f1f43c1..ea50b30ba88 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java @@ -99,25 +99,4 @@ public class RepositoryMetaData { out.writeString(type); ImmutableSettings.writeSettingsToStream(settings, out); } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - RepositoryMetaData that = (RepositoryMetaData) o; - - if (!name.equals(that.name)) return false; - if (!type.equals(that.type)) return false; - return settings.equals(that.settings); - - } - - @Override - public int hashCode() { - int result = name.hashCode(); - result = 31 * result + type.hashCode(); - result = 31 * result + settings.hashCode(); - return result; - } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java index 6dbde85c158..373d5ff858c 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -31,17 +30,16 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.EnumSet; import java.util.Map; /** * Meta data about restore processes that are currently executing */ -public class RestoreMetaData extends AbstractDiffable implements MetaData.Custom { +public class RestoreMetaData implements MetaData.Custom { public static final String TYPE = "restore"; - public static final RestoreMetaData PROTO = new RestoreMetaData(); + public static final Factory FACTORY = new Factory(); private final ImmutableList entries; @@ -397,122 +395,124 @@ public class RestoreMetaData extends AbstractDiffable implement } /** - * {@inheritDoc} + * Restore metadata factory */ - @Override - public String type() { - return TYPE; - } + public static class Factory extends MetaData.Custom.Factory { - /** - * {@inheritDoc} - */ - @Override - public RestoreMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); - State state = State.fromValue(in.readByte()); - int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); - for (int j = 0; j < indices; j++) { - indexBuilder.add(in.readString()); - } - ImmutableMap.Builder builder = ImmutableMap.builder(); - int shards = in.readVInt(); - for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); - ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); - builder.put(shardId, shardState); - } - entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); + /** + * {@inheritDoc} + */ + @Override + public String type() { + return TYPE; } - return new RestoreMetaData(entries); - } - /** - * {@inheritDoc} - */ - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(entries.size()); - for (Entry entry : entries) { - entry.snapshotId().writeTo(out); - out.writeByte(entry.state().value()); - out.writeVInt(entry.indices().size()); - for (String index : entry.indices()) { - out.writeString(index); - } - out.writeVInt(entry.shards().size()); - for (Map.Entry shardEntry : entry.shards().entrySet()) { - shardEntry.getKey().writeTo(out); - shardEntry.getValue().writeTo(out); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public RestoreMetaData fromXContent(XContentParser parser) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public EnumSet context() { - return MetaData.API_ONLY; - } - - /** - * {@inheritDoc} - */ - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray("snapshots"); - for (Entry entry : entries) { - toXContent(entry, builder, params); - } - builder.endArray(); - return builder; - } - - /** - * Serializes single restore operation - * - * @param entry restore operation metadata - * @param builder XContent builder - * @param params serialization parameters - * @throws IOException - */ - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field("snapshot", entry.snapshotId().getSnapshot()); - builder.field("repository", entry.snapshotId().getRepository()); - builder.field("state", entry.state()); - builder.startArray("indices"); - { - for (String index : entry.indices()) { - builder.value(index); - } - } - builder.endArray(); - builder.startArray("shards"); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardRestoreStatus status = shardEntry.getValue(); - builder.startObject(); - { - builder.field("index", shardId.getIndex()); - builder.field("shard", shardId.getId()); - builder.field("state", status.state()); + /** + * {@inheritDoc} + */ + @Override + public RestoreMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + State state = State.fromValue(in.readByte()); + int indices = in.readVInt(); + ImmutableList.Builder indexBuilder = ImmutableList.builder(); + for (int j = 0; j < indices; j++) { + indexBuilder.add(in.readString()); + } + ImmutableMap.Builder builder = ImmutableMap.builder(); + int shards = in.readVInt(); + for (int j = 0; j < shards; j++) { + ShardId shardId = ShardId.readShardId(in); + ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); + builder.put(shardId, shardState); + } + entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); + } + return new RestoreMetaData(entries); + } + + /** + * {@inheritDoc} + */ + @Override + public void writeTo(RestoreMetaData repositories, StreamOutput out) throws IOException { + out.writeVInt(repositories.entries().size()); + for (Entry entry : repositories.entries()) { + entry.snapshotId().writeTo(out); + out.writeByte(entry.state().value()); + out.writeVInt(entry.indices().size()); + for (String index : entry.indices()) { + out.writeString(index); + } + out.writeVInt(entry.shards().size()); + for (Map.Entry shardEntry : entry.shards().entrySet()) { + shardEntry.getKey().writeTo(out); + shardEntry.getValue().writeTo(out); } - builder.endObject(); } } - builder.endArray(); - builder.endObject(); + /** + * {@inheritDoc} + */ + @Override + public RestoreMetaData fromXContent(XContentParser parser) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * {@inheritDoc} + */ + @Override + public void toXContent(RestoreMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray("snapshots"); + for (Entry entry : customIndexMetaData.entries()) { + toXContent(entry, builder, params); + } + builder.endArray(); + } + + /** + * Serializes single restore operation + * + * @param entry restore operation metadata + * @param builder XContent builder + * @param params serialization parameters + * @throws IOException + */ + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field("snapshot", entry.snapshotId().getSnapshot()); + builder.field("repository", entry.snapshotId().getRepository()); + builder.field("state", entry.state()); + builder.startArray("indices"); + { + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.startArray("shards"); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardRestoreStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field("index", shardId.getIndex()); + builder.field("shard", shardId.getId()); + builder.field("state", status.state()); + } + builder.endObject(); + } + } + + builder.endArray(); + builder.endObject(); + } } + + } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java index 5010fcab5ac..b759fe5daeb 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java @@ -22,8 +22,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.AbstractDiffable; -import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -33,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.EnumSet; import java.util.Map; import static com.google.common.collect.Maps.newHashMap; @@ -41,10 +38,10 @@ import static com.google.common.collect.Maps.newHashMap; /** * Meta data about snapshots that are currently executing */ -public class SnapshotMetaData extends AbstractDiffable implements MetaData.Custom { +public class SnapshotMetaData implements MetaData.Custom { public static final String TYPE = "snapshots"; - public static final SnapshotMetaData PROTO = new SnapshotMetaData(); + public static final Factory FACTORY = new Factory(); @Override public boolean equals(Object o) { @@ -333,123 +330,123 @@ public class SnapshotMetaData extends AbstractDiffable implements MetaDa return null; } - @Override - public String type() { - return TYPE; - } - @Override - public SnapshotMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); - boolean includeGlobalState = in.readBoolean(); - State state = State.fromValue(in.readByte()); - int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); - for (int j = 0; j < indices; j++) { - indexBuilder.add(in.readString()); - } - long startTime = in.readLong(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - int shards = in.readVInt(); - for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); - String nodeId = in.readOptionalString(); - State shardState = State.fromValue(in.readByte()); - builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); - } - entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); + public static class Factory extends MetaData.Custom.Factory { + + @Override + public String type() { + return TYPE; //To change body of implemented methods use File | Settings | File Templates. } - return new SnapshotMetaData(entries); - } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(entries.size()); - for (Entry entry : entries) { - entry.snapshotId().writeTo(out); - out.writeBoolean(entry.includeGlobalState()); - out.writeByte(entry.state().value()); - out.writeVInt(entry.indices().size()); - for (String index : entry.indices()) { - out.writeString(index); - } - out.writeLong(entry.startTime()); - out.writeVInt(entry.shards().size()); - for (Map.Entry shardEntry : entry.shards().entrySet()) { - shardEntry.getKey().writeTo(out); - out.writeOptionalString(shardEntry.getValue().nodeId()); - out.writeByte(shardEntry.getValue().state().value()); - } - } - } - - @Override - public SnapshotMetaData fromXContent(XContentParser parser) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public EnumSet context() { - return MetaData.API_ONLY; - } - - static final class Fields { - static final XContentBuilderString REPOSITORY = new XContentBuilderString("repository"); - static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); - static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); - static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); - static final XContentBuilderString STATE = new XContentBuilderString("state"); - static final XContentBuilderString INDICES = new XContentBuilderString("indices"); - static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); - static final XContentBuilderString START_TIME = new XContentBuilderString("start_time"); - static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); - static final XContentBuilderString INDEX = new XContentBuilderString("index"); - static final XContentBuilderString SHARD = new XContentBuilderString("shard"); - static final XContentBuilderString NODE = new XContentBuilderString("node"); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray(Fields.SNAPSHOTS); - for (Entry entry : entries) { - toXContent(entry, builder, params); - } - builder.endArray(); - return builder; - } - - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); - builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); - builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); - builder.field(Fields.STATE, entry.state()); - builder.startArray(Fields.INDICES); - { - for (String index : entry.indices()) { - builder.value(index); - } - } - builder.endArray(); - builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); - builder.startArray(Fields.SHARDS); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardSnapshotStatus status = shardEntry.getValue(); - builder.startObject(); - { - builder.field(Fields.INDEX, shardId.getIndex()); - builder.field(Fields.SHARD, shardId.getId()); - builder.field(Fields.STATE, status.state()); - builder.field(Fields.NODE, status.nodeId()); + @Override + public SnapshotMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + boolean includeGlobalState = in.readBoolean(); + State state = State.fromValue(in.readByte()); + int indices = in.readVInt(); + ImmutableList.Builder indexBuilder = ImmutableList.builder(); + for (int j = 0; j < indices; j++) { + indexBuilder.add(in.readString()); + } + long startTime = in.readLong(); + ImmutableMap.Builder builder = ImmutableMap.builder(); + int shards = in.readVInt(); + for (int j = 0; j < shards; j++) { + ShardId shardId = ShardId.readShardId(in); + String nodeId = in.readOptionalString(); + State shardState = State.fromValue(in.readByte()); + builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); + } + entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); + } + return new SnapshotMetaData(entries); + } + + @Override + public void writeTo(SnapshotMetaData repositories, StreamOutput out) throws IOException { + out.writeVInt(repositories.entries().size()); + for (Entry entry : repositories.entries()) { + entry.snapshotId().writeTo(out); + out.writeBoolean(entry.includeGlobalState()); + out.writeByte(entry.state().value()); + out.writeVInt(entry.indices().size()); + for (String index : entry.indices()) { + out.writeString(index); + } + out.writeLong(entry.startTime()); + out.writeVInt(entry.shards().size()); + for (Map.Entry shardEntry : entry.shards().entrySet()) { + shardEntry.getKey().writeTo(out); + out.writeOptionalString(shardEntry.getValue().nodeId()); + out.writeByte(shardEntry.getValue().state().value()); } - builder.endObject(); } } - builder.endArray(); - builder.endObject(); + + @Override + public SnapshotMetaData fromXContent(XContentParser parser) throws IOException { + throw new UnsupportedOperationException(); + } + + static final class Fields { + static final XContentBuilderString REPOSITORY = new XContentBuilderString("repository"); + static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); + static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); + static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); + static final XContentBuilderString STATE = new XContentBuilderString("state"); + static final XContentBuilderString INDICES = new XContentBuilderString("indices"); + static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); + static final XContentBuilderString START_TIME = new XContentBuilderString("start_time"); + static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); + static final XContentBuilderString INDEX = new XContentBuilderString("index"); + static final XContentBuilderString SHARD = new XContentBuilderString("shard"); + static final XContentBuilderString NODE = new XContentBuilderString("node"); + } + + @Override + public void toXContent(SnapshotMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray(Fields.SNAPSHOTS); + for (Entry entry : customIndexMetaData.entries()) { + toXContent(entry, builder, params); + } + builder.endArray(); + } + + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); + builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); + builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); + builder.field(Fields.STATE, entry.state()); + builder.startArray(Fields.INDICES); + { + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); + builder.startArray(Fields.SHARDS); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardSnapshotStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field(Fields.INDEX, shardId.getIndex()); + builder.field(Fields.SHARD, shardId.getId()); + builder.field(Fields.STATE, status.state()); + builder.field(Fields.NODE, status.nodeId()); + } + builder.endObject(); + } + } + builder.endArray(); + builder.endObject(); + } } + + } diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 61b5b876536..0a4986476e5 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -26,7 +26,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.UnmodifiableIterator; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -46,10 +45,9 @@ import static com.google.common.collect.Lists.newArrayList; * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to * access, modify merge / diff discovery nodes. */ -public class DiscoveryNodes extends AbstractDiffable implements Iterable { +public class DiscoveryNodes implements Iterable { public static final DiscoveryNodes EMPTY_NODES = builder().build(); - public static final DiscoveryNodes PROTO = EMPTY_NODES; private final ImmutableOpenMap nodes; private final ImmutableOpenMap dataNodes; @@ -570,44 +568,6 @@ public class DiscoveryNodes extends AbstractDiffable implements } } - public void writeTo(StreamOutput out) throws IOException { - if (masterNodeId == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeString(masterNodeId); - } - out.writeVInt(nodes.size()); - for (DiscoveryNode node : this) { - node.writeTo(out); - } - } - - public DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { - Builder builder = new Builder(); - if (in.readBoolean()) { - builder.masterNodeId(in.readString()); - } - if (localNode != null) { - builder.localNodeId(localNode.id()); - } - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - DiscoveryNode node = DiscoveryNode.readNode(in); - if (localNode != null && node.id().equals(localNode.id())) { - // reuse the same instance of our address and local node id for faster equality - node = localNode; - } - builder.put(node); - } - return builder.build(); - } - - @Override - public DiscoveryNodes readFrom(StreamInput in) throws IOException { - return readFrom(in, localNode()); - } - public static Builder builder() { return new Builder(); } @@ -672,8 +632,37 @@ public class DiscoveryNodes extends AbstractDiffable implements return new DiscoveryNodes(nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), masterNodeId, localNodeId, minNodeVersion, minNonClientNodeVersion); } + public static void writeTo(DiscoveryNodes nodes, StreamOutput out) throws IOException { + if (nodes.masterNodeId() == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeString(nodes.masterNodeId); + } + out.writeVInt(nodes.size()); + for (DiscoveryNode node : nodes) { + node.writeTo(out); + } + } + public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - return PROTO.readFrom(in, localNode); + Builder builder = new Builder(); + if (in.readBoolean()) { + builder.masterNodeId(in.readString()); + } + if (localNode != null) { + builder.localNodeId(localNode.id()); + } + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + DiscoveryNode node = DiscoveryNode.readNode(in); + if (localNode != null && node.id().equals(localNode.id())) { + // reuse the same instance of our address and local node id for faster equality + node = localNode; + } + builder.put(node); + } + return builder.build(); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 239f5113781..5f0356d3572 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -26,7 +26,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenIntMap; @@ -57,9 +56,7 @@ import static com.google.common.collect.Lists.newArrayList; * represented as {@link ShardRouting}. *

    */ -public class IndexRoutingTable extends AbstractDiffable implements Iterable { - - public static final IndexRoutingTable PROTO = builder("").build(); +public class IndexRoutingTable implements Iterable { private final String index; private final ShardShuffler shuffler; @@ -318,51 +315,9 @@ public class IndexRoutingTable extends AbstractDiffable imple return new GroupShardsIterator(set); } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - IndexRoutingTable that = (IndexRoutingTable) o; - - if (!index.equals(that.index)) return false; - if (!shards.equals(that.shards)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = index.hashCode(); - result = 31 * result + shards.hashCode(); - return result; - } - public void validate() throws RoutingValidationException { } - @Override - public IndexRoutingTable readFrom(StreamInput in) throws IOException { - String index = in.readString(); - Builder builder = new Builder(index); - - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); - } - - return builder.build(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(index); - out.writeVInt(shards.size()); - for (IndexShardRoutingTable indexShard : this) { - IndexShardRoutingTable.Builder.writeToThin(indexShard, out); - } - } - public static Builder builder(String index) { return new Builder(index); } @@ -384,7 +339,30 @@ public class IndexRoutingTable extends AbstractDiffable imple * @throws IOException if something happens during read */ public static IndexRoutingTable readFrom(StreamInput in) throws IOException { - return PROTO.readFrom(in); + String index = in.readString(); + Builder builder = new Builder(index); + + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); + } + + return builder.build(); + } + + /** + * Writes an {@link IndexRoutingTable} to a {@link StreamOutput}. + * + * @param index {@link IndexRoutingTable} to write + * @param out {@link StreamOutput} to write to + * @throws IOException if something happens during write + */ + public static void writeTo(IndexRoutingTable index, StreamOutput out) throws IOException { + out.writeString(index.index()); + out.writeVInt(index.shards.size()); + for (IndexShardRoutingTable indexShard : index) { + IndexShardRoutingTable.Builder.writeToThin(indexShard, out); + } } /** diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 2371b96f5b0..00e50b76129 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -347,28 +347,6 @@ public class IndexShardRoutingTable implements Iterable { return new PlainShardIterator(shardId, ordered); } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - IndexShardRoutingTable that = (IndexShardRoutingTable) o; - - if (primaryAllocatedPostApi != that.primaryAllocatedPostApi) return false; - if (!shardId.equals(that.shardId)) return false; - if (!shards.equals(that.shards)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = shardId.hashCode(); - result = 31 * result + shards.hashCode(); - result = 31 * result + (primaryAllocatedPostApi ? 1 : 0); - return result; - } - /** * Returns true iff all shards in the routing table are started otherwise false */ diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 25a8bac2f88..9f1b5db6c6b 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntSet; import com.google.common.collect.*; -import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.StreamInput; @@ -44,9 +44,7 @@ import static com.google.common.collect.Maps.newHashMap; * * @see IndexRoutingTable */ -public class RoutingTable implements Iterable, Diffable { - - public static RoutingTable PROTO = builder().build(); +public class RoutingTable implements Iterable { public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build(); @@ -256,66 +254,6 @@ public class RoutingTable implements Iterable, Diffable diff(RoutingTable previousState) { - return new RoutingTableDiff(previousState, this); - } - - @Override - public Diff readDiffFrom(StreamInput in) throws IOException { - return new RoutingTableDiff(in); - } - - @Override - public RoutingTable readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(); - builder.version = in.readLong(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in); - builder.add(index); - } - - return builder.build(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(version); - out.writeVInt(indicesRouting.size()); - for (IndexRoutingTable index : indicesRouting.values()) { - index.writeTo(out); - } - } - - private static class RoutingTableDiff implements Diff { - - private final long version; - - private final Diff> indicesRouting; - - public RoutingTableDiff(RoutingTable before, RoutingTable after) { - version = after.version; - indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting); - } - - public RoutingTableDiff(StreamInput in) throws IOException { - version = in.readLong(); - indicesRouting = DiffableUtils.readImmutableMapDiff(in, IndexRoutingTable.PROTO); - } - - @Override - public RoutingTable apply(RoutingTable part) { - return new RoutingTable(version, indicesRouting.apply(part.indicesRouting)); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(version); - indicesRouting.writeTo(out); - } - } - public static Builder builder() { return new Builder(); } @@ -465,11 +403,6 @@ public class RoutingTable implements Iterable, Diffable indicesRouting) { - this.indicesRouting.putAll(indicesRouting); - return this; - } - public Builder remove(String index) { indicesRouting.remove(index); return this; @@ -489,7 +422,23 @@ public class RoutingTable implements Iterable, Diffable, Diffable { - /** - * Reads a copy of an object with the same type form the stream input - * - * The caller object remains unchanged. - */ - T readFrom(StreamInput in) throws IOException; -} diff --git a/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/src/main/java/org/elasticsearch/common/io/stream/Writeable.java deleted file mode 100644 index 9025315dc43..00000000000 --- a/src/main/java/org/elasticsearch/common/io/stream/Writeable.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.io.stream; - -import java.io.IOException; - -public interface Writeable extends StreamableReader { - - /** - * Writes the current object into the output stream out - */ - void writeTo(StreamOutput out) throws IOException; -} diff --git a/src/main/java/org/elasticsearch/discovery/Discovery.java b/src/main/java/org/elasticsearch/discovery/Discovery.java index 36b8e5da6f5..dfd51e6348f 100644 --- a/src/main/java/org/elasticsearch/discovery/Discovery.java +++ b/src/main/java/org/elasticsearch/discovery/Discovery.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -60,7 +59,7 @@ public interface Discovery extends LifecycleComponent { * The {@link AckListener} allows to keep track of the ack received from nodes, and verify whether * they updated their own cluster state or not. */ - void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener); + void publish(ClusterState clusterState, AckListener ackListener); public static interface AckListener { void onNodeAck(DiscoveryNode node, @Nullable Throwable t); diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index e6a3668921b..f73f2bbb593 100644 --- a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -21,7 +21,6 @@ package org.elasticsearch.discovery; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -133,9 +132,9 @@ public class DiscoveryService extends AbstractLifecycleComponent implem private static final ConcurrentMap clusterGroups = ConcurrentCollections.newConcurrentMap(); - private volatile ClusterState lastProcessedClusterState; - @Inject public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService, DiscoveryNodeService discoveryNodeService, Version version, DiscoverySettings discoverySettings) { @@ -280,7 +274,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { + public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { if (!master) { throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); } @@ -293,7 +287,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } nodesToPublishTo.add(localDiscovery.localNode); } - publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); + publish(members, clusterState, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); } } @@ -306,47 +300,17 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem return members.toArray(new LocalDiscovery[members.size()]); } - private void publish(LocalDiscovery[] members, ClusterChangedEvent clusterChangedEvent, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { + private void publish(LocalDiscovery[] members, ClusterState clusterState, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { try { // we do the marshaling intentionally, to check it works well... - byte[] clusterStateBytes = null; - byte[] clusterStateDiffBytes = null; + final byte[] clusterStateBytes = Builder.toBytes(clusterState); - ClusterState clusterState = clusterChangedEvent.state(); for (final LocalDiscovery discovery : members) { if (discovery.master) { continue; } - ClusterState newNodeSpecificClusterState = null; - synchronized (this) { - // we do the marshaling intentionally, to check it works well... - // check if we publsihed cluster state at least once and node was in the cluster when we published cluster state the last time - if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode.id())) { - // both conditions are true - which means we can try sending cluster state as diffs - if (clusterStateDiffBytes == null) { - Diff diff = clusterState.diff(clusterChangedEvent.previousState()); - BytesStreamOutput os = new BytesStreamOutput(); - diff.writeTo(os); - clusterStateDiffBytes = os.bytes().toBytes(); - } - try { - newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(new BytesStreamInput(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState); - logger.debug("sending diff cluster state version with size {} to [{}]", clusterStateDiffBytes.length, discovery.localNode.getName()); - } catch (IncompatibleClusterStateVersionException ex) { - logger.warn("incompatible cluster state version - resending complete cluster state", ex); - } - } - if (newNodeSpecificClusterState == null) { - if (clusterStateBytes == null) { - clusterStateBytes = Builder.toBytes(clusterState); - } - newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); - } - discovery.lastProcessedClusterState = newNodeSpecificClusterState; - } - final ClusterState nodeSpecificClusterState = newNodeSpecificClusterState; - + final ClusterState nodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); nodeSpecificClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); // ignore cluster state messages that do not include "me", not in the game yet... if (nodeSpecificClusterState.nodes().localNode() != null) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 8a6382dcf94..9ad4945844a 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -331,12 +331,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Override - public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) { - if (!clusterChangedEvent.state().getNodes().localNodeMaster()) { + public void publish(ClusterState clusterState, AckListener ackListener) { + if (!clusterState.getNodes().localNodeMaster()) { throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); } - nodesFD.updateNodesAndPing(clusterChangedEvent.state()); - publishClusterState.publish(clusterChangedEvent, ackListener); + nodesFD.updateNodesAndPing(clusterState); + publishClusterState.publish(clusterState, ackListener); } /** diff --git a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index c4ad8895e79..fd1ba85c25c 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -21,12 +21,8 @@ package org.elasticsearch.discovery.zen.publish; import com.google.common.collect.Maps; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.Diff; -import org.elasticsearch.cluster.IncompatibleClusterStateVersionException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; @@ -44,13 +40,10 @@ import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; -import java.io.IOException; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; /** * @@ -90,43 +83,73 @@ public class PublishClusterStateAction extends AbstractComponent { transportService.removeHandler(ACTION_NAME); } - public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { - Set nodesToPublishTo = new HashSet<>(clusterChangedEvent.state().nodes().size()); + public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { + Set nodesToPublishTo = new HashSet<>(clusterState.nodes().size()); DiscoveryNode localNode = nodesProvider.nodes().localNode(); - for (final DiscoveryNode node : clusterChangedEvent.state().nodes()) { + for (final DiscoveryNode node : clusterState.nodes()) { if (node.equals(localNode)) { continue; } nodesToPublishTo.add(node); } - publish(clusterChangedEvent, nodesToPublishTo, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); + publish(clusterState, nodesToPublishTo, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); } - private void publish(final ClusterChangedEvent clusterChangedEvent, final Set nodesToPublishTo, + private void publish(final ClusterState clusterState, final Set nodesToPublishTo, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { Map serializedStates = Maps.newHashMap(); - Map serializedDiffs = Maps.newHashMap(); - final ClusterState clusterState = clusterChangedEvent.state(); - final ClusterState previousState = clusterChangedEvent.previousState(); final AtomicBoolean timedOutWaitingForNodes = new AtomicBoolean(false); final TimeValue publishTimeout = discoverySettings.getPublishTimeout(); - final boolean sendFullVersion = !discoverySettings.getPublishDiff() || previousState == null; - Diff diff = null; for (final DiscoveryNode node : nodesToPublishTo) { // try and serialize the cluster state once (or per version), so we don't serialize it // per node when we send it over the wire, compress it while we are at it... - // we don't send full version if node didn't exist in the previous version of cluster state - if (sendFullVersion || !previousState.nodes().nodeExists(node.id())) { - sendFullClusterState(clusterState, serializedStates, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); - } else { - if (diff == null) { - diff = clusterState.diff(previousState); + BytesReference bytes = serializedStates.get(node.version()); + if (bytes == null) { + try { + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(node.version()); + ClusterState.Builder.writeTo(clusterState, stream); + stream.close(); + bytes = bStream.bytes(); + serializedStates.put(node.version(), bytes); + } catch (Throwable e) { + logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); + publishResponseHandler.onFailure(node, e); + continue; } - sendClusterStateDiff(clusterState, diff, serializedDiffs, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); + } + try { + TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false); + // no need to put a timeout on the options here, because we want the response to eventually be received + // and not log an error if it arrives after the timeout + transportService.sendRequest(node, ACTION_NAME, + new BytesTransportRequest(bytes, node.version()), + options, // no need to compress, we already compressed the bytes + + new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + + @Override + public void handleResponse(TransportResponse.Empty response) { + if (timedOutWaitingForNodes.get()) { + logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); + } + publishResponseHandler.onResponse(node); + } + + @Override + public void handleException(TransportException exp) { + logger.debug("failed to send cluster state to {}", exp, node); + publishResponseHandler.onFailure(node, exp); + } + }); + } catch (Throwable t) { + logger.debug("error sending cluster state to {}", t, node); + publishResponseHandler.onFailure(node, t); } } @@ -148,107 +171,7 @@ public class PublishClusterStateAction extends AbstractComponent { } } - private void sendFullClusterState(ClusterState clusterState, @Nullable Map serializedStates, - DiscoveryNode node, AtomicBoolean timedOutWaitingForNodes, TimeValue publishTimeout, - BlockingClusterStatePublishResponseHandler publishResponseHandler) { - BytesReference bytes = null; - if (serializedStates != null) { - bytes = serializedStates.get(node.version()); - } - if (bytes == null) { - try { - bytes = serializeFullClusterState(clusterState, node.version()); - if (serializedStates != null) { - serializedStates.put(node.version(), bytes); - } - } catch (Throwable e) { - logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); - publishResponseHandler.onFailure(node, e); - return; - } - } - publishClusterStateToNode(clusterState, bytes, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler, false); - } - - private void sendClusterStateDiff(ClusterState clusterState, Diff diff, Map serializedDiffs, DiscoveryNode node, - AtomicBoolean timedOutWaitingForNodes, TimeValue publishTimeout, - BlockingClusterStatePublishResponseHandler publishResponseHandler) { - BytesReference bytes = serializedDiffs.get(node.version()); - if (bytes == null) { - try { - bytes = serializeDiffClusterState(diff, node.version()); - serializedDiffs.put(node.version(), bytes); - } catch (Throwable e) { - logger.warn("failed to serialize diff of cluster_state before publishing it to node {}", e, node); - publishResponseHandler.onFailure(node, e); - return; - } - } - publishClusterStateToNode(clusterState, bytes, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler, true); - } - - private void publishClusterStateToNode(final ClusterState clusterState, BytesReference bytes, - final DiscoveryNode node, final AtomicBoolean timedOutWaitingForNodes, - final TimeValue publishTimeout, - final BlockingClusterStatePublishResponseHandler publishResponseHandler, - final boolean sendDiffs) { - try { - TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false); - // no need to put a timeout on the options here, because we want the response to eventually be received - // and not log an error if it arrives after the timeout - transportService.sendRequest(node, ACTION_NAME, - new BytesTransportRequest(bytes, node.version()), - options, // no need to compress, we already compressed the bytes - - new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - - @Override - public void handleResponse(TransportResponse.Empty response) { - if (timedOutWaitingForNodes.get()) { - logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); - } - publishResponseHandler.onResponse(node); - } - - @Override - public void handleException(TransportException exp) { - if (sendDiffs && exp.unwrapCause() instanceof IncompatibleClusterStateVersionException) { - logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); - sendFullClusterState(clusterState, null, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); - } else { - logger.debug("failed to send cluster state to {}", exp, node); - publishResponseHandler.onFailure(node, exp); - } - } - }); - } catch (Throwable t) { - logger.warn("error sending cluster state to {}", t, node); - publishResponseHandler.onFailure(node, t); - } - } - - public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException { - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); - stream.setVersion(nodeVersion); - stream.writeBoolean(true); - clusterState.writeTo(stream); - stream.close(); - return bStream.bytes(); - } - - public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException { - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); - stream.setVersion(nodeVersion); - stream.writeBoolean(false); - diff.writeTo(stream); - stream.close(); - return bStream.bytes(); - } - private class PublishClusterStateRequestHandler implements TransportRequestHandler { - private ClusterState lastSeenClusterState; @Override public void messageReceived(BytesTransportRequest request, final TransportChannel channel) throws Exception { @@ -260,24 +183,11 @@ public class PublishClusterStateAction extends AbstractComponent { in = request.bytes().streamInput(); } in.setVersion(request.version()); - synchronized (this) { - // If true we received full cluster state - otherwise diffs - if (in.readBoolean()) { - lastSeenClusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); - logger.debug("received full cluster state version {} with size {}", lastSeenClusterState.version(), request.bytes().length()); - } else if (lastSeenClusterState != null) { - Diff diff = lastSeenClusterState.readDiffFrom(in); - lastSeenClusterState = diff.apply(lastSeenClusterState); - logger.debug("received diff cluster state version {} with uuid {}, diff size {}", lastSeenClusterState.version(), lastSeenClusterState.uuid(), request.bytes().length()); - } else { - logger.debug("received diff for but don't have any local cluster state - requesting full state"); - throw new IncompatibleClusterStateVersionException("have no local cluster state"); - } - lastSeenClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); - } - + ClusterState clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); + clusterState.status(ClusterState.ClusterStateStatus.RECEIVED); + logger.debug("received cluster state version {}", clusterState.version()); try { - listener.onNewClusterState(lastSeenClusterState, new NewClusterStateListener.NewStateProcessed() { + listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() { @Override public void onNewClusterStateProcessed() { try { @@ -297,7 +207,7 @@ public class PublishClusterStateAction extends AbstractComponent { } }); } catch (Exception e) { - logger.warn("unexpected error while processing cluster state version [{}]", e, lastSeenClusterState.version()); + logger.warn("unexpected error while processing cluster state version [{}]", e, clusterState.version()); try { channel.sendResponse(e); } catch (Throwable e1) { diff --git a/src/main/java/org/elasticsearch/gateway/Gateway.java b/src/main/java/org/elasticsearch/gateway/Gateway.java index 139b5763489..cd15bccdc4a 100644 --- a/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; - +import org.elasticsearch.indices.IndicesService; import java.nio.file.Path; diff --git a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 5538ef6d043..43dec7edb51 100644 --- a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -198,7 +198,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { fromNode.writeTo(out); out.writeVInt(indices.length); for (IndexMetaData indexMetaData : indices) { - indexMetaData.writeTo(out); + IndexMetaData.Builder.writeTo(indexMetaData, out); } } } diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index d7334046de0..77ab900ce90 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -221,7 +221,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA out.writeBoolean(false); } else { out.writeBoolean(true); - metaData.writeTo(out); + MetaData.Builder.writeTo(metaData, out); } } } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java index 85b46925b5f..be4e1b4e3f3 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java @@ -58,7 +58,7 @@ public class RestGetRepositoriesAction extends BaseRestHandler { public RestResponse buildResponse(GetRepositoriesResponse response, XContentBuilder builder) throws Exception { builder.startObject(); for (RepositoryMetaData repositoryMetaData : response.repositories()) { - RepositoriesMetaData.toXContent(repositoryMetaData, builder, request); + RepositoriesMetaData.FACTORY.toXContent(repositoryMetaData, builder, request); } builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java index d2653bc1745..dc800f37062 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java @@ -147,7 +147,7 @@ public class RestGetIndicesAction extends BaseRestHandler { builder.startObject(Fields.WARMERS); if (warmers != null) { for (IndexWarmersMetaData.Entry warmer : warmers) { - IndexWarmersMetaData.toXContent(warmer, builder, params); + IndexWarmersMetaData.FACTORY.toXContent(warmer, builder, params); } } builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java index be83ccbe4b5..7023eecedd4 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java @@ -72,7 +72,7 @@ public class RestGetWarmerAction extends BaseRestHandler { builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE); builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE); for (IndexWarmersMetaData.Entry warmerEntry : entry.value) { - IndexWarmersMetaData.toXContent(warmerEntry, builder, request); + IndexWarmersMetaData.FACTORY.toXContent(warmerEntry, builder, request); } builder.endObject(); builder.endObject(); diff --git a/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java b/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java index ef1ef44ffb9..de56f823eac 100644 --- a/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java +++ b/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java @@ -22,9 +22,7 @@ package org.elasticsearch.search.warmer; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.Version; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -35,33 +33,16 @@ import org.elasticsearch.common.xcontent.*; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Map; /** */ -public class IndexWarmersMetaData extends AbstractDiffable implements IndexMetaData.Custom { +public class IndexWarmersMetaData implements IndexMetaData.Custom { public static final String TYPE = "warmers"; - public static final IndexWarmersMetaData PROTO = new IndexWarmersMetaData(); - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - IndexWarmersMetaData that = (IndexWarmersMetaData) o; - - return entries.equals(that.entries); - - } - - @Override - public int hashCode() { - return entries.hashCode(); - } + public static final Factory FACTORY = new Factory(); public static class Entry { private final String name; @@ -93,29 +74,6 @@ public class IndexWarmersMetaData extends AbstractDiffable public Boolean queryCache() { return this.queryCache; } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Entry entry = (Entry) o; - - if (!name.equals(entry.name)) return false; - if (!Arrays.equals(types, entry.types)) return false; - if (!source.equals(entry.source)) return false; - return !(queryCache != null ? !queryCache.equals(entry.queryCache) : entry.queryCache != null); - - } - - @Override - public int hashCode() { - int result = name.hashCode(); - result = 31 * result + Arrays.hashCode(types); - result = 31 * result + source.hashCode(); - result = 31 * result + (queryCache != null ? queryCache.hashCode() : 0); - return result; - } } private final ImmutableList entries; @@ -134,143 +92,149 @@ public class IndexWarmersMetaData extends AbstractDiffable return TYPE; } - @Override - public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - String name = in.readString(); - String[] types = in.readStringArray(); - BytesReference source = null; - if (in.readBoolean()) { - source = in.readBytesReference(); - } - Boolean queryCache; - queryCache = in.readOptionalBoolean(); - entries[i] = new Entry(name, types, queryCache, source); - } - return new IndexWarmersMetaData(entries); - } + public static class Factory implements IndexMetaData.Custom.Factory { - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(entries().size()); - for (Entry entry : entries()) { - out.writeString(entry.name()); - out.writeStringArray(entry.types()); - if (entry.source() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeBytesReference(entry.source()); - } - out.writeOptionalBoolean(entry.queryCache()); + @Override + public String type() { + return TYPE; } - } - @Override - public IndexWarmersMetaData fromMap(Map map) throws IOException { - // if it starts with the type, remove it - if (map.size() == 1 && map.containsKey(TYPE)) { - map = (Map) map.values().iterator().next(); - } - XContentBuilder builder = XContentFactory.smileBuilder().map(map); - try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { - // move to START_OBJECT - parser.nextToken(); - return fromXContent(parser); - } - } - - @Override - public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { - // we get here after we are at warmers token - String currentFieldName = null; - XContentParser.Token token; - List entries = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - String name = currentFieldName; - List types = new ArrayList<>(2); + @Override + public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + String name = in.readString(); + String[] types = in.readStringArray(); BytesReference source = null; + if (in.readBoolean()) { + source = in.readBytesReference(); + } Boolean queryCache = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_ARRAY) { - if ("types".equals(currentFieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - types.add(parser.text()); + queryCache = in.readOptionalBoolean(); + entries[i] = new Entry(name, types, queryCache, source); + } + return new IndexWarmersMetaData(entries); + } + + @Override + public void writeTo(IndexWarmersMetaData warmers, StreamOutput out) throws IOException { + out.writeVInt(warmers.entries().size()); + for (Entry entry : warmers.entries()) { + out.writeString(entry.name()); + out.writeStringArray(entry.types()); + if (entry.source() == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeBytesReference(entry.source()); + } + out.writeOptionalBoolean(entry.queryCache()); + } + } + + @Override + public IndexWarmersMetaData fromMap(Map map) throws IOException { + // if it starts with the type, remove it + if (map.size() == 1 && map.containsKey(TYPE)) { + map = (Map) map.values().iterator().next(); + } + XContentBuilder builder = XContentFactory.smileBuilder().map(map); + try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { + // move to START_OBJECT + parser.nextToken(); + return fromXContent(parser); + } + } + + @Override + public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { + // we get here after we are at warmers token + String currentFieldName = null; + XContentParser.Token token; + List entries = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + String name = currentFieldName; + List types = new ArrayList<>(2); + BytesReference source = null; + Boolean queryCache = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_ARRAY) { + if ("types".equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + types.add(parser.text()); + } + } + } else if (token == XContentParser.Token.START_OBJECT) { + if ("source".equals(currentFieldName)) { + XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered()); + source = builder.bytes(); + } + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + if ("source".equals(currentFieldName)) { + source = new BytesArray(parser.binaryValue()); + } + } else if (token.isValue()) { + if ("queryCache".equals(currentFieldName) || "query_cache".equals(currentFieldName)) { + queryCache = parser.booleanValue(); } } - } else if (token == XContentParser.Token.START_OBJECT) { - if ("source".equals(currentFieldName)) { - XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered()); - source = builder.bytes(); - } - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - if ("source".equals(currentFieldName)) { - source = new BytesArray(parser.binaryValue()); - } - } else if (token.isValue()) { - if ("queryCache".equals(currentFieldName) || "query_cache".equals(currentFieldName)) { - queryCache = parser.booleanValue(); - } + } + entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); + } + } + return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + } + + @Override + public void toXContent(IndexWarmersMetaData warmers, XContentBuilder builder, ToXContent.Params params) throws IOException { + //No need, IndexMetaData already writes it + //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); + for (Entry entry : warmers.entries()) { + toXContent(entry, builder, params); + } + //No need, IndexMetaData already writes it + //builder.endObject(); + } + + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + boolean binary = params.paramAsBoolean("binary", false); + builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("types", entry.types()); + if (entry.queryCache() != null) { + builder.field("queryCache", entry.queryCache()); + } + builder.field("source"); + if (binary) { + builder.value(entry.source()); + } else { + Map mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose(); + builder.map(mapping); + } + builder.endObject(); + } + + @Override + public IndexWarmersMetaData merge(IndexWarmersMetaData first, IndexWarmersMetaData second) { + List entries = Lists.newArrayList(); + entries.addAll(first.entries()); + for (Entry secondEntry : second.entries()) { + boolean found = false; + for (Entry firstEntry : first.entries()) { + if (firstEntry.name().equals(secondEntry.name())) { + found = true; + break; } } - entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - //No need, IndexMetaData already writes it - //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); - for (Entry entry : entries()) { - toXContent(entry, builder, params); - } - //No need, IndexMetaData already writes it - //builder.endObject(); - return builder; - } - - public static void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - boolean binary = params.paramAsBoolean("binary", false); - builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("types", entry.types()); - if (entry.queryCache() != null) { - builder.field("queryCache", entry.queryCache()); - } - builder.field("source"); - if (binary) { - builder.value(entry.source()); - } else { - Map mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose(); - builder.map(mapping); - } - builder.endObject(); - } - - @Override - public IndexMetaData.Custom mergeWith(IndexMetaData.Custom other) { - IndexWarmersMetaData second = (IndexWarmersMetaData) other; - List entries = Lists.newArrayList(); - entries.addAll(entries()); - for (Entry secondEntry : second.entries()) { - boolean found = false; - for (Entry firstEntry : entries()) { - if (firstEntry.name().equals(secondEntry.name())) { - found = true; - break; + if (!found) { + entries.add(secondEntry); } } - if (!found) { - entries.add(secondEntry); - } + return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); } } diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java deleted file mode 100644 index 33008fd63d2..00000000000 --- a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java +++ /dev/null @@ -1,625 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; -import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; -import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; -import org.elasticsearch.test.ElasticsearchTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportConnectionListener; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.local.LocalTransport; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import static com.google.common.collect.Maps.newHashMap; -import static org.hamcrest.Matchers.*; - -public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase { - - protected ThreadPool threadPool; - protected Map nodes = newHashMap(); - - public static class MockNode { - public final DiscoveryNode discoveryNode; - public final MockTransportService service; - public final PublishClusterStateAction action; - public final MockDiscoveryNodesProvider nodesProvider; - - public MockNode(DiscoveryNode discoveryNode, MockTransportService service, PublishClusterStateAction action, MockDiscoveryNodesProvider nodesProvider) { - this.discoveryNode = discoveryNode; - this.service = service; - this.action = action; - this.nodesProvider = nodesProvider; - } - - public void connectTo(DiscoveryNode node) { - service.connectToNode(node); - nodesProvider.addNode(node); - } - } - - public MockNode createMockNode(final String name, Settings settings, Version version) throws Exception { - return createMockNode(name, settings, version, new PublishClusterStateAction.NewClusterStateListener() { - @Override - public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.uuid()); - newStateProcessed.onNewClusterStateProcessed(); - } - }); - } - - public MockNode createMockNode(String name, Settings settings, Version version, PublishClusterStateAction.NewClusterStateListener listener) throws Exception { - MockTransportService service = buildTransportService( - ImmutableSettings.builder().put(settings).put("name", name, TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), - version - ); - DiscoveryNode discoveryNode = new DiscoveryNode(name, name, service.boundAddress().publishAddress(), ImmutableMap.of(), version); - MockDiscoveryNodesProvider nodesProvider = new MockDiscoveryNodesProvider(discoveryNode); - PublishClusterStateAction action = buildPublishClusterStateAction(settings, service, nodesProvider, listener); - MockNode node = new MockNode(discoveryNode, service, action, nodesProvider); - nodesProvider.addNode(discoveryNode); - final CountDownLatch latch = new CountDownLatch(nodes.size() * 2 + 1); - TransportConnectionListener waitForConnection = new TransportConnectionListener() { - @Override - public void onNodeConnected(DiscoveryNode node) { - latch.countDown(); - } - - @Override - public void onNodeDisconnected(DiscoveryNode node) { - fail("disconnect should not be called " + node); - } - }; - node.service.addConnectionListener(waitForConnection); - for (MockNode curNode : nodes.values()) { - curNode.service.addConnectionListener(waitForConnection); - curNode.connectTo(node.discoveryNode); - node.connectTo(curNode.discoveryNode); - } - node.connectTo(node.discoveryNode); - assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true)); - for (MockNode curNode : nodes.values()) { - curNode.service.removeConnectionListener(waitForConnection); - } - node.service.removeConnectionListener(waitForConnection); - if (nodes.put(name, node) != null) { - fail("Node with the name " + name + " already exist"); - } - return node; - } - - public MockTransportService service(String name) { - MockNode node = nodes.get(name); - if (node != null) { - return node.service; - } - return null; - } - - public PublishClusterStateAction action(String name) { - MockNode node = nodes.get(name); - if (node != null) { - return node.action; - } - return null; - } - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - threadPool = new ThreadPool(getClass().getName()); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - for (MockNode curNode : nodes.values()) { - curNode.action.close(); - curNode.service.close(); - } - terminate(threadPool); - } - - protected MockTransportService buildTransportService(Settings settings, Version version) { - MockTransportService transportService = new MockTransportService(settings, new LocalTransport(settings, threadPool, version), threadPool); - transportService.start(); - return transportService; - } - - protected PublishClusterStateAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, MockDiscoveryNodesProvider nodesProvider, - PublishClusterStateAction.NewClusterStateListener listener) { - DiscoverySettings discoverySettings = new DiscoverySettings(settings, new NodeSettingsService(settings)); - return new PublishClusterStateAction(settings, transportService, nodesProvider, listener, discoverySettings); - } - - - static class MockDiscoveryNodesProvider implements DiscoveryNodesProvider { - - private DiscoveryNodes discoveryNodes = DiscoveryNodes.EMPTY_NODES; - - public MockDiscoveryNodesProvider(DiscoveryNode localNode) { - discoveryNodes = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()).build(); - } - - public void addNode(DiscoveryNode node) { - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(node).build(); - } - - @Override - public DiscoveryNodes nodes() { - return discoveryNodes; - } - - @Override - public NodeService nodeService() { - assert false; - throw new UnsupportedOperationException("Shouldn't be here"); - } - } - - - @Test - @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") - public void testSimpleClusterStatePublishing() throws Exception { - MockNewClusterStateListener mockListenerA = new MockNewClusterStateListener(); - MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerA); - - MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); - MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); - - // Initial cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); - ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); - - // cluster state update - add nodeB - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); - mockListenerB.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertFalse(clusterState.wasReadFromDiff()); - } - }); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // cluster state update - add block - previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); - mockListenerB.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertTrue(clusterState.wasReadFromDiff()); - assertThat(clusterState.blocks().global().size(), equalTo(1)); - } - }); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // cluster state update - remove block - previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).incrementVersion().build(); - mockListenerB.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertTrue(clusterState.wasReadFromDiff()); - assertThat(clusterState.blocks().global().size(), equalTo(0)); - } - }); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // Adding new node - this node should get full cluster state while nodeB should still be getting diffs - - MockNewClusterStateListener mockListenerC = new MockNewClusterStateListener(); - MockNode nodeC = createMockNode("nodeC", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerC); - - // cluster state update 3 - register node C - previousClusterState = clusterState; - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build(); - clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); - mockListenerB.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertTrue(clusterState.wasReadFromDiff()); - assertThat(clusterState.blocks().global().size(), equalTo(0)); - } - }); - mockListenerC.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - // First state - assertFalse(clusterState.wasReadFromDiff()); - } - }); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // cluster state update 4 - update settings - previousClusterState = clusterState; - MetaData metaData = MetaData.builder(clusterState.metaData()).transientSettings(ImmutableSettings.settingsBuilder().put("foo", "bar").build()).build(); - clusterState = ClusterState.builder(clusterState).metaData(metaData).incrementVersion().build(); - NewClusterStateExpectation expectation = new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertTrue(clusterState.wasReadFromDiff()); - assertThat(clusterState.blocks().global().size(), equalTo(0)); - } - }; - mockListenerB.add(expectation); - mockListenerC.add(expectation); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // cluster state update - skipping one version change - should request full cluster state - previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); - clusterState = ClusterState.builder(clusterState).incrementVersion().build(); - expectation = new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertFalse(clusterState.wasReadFromDiff()); - } - }; - mockListenerB.add(expectation); - mockListenerC.add(expectation); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // cluster state update - skipping one version change - should request full cluster state - previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); - clusterState = ClusterState.builder(clusterState).incrementVersion().build(); - expectation = new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertFalse(clusterState.wasReadFromDiff()); - } - }; - mockListenerB.add(expectation); - mockListenerC.add(expectation); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // node B becomes the master and sends a version of the cluster state that goes back - discoveryNodes = DiscoveryNodes.builder(discoveryNodes) - .put(nodeA.discoveryNode) - .put(nodeB.discoveryNode) - .put(nodeC.discoveryNode) - .build(); - previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); - clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); - expectation = new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertFalse(clusterState.wasReadFromDiff()); - } - }; - mockListenerA.add(expectation); - mockListenerC.add(expectation); - publishStateDiffAndWait(nodeB.action, clusterState, previousClusterState); - } - - @Test - @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") - public void testUnexpectedDiffPublishing() throws Exception { - - MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { - @Override - public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - fail("Shouldn't send cluster state to myself"); - } - }); - - MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); - MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); - - // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); - ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); - ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); - mockListenerB.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertFalse(clusterState.wasReadFromDiff()); - } - }); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // cluster state update - add block - previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); - mockListenerB.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertTrue(clusterState.wasReadFromDiff()); - } - }); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - } - - @Test - @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") - public void testDisablingDiffPublishing() throws Exception { - Settings noDiffPublishingSettings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, false).build(); - - MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { - @Override - public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - fail("Shouldn't send cluster state to myself"); - } - }); - - MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { - @Override - public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.uuid(), clusterState.wasReadFromDiff()); - assertFalse(clusterState.wasReadFromDiff()); - newStateProcessed.onNewClusterStateProcessed(); - } - }); - - // Initial cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); - ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); - - // cluster state update - add nodeB - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); - ClusterState previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // cluster state update - add block - previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - } - - - @Test - @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") - public void testSimultaneousClusterStatePublishing() throws Exception { - int numberOfNodes = randomIntBetween(2, 10); - int numberOfIterations = randomIntBetween(50, 200); - Settings settings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "100ms").put(DiscoverySettings.PUBLISH_DIFF_ENABLE, true).build(); - MockNode[] nodes = new MockNode[numberOfNodes]; - DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); - for (int i = 0; i < nodes.length; i++) { - final String name = "node" + i; - nodes[i] = createMockNode(name, settings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { - @Override - public synchronized void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - assertProperMetaDataForVersion(clusterState.metaData(), clusterState.version()); - if (randomInt(10) < 2) { - // Cause timeouts from time to time - try { - Thread.sleep(randomInt(110)); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - } - newStateProcessed.onNewClusterStateProcessed(); - } - }); - discoveryNodesBuilder.put(nodes[i].discoveryNode); - } - - AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations]; - DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); - MetaData metaData = MetaData.EMPTY_META_DATA; - ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(metaData).build(); - ClusterState previousState; - for (int i = 0; i < numberOfIterations; i++) { - previousState = clusterState; - metaData = buildMetaDataForVersion(metaData, i + 1); - clusterState = ClusterState.builder(clusterState).incrementVersion().metaData(metaData).nodes(discoveryNodes).build(); - listeners[i] = publishStateDiff(nodes[0].action, clusterState, previousState); - } - - for (int i = 0; i < numberOfIterations; i++) { - listeners[i].await(1, TimeUnit.SECONDS); - } - } - - @Test - @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") - public void testSerializationFailureDuringDiffPublishing() throws Exception { - - MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { - @Override - public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - fail("Shouldn't send cluster state to myself"); - } - }); - - MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); - MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); - - // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); - ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); - ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); - mockListenerB.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertFalse(clusterState.wasReadFromDiff()); - } - }); - publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); - - // cluster state update - add block - previousClusterState = clusterState; - clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); - mockListenerB.add(new NewClusterStateExpectation() { - @Override - public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { - assertTrue(clusterState.wasReadFromDiff()); - } - }); - - ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.uuid(), clusterState) { - @Override - public Diff diff(ClusterState previousState) { - return new Diff() { - @Override - public ClusterState apply(ClusterState part) { - fail("this diff shouldn't be applied"); - return part; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - throw new IOException("Simulated failure of diff serialization"); - } - }; - } - }; - List> errors = publishStateDiff(nodeA.action, unserializableClusterState, previousClusterState).awaitErrors(1, TimeUnit.SECONDS); - assertThat(errors.size(), equalTo(1)); - assertThat(errors.get(0).v2().getMessage(), containsString("Simulated failure of diff serialization")); - } - - private MetaData buildMetaDataForVersion(MetaData metaData, long version) { - ImmutableOpenMap.Builder indices = ImmutableOpenMap.builder(metaData.indices()); - indices.put("test" + version, IndexMetaData.builder("test" + version).settings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards((int) version).numberOfReplicas(0).build()); - return MetaData.builder(metaData) - .transientSettings(ImmutableSettings.builder().put("test", version).build()) - .indices(indices.build()) - .build(); - } - - private void assertProperMetaDataForVersion(MetaData metaData, long version) { - for (long i = 1; i <= version; i++) { - assertThat(metaData.index("test" + i), notNullValue()); - assertThat(metaData.index("test" + i).numberOfShards(), equalTo((int) i)); - } - assertThat(metaData.index("test" + (version + 1)), nullValue()); - assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version))); - } - - public void publishStateDiffAndWait(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { - publishStateDiff(action, state, previousState).await(1, TimeUnit.SECONDS); - } - - public AssertingAckListener publishStateDiff(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { - AssertingAckListener assertingAckListener = new AssertingAckListener(state.nodes().getSize() - 1); - ClusterChangedEvent changedEvent = new ClusterChangedEvent("test update", state, previousState); - action.publish(changedEvent, assertingAckListener); - return assertingAckListener; - } - - public static class AssertingAckListener implements Discovery.AckListener { - private final List> errors = new CopyOnWriteArrayList<>(); - private final AtomicBoolean timeoutOccured = new AtomicBoolean(); - private final CountDownLatch countDown; - - public AssertingAckListener(int nodeCount) { - countDown = new CountDownLatch(nodeCount); - } - - @Override - public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { - if (t != null) { - errors.add(new Tuple<>(node, t)); - } - countDown.countDown(); - } - - @Override - public void onTimeout() { - timeoutOccured.set(true); - // Fast forward the counter - no reason to wait here - long currentCount = countDown.getCount(); - for (long i = 0; i < currentCount; i++) { - countDown.countDown(); - } - } - - public void await(long timeout, TimeUnit unit) throws InterruptedException { - assertThat(awaitErrors(timeout, unit), emptyIterable()); - } - - public List> awaitErrors(long timeout, TimeUnit unit) throws InterruptedException { - countDown.await(timeout, unit); - assertFalse(timeoutOccured.get()); - return errors; - } - - } - - public interface NewClusterStateExpectation { - void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed); - } - - public static class MockNewClusterStateListener implements PublishClusterStateAction.NewClusterStateListener { - CopyOnWriteArrayList expectations = new CopyOnWriteArrayList(); - - @Override - public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { - final NewClusterStateExpectation expectation; - try { - expectation = expectations.remove(0); - } catch (ArrayIndexOutOfBoundsException ex) { - fail("Unexpected cluster state update " + clusterState.prettyPrint()); - return; - } - expectation.check(clusterState, newStateProcessed); - newStateProcessed.onNewClusterStateProcessed(); - } - - public void add(NewClusterStateExpectation expectation) { - expectations.add(expectation); - } - } - - public static class DelegatingClusterState extends ClusterState { - - public DelegatingClusterState(ClusterState clusterState) { - super(clusterState.version(), clusterState.uuid(), clusterState); - } - - - } - -} diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java deleted file mode 100644 index 84df1eaf209..00000000000 --- a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java +++ /dev/null @@ -1,534 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.block.ClusterBlock; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.metadata.*; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.*; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.stream.BytesStreamInput; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.index.query.FilterBuilders; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.warmer.IndexWarmersMetaData; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; - -import java.util.List; - -import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; -import static org.elasticsearch.common.xcontent.XContentTestUtils.convertToMap; -import static org.elasticsearch.common.xcontent.XContentTestUtils.mapsEqualIgnoringArrayOrder; -import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.hamcrest.Matchers.equalTo; - - -@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 0, numClientNodes = 0) -public class ClusterStateDiffTests extends ElasticsearchIntegrationTest { - - @Test - public void testClusterStateDiffSerialization() throws Exception { - DiscoveryNode masterNode = new DiscoveryNode("master", new LocalTransportAddress("master"), Version.CURRENT); - DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"), Version.CURRENT); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.id()).build(); - ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); - ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode); - - int iterationCount = randomIntBetween(10, 300); - for (int iteration = 0; iteration < iterationCount; iteration++) { - ClusterState previousClusterState = clusterState; - ClusterState previousClusterStateFromDiffs = clusterStateFromDiffs; - int changesCount = randomIntBetween(1, 4); - ClusterState.Builder builder = null; - for (int i = 0; i < changesCount; i++) { - if (i > 0) { - clusterState = builder.build(); - } - switch (randomInt(4)) { - case 0: - builder = randomNodes(clusterState); - break; - case 1: - builder = randomRoutingTable(clusterState); - break; - case 2: - builder = randomBlocks(clusterState); - break; - case 3: - case 4: - builder = randomMetaDataChanges(clusterState); - break; - default: - throw new IllegalArgumentException("Shouldn't be here"); - } - } - clusterState = builder.incrementVersion().build(); - - if (randomIntBetween(0, 10) < 1) { - // Update cluster state via full serialization from time to time - clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), previousClusterStateFromDiffs.nodes().localNode()); - } else { - // Update cluster states using diffs - Diff diffBeforeSerialization = clusterState.diff(previousClusterState); - BytesStreamOutput os = new BytesStreamOutput(); - diffBeforeSerialization.writeTo(os); - byte[] diffBytes = os.bytes().toBytes(); - Diff diff; - try (BytesStreamInput input = new BytesStreamInput(diffBytes)) { - diff = previousClusterStateFromDiffs.readDiffFrom(input); - clusterStateFromDiffs = diff.apply(previousClusterStateFromDiffs); - } - } - - - try { - // Check non-diffable elements - assertThat(clusterStateFromDiffs.version(), equalTo(clusterState.version())); - assertThat(clusterStateFromDiffs.uuid(), equalTo(clusterState.uuid())); - - // Check nodes - assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes())); - assertThat(clusterStateFromDiffs.nodes().localNodeId(), equalTo(previousClusterStateFromDiffs.nodes().localNodeId())); - assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes())); - for (ObjectCursor node : clusterStateFromDiffs.nodes().nodes().keys()) { - DiscoveryNode node1 = clusterState.nodes().get(node.value); - DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.value); - assertThat(node1.version(), equalTo(node2.version())); - assertThat(node1.address(), equalTo(node2.address())); - assertThat(node1.attributes(), equalTo(node2.attributes())); - } - - // Check routing table - assertThat(clusterStateFromDiffs.routingTable().version(), equalTo(clusterState.routingTable().version())); - assertThat(clusterStateFromDiffs.routingTable().indicesRouting(), equalTo(clusterState.routingTable().indicesRouting())); - - // Check cluster blocks - assertThat(clusterStateFromDiffs.blocks().global(), equalTo(clusterStateFromDiffs.blocks().global())); - assertThat(clusterStateFromDiffs.blocks().indices(), equalTo(clusterStateFromDiffs.blocks().indices())); - assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), equalTo(clusterStateFromDiffs.blocks().disableStatePersistence())); - - // Check metadata - assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version())); - assertThat(clusterStateFromDiffs.metaData().uuid(), equalTo(clusterState.metaData().uuid())); - assertThat(clusterStateFromDiffs.metaData().transientSettings(), equalTo(clusterState.metaData().transientSettings())); - assertThat(clusterStateFromDiffs.metaData().persistentSettings(), equalTo(clusterState.metaData().persistentSettings())); - assertThat(clusterStateFromDiffs.metaData().indices(), equalTo(clusterState.metaData().indices())); - assertThat(clusterStateFromDiffs.metaData().templates(), equalTo(clusterState.metaData().templates())); - assertThat(clusterStateFromDiffs.metaData().customs(), equalTo(clusterState.metaData().customs())); - assertThat(clusterStateFromDiffs.metaData().aliases(), equalTo(clusterState.metaData().aliases())); - - // JSON Serialization test - make sure that both states produce similar JSON - assertThat(mapsEqualIgnoringArrayOrder(convertToMap(clusterStateFromDiffs), convertToMap(clusterState)), equalTo(true)); - - // Smoke test - we cannot compare bytes to bytes because some elements might get serialized in different order - // however, serialized size should remain the same - assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, equalTo(ClusterState.Builder.toBytes(clusterState).length)); - } catch (AssertionError error) { - logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", clusterState.toString(), clusterStateFromDiffs.toString()); - throw error; - } - } - - logger.info("Final cluster state:[{}]", clusterState.toString()); - - } - - private ClusterState.Builder randomNodes(ClusterState clusterState) { - DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - List nodeIds = randomSubsetOf(randomInt(clusterState.nodes().nodes().size() - 1), clusterState.nodes().nodes().keys().toArray(String.class)); - for (String nodeId : nodeIds) { - if (nodeId.startsWith("node-")) { - if (randomBoolean()) { - nodes.remove(nodeId); - } else { - nodes.put(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random()))); - } - } - } - int additionalNodeCount = randomIntBetween(1, 20); - for (int i = 0; i < additionalNodeCount; i++) { - nodes.put(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random()))); - } - return ClusterState.builder(clusterState).nodes(nodes); - } - - private ClusterState.Builder randomRoutingTable(ClusterState clusterState) { - RoutingTable.Builder builder = RoutingTable.builder(clusterState.routingTable()); - int numberOfIndices = clusterState.routingTable().indicesRouting().size(); - if (numberOfIndices > 0) { - List randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), clusterState.routingTable().indicesRouting().keySet().toArray(new String[numberOfIndices])); - for (String index : randomIndices) { - if (randomBoolean()) { - builder.remove(index); - } else { - builder.add(randomIndexRoutingTable(index, clusterState.nodes().nodes().keys().toArray(String.class))); - } - } - } - int additionalIndexCount = randomIntBetween(1, 20); - for (int i = 0; i < additionalIndexCount; i++) { - builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().nodes().keys().toArray(String.class))); - } - return ClusterState.builder(clusterState).routingTable(builder.build()); - } - - private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds) { - IndexRoutingTable.Builder builder = IndexRoutingTable.builder(index); - int shardCount = randomInt(10); - - for (int i = 0; i < shardCount; i++) { - IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(new ShardId(index, i), randomBoolean()); - int replicaCount = randomIntBetween(1, 10); - for (int j = 0; j < replicaCount; j++) { - indexShard.addShard( - new MutableShardRouting(index, i, randomFrom(nodeIds), j == 0, ShardRoutingState.fromValue((byte) randomIntBetween(1, 4)), 1)); - } - builder.addIndexShard(indexShard.build()); - } - return builder.build(); - } - - private ClusterState.Builder randomBlocks(ClusterState clusterState) { - ClusterBlocks.Builder builder = ClusterBlocks.builder().blocks(clusterState.blocks()); - int globalBlocksCount = clusterState.blocks().global().size(); - if (globalBlocksCount > 0) { - List blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount])); - for (ClusterBlock block : blocks) { - builder.removeGlobalBlock(block); - } - } - int additionalGlobalBlocksCount = randomIntBetween(1, 3); - for (int i = 0; i < additionalGlobalBlocksCount; i++) { - builder.addGlobalBlock(randomGlobalBlock()); - } - return ClusterState.builder(clusterState).blocks(builder); - } - - private ClusterBlock randomGlobalBlock() { - switch (randomInt(2)) { - case 0: - return DiscoverySettings.NO_MASTER_BLOCK_ALL; - case 1: - return DiscoverySettings.NO_MASTER_BLOCK_WRITES; - default: - return GatewayService.STATE_NOT_RECOVERED_BLOCK; - } - } - - private ClusterState.Builder randomMetaDataChanges(ClusterState clusterState) { - MetaData metaData = clusterState.metaData(); - int changesCount = randomIntBetween(1, 10); - for (int i = 0; i < changesCount; i++) { - switch (randomInt(3)) { - case 0: - metaData = randomMetaDataSettings(metaData); - break; - case 1: - metaData = randomIndices(metaData); - break; - case 2: - metaData = randomTemplates(metaData); - break; - case 3: - metaData = randomMetaDataCustoms(metaData); - break; - default: - throw new IllegalArgumentException("Shouldn't be here"); - } - } - return ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).version(metaData.version() + 1).build()); - } - - private Settings randomSettings(Settings settings) { - ImmutableSettings.Builder builder = ImmutableSettings.builder(); - if (randomBoolean()) { - builder.put(settings); - } - int settingsCount = randomInt(10); - for (int i = 0; i < settingsCount; i++) { - builder.put(randomAsciiOfLength(10), randomAsciiOfLength(10)); - } - return builder.build(); - - } - - private MetaData randomMetaDataSettings(MetaData metaData) { - if (randomBoolean()) { - return MetaData.builder(metaData).persistentSettings(randomSettings(metaData.persistentSettings())).build(); - } else { - return MetaData.builder(metaData).transientSettings(randomSettings(metaData.transientSettings())).build(); - } - } - - private interface RandomPart { - /** - * Returns list of parts from metadata - */ - ImmutableOpenMap parts(MetaData metaData); - - /** - * Puts the part back into metadata - */ - MetaData.Builder put(MetaData.Builder builder, T part); - - /** - * Remove the part from metadata - */ - MetaData.Builder remove(MetaData.Builder builder, String name); - - /** - * Returns a random part with the specified name - */ - T randomCreate(String name); - - /** - * Makes random modifications to the part - */ - T randomChange(T part); - - } - - private MetaData randomParts(MetaData metaData, String prefix, RandomPart randomPart) { - MetaData.Builder builder = MetaData.builder(metaData); - ImmutableOpenMap parts = randomPart.parts(metaData); - int partCount = parts.size(); - if (partCount > 0) { - List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metaData).keys().toArray(String.class)); - for (String part : randomParts) { - if (randomBoolean()) { - randomPart.remove(builder, part); - } else { - randomPart.put(builder, randomPart.randomChange(parts.get(part))); - } - } - } - int additionalPartCount = randomIntBetween(1, 20); - for (int i = 0; i < additionalPartCount; i++) { - String name = randomName(prefix); - randomPart.put(builder, randomPart.randomCreate(name)); - } - return builder.build(); - } - - private MetaData randomIndices(MetaData metaData) { - return randomParts(metaData, "index", new RandomPart() { - - @Override - public ImmutableOpenMap parts(MetaData metaData) { - return metaData.indices(); - } - - @Override - public MetaData.Builder put(MetaData.Builder builder, IndexMetaData part) { - return builder.put(part, true); - } - - @Override - public MetaData.Builder remove(MetaData.Builder builder, String name) { - return builder.remove(name); - } - - @Override - public IndexMetaData randomCreate(String name) { - IndexMetaData.Builder builder = IndexMetaData.builder(name); - ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder(); - setRandomSettings(getRandom(), settingsBuilder); - settingsBuilder.put(randomSettings(ImmutableSettings.EMPTY)).put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion(random())); - builder.settings(settingsBuilder); - builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10)); - int aliasCount = randomInt(10); - if (randomBoolean()) { - builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); - } - for (int i = 0; i < aliasCount; i++) { - builder.putAlias(randomAlias()); - } - return builder.build(); - } - - @Override - public IndexMetaData randomChange(IndexMetaData part) { - IndexMetaData.Builder builder = IndexMetaData.builder(part); - switch (randomIntBetween(0, 3)) { - case 0: - builder.settings(ImmutableSettings.builder().put(part.settings()).put(randomSettings(ImmutableSettings.EMPTY))); - break; - case 1: - if (randomBoolean() && part.aliases().isEmpty() == false) { - builder.removeAlias(randomFrom(part.aliases().keys().toArray(String.class))); - } else { - builder.putAlias(AliasMetaData.builder(randomAsciiOfLength(10))); - } - break; - case 2: - builder.settings(ImmutableSettings.builder().put(part.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID())); - break; - case 3: - builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); - break; - default: - throw new IllegalArgumentException("Shouldn't be here"); - } - return builder.build(); - } - }); - } - - private IndexWarmersMetaData randomWarmers() { - if (randomBoolean()) { - return new IndexWarmersMetaData( - new IndexWarmersMetaData.Entry( - randomName("warm"), - new String[]{randomName("type")}, - randomBoolean(), - new BytesArray(randomAsciiOfLength(1000))) - ); - } else { - return new IndexWarmersMetaData(); - } - } - - private MetaData randomTemplates(MetaData metaData) { - return randomParts(metaData, "template", new RandomPart() { - @Override - public ImmutableOpenMap parts(MetaData metaData) { - return metaData.templates(); - } - - @Override - public MetaData.Builder put(MetaData.Builder builder, IndexTemplateMetaData part) { - return builder.put(part); - } - - @Override - public MetaData.Builder remove(MetaData.Builder builder, String name) { - return builder.removeTemplate(name); - } - - @Override - public IndexTemplateMetaData randomCreate(String name) { - IndexTemplateMetaData.Builder builder = IndexTemplateMetaData.builder(name); - builder.order(randomInt(1000)) - .template(randomName("temp")) - .settings(randomSettings(ImmutableSettings.EMPTY)); - int aliasCount = randomIntBetween(0, 10); - for (int i = 0; i < aliasCount; i++) { - builder.putAlias(randomAlias()); - } - if (randomBoolean()) { - builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); - } - return builder.build(); - } - - @Override - public IndexTemplateMetaData randomChange(IndexTemplateMetaData part) { - IndexTemplateMetaData.Builder builder = new IndexTemplateMetaData.Builder(part); - builder.order(randomInt(1000)); - return builder.build(); - } - }); - } - - private AliasMetaData randomAlias() { - AliasMetaData.Builder builder = newAliasMetaDataBuilder(randomName("alias")); - if (randomBoolean()) { - builder.filter(FilterBuilders.termFilter("test", randomRealisticUnicodeOfCodepointLength(10)).toString()); - } - if (randomBoolean()) { - builder.routing(randomAsciiOfLength(10)); - } - return builder.build(); - } - - private MetaData randomMetaDataCustoms(final MetaData metaData) { - return randomParts(metaData, "custom", new RandomPart() { - - @Override - public ImmutableOpenMap parts(MetaData metaData) { - return metaData.customs(); - } - - @Override - public MetaData.Builder put(MetaData.Builder builder, MetaData.Custom part) { - if (part instanceof SnapshotMetaData) { - return builder.putCustom(SnapshotMetaData.TYPE, part); - } else if (part instanceof RepositoriesMetaData) { - return builder.putCustom(RepositoriesMetaData.TYPE, part); - } else if (part instanceof RestoreMetaData) { - return builder.putCustom(RestoreMetaData.TYPE, part); - } - throw new IllegalArgumentException("Unknown custom part " + part); - } - - @Override - public MetaData.Builder remove(MetaData.Builder builder, String name) { - return builder.removeCustom(name); - } - - @Override - public MetaData.Custom randomCreate(String name) { - switch (randomIntBetween(0, 2)) { - case 0: - return new SnapshotMetaData(new SnapshotMetaData.Entry( - new SnapshotId(randomName("repo"), randomName("snap")), - randomBoolean(), - SnapshotMetaData.State.fromValue((byte) randomIntBetween(0, 6)), - ImmutableList.of(), - Math.abs(randomLong()), - ImmutableMap.of())); - case 1: - return new RepositoriesMetaData(); - case 2: - return new RestoreMetaData(new RestoreMetaData.Entry( - new SnapshotId(randomName("repo"), randomName("snap")), - RestoreMetaData.State.fromValue((byte) randomIntBetween(0, 3)), - ImmutableList.of(), - ImmutableMap.of())); - default: - throw new IllegalArgumentException("Shouldn't be here"); - } - } - - @Override - public MetaData.Custom randomChange(MetaData.Custom part) { - return part; - } - }); - } - - private String randomName(String prefix) { - return prefix + Strings.randomBase64UUID(getRandom()); - } -} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 83a27850591..cbbff463f20 100644 --- a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -81,7 +81,7 @@ public class ClusterSerializationTests extends ElasticsearchAllocationTestCase { RoutingTable source = strategy.reroute(clusterState).routingTable(); BytesStreamOutput outStream = new BytesStreamOutput(); - source.writeTo(outStream); + RoutingTable.Builder.writeTo(source, outStream); BytesStreamInput inStream = new BytesStreamInput(outStream.bytes().toBytes()); RoutingTable target = RoutingTable.Builder.readFrom(inStream); diff --git a/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java b/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java deleted file mode 100644 index d87d900a0e8..00000000000 --- a/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.serialization; - -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.cluster.Diff; -import org.elasticsearch.cluster.DiffableUtils; -import org.elasticsearch.cluster.DiffableUtils.KeyedReader; -import org.elasticsearch.cluster.AbstractDiffable; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.io.stream.*; -import org.elasticsearch.test.ElasticsearchTestCase; -import org.junit.Test; - -import java.io.IOException; -import java.util.Map; - -import static com.google.common.collect.Maps.newHashMap; -import static org.hamcrest.CoreMatchers.equalTo; - -public class DiffableTests extends ElasticsearchTestCase { - - @Test - public void testImmutableMapDiff() throws IOException { - ImmutableMap.Builder builder = ImmutableMap.builder(); - builder.put("foo", new TestDiffable("1")); - builder.put("bar", new TestDiffable("2")); - builder.put("baz", new TestDiffable("3")); - ImmutableMap before = builder.build(); - Map map = newHashMap(); - map.putAll(before); - map.remove("bar"); - map.put("baz", new TestDiffable("4")); - map.put("new", new TestDiffable("5")); - ImmutableMap after = ImmutableMap.copyOf(map); - Diff diff = DiffableUtils.diff(before, after); - BytesStreamOutput out = new BytesStreamOutput(); - diff.writeTo(out); - BytesStreamInput in = new BytesStreamInput(out.bytes()); - ImmutableMap serialized = DiffableUtils.readImmutableMapDiff(in, TestDiffable.PROTO).apply(before); - assertThat(serialized.size(), equalTo(3)); - assertThat(serialized.get("foo").value(), equalTo("1")); - assertThat(serialized.get("baz").value(), equalTo("4")); - assertThat(serialized.get("new").value(), equalTo("5")); - } - - @Test - public void testImmutableOpenMapDiff() throws IOException { - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); - builder.put("foo", new TestDiffable("1")); - builder.put("bar", new TestDiffable("2")); - builder.put("baz", new TestDiffable("3")); - ImmutableOpenMap before = builder.build(); - builder = ImmutableOpenMap.builder(before); - builder.remove("bar"); - builder.put("baz", new TestDiffable("4")); - builder.put("new", new TestDiffable("5")); - ImmutableOpenMap after = builder.build(); - Diff diff = DiffableUtils.diff(before, after); - BytesStreamOutput out = new BytesStreamOutput(); - diff.writeTo(out); - BytesStreamInput in = new BytesStreamInput(out.bytes()); - ImmutableOpenMap serialized = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { - @Override - public TestDiffable readFrom(StreamInput in, String key) throws IOException { - return new TestDiffable(in.readString()); - } - - @Override - public Diff readDiffFrom(StreamInput in, String key) throws IOException { - return AbstractDiffable.readDiffFrom(new StreamableReader() { - @Override - public TestDiffable readFrom(StreamInput in) throws IOException { - return new TestDiffable(in.readString()); - } - }, in); - } - }).apply(before); - assertThat(serialized.size(), equalTo(3)); - assertThat(serialized.get("foo").value(), equalTo("1")); - assertThat(serialized.get("baz").value(), equalTo("4")); - assertThat(serialized.get("new").value(), equalTo("5")); - - } - public static class TestDiffable extends AbstractDiffable { - - public static final TestDiffable PROTO = new TestDiffable(""); - - private final String value; - - public TestDiffable(String value) { - this.value = value; - } - - public String value() { - return value; - } - - @Override - public TestDiffable readFrom(StreamInput in) throws IOException { - return new TestDiffable(in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(value); - } - } - -} diff --git a/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java b/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java deleted file mode 100644 index 9ebffe58783..00000000000 --- a/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.xcontent; - -import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; - -public final class XContentTestUtils { - private XContentTestUtils() { - - } - - public static Map convertToMap(ToXContent part) throws IOException { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - part.toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return XContentHelper.convertToMap(builder.bytes(), false).v2(); - } - - - /** - * Compares to maps generated from XContentObjects. The order of elements in arrays is ignored - */ - public static boolean mapsEqualIgnoringArrayOrder(Map first, Map second) { - if (first.size() != second.size()) { - return false; - } - - for (String key : first.keySet()) { - if (objectsEqualIgnoringArrayOrder(first.get(key), second.get(key)) == false) { - return false; - } - } - return true; - } - - @SuppressWarnings("unchecked") - private static boolean objectsEqualIgnoringArrayOrder(Object first, Object second) { - if (first == null ) { - return second == null; - } else if (first instanceof List) { - if (second instanceof List) { - List secondList = Lists.newArrayList((List) second); - List firstList = (List) first; - if (firstList.size() == secondList.size()) { - for (Object firstObj : firstList) { - boolean found = false; - for (Object secondObj : secondList) { - if (objectsEqualIgnoringArrayOrder(firstObj, secondObj)) { - secondList.remove(secondObj); - found = true; - break; - } - } - if (found == false) { - return false; - } - } - return secondList.isEmpty(); - } else { - return false; - } - } else { - return false; - } - } else if (first instanceof Map) { - if (second instanceof Map) { - return mapsEqualIgnoringArrayOrder((Map) first, (Map) second); - } else { - return false; - } - } else { - return first.equals(second); - } - } - -} diff --git a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java index 0850cd5e095..430690ae146 100644 --- a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Before; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java index 228faa8cf4d..58e177b1115 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java @@ -32,6 +32,9 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -193,7 +196,12 @@ public class ZenDiscoveryTests extends ElasticsearchIntegrationTest { .put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), Version.CURRENT)).masterNodeId("abc"); ClusterState.Builder builder = ClusterState.builder(state); builder.nodes(nodes); - BytesReference bytes = PublishClusterStateAction.serializeFullClusterState(builder.build(), node.version()); + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(node.version()); + ClusterState.Builder.writeTo(builder.build(), stream); + stream.close(); + BytesReference bytes = bStream.bytes(); final CountDownLatch latch = new CountDownLatch(1); final AtomicReference reference = new AtomicReference<>(); diff --git a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index c97fa5b789d..c5adf8cb50e 100644 --- a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -443,11 +443,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - expected.writeTo(out); + MappingMetaData.writeTo(expected, out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -460,11 +460,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - expected.writeTo(out); + MappingMetaData.writeTo(expected, out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -477,11 +477,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - expected.writeTo(out); + MappingMetaData.writeTo(expected, out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } diff --git a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java index 3cc8a0cfe20..e1efe59776d 100644 --- a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.SlowClusterStateProcessing; import org.junit.Test; -import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; @@ -62,12 +61,6 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("path.data", "").build(); } - @Override - protected void ensureClusterStateConsistency() throws IOException { - // testShardActiveElseWhere might change the state of a non-master node - // so we cannot check state consistency of this cluster - } - @Test public void indexCleanup() throws Exception { final String masterNode = internalCluster().startNode(ImmutableSettings.builder().put("node.data", false)); diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index 230c6cae3ce..1c3f8f8c9ca 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -285,7 +285,6 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { } @Test - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/8802") public void testBrokenMapping() throws Exception { // clean all templates setup by the framework. client().admin().indices().prepareDeleteTemplate("*").get(); diff --git a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java index 8d569275aea..ff8264fdc03 100644 --- a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java @@ -38,9 +38,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.stream.StreamInput; @@ -750,7 +748,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests )); } - public static abstract class TestCustomMetaData extends AbstractDiffable implements MetaData.Custom { + public static abstract class TestCustomMetaData implements MetaData.Custom { private final String data; protected TestCustomMetaData(String data) { @@ -778,182 +776,194 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests return data.hashCode(); } - protected abstract TestCustomMetaData newTestCustomMetaData(String data); + public static abstract class TestCustomMetaDataFactory extends MetaData.Custom.Factory { - @Override - public Custom readFrom(StreamInput in) throws IOException { - return newTestCustomMetaData(in.readString()); - } + protected abstract TestCustomMetaData newTestCustomMetaData(String data); - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(getData()); - } + @Override + public T readFrom(StreamInput in) throws IOException { + return (T) newTestCustomMetaData(in.readString()); + } - @Override - public Custom fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - String data = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("data".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); + @Override + public void writeTo(T metadata, StreamOutput out) throws IOException { + out.writeString(metadata.getData()); + } + + @Override + public T fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + String data = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("data".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); + } + data = parser.text(); + } else { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]"); } - data = parser.text(); } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]"); + throw new ElasticsearchParseException("failed to parse snapshottable metadata"); } - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata"); } + if (data == null) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); + } + return (T) newTestCustomMetaData(data); } - if (data == null) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); - } - return newTestCustomMetaData(data); - } - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.field("data", getData()); - return builder; + @Override + public void toXContent(T metadata, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.field("data", metadata.getData()); + } } } - static { - MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO); - MetaData.registerPrototype(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.PROTO); - MetaData.registerPrototype(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.PROTO); - MetaData.registerPrototype(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.PROTO); - MetaData.registerPrototype(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.PROTO); + MetaData.registerFactory(SnapshottableMetadata.TYPE, SnapshottableMetadata.FACTORY); + MetaData.registerFactory(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.FACTORY); + MetaData.registerFactory(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.FACTORY); + MetaData.registerFactory(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.FACTORY); + MetaData.registerFactory(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.FACTORY); } public static class SnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable"; - public static final SnapshottableMetadata PROTO = new SnapshottableMetadata(""); + public static final Factory FACTORY = new Factory(); public SnapshottableMetadata(String data) { super(data); } - @Override - public String type() { - return TYPE; - } + private static class Factory extends TestCustomMetaDataFactory { - @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new SnapshottableMetadata(data); - } + @Override + public String type() { + return TYPE; + } - @Override - public EnumSet context() { - return MetaData.API_AND_SNAPSHOT; + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new SnapshottableMetadata(data); + } + + @Override + public EnumSet context() { + return MetaData.API_AND_SNAPSHOT; + } } } public static class NonSnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_non_snapshottable"; - public static final NonSnapshottableMetadata PROTO = new NonSnapshottableMetadata(""); + public static final Factory FACTORY = new Factory(); public NonSnapshottableMetadata(String data) { super(data); } - @Override - public String type() { - return TYPE; - } + private static class Factory extends TestCustomMetaDataFactory { - @Override - protected NonSnapshottableMetadata newTestCustomMetaData(String data) { - return new NonSnapshottableMetadata(data); - } + @Override + public String type() { + return TYPE; + } - @Override - public EnumSet context() { - return MetaData.API_ONLY; + @Override + protected NonSnapshottableMetadata newTestCustomMetaData(String data) { + return new NonSnapshottableMetadata(data); + } } } public static class SnapshottableGatewayMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable_gateway"; - public static final SnapshottableGatewayMetadata PROTO = new SnapshottableGatewayMetadata(""); + public static final Factory FACTORY = new Factory(); public SnapshottableGatewayMetadata(String data) { super(data); } - @Override - public String type() { - return TYPE; - } + private static class Factory extends TestCustomMetaDataFactory { - @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new SnapshottableGatewayMetadata(data); - } + @Override + public String type() { + return TYPE; + } - @Override - public EnumSet context() { - return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY); + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new SnapshottableGatewayMetadata(data); + } + + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY); + } } } public static class NonSnapshottableGatewayMetadata extends TestCustomMetaData { public static final String TYPE = "test_non_snapshottable_gateway"; - public static final NonSnapshottableGatewayMetadata PROTO = new NonSnapshottableGatewayMetadata(""); + public static final Factory FACTORY = new Factory(); public NonSnapshottableGatewayMetadata(String data) { super(data); } - @Override - public String type() { - return TYPE; - } + private static class Factory extends TestCustomMetaDataFactory { - @Override - protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) { - return new NonSnapshottableGatewayMetadata(data); - } + @Override + public String type() { + return TYPE; + } - @Override - public EnumSet context() { - return MetaData.API_AND_GATEWAY; - } + @Override + protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) { + return new NonSnapshottableGatewayMetadata(data); + } + @Override + public EnumSet context() { + return MetaData.API_AND_GATEWAY; + } + + } } public static class SnapshotableGatewayNoApiMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable_gateway_no_api"; - public static final SnapshotableGatewayNoApiMetadata PROTO = new SnapshotableGatewayNoApiMetadata(""); + public static final Factory FACTORY = new Factory(); public SnapshotableGatewayNoApiMetadata(String data) { super(data); } - @Override - public String type() { - return TYPE; - } + private static class Factory extends TestCustomMetaDataFactory { - @Override - protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) { - return new SnapshotableGatewayNoApiMetadata(data); - } + @Override + public String type() { + return TYPE; + } + + @Override + protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) { + return new SnapshotableGatewayNoApiMetadata(data); + } + + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); + } - @Override - public EnumSet context() { - return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); } } diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index be318020fc0..1c0c11bb5cd 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -166,8 +166,6 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; -import static org.elasticsearch.common.xcontent.XContentTestUtils.convertToMap; -import static org.elasticsearch.common.xcontent.XContentTestUtils.mapsEqualIgnoringArrayOrder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -359,7 +357,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase * Creates a randomized index template. This template is used to pass in randomized settings on a * per index basis. Allows to enable/disable the randomization for number of shards and replicas */ - public void randomIndexTemplate() throws IOException { + private void randomIndexTemplate() throws IOException { // TODO move settings for random directory etc here into the index based randomized settings. if (cluster().size() > 0) { @@ -652,7 +650,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase .transientSettings().getAsMap().size(), equalTo(0)); } ensureClusterSizeConsistency(); - ensureClusterStateConsistency(); cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete if (afterClass || currentClusterScope == Scope.TEST) { cluster().close(); @@ -1091,8 +1088,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase */ public void setMinimumMasterNodes(int n) { assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings( - settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) - .get().isAcknowledged()); + settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) + .get().isAcknowledged()); } /** @@ -1139,35 +1136,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } } - /** - * Verifies that all nodes that have the same version of the cluster state as master have same cluster state - */ - protected void ensureClusterStateConsistency() throws IOException { - if (cluster() != null) { - ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); - Map masterStateMap = convertToMap(masterClusterState); - int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; - for (Client client : cluster()) { - ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); - if (masterClusterState.version() == localClusterState.version()) { - try { - assertThat(masterClusterState.uuid(), equalTo(localClusterState.uuid())); - // We cannot compare serialization bytes since serialization order of maps is not guaranteed - // but we can compare serialization sizes - they should be the same - int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; - assertThat(masterClusterStateSize, equalTo(localClusterStateSize)); - - // Compare JSON serialization - assertThat(mapsEqualIgnoringArrayOrder(masterStateMap, convertToMap(localClusterState)), equalTo(true)); - } catch (AssertionError error) { - logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); - throw error; - } - } - } - } - } - /** * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each * shard is available on the cluster. diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 8c7ae1955f3..55b4b15af01 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -71,7 +71,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllFilesClosed; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSearchersClosed; @@ -618,17 +617,4 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { return threadGroup.getName(); } } - - /** - * Returns size random values - */ - public static List randomSubsetOf(int size, T... values) { - if (size > values.length) { - throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects"); - } - List list = newArrayList(values); - Collections.shuffle(list); - return list.subList(0, size); - } - } From 8d30c9a392765f49b53a60e085f3c5de0de868ec Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 27 Apr 2015 09:27:17 +0200 Subject: [PATCH 142/236] Tests: Mark the entire RiverTests suite as AwaitsFix. --- src/test/java/org/elasticsearch/river/RiverTests.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/river/RiverTests.java b/src/test/java/org/elasticsearch/river/RiverTests.java index 9cf698d661d..6587ca84c72 100644 --- a/src/test/java/org/elasticsearch/river/RiverTests.java +++ b/src/test/java/org/elasticsearch/river/RiverTests.java @@ -20,6 +20,8 @@ package org.elasticsearch.river; import com.google.common.base.Predicate; + +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetItemResponse; import org.elasticsearch.action.get.MultiGetRequestBuilder; @@ -29,15 +31,16 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.river.dummy.DummyRiverModule; import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.junit.Test; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE) +@AwaitsFix(bugUrl="occasionally fails apparently due to synchronous mappings updates") public class RiverTests extends ElasticsearchIntegrationTest { @Test @@ -46,7 +49,6 @@ public class RiverTests extends ElasticsearchIntegrationTest { } @Test - @AwaitsFix(bugUrl="occasionally fails apparently due to synchronous mappings updates") public void testMultipleRiversStart() throws Exception { int nbRivers = between(2,10); logger.info("--> testing with {} rivers...", nbRivers); From d7d39e1938a9f128ae000b797bca9168f97688ec Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 27 Apr 2015 10:19:59 +0200 Subject: [PATCH 143/236] Reduce code duplication in TransportIndexAction/TransportShardBulkAction. We have some duplication in TransportIndexAction/TransportShardBulkAction due to the fact that we have totally different branches for INDEX and CREATE operations. This commit tries to share the logic better between these two cases. --- .../action/bulk/TransportShardBulkAction.java | 105 +++++++----------- .../action/index/TransportIndexAction.java | 100 +++++++---------- .../elasticsearch/index/engine/Engine.java | 28 +++-- .../index/engine/InternalEngine.java | 20 ++-- .../index/engine/ShadowEngine.java | 2 +- .../elasticsearch/index/shard/IndexShard.java | 14 ++- .../index/engine/InternalEngineTests.java | 15 +-- 7 files changed, 121 insertions(+), 163 deletions(-) diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index d5009544a47..3a96f3aeff3 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -359,72 +359,46 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source()).type(indexRequest.type()).id(indexRequest.id()) .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl()); - long version; - boolean created; - Engine.IndexingOperation op; + final Engine.IndexingOperation operation; if (indexRequest.opType() == IndexRequest.OpType.INDEX) { - Engine.Index index = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); - Mapping update = index.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - final String indexName = indexService.index().name(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - // With rivers, we have a chicken and egg problem if indexing - // the _meta document triggers a mapping update. Because we would - // like to validate the mapping update first, but on the other - // hand putting the mapping would start the river, which expects - // to find a _meta document - // So we have no choice but to index first and send mappings afterwards - MapperService mapperService = indexService.mapperService(); - mapperService.merge(indexRequest.type(), new CompressedString(update.toBytes()), true); - indexShard.index(index); - mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, indexRequest.type(), update); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexRequest.type(), update); - indexShard.index(index); - } - } else { - indexShard.index(index); - } - version = index.version(); - op = index; - created = index.created(); + operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); } else { - Engine.Create create = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, + assert indexRequest.opType() == IndexRequest.OpType.CREATE : indexRequest.opType(); + operation = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId()); - Mapping update = create.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - final String indexName = indexService.index().name(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - // With rivers, we have a chicken and egg problem if indexing - // the _meta document triggers a mapping update. Because we would - // like to validate the mapping update first, but on the other - // hand putting the mapping would start the river, which expects - // to find a _meta document - // So we have no choice but to index first and send mappings afterwards - MapperService mapperService = indexService.mapperService(); - mapperService.merge(indexRequest.type(), new CompressedString(update.toBytes()), true); - indexShard.create(create); - mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, indexRequest.type(), update); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexRequest.type(), update); - indexShard.create(create); - } - } else { - indexShard.create(create); - } - version = create.version(); - op = create; - created = true; } + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + final boolean created; + if (update != null) { + final String indexName = indexService.index().name(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. Because we would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document + // So we have no choice but to index first and send mappings afterwards + MapperService mapperService = indexService.mapperService(); + mapperService.merge(indexRequest.type(), new CompressedString(update.toBytes()), true); + created = operation.execute(indexShard); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, indexRequest.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexRequest.type(), update); + created = operation.execute(indexShard); + } + } else { + created = operation.execute(indexShard); + } + // update the version on request so it will happen on the replicas + final long version = operation.version(); indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery()); indexRequest.version(version); assert indexRequest.versionType().validateVersionForWrites(indexRequest.version()); - IndexResponse indexResponse = new IndexResponse(request.index(), indexRequest.type(), indexRequest.id(), version, created); - return new WriteResult(indexResponse, op); + return new WriteResult(indexResponse, operation); } private WriteResult shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) { @@ -548,23 +522,20 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, indexRequest.source()).type(indexRequest.type()).id(indexRequest.id()) .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl()); + final Engine.IndexingOperation operation; if (indexRequest.opType() == IndexRequest.OpType.INDEX) { - Engine.Index index = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); - Mapping update = index.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); - } - indexShard.index(index); + operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); } else { - Engine.Create create = indexShard.prepareCreate(sourceToParse, + assert indexRequest.opType() == IndexRequest.OpType.CREATE : indexRequest.opType(); + operation = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId()); - Mapping update = create.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); - } - indexShard.create(create); } + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); + } + operation.execute(indexShard); } catch (Throwable e) { // if its not an ignore replica failure, we need to make sure to bubble up the failure // so we will fail the shard diff --git a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 2fd801c025d..8e81009b653 100644 --- a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.index.IndexRequest.OpType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; @@ -172,62 +173,39 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).type(request.type()).id(request.id()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); - long version; - boolean created; + final Engine.IndexingOperation operation; if (request.opType() == IndexRequest.OpType.INDEX) { - Engine.Index index = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates()); - Mapping update = index.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - final String indexName = indexService.index().name(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - // With rivers, we have a chicken and egg problem if indexing - // the _meta document triggers a mapping update. Because we would - // like to validate the mapping update first, but on the other - // hand putting the mapping would start the river, which expects - // to find a _meta document - // So we have no choice but to index first and send mappings afterwards - MapperService mapperService = indexService.mapperService(); - mapperService.merge(request.type(), new CompressedString(update.toBytes()), true); - indexShard.index(index); - mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); - indexShard.index(index); - } - } else { - indexShard.index(index); - } - version = index.version(); - created = index.created(); + operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates()); } else { assert request.opType() == IndexRequest.OpType.CREATE : request.opType(); - Engine.Create create = indexShard.prepareCreate(sourceToParse, + operation = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates(), request.autoGeneratedId()); - Mapping update = create.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - final String indexName = indexService.index().name(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - // With rivers, we have a chicken and egg problem if indexing - // the _meta document triggers a mapping update. Because we would - // like to validate the mapping update first, but on the other - // hand putting the mapping would start the river, which expects - // to find a _meta document - // So we have no choice but to index first and send mappings afterwards - MapperService mapperService = indexService.mapperService(); - mapperService.merge(request.type(), new CompressedString(update.toBytes()), true); - indexShard.create(create); - mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); - indexShard.create(create); - } - } else { - indexShard.create(create); - } - version = create.version(); - created = true; } + + final boolean created; + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + final String indexName = indexService.index().name(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. Because we would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document + // So we have no choice but to index first and send mappings afterwards + MapperService mapperService = indexService.mapperService(); + mapperService.merge(request.type(), new CompressedString(update.toBytes()), true); + created = operation.execute(indexShard); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); + created = operation.execute(indexShard); + } + } else { + created = operation.execute(indexShard); + } + if (request.refresh()) { try { indexShard.refresh("refresh_flag_index"); @@ -237,6 +215,7 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi } // update the version on the request, so it will be used for the replicas + final long version = operation.version(); request.version(version); request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); @@ -250,22 +229,19 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi IndexShard indexShard = indexService.shardSafe(shardId.id()); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).type(request.type()).id(request.id()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); + + final Engine.IndexingOperation operation; if (request.opType() == IndexRequest.OpType.INDEX) { - Engine.Index index = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates()); - Mapping update = index.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); - } - indexShard.index(index); + operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates()); } else { assert request.opType() == IndexRequest.OpType.CREATE : request.opType(); - Engine.Create create = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates(), request.autoGeneratedId()); - Mapping update = create.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); - } - indexShard.create(create); + operation = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates(), request.autoGeneratedId()); } + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); + } + operation.execute(indexShard); if (request.refresh()) { try { indexShard.refresh("refresh_flag_index"); diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java index ce79c60b527..516313ddd28 100644 --- a/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -200,7 +201,7 @@ public abstract class Engine implements Closeable { public abstract void create(Create create) throws EngineException; - public abstract void index(Index index) throws EngineException; + public abstract boolean index(Index index) throws EngineException; public abstract void delete(Delete delete) throws EngineException; @@ -704,6 +705,12 @@ public abstract class Engine implements Closeable { public long endTime() { return this.endTime; } + + /** + * Execute this operation against the provided {@link IndexShard} and + * return whether the document was created. + */ + public abstract boolean execute(IndexShard shard); } public static final class Create extends IndexingOperation { @@ -732,10 +739,15 @@ public abstract class Engine implements Closeable { public boolean autoGeneratedId() { return this.autoGeneratedId; } + + @Override + public boolean execute(IndexShard shard) { + shard.create(this); + return true; + } } public static final class Index extends IndexingOperation { - private boolean created; public Index(DocumentMapper docMapper, Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime, boolean canHaveDuplicates) { super(docMapper, uid, doc, version, versionType, origin, startTime, canHaveDuplicates); @@ -754,15 +766,9 @@ public abstract class Engine implements Closeable { return Type.INDEX; } - /** - * @return true if object was created - */ - public boolean created() { - return created; - } - - public void created(boolean created) { - this.created = created; + @Override + public boolean execute(IndexShard shard) { + return shard.index(this); } } diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 7086ee6986b..267a9042ea3 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -364,15 +364,16 @@ public class InternalEngine extends Engine { } @Override - public void index(Index index) throws EngineException { + public boolean index(Index index) throws EngineException { + final boolean created; try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (index.origin() == Operation.Origin.RECOVERY) { // Don't throttle recovery operations - innerIndex(index); + created = innerIndex(index); } else { try (Releasable r = throttle.acquireThrottle()) { - innerIndex(index); + created = innerIndex(index); } } flushNeeded = true; @@ -381,6 +382,7 @@ public class InternalEngine extends Engine { throw new IndexFailedEngineException(shardId, index, t); } checkVersionMapRefresh(); + return created; } /** @@ -410,7 +412,7 @@ public class InternalEngine extends Engine { } } - private void innerIndex(Index index) throws IOException { + private boolean innerIndex(Index index) throws IOException { synchronized (dirtyLock(index.uid())) { final long currentVersion; VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes()); @@ -428,17 +430,18 @@ public class InternalEngine extends Engine { long expectedVersion = index.version(); if (index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) { if (index.origin() == Operation.Origin.RECOVERY) { - return; + return false; } else { throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion); } } updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion); + final boolean created; index.updateVersion(updatedVersion); if (currentVersion == Versions.NOT_FOUND) { // document does not exists, we can optimize for create - index.created(true); + created = true; if (index.docs().size() > 1) { indexWriter.addDocuments(index.docs()); } else { @@ -446,7 +449,9 @@ public class InternalEngine extends Engine { } } else { if (versionValue != null) { - index.created(versionValue.delete()); // we have a delete which is not GC'ed... + created = versionValue.delete(); // we have a delete which is not GC'ed... + } else { + created = false; } if (index.docs().size() > 1) { indexWriter.updateDocuments(index.uid(), index.docs()); @@ -459,6 +464,7 @@ public class InternalEngine extends Engine { versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation)); indexingService.postIndexUnderLock(index); + return created; } } diff --git a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 6771b432176..511b9ae9955 100644 --- a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -108,7 +108,7 @@ public class ShadowEngine extends Engine { } @Override - public void index(Index index) throws EngineException { + public boolean index(Index index) throws EngineException { throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine"); } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 0ff0c420f02..546d66cbdaa 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -470,7 +470,7 @@ public class IndexShard extends AbstractIndexShardComponent { return new Engine.Create(docMapper.v1(), docMapper.v1().uidMapper().term(doc.uid().stringValue()), doc, version, versionType, origin, startTime, canHaveDuplicates, autoGeneratedId); } - public ParsedDocument create(Engine.Create create) throws ElasticsearchException { + public void create(Engine.Create create) throws ElasticsearchException { writeAllowed(create.origin()); create = indexingService.preCreate(create); mapperAnalyzer.setType(create.type()); @@ -485,7 +485,6 @@ public class IndexShard extends AbstractIndexShardComponent { throw ex; } indexingService.postCreate(create); - return create.parsedDoc(); } public Engine.Index prepareIndex(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) throws ElasticsearchException { @@ -501,22 +500,27 @@ public class IndexShard extends AbstractIndexShardComponent { return new Engine.Index(docMapper.v1(), docMapper.v1().uidMapper().term(doc.uid().stringValue()), doc, version, versionType, origin, startTime, canHaveDuplicates); } - public ParsedDocument index(Engine.Index index) throws ElasticsearchException { + /** + * Index a document and return whether it was created, as opposed to just + * updated. + */ + public boolean index(Engine.Index index) throws ElasticsearchException { writeAllowed(index.origin()); index = indexingService.preIndex(index); mapperAnalyzer.setType(index.type()); + final boolean created; try { if (logger.isTraceEnabled()) { logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs()); } - engine().index(index); + created = engine().index(index); index.endTime(System.nanoTime()); } catch (Throwable ex) { indexingService.postIndex(index, ex); throw ex; } indexingService.postIndex(index); - return index.parsedDoc(); + return created; } public Engine.Delete prepareDelete(String type, String id, long version, VersionType versionType, Engine.Operation.Origin origin) throws ElasticsearchException { diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 3673b7f889d..2ac8608fa96 100644 --- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1354,34 +1354,29 @@ public class InternalEngineTests extends ElasticsearchTestCase { public void testBasicCreatedFlag() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(null, newUid("1"), doc); - engine.index(index); - assertTrue(index.created()); + assertTrue(engine.index(index)); index = new Engine.Index(null, newUid("1"), doc); - engine.index(index); - assertFalse(index.created()); + assertFalse(engine.index(index)); engine.delete(new Engine.Delete(null, "1", newUid("1"))); index = new Engine.Index(null, newUid("1"), doc); - engine.index(index); - assertTrue(index.created()); + assertTrue(engine.index(index)); } @Test public void testCreatedFlagAfterFlush() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(null, newUid("1"), doc); - engine.index(index); - assertTrue(index.created()); + assertTrue(engine.index(index)); engine.delete(new Engine.Delete(null, "1", newUid("1"))); engine.flush(); index = new Engine.Index(null, newUid("1"), doc); - engine.index(index); - assertTrue(index.created()); + assertTrue(engine.index(index)); } private static class MockAppender extends AppenderSkeleton { From 1b8b9939124a460e0844e57066b78466539d57c0 Mon Sep 17 00:00:00 2001 From: markharwood Date: Fri, 24 Apr 2015 16:30:12 +0100 Subject: [PATCH 144/236] Query enhancement: Enable Lucene ranking behaviour for queries on numeric fields. This changes the default ranking behaviour of single-term queries on numeric fields to use the usual Lucene TermQuery scoring logic rather than a constant-scoring wrapper. Closes #10628 --- docs/reference/migration/migrate_2_0.asciidoc | 5 +++++ .../index/mapper/core/NumberFieldMapper.java | 4 +--- .../index/query/SimpleIndexQueryParserTests.java | 14 +++++--------- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 91462645bd5..4d9ca005706 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -381,6 +381,11 @@ be used separately to control whether `routing_nodes` should be returned. === Query DSL +Change to ranking behaviour: single-term queries on numeric fields now score in the same way as string fields (use of IDF, norms if enabled). +Previously, term queries on numeric fields were deliberately prevented from using the usual Lucene scoring logic and this behaviour was undocumented and, to some, unexpected. +If the introduction of scoring to numeric fields is undesirable for your query clauses the fix is simple: wrap them in a `constant_score` or use a `filter` expression instead. + + The `fuzzy_like_this` and `fuzzy_like_this_field` queries have been removed. The `limit` filter is deprecated and becomes a no-op. You can achieve similar diff --git a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 54dfe165171..280f139d459 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -34,7 +34,6 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.index.Term; -import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -276,8 +275,7 @@ public abstract class NumberFieldMapper extends AbstractFieldM @Override public final Query termQuery(Object value, @Nullable QueryParseContext context) { - TermQuery scoringQuery = new TermQuery(new Term(names.indexName(), indexedValueForSearch(value))); - return new ConstantScoreQuery(scoringQuery); + return new TermQuery(new Term(names.indexName(), indexedValueForSearch(value))); } @Override diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index b258420a245..3b8a3512aeb 100644 --- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -464,7 +464,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { public void testTermQueryBuilder() throws IOException { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(termQuery("age", 34).buildAsBytes()).query(); - TermQuery fieldQuery = unwrapTermQuery(parsedQuery, true); + TermQuery fieldQuery = unwrapTermQuery(parsedQuery); assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l))); } @@ -472,15 +472,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { public void testTermQuery() throws IOException { IndexQueryParserService queryParser = queryParser(); String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term.json"); - TermQuery fieldQuery = unwrapTermQuery(queryParser.parse(query).query(), true); + TermQuery fieldQuery = unwrapTermQuery(queryParser.parse(query).query()); assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l))); } - private static TermQuery unwrapTermQuery(Query q, boolean expectConstantWrapper) { - if (expectConstantWrapper) { - assertThat(q, instanceOf(ConstantScoreQuery.class)); - q = ((ConstantScoreQuery) q).getQuery(); - } + private static TermQuery unwrapTermQuery(Query q) { assertThat(q, instanceOf(TermQuery.class)); return (TermQuery) q; } @@ -545,7 +541,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(termQuery("age", 34).boost(2.0f)).query(); - TermQuery fieldQuery = unwrapTermQuery(parsedQuery, true); + TermQuery fieldQuery = unwrapTermQuery(parsedQuery); assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l))); assertThat((double) parsedQuery.getBoost(), closeTo(2.0, 0.01)); } @@ -563,7 +559,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-with-boost.json"); Query parsedQuery = queryParser.parse(query).query(); - TermQuery fieldQuery = unwrapTermQuery(parsedQuery, true); + TermQuery fieldQuery = unwrapTermQuery(parsedQuery); assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l))); assertThat((double) parsedQuery.getBoost(), closeTo(2.0, 0.01)); } From 924479369feb1911085dc2e1b4362286f7129117 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Mon, 27 Apr 2015 09:50:04 +0200 Subject: [PATCH 145/236] Release script: Fix wrong argument for string formatting --- dev-tools/build_release.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index 02620c5ed34..d5038d90364 100644 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -248,7 +248,7 @@ def build_release(run_tests=False, dry_run=True, cpus=1, bwc_version=None): print('Running Backwards compatibility tests against version [%s]' % (bwc_version)) run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version) run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"') - gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true' % (target, env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE')) + gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true' % (env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE')) if env.get('GPG_KEYRING'): gpg_args += ' -Dgpg.keyring="%s"' % env.get('GPG_KEYRING') run_mvn('clean %s -DskipTests %s' % (target, gpg_args)) From 467b4f6e2dab552ddab8ea05f1a740792926ed75 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Mon, 27 Apr 2015 12:57:25 +0200 Subject: [PATCH 146/236] Versioning: Added recent 1.4 and 1.5 releases --- src/main/java/org/elasticsearch/Version.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/Version.java b/src/main/java/org/elasticsearch/Version.java index d34f1fb2f97..794dbdf49ab 100644 --- a/src/main/java/org/elasticsearch/Version.java +++ b/src/main/java/org/elasticsearch/Version.java @@ -227,13 +227,17 @@ public class Version { public static final int V_1_4_4_ID = 1040499; public static final Version V_1_4_4 = new Version(V_1_4_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3); public static final int V_1_4_5_ID = 1040599; - public static final Version V_1_4_5 = new Version(V_1_4_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final Version V_1_4_5 = new Version(V_1_4_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final int V_1_4_6_ID = 1040699; + public static final Version V_1_4_6 = new Version(V_1_4_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_5_0_ID = 1050099; public static final Version V_1_5_0 = new Version(V_1_5_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_5_1_ID = 1050199; public static final Version V_1_5_1 = new Version(V_1_5_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_5_2_ID = 1050299; - public static final Version V_1_5_2 = new Version(V_1_5_2_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final Version V_1_5_2 = new Version(V_1_5_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final int V_1_5_3_ID = 1050399; + public static final Version V_1_5_3 = new Version(V_1_5_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_6_0_ID = 1060099; public static final Version V_1_6_0 = new Version(V_1_6_0_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_2_0_0_ID = 2000099; @@ -255,12 +259,16 @@ public class Version { return V_2_0_0; case V_1_6_0_ID: return V_1_6_0; + case V_1_5_3_ID: + return V_1_5_3; case V_1_5_2_ID: return V_1_5_2; case V_1_5_1_ID: return V_1_5_1; case V_1_5_0_ID: return V_1_5_0; + case V_1_4_6_ID: + return V_1_4_6; case V_1_4_5_ID: return V_1_4_5; case V_1_4_4_ID: From ba4ec6bca573bc3b9a227ba16d9f95e5f5fbd16e Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Mon, 27 Apr 2015 11:25:58 +0200 Subject: [PATCH 147/236] Docs: Updated current version --- docs/reference/index.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 3288cad5855..1e63d18a4d2 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-reference]] = Reference -:version: 1.5.1 +:version: 1.5.2 :branch: 1.5 :jdk: 1.8.0_25 :defguide: https://www.elastic.co/guide/en/elasticsearch/guide/current From b3723cdb280c8d133978675795c1ca3baacf5030 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 27 Apr 2015 10:53:29 +0100 Subject: [PATCH 148/236] Fix to let eclipse ignore the antigun plugin in it's maven integration This plugin should be ignored as it will make the internal eclipse build fail when there are NOCOMMIT comments in files, which are expected during feature development. This change only affects eclipse users and only when they are using the m2e eclipse integration --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 86dcb460825..cdb74c02c0d 100644 --- a/pom.xml +++ b/pom.xml @@ -1626,7 +1626,7 @@ - + From 2f777e473620fe169a170ef6adf15ab78778bb22 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 27 Apr 2015 13:56:02 +0200 Subject: [PATCH 149/236] Release: Add bw indices for 1.4.5 and 1.5.2. --- .../org/elasticsearch/bwcompat/index-1.4.5.zip | Bin 0 -> 80472 bytes .../org/elasticsearch/bwcompat/index-1.5.2.zip | Bin 0 -> 80917 bytes .../org/elasticsearch/bwcompat/repo-1.4.5.zip | Bin 0 -> 76619 bytes .../org/elasticsearch/bwcompat/repo-1.5.2.zip | Bin 0 -> 75603 bytes 4 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip create mode 100644 src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip create mode 100644 src/test/resources/org/elasticsearch/bwcompat/repo-1.4.5.zip create mode 100644 src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip new file mode 100644 index 0000000000000000000000000000000000000000..da4b85f084e22400b6af9912036d83f9a6956f87 GIT binary patch literal 80472 zcmb@t1z6lql0OVVL(pI|Sa5fT5Zv9}VFnvqf|EdSXK)Gb?jGFTJ%T#~2pZsp-|uek zZr|J8`|s}esrpXWG}Be}>FR2E8f7^+codlDr_3%*@{huQybxfBV9ZTiOjtD3U&6p~ zPzY*>Ke4O(D;QY#T{sw+KVHgz^XIt#`E%3vW=2+a<{)wfR3#XZmlb z|37=e`_HH{a&|Fs0sTYlaGv4}8Rpr!2}_(I!Ka8ag@=I=`WH0*p_hsMk9hqXs2Zp$ z*kelpE{~V1#Uvc8N2TP4TxO&tP%4D79a#>hHAC_=V`;NW_7Bb9$3d1dbiiOJU?i@X z9`ju`N^i9$gXexZ4Z1Yn#k`oGFw6NHPb+LPwpw(Y#f>lLf<`AZe9p9uS7O56ZO#pb zwi+v5?;BV*V5$7PClFp=zldaO9;wEJUHa}7Jzb~~CvKB0(t-|*>gg0)Eq>?i+{=S? z-mK=w3MBfj`7Q{z;!&r4zN9`W$uC`gro$T5hfBY_F}2>a)~BLjRp46_k4G%!$4Af! zR8EWKQrDt#wfBva_nv9(NbAw@!y0m5HQx0J$Hq@Bt;kWH6fb)WNT#Qoh4FxViZOZT zDj%d=g@FGieRp8-xQT-=BIKR^i#gF6Jkvos>}mMi8V>uwc*x-Iw~9l>+WJ*6@&T6K zw)%BJ`(9w*LJ!20!a~G`BnJ_SCPkf3@P3ig?*z&nzq^rg|G1Vnl=0W2m6t3ulGf%t zqS7Ns_=#_PJZQac;;)BBH8h{@RASK=|4P}(K0-?>JgZSJHgHaVO;G}XQ6FNseE@|c z#KO6>FW%*{6APDi4rm>abCdc=wqEK*%N&Ju&YkZ{?*(_&Sge=wAVhjvTxhT1v z8|SY8A4t#1%tDNG@FT_|%o3x4K`=pXsN+<7(l+5cO zE0kfsTwOI**3%5^KtQNMKG%K#uu!1SGDnW4*1466)yRElvon71RtNjuRzK6H$7}6a z+U)ZBpulLaQ*x@!t7Zk}|aZj=;mg-&<2YJr8(g zP0=4u+}5P1#a7PB3+j9w@yzE5GZASNL=moO(zY><10vXn9^NAb!mcNu9G#p9A^lgj z|LdH@@y`s~e>T7J|8vUzpYsyzXbCpuQxym2RslE7Qv{(T|T#sHs+| zvQERk#K8e=Q{!p)!^?)rsx@1~{ulnWe>IZ{{B!2HIGNZvL+ma7Ox7ZQd!qX@S=T*j zofeKvRD1%&;E7fLMXmp-o{5cCnva&dyHcUJ^$6`8nCZ{9=g&SxuBVS>dG%byfx%A(nSvcsq*VyBT0Z34pt_wD|!k zL5TIjorCFdnWI%5GP{g0|AptXXHO3O<>CA1Jo{tD{S(Y*2mZ!_>A%2iQm3KCe^UA! zWfK32N@tLTEy&Krnb`tl_ZR&B7xj0KkB>%($cwLXF)ZbMVL=?c2JR&Ye*p1cR>9yg zy#zg3wDrW|{|>8+xcL5_QGa%7dg|x2R4vufr%oM(K^&b&MU8M4LEXO2<5bL?-kfby z#}*5CREAMKe}l#<467`~Kg6#pGXo1Bu}UQo>e0t{v-dNFt}TeVA2F!E0F0QJh}e(B zN!{^*?#`~3lIXv=_&j!+671 z+3tV9XF(5opA0C5z+0#WgwbWlI1<3%!0ZMS3fU5Y^jHDc~4}KEWT}7>(Feb&+nz0ym@%0St3dg zAuu{m>yv4dGcEXm*pHc92KXszUAc7+RxX_idu;Am3xdl~(3OTiQC4>`&y9w+ych8-|4jsJs`V@2CPisNpenTZzM$ggL+xOZiHKiMK@bnFeuzw9~v4k-Q_NI?2#bd9} z?na_Hl*-e}IE<@)v2z+GDRq6iu5J`X67KSP{r7avrn)4ec`i_ULyrEHYD$s)8!2xc zYZXVZF9`=6Ma_~Es}J+dkVz`I<8Z;%#VZXU#_QOKkH) z|0X`F^691&9mQV-dzN&04EGbRQcwIZ<7(&jk8$1q4_x<>jN)Pq z!}xqo}Bl*&kqEY<9d9)`^HvUdE&x^Ak2ZdsT!AC$~JH^H*M|FU=k^K6xPWlmh?F zUi@RkxBt_N?{S$#e>(CEJ1a`nhv&HHTs&?RH~bU#KTju~^8?DD#`gy#7?@|4{#S7a zIkWOTV{QuhyYVcv0KrGq7KPZ`+e%tNTtH4wh`m7m&rW+1p3wWtbCvL6@vt+n@4d?Y zuhEIFzM37=61Kj(;xe&b5!`ne-S0qfLkC6V5O;+Z)=wNwJ~}KkwUWn9vXA>nOGrG? z`{g+YE@#A196cGY942p0d}b_-s{>mQTI@h2eAvk+Ch_8OIxdrdx+_vj(tSDw~O9SAgByFkZ|d@{F40 z`7m_`8#-J_OLKl+Uo&V`-Uf#$D-!_U^6}}S=S&V>dOHp9MpmsFuoR+L9ic+Jt3muz z{4D@YN>^XUk+;znUB{qhQNcka9kEZQLG=5DC`2B}KYKgqCz=Al6`wKz@D5zpipIOJ zR3F~QqM53XV0NZ_0_7Y7+pS#}$oVcl8ZyyePTKh{pPk3$h=5pK99G8YEPjMa66S2IOSY5_j#gI6rKWV){<9bz@9t z=AB~ziP~9c@s3oU>=^C+@hc_G2UHLz^5e7kD_L`|^IvrNdqM=k zUThB#FzV#&7uUijj+J&UH8Kr7Qi1|T(*MXF%DWmRe`FTdSXnz)aY6uYkwH)4r_t4)&$7wIp` zlOjQOz;HF6&+z2gimVKEXnP1sSBh}`3_}}brIhZD7dv^2*18X?BI(l$iCJb&9yB`- z&>YDJR(~Gp&5kd>ZBzA#IzvlAl7blDBC%c`h%-z9kr+&}eMRteFu7z%Up`tb%*K#U zv>^-$o8(yv@1Z5nGh4PH8GPsQ=PX6j%Mv%sqBO(vZ}1DQkj74w_YK_g#A{YuvT7(# ztHK`vYP-;X4J6E?zg;#?vp9GEv=>3H%xEE*Z!wJDheQzB+(otmnxFV_OeiC9N}xyL zslmj3-{s$i5Z&Bq&#bVoBHov?;`17L^Y_iSq*lK6!5CwWyV(}f&*f`Jt`^6c3ZeC5 zU;Q>#Y%-^`68O{Mln)~T?{MY5rZlF!UG}HSx#1WksUb;r71H`d9bX#WNIp%ue4-@J zbtSm%I#We|G8BU_68_VGb9!_h9&|yDE#BK8f<=>hvLg2um;b1*PNfZsx6x2sI3a?{ zyifFYp$!N}3MW&<9kIV2HCJ(+h!{6G`T>Nvb8b|ZzZt{uYP?+b(+`ObO3xF90GVc$ zQxieLQFXj@6(=l8J@oS7$4DoKpVKq0zfsv2cCGDYVqP`&HL!7L@HpCU$@}ptYHh8`8nm7}M1QWQ%@6hr!3=U6TNR|DGi5 zpl-$1L~ZWjX87Y7{?}G#Wya+FiyXI?ByuctLDMRNk1u2t9`kikr_+Rjx`~n2f7wG( z)ZeDKyfYDCzcUZ|YBEL#s>RIS8zmy0BfKQ4o#kQ9q&kZvC7!H%NH|}q3Qv=~uHbOy z*Lxkf#1T8L1gSxEup{6wtf_Hq*5s>EM(@5z@?m6EUkng;?mhbLgn%=o5Rs){>;!N- zydJ9gVJyxi+GB@2$GmGrB+IjeqiYoTF=%%N+jZ1sjOsw7hzHs$%*(>njdQWOEIcH1 zK-P3b%I@bj)6Yr;y^RS;+L+UZ#hj!t*3O<4#@Zpc!*Equ+rUyHO}Z{jG?+!Su4zyK z3w?oG6mY;jCXk(|wN1zQr0Voy+Eb_h-V=v-W-Po;EePoG$v9Y#raz2ume3hTdA!10 z(JjTE#0u*nczo&2`d+LD0%G#=L#~AOAgb5yFzkI5Z~l+X@Ntq0GJJ&JABtYYFA8s1 zs4(jv&0tZc-4+o$u@`msIF+{%;uC&ZWaCrOe7V_grmu&jfO&4-NeU}T)++_k$(N%z zSFL^94oCg{Q>umaFuP9AN67}o#6|aPOViuL2x-?gdT39rOMrAGkgi5SNx=l@akoNc zE$U#Ai{Q-!{2Y8HwN3~0Phi6j-37FGum}MsQk>J#Oqzw-CT=KQwBMTrLwIxgs_P}9 zaZxeCMdSz}zRa*x#>N4uZE7WydcXo$=f3V%rIgE@#iY?{l@LPFbMGVoOP#6j7xiNf z0^5guc@R9c-SCLmUxTt*GV+-KmvRif?uiV2XgPM{vKX(70dy4CSCgCjgK zY$q*r_djRzKJnh5|G7UA{7IME{xsize(`Tyf4EpV{KNI>_U6XPA@Q5noBITB)(JKV ziB9%*)(N_v_9~_r@nE-C=&;p50Kz9f-BVlc4R@RFw$+e8D|R6`WLEa95>xIYvudw3z)8^usTpl}=>o9HO=~gl z5wnt&Ua&^mD8egWnFzPm&;2cZFXtupO#QjOWJyGM5b7{w2yz3VgY24cZQyf6DgSPZ zOzDb*2KQxjeN2mu1Wj;)nXM8Nh$0xJxN8ZEIF7hY&6^*RGh|Y?_9($5u84ME?*7vG z2uAzmCAeO7B7it5K%4ifq&hv`%rwO#RZawE%zHjjV-!_ZUq50P6?GLAGKgupVr0La zAQ_=Wm7??Iep>@mY;I)CcRu~5^d+{9o)G@*&$9~0t?-m9_6bn)&zf!t z7`*9T6FfV<=i}*D=~wsT0&VU)r!+eC9d|t)?pH`}+8#HqzTSPmzw-UPfvzh z3ifAITk2iUH=paD_JWFdkGnGy9oN6?eSdkZO!(gS4P>@I{tgKtlAvYj#RH~+}XoZ5W@!|5ApV#4jqchsW}8(DAA z$4MHnmBlGsZxF@Ns$-Vr&PC6cr7jPl&1---TQP0)pV7uvt|*ILF65;^^oDX_ft25v zXl>`4#@~++fWJ)9v(Ph@>9yu)m>lbsJLigTr0*WZwbXxuK-F^VSPQ{cwQU4jT=k?xhvkM&jm_bf_iykR|2a3au+88yCp1z}o=oUy`26881cn z;)Y*rd*nbCk%?+bh!A|j^TlAAw-DrOcP*LYIGl;Lk#?nBUhhmDiar-UR z4REX6Gz_XMFlZht-EU4AF2PPsk%Bpt9AO^%EJMk4613>!*nmUqu@OVkiQ(q*lI&E^ zGD>U&9rN6J*)+KdL2e>m_%xuNlT6lZcKY?ROf6v*p{q+TTY5Gk{#;^yu8F6ORFPv4 zw~}|1lQDUhk2gj&6&P2r9e0K|8&_Ggbvu$(oX45HDScaOnZG%$Ure)LV!M>76idlV zR+t?*8o6g5p~fXa$M;U?ApKose;U(PNvTnzro?%-ce5ym>KNfp+fG(eLc--)s}R=|w$js2 z_W4WP9DxE}vOP;OI_7wGmiXC*n0OwVk~mkZgiQ`Qs7pU*LyV(0r%H`p5F#HwVr z;B$HyyGOe;3EFyb46hL3!iCG%#@AzKU+r`nH9bsf`9Xmm^~1WG^U~ZC;a?$8y^W0N z)uOyA$L;sSDCghoW?1hc@eoqBm?sv3>3ewfQo`qr_hX=H7*6q<3RE z{Y!ml1fce*{ld8QcsvYp4N6ruV6kaz*pM`pI6XCm2Klp`C*5bBCoAzRQ$Nd2&-MA@ zB`AfWd8T0uoF1ACVd4@Ix+;wHrn6z6ib7NvWS`|&99%YQiQ;E@_>X$#=lb|(nO7y*R85dtXBPe+e2aBJUD-hJpe_ChH@z;^5`s? zt3ZFrC$dwKg@={V)Wi?clfMcnBd&oAhLME_mSH0dzwzihRC2j)KczXn1 z)*Yv)h#_sLBQ*=EAwe6vrN#8uynrDo7tTHN>7`9MC^NNz0=+^@iVM%4$wOWH zh4fzZG(kRttXRAut*tVD7N^jj*YsJu)1J%pW?qMk4YYz9O~Eszj&09)x-C8)r(?D~ zzD>Oqr)9P&zEQmar+&6pl?=p)%h<=@%0R_1M;}PfMZZOtuyhu!bp7aFr$xWwE3X7K zq=qVZrd_b@*-TI5bqv8kKT`V*kV3ntq4ND{@~l7;_G$C@Axmf|b%$xlkTTSSx<{b` zpwHC=c3Ar!)TI#MH^LOKg7_Zw3wx7|!^HP5U2p-xh|v-I6#kyxon`?vh)QUmfdV|c z_H3P|S$(A8U3fwkA${)Q)p#3>p1Kz`yS7sUIh7JZ;YO^prpbK{;aGSEj4itAx-PZ5 zc2hkic;we0(25cJv}t^wWjGt2(XvbRp0%3Q%-D8fU+`)}liAONK7(*CyelRWZGk#> z^Ph|B4c*w`g?JN;pY__R+^v2N=VT}trTJbS6!?#;A`s$uE?qS2nazAH2vCqpQ{>LD zZCSafS}K~HDd^~d3I2#4K!o@Q3E>{eE*Z-%!k#@?mfabrM8mLY2H=I4vsp<`-k7TgoIhKp@=9-x`=Vm#C9 zqM9SOz)N&uB5Hn@_q#tPXR?#T?1Kf-eTdy?FPs-QsuoN#dwhf8(2X7JY>(Fpe_V;LHFWN<{ruKGua^-ZgofbSr*UTY%U~!v1~CbrfSddJsrvqEez=@g(mhH7vk~8_YtxRy zw;vySID&W3S%?hzS5h^U=jixE z%>4RJO@|JBKU%sqf^D2Yi|GwZ=Cw=Ua87tSkF=v{A{@8H^eR3pj&I*C0PN;$0Q1Z9tq zC-pJQ=_j@@I`frPt%3qaWUV6OC}jdh$d*o=dbf%MMsC-gR<}Nl$Rw#T7gB|j+`mI~ zzc}TkZ`31c7x>&H;Oen`wz?HKB4?GueAKp25gbIMrhQN|4r(7kOp0Vq)8}r5I4eftgkV_QaZII-} zd}Tn=B;ew)d8W<2UA<%$e#Jlhqh@vA9df>S>^N#xKR^&&PjVnI?*%z+%Bh+%3m#C6 zHXvzzcj^r0fv#Ar%c;)5vPDXwqdin0YG#$$%a6`m;7lm?LYzRkXg+DPINKJi~yb| zaBp@L9v@a^bX$^pli zAMsX|xJk=IeJAUaO}~bnU&l;S)}Q_#`av3_a@3`F=P~Z^RE+S!I$~l*ST$1qXKX z?VXa~=e_gPKXc#wsg1`ST}U#qiK6Asj5bpURk9e0BI+Q@otPGpU~t$rKYr z^5(vQTrREG&RL{QTv}5E&pGX!)XrEWP1t3=6ZG=Ayg9tm*{Ie0j5~i*cD3}qcFN-O zgm)%_;H!MqJUsKL-x_o@Uw6VLG%{@jcf7~%^iP|2o%ibFd-wwn1cQB|u8Dr}Z9D5% zXlFtN3%wQP1aZc%(-Ww%CRwulIwwDvz{0&iL_|eNu9$q6B_%xZ!4S3`?iPOaw0X~Y zWT#;^_4m~FdBTK2rkCKA56QK_g~#Uk;!dcM&*mvQ2kz%vfpd@JbJ1nv&X^D5uttb3 zFLw!8b&dmHv>>!ycx=ruXZZ*Re~5z3c%dmFAL|sNja@BhD%#r{B7+45`Ss3*SgE+i z_Nea#U~!NmN}xsoUbAtUYWJRoU|~^gX=wm(+4xQ6dly3-u{J2UCF`PhF{f0(^e$n$ zL_a~|Ng)+j30Q07Em9XDyTJA+CqVWjL(Y5g%#bfw_vE)ys38DAHMeO7rKE{U?|KLo zmK;T*1Y@)|RyDh+Snp_vic>8P+gnq`-qjEftaA!HamM(G7zwgmT)be$mpTMBjCt>+ zGh@Dx-HYA^qav4v5(!p7UYX$_2BD$=iP$O4Ev4^b(8-9!_yb+gfebb#(n&EDWC>(z zqAkG}$a}zP26K>s(7|NqGS?lv zhyjvuh=Bt~(SdkpZ-as(kbyvUSw)jU0N-Uy<%?s))mPW=uHZ=E1yJ2_PIvs zn-}wlWv{Mj=8+g*X_Hiu8-qGeKVZR9z)2!RpkGHvyaW)kG8=n$e)mIn?Uo+zGNlNZN<_q%lG)CV)z0XRkY)mV|VHFEXggm~4d^+z*TmzdJr%I>WY1}rU#FJVoD zjx6iGy#XOMSPNT-qCX^i$3pb58Yzy&Az_O|)l{Z7z1+?1G0->F6s8uv6(MU_UgVdO zhvAEF9EI%!anL1+!gz@sDc3D~*_w$}0C}w1GSc4VmN~h&82Hwry*v(h;nxjgud%+B z>Cmt1agIoMn}Gv$$=CJR!X>=n3L>E7Up8vwd#`i~#APJrA5GFvG#eDM*~CqXx>*DH zP>8q2m9lHg?okYITIiAKaa)FrUZA|g@faoy7_d@5WSZ2y0 z4k{XEP541iq5gF4s0u7+mjsoC^@t|i(7#geFOEp6(Ml;|mjTrblh>yL;uyzpn?{vW zirM8rVZ-VPR`fF(+a;rFDdp>IQc((Kb^Ux6vz5%t8c`KbnwsMB45=Q8?)Y+R#oD{ZNhS3tz=o{7TN>^jarvPOt z;ba72TtQZ-3xI4k)eX5wW)`ue9W1;lzrA-7c$Bl0tWQSquqbD@*b*2iJ71bLET z0ZosbGxe>MVdn&Vr>|VgFPC4{@RmDDZj|-&$t3h6M1~V2ZCWB^h`VBS5xU^HV7lPw z&o|{bYHXzTyG9Zc@Nj+IOiU;55{r}~i01liDKOVm?kH80^Aqnq*Rj>=f_v6a`HklO zSCL5sUYwWKmAS}YMg$}836MA&tRM@E+0|-hNp<{@0R$&b(i>I%Sdq8{3|yL4W%Js( z)tY7*k`&ukqYK(u)yihE{icyr1ae%7R*ValquP;<1RGq`mh!X7d2BjnVg2fnR&&zu zu9n>{kw5|tF0j?;d~z z4j}+O06`31450_U2Z5p_5*a0hMVmC4E}1HsDVZXfA(;k12cWVXx9kFRX?O8L ziHm48i($K9*JZV41!omzC1o{cMQ2rJWg5pB*SHf)Ovcgq7yDcJ@A(7$1N|fY75vBi znfx^&xDadz{vl>MCedpmJR&S2+(Vo?aFTJOakO!zai(#sakX)%aj|jAcP~c#GR!iZ zve#vJWmsjnW!Po-5X?C#cs}p6dw)3p0Dm+84W~~2x2|q`hx1Fh=fnlO@^dK#90nFa zgV$YI*qwOSULV_eXs836TH^PlnO@c-tZ>7eLf=pf>t;vi$k z3$F#Qg|JYfKy{6^fWQOKgU|@yi16OLYZ_QEqfpp7ZZs`cSU9fb-|Fw>f9H?n^q$h4 z(w*^|O$lBJVFG>vfd!rgp%VVcW%&Trk4fTG>tbfFl0(=cYn(I#ouAm1f6rz9Ms>%h zW!Gh{cy=#;Mz1hh(I}&fUu<*cdnI3qv;0oe_{$6^f1!)wLHZP5sk7pak)z>`|F}ek z9DkB)#jbwBAff?kizWru=2W6=fF0r*nkV3ba~CwZk`*mt6yS?^;;69GGLDv!#^2~- zcd!~8F0oTF{x0Lh*=XnExLAf1f3)l8U4c1|f^+>HiTYyr$A~2UN|$9|9nY@uO3QKZf*h8=M|9}~I3wbtF%$m&aE_R?>CS|%L)5@)>NvZe+~*jMjVH93 z*GCsVgJ;1wu6tOuXjMI8+So@DE@V;JCm4Q@hr~Fplit{5>^QX^*9R)S4as7DjAOvl zVye*5Z`8J_9ye|8Lky3^OJjWSAH&mQY|w?&F50nmndbCSh7aMXG5U_Fhg;#zFoCs4 z>ykliE2dF>`r#gP8%hOv$Pb@y@Ln%ec|gi` z@KltQ^t*74IXrJPLdrk#J9AZbyJG1;AD(#pen9H_y}dFZEb^quF$S<)P{IWK)>fhr zWvxD!h(1Y`-WLc_AA#6whE^X^1b$-sL*SFK?%vP>Ehhp zA8{}JFXZo{D8gSznxPgmZk7DsD1O}PFO)uR^(XySP&5Q*jHc>v*&qVBIM{8rEH-C{ z#|KA;O0?c~n8X`gO%AvoY|j-ZCFr~@RJ60p@K68E?^=6Ku*p-VH_3QzUqGk{y`bTg z@e}>?BfMZQ{VxRGO9!j)OVjW$$Rh2%=ekqjzwTM_yHik3?x&OAz$tJ4F>ju`8%vc64E$P&EXK6}m zx*59Nm)-I$rEpn`FY(n|H-34*Xf`?WUu{{^(W$R>BHZ5DHpk!G+}zpOnUAaeY}6>y z9@LER>tJW0B15Or@ay|!8_&7IT@BwHt&~th5UJ;!)=Vixr1JF_Ln^)Y{j`0JSDKZn zmhINOo^$vMP>mh+xtsD5=Q-X7FO~&70xevLlg}K^7Iy4It5?lCUv{XWT2MCv=(4r# z47cqqcYXayQiCXh5WPCBHq+BV>@{P8zwh-AxL%Cv9Dij0H(z!uuk#y>=;ty`f5P_p zj7QaHS<#Ae=-6a>?W8YRocLS{4uNs>VgWF;km&NRv2LEfeP-YZWnfgG3Kr7$zf4np{rk5Cs%3d$AM33o8@Xd!ROe7xE zHO^W^7te6378+zJQq?Io8&=lKFjBLLm@ zI#c8pb=o3E@c;J}txws;mQy>_#Vt`LW`CUjC0D>a1mTw_czE=n4=#IVz-n^-m%>|(+BiQ6M zd)X4IPj^*6o4h#pn~&pIwTk?=-w~2+3tE{I7ioQAz-)S-gs*B^F_VWNz!&$UnxvZK znq;h{OV+&vy*Mx6I_JseDY!{lXO^gY!M*Ls5vVKvO&Ha4H(sL9AQ3In`4lYh!cq#Cjh?WpIxuWIlS6(yz!!{C$ZLLwi2DL%L|Bepqn#q zE)r3Qqoa$VeB1LOuOY85Zc{rszD{#mQsykzI4v(D?ou``6E&EMIQAlhGJiKYZzE1| zaPFKtjiTe6Co?FG`g&LFI+IM1P#>8zhzMmjP!RBe3=niP85GEh^1c%g(L;VKh`ve% zo&)+k?w9Q+3c>~c!}fLrxivx~Bpx=WXR~d+s4?F0x3AeeT<}YW_x}FsL9ua! zKw-8~D(Sh+LKJ6EV2=M2{hBYq?Iz%8YrtZ8j)+?DY|KqD3GCGcEm5&*WO19PDI$?~ z`J5i3r*r>GGp&DAg5RT#f2?jq8C$S@-;YGv=o0Lk;0!C-AgCw!v`E93>&y^Y8yuJ(_mik;iygdmavijwaseeF3T5nt{B z;(%V!i=cN_ANT-)<#{|IJ%Sd$f26S>FQR!Fvm6H8JHViK!<~{6`S+Mm$R1VoTuPYp zcV9thP?;7S83)3RZg8}u$(+KafXtJOsDYN?kH{}rSgG6n=KEI;Z&j0DenDwcMB;Jv z`E`Rv!&x3XfKd`mbJpAS_!9?#HPO&ti|E9Y zR_qYpw3QG9XJW?A!Dn8d*!ME2bD{pWO&3_>(ES%Ume&tfFyH<{C zK~O619a;NQHSk7Q2yv~Mg+M{5+SH4(8SNXX-kw##b{5y`3TrRR+Q`f6#i%-YVj60n zxEx!e{PJ7z#Rjb6hTGu<2V2MEmmH2xJdUb7u5e42VWf4hItEzMA1n>$X~Zcc{ZI?| zPIdw_mqMZLVk-TC2B#~hTr~;Pl7!_Pxa1I?$wXxdr7ul2LlWVjP3*%BGC2kXT%)>Q z^(WtJK?*d^X00?KPlI;!3vG3P)|xpzO)g4w{2TZO##m)1qBAo`coqGWop^yi)c>!9+rdi+5VX-^#iV#tFk*fFlRY z_sCuc>RUozd(z7UXMe8l$yyz1x$OkfZ_$juUX$@@_KFt%OvKiid%<^VTqA)is0}F@YZVoq9Sug04`+urD<^MGNRAELy&r$A%-r`B+P@(= z$vh@usZ0b}O;s2roiyp+&y)PpTDZ$^sR+;UYkJhiFN>C(Gi(wv{MMrH!2}6Ys!pk& z+{Jyf$i?n<@Mc%2|B7}2f5%Hn&nf2n%$M_P6@SOTTr}Q({xq<m9{vWY>S>_CF|_rEIdMO6is(+P|_ z0(Val2c2G(1){{0qKWltNCA*2=$3PRB5hHKU28J3#ed3$a;Zl1?HeGeMZV=`Wju)3 zkwB|rm|fDJ;k2ZY9gtm8d&DDLh>nMjq6cG?-RoBBiN~0HQq2aW986X6qH(s=K*wxaYsffAtKA9u!c&cRPMWa!Z832~xvF6W(f?ON z^0I0=Wvr14Aug;FPp^Vw=L5J!{vma`qI)Z4oH7$Q ziL_4ZsO)q-yp(MWUy|4g3xwaa;%mcxT_3wyTeJK7(#cu$@Uji3g3N|{>vDHKkhmGk zh`KJR!@P9(1}MTn;E%WM8pp)PYMJoe$x_1YQ7GjEy`1=PUo1aeAgd|K{ z$lWe8<9n&9@pzFJ@N`S&gRp~#FXJs20x;(;w+H5Y2@u8{R^O>)n260QwQSBI?^tS% zbV?o2lTJe}dkzNu+9yqtB&Ldi&xaIut~8Mod14!fl1`aX`+Iavq3fM!6hL7E-E7Y@ zA~mTR5>E?&;wV2ZyI$o`ySy+F4;{Od$!@5E>9hMysuZs{hMyb8v@6^hf;-M$^jqQw zOd#B6yyOho*u^7Kp#8kTq7IIA&`ikSiim(B63IaC;#=UmH#n?!SQ?J$(vAkm%N~gw zENSh*$98D`ZInzG7$D~+ZCx<(bpLjLk8s>;o)f1yX0 zd(WR67{}l4z0Yr0{^w;>bfN=2PjMKS73Y6r&NH_+`@1=>-A^03mvXgs;<5JLP86B! zCEF)VGi0`Y09(I&LkHpQ+!`Zfj!E%Q&GMhmV6Ha-))wXG9I6F(oWzD#(95W!)`-F@-(o;bPT zyKyw++cD(Pk~SKvY}or29}bWTOxZnc{$r# zXC9CenLYB9?RYz&6ZfJgzS!yro98EH^MLa1ajvv*qtx=bk9GG(099g&;M*K#** z-6hjSpCofpo5!3x@S4hdOCK~D%n@IVHxOe`lccYOH6M}F&+vWDQNaD!&Q#e$5B=2`EtKjXv#;?tU*C zuT%Gt`v}gm0gZIlx*E~kwhkiHOO`GS-lO>1r*>LY%Cb&LUve4Vvreiw16Ld@TDXhK z-ui>7#ebSe3F-JdH!-QbB?@lf0A{fOva4nhw!aaZq?2px+g1#d6Ega$U(;~Qfw@kJ zkF2i701F4hbPAgaHmT}C&0oAO3f-|B{?jo-EVFYM9q{8Axxm!FcNbv#QN&#OW_uY@zNS;iuo4Mp8iL+!Z|jqX+LQEN z)amI`W8vg0vvZxz%|`wXt5JH@U(u3LP zGbOE@&s|fy?Oub97V?-i^1p7j_h6VeBbl7!8-7g-p>a`mw?!%*(lR@9Z_8-IS`#Rk zkFkk+(cB^n3c|H#P8S~HdzgkDMj5XmswmO#k;UI^&1*$Vs=p_ZduQ{4cym08zvi4A z0ZF+8o^a%98$m- z-M)y6$x%fYF!?5C--0!=-`np-!I8M{xFr!b@epPYr|}DhtA&F!t!~r?aaczr#flUB zJdg;tk9h=^px>8sReX%<;wP~`O|hDjps-0}-cepZb`ur)Mu>OD#(gZO4BVi#y4NU( zdH_`7K({2iQ)IhLWm~y`+v?L`%ri@cVe)>k;n3GT{O(gY(_o<%p)JVbRN4INYoA3j zrD;rDgY{j3@c9jwpxjI37m&oQuXgdCv-gXA?>Uf^D3qzLhqmQrWzcT=Qd{UhS3&rO zZJ>NGW{GMUo8;>E54JDHtv>NzLsP{bv5XT!8*r>f?y+BCKP0`q%Ag zwlGk1h=u?5-~z0K?eJ7owifyz&lJ7Il0T2KE8j8M#g1j#y8|pKt}syUkl-tZgK7NN z!wt;M-2%bwz58u1F9Xl`lI56w>2!bDAQ8|7!DD3AElCD~@uG||;#+|Fx!dWxBA-x# zvx&q4JkoEsWUi^Pm}`Q(V$^toQq5*bP*>o(){sfECi&vECR8Hzt|r92_?7}`OxSA{ zS+dNd_hE=PLf3@1O!i13oj;J|*+4!WZHt---I#Y2Bt@_vuo`n`2 z+Qz?9Zjc!Y+p|Q?x3Fs6p}$;jk#)|n?(&p-pxDYqZlmt#3_)A`X|)al&R;k0;S`#L|1C)T%rROdL~Lde-fMBErN> zZ@a@|-^1jZ%EyxocUG;00}&M#alm+DdSzlxy)t7nn>bCHm0rla5eGN?Qm{?lW6MG=-iU7tWH_4z^ZT!$9(@HmfQVxT3b*N;e9Wt+40S> zQ}1fvIGdt}lUlDqIbOFm)Se!~&Nr&_F$``I+=?MJgmahn|FQR0v2{f4yI&H=?3kIE znb|RHGc!A8cFfFwa-#wz&7X}8mpK_M0{xe-c%)nzN~^nxg)UQ6_<>QL ziD4pHgn{myMQu~EL$QOsgs`J!?BuVwsQatHbqhi;EuXPz}W8q8&I+X`wm;%scOBBIj_ ze*io?sad~1dRDG&Q8&$$4{B~nH7C)2U1F~qXn|f)hOi_Vgba(COP&>jQPUi;o z0MGC|XvB8K+mtDTS7P6A$*CdEE=;TRD9{r3#3&S*L$rFq#yVY0UemSJE|%^F`OOH} z!R_u3ZJz+;{#CpH-Vw)Z2!_6)?H3M7L3O{w4{-bJ9>SvjZprzTL%I4Py96)t1`A^% zi9Em6q8%K&`o%6}?Fm&r<8$uRTHei@b0)ytj`v%LbxY`mGY&tLVxC zYvR(axiuD!yV@?T)saUeoMWlii%UJjY2bg2e+I-hm8Z@UkpayG^8z-t05b4s2dH+Jz%IIlbijiB~Hq)0j#f$4&%K$gH;^!t$6??+sOC>~=|J zRMk^gdJm6Ky@Y+ZR=hi=ti1LGHp+#M+UzW-nI%{e{0Q;qko< zd*k8XGR58&BaB?J85eD1Ht6o4wc}OSR%wC4wGS=!-aPocd&x9T(oy+dn1^L*zhLkT8g-)>F!PySPQht9%v+ds`OYiOpe}+`BKRHZK5TtV z7AvPSP)3BUZ?_sDNB2T}`#nX$IIyL$CtHs!5iRn&V4IPWHJDnXsY6O5wO5EW+6@K#HnGbe6L&7$>@C>!0zt_ z<7S5uZnV4_u|Ww!TF>AwdP0hQ$`(&W9|)H;9*3`{5MvO_$!~#=Z9GfEsrp#_wDizC zD^3E3!QzLto{Y2A@s>sbHMTV-Zup1O9CN|#t5w3^-;@dp5GWvd|$sLN#kF0OkVYza87AI&MY{hCODtKO7h{nmA_(OalK%QaSQ3NMK zxXnQy2UVN&Jp6OAoN7C!E5@}xhNCJeW5v+52Y;R!g%$A+fa;Gp1KZ7>g7lhZTc${! z6=|mlf~8UKM$swF+o)L>k8*7adx*9^t>#S5jso|ff<}?pdQ7oNim6+;-P9kGbW*vdlW&VPIX{_lIehb!D;O*<`Z*FCCvsMf8$eI1 z3alh$PE=1u_l8BDxYI`24BGlmc#xy+VTcpQRD@Lz1g(8w zNqQQZL(=KK;3t@J0&l+q2D#(tR-bB+1moWYDv)>@!@R`yysp0}AR*&D`g*4{SgsAv z>=rQR2@G0wbrQK^wTdF+4|PHxpX31^tnOw0G?rtk8-cM?I)d4qLnsd|5&6`QDphpK z6v`Tun188=bKusAa2hp_(|ZmZTr^_5AddRrHZLW!I0$Ud-e)GIjaC!`^(Q*WaX-S3 zL#7o5Lg zZdtg!+-7x+PyD8Xmq)Iq6WYhI^y zQz6s~S8(@n=y08=SpCH) z@{Q6Fx~-{8=Ar?ApxGmOIVb3vx>H_N4PD@yie*dmYJK^DMMear!>5c8S16OnaiN-bN<+oYJ_sTUQPDD*49<^l+6+Q}g}6 zuqMF_4P$tRDvLNIp#d@Sw3cb`;%_Xw(ZWLOYR}Zt5nH{Xk(mSjz@h>REa|laMmWn$ z;vxV+wZ&**AK{2#G+VxayhOgCRa!FCp9{DHYVSZbb>oUM0&%6FprmDZxoKfudEDom z;*nCgRsMq7%>)7AAk||I4+x5;K0C^2Yg*fT#^dUdxp7zB{KOR8s()oEQ7cv-c5nI*e?sU+eNu1)iHJPNp<@I8Jwf_)N|u= zb`&yDrMe1N*0x0?@h{N_7~P{uRAaq=zC2NA$F4Ohw_l``>Vq8Tb>?$H5;+SAhX~8L zp{}yz7<%hr?NT-5G!El)j9z;WF?22rpt>-WPzwq^onYjSY)q#wjqSP5D)bgYs)s(# zP;23@zsV%P;zlRXN1@YHh%2KuO54w+Do)Ld2xKc5Na7&Ti$fY+ZQo&C3^cYOQ_0B9 zUhX@RtGY+7lK?)V`5l?tV8P_Zsu7!w1AP zV{^G)7sFK$uBzE{@)Qn5!8v2;b;O+2E^1T5eENoiUCqsHl z*-m4?{q8VG-<)(fCCtHB@X?|G=?Xg`-O}#Dc?YnpMb5fPN&v{52s|po5=t@sF);5q`d6)wBf(pCZdJY!+-#1U4!TyPTL2wFV31dj32?eYA*G z*{2w)bjp87!H14Jel!rND}z|IZQQZai)QAaRq0vqg_4h1p5Y8GUm(=4dqc2VN54;> zr(h76lD!rH8*TRCi0cS>RPa3%dr~WV>CZ>(CSJx^&{Ay^<%RWM;QMbJxtUrqm^F1g zBVW$?(xMmr8t(ExA%GOj0nPJh1S^@N7k=wM6CkCn`fDjt&9k1HAW{dN13hD?uRg$` zw+d^5A)lLDt-LA6cD1r*T3MT3Yf#>m>&5SrD|O#1kf}Jh#jrR9!A0F-uJie@Qz1 zmz&^XeaNia%e%YB2jD>J9<{-tf-a`|K?C=@f3YjAI@87_i^f@vwF2w^1IE2{THdrldo)TRDtRKEIe@6L^J*b-+*pw5{WsJqenI6&rm zCI|kw2Zvd@Cq+wCTDl3Li4_sxo%l?aYr~8Ei;`JXKy*h7ug>ly7iSg4ax#OjLnr^GH(CJ2gn!bNd?R(>%%yAkbK%dVqO|S%4jO$x1D9j%TbX zrnO$5=#e!p>PzkBr>!p_;1+r4vE+r)x2;5wX#R7#XJ2kLr#a7%aXRafdBOTMdEUZv z$&JiWUrIhd0?P~CRVJKYc7cw+JH&zAWUv!{K{Y|gGo{(RV`AQ^MmL8ilnwZR8Kt~# zM=0)&-2c3dw{!s~--FKba!K5-^XIZ=oDju_1&(<=7f$D0Pd`b}BYCvM7w$ns0C3<- zyVCny6l%+L$?NrnDNG{$7#>zZ$)R!>8A9C~%QS06k;@%cYKwv3)zWhM|_W9_EwKbKX?% zRG?hW$Y=$3;`r_2idsYDTJdZ83e3CU*1I3cPjsFJJoztn7NAcCvTq6>BsM~FV zBP(~9u|t~4FO0HI(eGCumA5@|@bkQOqWk7$s6=Yb5f1*2PlrAP&K#iOQ?B!~87s2X z+@33LN>nO;X0(iYm96dO@1^)SR&t%K3XZ3@L-&>kHu^fGk?WOk4M4uVg0#2z*pe>X4oWf)yY)!fP(S_2^`MzAD zOI+*0?PZE2Mhq}cc&N-(H4641S~nilpL|{IC491(PXoqRPY^ho&@Gp{_(2zm8())P zyE!Zj;s1i41M*QjNpK+HTx2wD7tiUU{&32m@=RLK1^DUe<{BoksVirQAJ;2dv+v&+ z{<(~u2MxNXUV4TYPaZgo|7{gs2PE+V25E{FxBFV5yS)|iDjl$ST!Q)<5=udf@3ct< zq%8y7RF8G1r!w^Nl>-E39|*1y>JxE!!Lo$E@&Wx2yFFwXdvsOLAn& zSR(67$#oHFt~nAb3|4Zki^Q3IWY6^HyX|gCL3UiJ?0U{{rnO94%9@W96(~`c#B;*S z7L_v(-TXnsLP8mCu-a`8gc-SUWdHJLhR#i!+_FWNEkh6?xuJxgQ zpJ|O`6SDvWm^uC+)gEAUba?uUAU?9-qrPWcafp)_bWU;iiS1f6-m#=Qv`kGV>G6Rk z;=Pga2_a1IMC5}(;|8ri#u*OUUZ+?NVx=?>m?^CQs_fr05IY=|Quvk)Slc8s>tyD} zxBRJ5;>MB5Trx#RtVPTA(6t_9PL(1Ebz>uYWH6AP`Gn%F0>!oXfo_oz z-(q5sx^snqV1LwDA1*_B*dM}q?jm_TTvAyx2c!lKy~DzT!quX2p~&qNc9L;*L+!*p z3FtKQNiiznW&BHIB0!%I#OjX4qg)URv16r5(g13MhgHz~-$8fo5Fs$4`yMI92X<8v zRT*C6`dsNX4Hn8Bs2zHQfp*-I={DOKi&}^YFRyjoA~VNWx8_oOsyu+o!pQvlFm%*Ad|OQ)Y6mD`^kMPj=ZniFWdby3vc_z{Z@(vQLkZ zm*GB05#-3B#c<)&$9_0@U3w8xm~O1f*9VMv`@_mk=@L7I;SLWq?fwmOR=~)v(naD= zooKNw1N3m^uw?{dUTdM%D$aKSU6Doi`ij-^9t@inaxW`jdh^M(M4S6;B+rF*;r1`D zmS;|;Nx?o}ayr%*clUF^I{HO^tia0GMQoXzUs!S|2`*T;j;nHc%;(!0?7@%rS+^Ic z4{&;14d;gN#}u}vbAqxlf7hAdFJ;g0W`6WX9XiAUby~%c4pV^qr(c{!8cld?_ML`K zNedg(+r4t}_E*#HDag_T%EBedq?GsS5`4u)B*%LS)E$Q zLLYguLBkeG1_05V3J~LrXyL8#4Z8p_Ac03%0=a{U-mI!4`>}0cHlI+$IZV$3Giu_E z8PlRgpO0)U;MIGbu{PRB;#mrq#FdaD3UF>ShvDigZtoy6mDvBa0U2CGWPDm1huyFb zuG6g?BNQ8j=T&457}KL(?PtpPDRk{pckUwBsjS0=B`rbIwys&oc|LV|o?I(>=HTN# zIaCjzc$^{Hbg}q>* zSncot4Z}&3^!xWRjRdvbu5+obk9~J*to@~ zS5^-E1?X2@8XZawkbxN(5czEbn9~Z+D%=u~mGKhI4E4(+%+0ubA@UPGZ~1(aM>dEG zd8&t_>lBMRP_b^Y?QycFU*^+)3-f(-`3nMdpEV@bjc4vGi+h%IWTV8&9Bt_t!l~0d z09BNS^h+1##N2~xDY;YI%9?wUw3aBc=y$Ob{X1eBH~%_N^Wpanxd@1}LG0v!-I#25uWoRp~!1JuW6p$b&=^P=;m0lg?H3|8>UQbh9b-g@(Oz5nU z_1nFB>na~0z8wmGOm1e&UJUPL{hx=AsbN@4^I>q+gU#AA11RSoM-{K};eoE`^DLhi zXUF5a!5RI#(M@>d@7JCNe!sT~vv`t17O_ zS-yiiU!T^EVmEBL_pl8jj;jycCrgT9a9BWBYL zNzRk9XE?BtUu*A_D!x5;29&RXxjp?N79Z=~1xyU{mgFMe!Yu$WAas7lYNKDz=J(bF z-*iTA@UfeD+I8=JK7qxV0?j&mYw+&+7XRCJ{7L;hK9fn8pV=AY)J2C!G+KWP;Z{T!nKBV8^i^tkJncS zudKdezjq*=#lJzRcskg$aN^zZFYQoQMFe$aWf|I^c<{{rXixhpac?FuMGRK<)D4NM zDjzZsG97+95qnql9`G_4;Xvo2|Cs5~P%t^8-=1XaY-QQ4I^;jj_V7Fe!JvKweK`dg zT@rQl3!(s@B-y?fOYR^Ae)MNC8X;Y)2!eS(G&)OOh)DNoUpxAyh@NQyyh9%_aq_^m zK!X|cqaQeogCetzF&kY*8@yQ(p0y;4C71RmZv$4Lff0hH1iGoAVV2<;(n#4 z_LWn->cEt@@xI4<9`&tunVz_bBoRHIz@3DqsPaAi^ZQVJ6ByNS3&grS8-AJ?V8ZcC zQ{R`#Wb7izEQ&p=I*_3-ukMRqtQo;?PcIit78&4w&!9?d*COJ;&1E6rEqxm$J4Q5a z-C(K_M~F57iFVEZBV9Q45O@;tJ-l(&M@i^5WNx{8cM%WTNF8gl-C}00-1@SjZFI!U zd&dY$%s@%6?CwL^x#FsE&A6;)#iy_KH|T0fxir$8nA}U{C8ImIcESfQK_Z}pst|dp z&b(mR?Ke-qK=9W_rmW07GlHG<(T&4rmFiN6wuS57pBOzTqW6Wfs(tmuJzHmSwpr?k z31tqAr~9O0gl<0It~GgyI^a`@l#Ox|pcCGyl?SMAs;)j@#uY<;r0_%xb zzmw~HqniXloh~`o5t_f0Hu5s=FZ498Md_0cK+V!qgZ4Tevk!tt*rrdK?QJC!CCL+H)tI4{RjF z=JJK~iUq@$+f4u!WR^NPt(RH2e;gt=HJkWm_|6C7kQa{O0rV;@zvQ{U8625*`B06V zRee88s}NwO;KuX)euy_ce|)~=ig10Z+w_acK)IU0@6$WLrtf1H;hlJ*X3#?i_^tD@ z9LHx*6n@$jvG^X6ESqC--h|>uSkfija#^KRG43Vf>-h#^>Ee9Wt6841U@bJayW-K# zH_a}+H+?5y%yEPZJ%X?H_2+eBuc3QbRRRqsvxYRJpS-7zdf2vt%VKpkST>x{Y35QK%ML)Decu-DYf3`t&>=CmBpP_HSPyDcz|3YcnJ2AWI(T zPe=T_r1CNHQeB`fy9M2IB~ugq4mYyec=?y;ROKV7_E2Dt!ct-371b{)?GRr>>d~~m z*jY#9T(tQ!0HfU(E~uWj)5nmigcsTKnXvRdYeUy7#dYUcXz+dRmnfnin4%ePv1+V z8o%W%Y7w{$f2?t_9i3s}4XYw{LmyYs%zI{3r}6_Q2%+B~)nvFidT4E|3KbyAPR%mt zO<~Yu7yu&7;bQ*2@F0NQJ{`FNH*(%H&plQ4xDnR+F=d*0zG|!|21N@7Go+qQx9I(y zbSmXseDo*%x8+RKc|934o>$;ivP4TytP>A`e=O~E8v#QecB{WxF6TdlSGxW&ZJ`;v z6ER&1pJ4?GwOm1bZKmM#nIq%u$hSB!&dhj|4Dh>*L@pM^h9yk^*<-7GRyBoo{lkWTg&-q;~N?3`n_A@&O7@>p;q z%_c|HN}{Jj_$MEyJHec&?>N)@sRB#Q{N}+&|3){l%6vRW%LW#hrUblUq#68e?tfob zyc70Ys(arsB&{wi-LUq*p`m`jI-FF$pr7YF^))M)J$cn^vF<$Vnm;j|2sK2FIaK-b zUOzuWMiW5Cofw=bN0ZX6-pz%GK6P1O)sGL>{5)dK_GIuy9?cL!gDkjxAff(1`LR*N zHLmc)SMK&lB}P$9sw48xi1}>7M=GS=?}g|cuf+JqU*m2jpK>dM?!?-;hh?E{)^D2u zdiO9rv5QkdP1?H;N^vF}Czu#~{dJW@KaFK?oQ+WRH0zIj)*{|wkNVYURZ53jM7SeE zx0lgB?Us9AC=f+1(0_DPYdfY=VirI(s;1G#XUTcF;6@{OxjYRrV}!oyEh%!tpUTK2 zcR-lyAJaT_hC>3+U0NaDHie_#YC=za633j5XK9tQ%GP1GrN^S*VMgn_{z3~Z6f1%@uU{futhszK zXoS!X)DEv#KKf3I*i$1IC@*1g0l<;w5$vLkVakU&l!vCO?nR`eM}&>u*R*v-xvlg& zEKMFUKZt^3KFK=w3&T;IGqh6E4tC11g6t8kUCY#7>^D9a<78DGeRn{grN-}TDzj%H zv2~-Hw-f+AZ`7PZr;*PK^KS`YvY>x=2eI$>mXaH{E1?{^xF3yQTjfC1%+V70WdcDv z?!7F=x3Vd92n}L@qqo9p~su7yP=i@?izi+nA9=fwTO#iC?y1enr zOvjMVA0D%cOgfW1bNYrI$=AO#BzMRV?GWE&Ta)*)=f^A9hDF$oT!cua-)231!=(Sn z8hZ1hx7Fqe4YoFSz_@W*5LLZLYeZ6cwr<%RY#q^}clE`a^dnUNh?dKVN}KV6d_%Bq zD`qvfeCHa`)TS>YBi{?=x?N}Qs?X&Q_9=pz33GV$W>fgE8|S|9T3kFrc{@f%E^ z!;!O#!~SnSvoCm)v8H(^6aiqr-gu&IbCWbt6C!2xjuzU97e`~kH{& zYYp%$R!k1h#kfIr!1EsXRD>gf$+$l`IeQicF%Pe#Wrmo2-M%aI$7w{yZ$X1r4lgPX zH09*8&m3gOjZj_^hP}4P`o;Z^p9koNZkBK@0(Zl~g&O?n2vTI4v0$gC9bh(xuXN@k zz+!QpP}u%{vHB{Tq%#>iCQZu`Q#Hp6cVYEk)nrsm85N_P0U>`J{(SoAXP5mT zZbnPDJ&VIpl^tY#1#Q~R9+fKjt&}GQ_5S{M)k`KZaYsp5+uEw5?!rd^){UP(1sib} zIL0Ij>a0Uk>k1OgkUV&bAW-Zhwh4F+S#wXQdB0W=+_a&iHb{02$J)Xh@*(R)+XRT~ zGcBn41{cqyJTjtkt@sp9w#)n;tt+0Ww(~hCAU}i={D)^Cv9j_EZaU8%C?s@0rYPHo zuNS;*8JqK3Qh?WI+Z0=or`IfJo_0r&QTBr>n>z1-KfdUH;Mhn6n}c52N(e@h%k`cx zfsNeR6OGoJ)DM(4`lcm-0lk%=B7NDBA$@naF0|>4rbDUL=h47V6n!C zrNV|gR#)!4$Hr*lNjpuJ`htc#yb4-7le*m#oA%1)(cjmmo)dt-uAJ%2lm`-5K=pb^RNN)K?0A_ra(@ zIdeKVzt1HSN8#T+Y_s6kMXD|Ecin9NQ#5;@ZU@VhGvQvxukyH-P-Jy8nigztF_ZxtyUJZN zvVSjN9ah{mSDp99QL5r(%<5Ks@uHkH-2~>82xAKQj#c1Ha$}3usgtwjTLn%#pSCOJ zO3#lKgSDWPyPwxS?yMG>;zi8lyRC_m?!P-#$tEI)E-@eV zpBu2Y?>ohhmIAxQp1v9NuMJNarxm6BttRuvul1~{loYC!Y35jCf3sbfgY0PE;l8=h zSmLU*TOF_Yu@+6#sE4ONVoUAHmKj|gC$a@+MuyA&HJqdj$5G9m3E%r_qRO;V&+FhM zwjr}H#rBY)R{FhxM4kyih<1odVOwy4LlIY^BF&P7&Z<$!%UYkPN-(k5WkH)oRrGA& zXsda)w_c}t9!D`68OT#d&bZG*!(!w zR8lsjNdzt1D5YtNAbZ$*nA~z!Teh{Hsh`>@=uT}&{JATRHO!+dP2XpHKsB|RSHV>Z>re0M3AljyT4&rBk$0nY=6B=-;Vig{s z@|npp_i0mZps#HW#(qSQsW>-S*1;UJD~#4IsOxG_`h1UppAjmQpoq<7rlKU* z(U;YhzdZ=g)ja$$(9dZmRc2HR#efh6%ksB8rD>^tpjFs9nvIG^5&^s;m%K*~YGju$ zOTtWxq@h{ppHR$9G|ym?sCNMBQ%B5E$k?P_kjE8;QyqayHLA$LjP8T6yD zlZn`O@tSFh^qBb+l>KodEmM0Qa`Hj6YQ^!W&e38dYxtJrzoZX9@w9@B-I=KIlG!OB zdQ=W6Da(jOSi0DhL{x||o7-31S&=%$0vyP?^#c*NEc_;2v`e`uaY{sRN&nEA3r{mBpzaH~Oyty<_z-bdjHN$yP-pFSV59GC9C7Cu3GKoTol0#akf_AsRoGzbK=gUn%c| z9;=Ce_~*B6JPXf;w1aYn0FRuK_1rM+a;ZkSB`L>jN^I$Z&Y!}6#C!tj5x($EAr1^0 zDA%LT)`uRFYzCMo+;qm7JZhO&9{*Ib3Rnc|IOb*GWCs+LYZ3JWu$<|Hl5!SPhq05< z*d3OyrRvpfXXwHmZdKta65wctDaQk!r>Jky&~^p$Eaa?|lVR$nX%7lUq$V|k(PJoN zuq<~l^5)~QDJy6=1OOl?+j!I__Th9={rMuwQ!Mf!%J}5c@>UFmf-3pggA!8;`2rXk zHSPqC(XmKV8P$`g;3=8g1(-}`Ys^`dTUFQtNMP?#JVuao+xKa1cvOnig4n=WcvyX88RheV@j@u(Aj4`F?qNpLIK$q+;KIcHYbmqynZNbK zG7F{cfl4x&#Tq!6K!T9LzNF%;!+YkG3f5mW9Kxe_kN{3=$u?gHMH^UjtN3@W=KHT3 zV6v=+($4Uvk1)td--z3O*u+;Tqjh;r{*&Isj;D$~q}ZuuewNA9*jL*nBC5mxOtI1pdr77;o#FlK)KR0&l8H}_ zJBNvvfqS(&)b!GL1BI8n+0{pNK}&1ALKGeVAn)HB`|}R&)A=x|MGjcS= z#}J24Jj>0EkWo$p(%o`KaRzBJ1*7hR_wA(OTW-sHk)|W+Egjuu&OfywimSj&pm6>h zVvnrMuByavzJhYdw8Z5O!O^?FUlE(V<~y9>TWvglMZq;5^C=tyUX@ejK5t*nGl^>N zT}uI(X7YoF`@43^5kdXr%h{_`exo1J{CP-K_wIGZ&sC@8aBD}WOVP9s<=It-JH2@Q zVw`_zvPT9$el~K*ti}Td7k4^+n-?AJ^*h2h>`Nz?W_!A#?#Zoe;ZgPTyu0t2cQ%Nu z@K`#NJC_z66q9_Hqf7iPGLv z_#_Af&6)X1;_!^r9&$d*|7(~58{-e+O4*(fkIPV=ZyL{2Z{oj!S678_W;>FpTehodhpv9Jhk7`y^bgO-&-oIL9)V-%|k0s}D*5mqwVI=p{luR9IaV z@?I3TFeR{lqS<-lJ74Df7+5iV0;bnBQ(D|lXYVh!9^G$IO4R(eluJjziEr#HjbhH{ z+4@5mDKnxCJijTCrns)p6t3W>M*gu%?HYi0T7NKo@BEW5^!d^0@d?*Asvc}1EQiII zR|y@lGk@8Oz7c#l0e7 zgFsCc?ef2w*K?=XH@IHI8*R)KFFkoS{Da3!VvkIL^f$FC9SXE2^V>+Y(C1G*n!6s_ zqq66b%wEOI7;*9iC`%{fBj%1wdw*zXV$R+C7wwuMP{VQGj1h*RRpst-#4oHU^MQ()|>BW9^miN%`QVhQUJeB)F); z9#^nwr1gOK0Pj*5Pi~4+uh+b6Ah>1d;VgcWYvK4`A~j$hb1#fWXK!k$dD#kU8;&4&3J=#s9NUs=&u)?*Mf!FTE6 zp;p47Ro3{LsLysXI}RDqH3;8u&NQ6x`-zO_Jdb)seGcAQSxbEzazyL;r0}g>7+maO(Pt+vyGmYKjUQ$a=5!SscO*FP2Qy2B{f!^P6{AZkcgp7Zbvq+^sj ze1bIKR%OUz%?-f5SxV63`ENyu0k{P{MqS0#Yf1M!8O06JVwg4v4kdtPu`ses2z20 zktH^X0UcwhY>2Bp1n{7NHv-vk@k>+Mcizf8d0)zHAJ!vy(8pJdm9cUPS_c$m4|VW? z2Tqz;t==HCi3b)unbPLN9Dya7d(10@=4v;5(ktPbR2OB<`L`?wf})JyQ`C1T>)r(r zEuE!CIws?!5wrY34y<9Ew(l`C9IqJkZyG@Q&5WBF8M+zN1-#f&BtVlUCiXmZ&NFP* zGrx4GrK!hy?eon^1zj`DXOcnY8{&gx#}fsP-BgU_^Xg3LW$y#pH6A_j;p}YjD@WyY z^5xb%4)76i%fKG7g=mWIy6(ycsfxU83~w!B=04mKUf(??c@C2bD!sA&22Z+4BY#L^ zxH(BfUt2Y)bP1Gi`#}HxWzyVs8s}F3NnlPVDgPn6dMa35Fl;jy%~&(r3m~oY#9yi# zE`?_mZLCA3k99ps7($ov!Wd}~peb|wJ`TE40V8&Nu7b7jx9>wPmAfq-FDSB(V^vGl z4|S{2O2_Oyeh1&*Ng5dhW1fU!IbQ{7oAU8Sk_9L_dSz=*8ZNsN6hZlxXGIONR)Axn z{xpSmIBQn*t+L23r@r#n>+bznC=oyded0PVO7l{|z8~zRvbG^pbWG=fMN z(1m4bXFF>r3Dhgx#2Y#L$D=A-?b>KfoU0uTCn~xHkEF_1rY0*NA%`bYYU&*+wu+td zzs~_G%%=tSo$`M+o~X1p05h|BQ)(Y@RB>r6-;>%BOm%Ik!MW}i=XV^}erEoq%$a>w zjLEInz0s44+RDq zAG#9$Z7?5nD=Khu&eTmYG0xj8x%%bvc!I9<%rZ zJ3$u>n{-9Bhc0~>KzHh*XtB9_Cy$b`{0Y@92NVr)(&Y~DnOJ9vQJ2NY7t7a{?KSK0 z?SBaRna03;-@Bb-;6>qc-e;WKU_{|l-%mkJ>!~mH4pXE;G?t(1`@AbLc_w%sM!nGa zNcM}}y#=N9roX_I$s&n8+hxSlLLGnUgS43)n?|9olO^46n(|i5C~#tr)Jij<2hJUP zdYl9$cELwI<}F6A*V=sIYzcZ z+##%mBg$rG9bokPQlRx{A>Arcm*@ix`n$;>-|fd=DT*-EWF#-RnG7AEWU z=qP5^k;;>NmL<&Yq|p8f!A4`dHP8p0uZ`^T+ryv(3JpO{1gWd~pxqOmY;;ghQkm17 z>!^m}{U)@-9ycLTZ;b{*4K3^F#MPW@TuTBJv)qBo4Kw>-D&+oB32c;ZvMOd2^XpTD zjmFlERg9GB+Y(D4qNa>no&}7WhJ`jTGkiJ}_<6cdgRoa|`&Qm8b^CPqFe<>!k; zC_lcKniB|nu$Vb|I@a-Y7ma|D2g4N5&`3J?V*~VQ!O}-3DDX|&fo!2EM>@XFK`=)? z-;RlP@cj3S7BQz49?_{cwz*pYKhAnR%NymSaJP9VWsZs`hS=4<$F4;}b_?ed~ZFi|jN&g=v0 z6eo>Da1)V6XNNWbAhB+GnX6`JY{&FunejmhgtH>OSO3-}9?8YNdR06=wqI!~p4-T$ z4(fjtX2_lbdySY#o00@9a8W}wwTYTOgaB&&68(B=<@qJeujcNQbg;9c0xe2Sqv!^Ee_<dWA_h}?0hfOy~0lo5TWgEekSIuLk+>@ZhZXqY;DZ=?jQbviT)<~_FP`Z%5J4! zTocy?X(vr?BnyhZA>LZE%?!ARyXbahxmf)Y9lOwg^%PY%khO|LQ7Hrw#~;7G1n{KY zRgL`R<9zA?;WLmsKwho9;02?8B|^|{)fWnc(LduF8TqJ{;N;j%9}J;k{I-ZRgB*Nb zK)_Dm6pZ75^DJQzC_f6uQI6S)yKA=f^yxfJYz_0Z%t zEP(EM5#ho|RJcmLmGm+>u+IfJg_#G-ka_)S0wGTA%I|NGB(ISedWU_D!?FIp;fXV` z8^(c0B;I7fvI&a$>k&5ZkPyrU+y0dIfbjZJ$*si9+_ixu%)%}s9i3#&1`&0R=dCNo9xUc^azK`&O& zE>hsnn{ok{`FF39K6VNHRV7!F(z;M(%7wDbC%{@I!9l|zbuEa# z_`~Kv*9k;XgM@|LPQ5A7P2wz5@nT#=uNua)cL|zED0=^XANDJ=sN~@9R8mCV4ElhB zmMZ#sA=JH;lCno^5wAogqH9CP6hJo0>rcfSBfr@wcf=l4+ZybR!M?pWIeQWn#f#w4 zXF_+7w=_>w5c*4qKTH~MTuz{wETn+sx;g2Zpnv_$dXr9i54fX5xMRAExAtjeaD!g0 ztexeAV!$#Y2&>wq$1(o#DG9S9%!~1Y-6Wrd+!hGc#M<^<3C{gEA@+`|LOK26?SES^ zad0fgynx{zcp8G3RHMP$&*J-o2hBM!$g=CMx#_o%n#Bp9q(A{HkEfXEh9S!7v=`n= zt=&HP9hJRoCtPwjAdZJJbI?_To0mKjGpO1?th~&w<(Be{syLU2bk$WU`H?$>W<{!~ zFY*Y(nD#S~^M-zNUTc0Rb?Dh)b~S`>4VABTNm8UnzX{2>!tTY${DQ|}7*OD|7YvxZi3&8J(}@#KCysHdf9}(t#8G45e_vmqoG$vy6)T8Q;zxc&hyLjmSlP-50B7 zqr`)Lw9Yvr(x+y?Ylxq*kDy+nidOy*R#yeb+NeVj$v)LPMVG%FC&|uVTF}Y|_tcIT z+#I^cqyM%RqF2u4CwOr ziB_+Xg4+W^wr2HAA6X;FIH8hiksM=pU(>GFSxbB*MdsK{60}0~)Tlr_RZz?^?y^Gi zN=LovADLvK&SEoSd^lD0v3DNTi@hXSsJX3gmUl@mU4=2z>-UJ4p%RnhidM3I0LIT*qUGx5zA0Lhi+%(3r~A%Jv`Q=t%%S2^4QzJ`r=7&KN)Y~Gz$JUhShe8| zDC3Se7v+9bk{CLrt`>?Pi1SOabk)U0apS3;*m3|3!t#a(T04hH9VBv(q-$EGtC%(| zE`xb?&q2}uSc3=0aVJM=7^4V`lErDNVzS+Tv=lkeE`%#vDk+5FaQ3XMNHJm%HtEit ztKG+4=30Fr=vn1{YxXlb(`as&LvfHe2{8++#pYq0!MHZCMK&+dPbesR=+`FR*lIz4 zbDrKJ61u)#t$caQ`M~>cvrqUJqakrWL45xKdb|05m)VwS%b}P$BV!t63CzbL;V~{Ze4nEqh+A*@_ws zp{S~SOEyKKCNArc01}%RrdqD5>zsv$g~EIVS$`5angp1D&TwGQvM-Bq7NAHr57Qk! z_t#V!q^#TbrT>e#I}XNzaFR25Ll-SzHdC$RIN-Sb8v`C7b(1c-0$J&9oQhw;Vd*hT z@^Q`HO-TSabxgWEkft&#H7Z6!IErlz{WSZ02pA`?Li3C zovEn^u(J;GC ze*yj9UXuSHyWRZ%XSe?!EcE{%yWO1e|6A<#|Fz!#K1uxlA-lcJBu8856Y;72x&2R4 z&&~heV7Hs*Ac%`oi~b-V{ol~-UxKRpp56WnNs^=f7=`w;0nYz?6aW1Q|Gz-D`+tFE zr|j~t$#b3Wg#54dntUhI{H)dNbNipn=QB6>9|X78b!CxFplv56WM`iyr>ewj_kH40 z`?mjg?mWyMGb{0z(RS*AKd(u6h;`UDJNi!^0|p>!(XZ>P@SNqXOS&C#;oDyof6z%z zevKMt`jz(nuz~dLzjW4e!bkS+vn%<})#yLg*?$ZPyR221&de%jX8h%=IFzcC*|eVQ z5(PQtc5G~VZ{NSkfdT4&zlLo43dfSik|!x0qG6)cmhlf`8BPkWBc@q_?`&qcOZ8z zJ~C>H{qA>>?Vj+>e~_@S4nIZkk=^W6U4E~^hC7aqT|CaKE;=jt&9lu*_%37P@pvAb z^&Fe+uI(7r)wtUod*3b%2Sa@+CR^k6__Wws*I_)8YOHk_IIHSNdZEBw{74 zp9v64qr(1-f0af33t_8~>?OyP{=p(Tg>l;{^7H)r_Urx(Y456!`bG^Mu84N$u)y>0 zNq~L+iTC-Cw{iB)&wf-wDs=NiciQcARnp4*O4L;E_+l}$tbAIjU^ZE`5u-A8bJ7|%?UNb`|=BtNB6U8$rim9$TtyU*Gbw7_zUFe;@}+K#iq3JHOWbA4S#o0G8? z&D5Y_-3-{>pf-uwfMSyi=T>!oOsZd1!>X}v}UNet!}P0N(L_MIVSIG|0CzHE*_~MFCs4`-ycy%b=Bo2%Y&EI zaV}g6T1r}qQWLS3w-&XY%3hGUrh%h|qlvY7t^r;fSfNH3W5e!>qR1|QS$ud145Z0HL^mSvz#D(Uv7+G-y`Ww>Y~OQMGis!)v3*Wy6a=j>}Axl z2<~u{*H}&Y*sQXB7_NCaZ{n^t&*Q$?FN655O}AN(`5n5R>zSJr>tg}-k7<_QWU9s1s>y6D{gWpwWgOBB%}!e_!s26S zBWt5`lQ_v5%NR@S<_&oN=c{8uGr<`Xg?QBJG!aqf1e_!X^5%0o#GgRYhcLUehhP!+ zuoZX7SDkR5fFS%)gp%06vu8-gYp8$hjC(pgL76u(gn8!D@rn3b1zOJ!(J!i#2xg z2CQKfg10r(jd=>p>HNx!f3ttNqWk)9jNN82vPz?Gr7Qpcsp0<@#_oS;IOECxlx*xI zBmI=@OhbIbG@TS(@B6SPN=anMM(kkvB5pFUZ*wQg3{(odu!!Xr|4oRLIj|QhzBfno|DNvtzl)Ur3sCd_u)%|r z!kPc5SW1f~N@s?q#^wemM`wqp$C-$!$>~XHDH%yY4-sOqQYre-E*=4xjcX=|vb z>L;eEW-Di^tLbTHrU#;+;$%TV(8$Q};KR6|zL) z;`76^(+z^6C6XlPlr8?5b6Q%h2>w@&xm%4Q&3~`vh77cPy1f7Z!(RY^FaRH5ZsKOb zWa?>VWbI&X;bp|a$j->YAZYc^MLpq*u_b zMu}v^oJC9F1F!damJUX8yan;3mw1-ayvGZlsVG0fPXbL>j&&=UdNpw;^tz?}^60k$ z$KvmJRE;#5`#UsgpY}mtxjUH(8CIH|S-+aec{+Wo$;qr-99;ZR8OcK=ZZv@kw1aSR-Sx`$Rn@p)vOi>7Jj`VjzUut=E#1?dU?F zqw`0T4vx2EtvUAduPHFM!U>(bSj?4NHPo+CUUvF#a-;U8z(X5BB?rlk7dz%3{`89% zzM(uRtHtbOJ+Xy(^Bn?Gw;@mmefwsW1-qhwYT9JILDoiExz4ea-o9}s>il{CfRZT> zK{M@Us$ia#-?ry*0$9cOt|_4uZ8Aa0dT^|Um`4ONS-V+ospjiQTZ+lKEVsOwKE|7! z=??oT{WmF93PCxutnHmiRe|w&BAF#)?426*rI)y4C7Yr^t)p(~nLf!SvDM6-$~TpX zh7m66B>~pA#*wSB$-&gPFKtsrv5dc|2QZi@FJ-iz>=H(%&x2+;?8sxp_>62;C8`K* zi|@MxP8hc2^9{1jY4m~c1w4r_gWfggf5^_dgN!oh+-pb-gE>NMDL%xabG1XB zsaihg*(!}JiT1Ctd(;Ib7`XO9YWaACpMYi-lK~md??22lR23N2Jr#9VYbI?3w&eD3 zQt@h2R1v<4-zI|~IjOHzpDS>?WOGr4=Jlx2^$!^>QO$2>MR3o)J_MP&~<-Qn4Tq5O8d!gxG$+< z^!z47O>E{4;QVPNyJ8numR;CdIe6}7(tm8++k6;7xv<)%vr`A=Ahka3RG*$7Ge{+bWRNA!p%*EMv1 z=8HDJXp`iMq!|YylUYn+#&m+i@d~0)8-}IHab${}5uJ5^dam$VyR}ZyT1nu6 zWekJaRmrsw%aI+_cQGC}3Ihog! zu$^{Mz{2C_`9C9-K|?H)dHpiYP`K>EK#U3#ynwjnI!(G!Ee#yJHFoAxgOv*g%dAZX zS++zW=jXK*m&$`v2w((>GxB*q?YXtWXY0o`$wj#o|=XwigJ&@jrlQ8v7C39OqhPHN70F{Qp%fDhITI8l4^pwLWy-S znlKYGZo^07wilv78>2h@H>MaY(?g=BO;fDg2CnA}z4!s$4IS-5C4|2dVzaS-HD5kD zu6*(TOyn7g+|NqC)bFp?TQT5Jd3PC z=ZhF3#2JAz@@<)F<-n>N6w5qPMmj7&Q%x>)5R#gP z`4`FQt$uStNF5dw7=LFy1kFhazpvlL)LIC2fCJkyUYNy&nRU$gxDLuvc3!(CYx;5g zXrjBjF|30nGXHFhEXf$u2^9Hq&pZ0JjyQ$7knt^CH^qu{#ECCn;(wLj8A+^+J&csAzc$qx3Li0G9uSH`Wccw4yvH~Lau!w*RAzxxs%o#msV2mhx2EY%pOVQqXs8j#aTJbMpPfY;4BQG`Nz7dXGHe%(-dO|w81 zGBWcc4Vwz2JD=x}w=(i!0XECkxkghUOlV*Cos(}Q@I8-e$Jj|=`Tmkk{j6vjF*@Fh zcMD<6Cr3LBZ!%|MTy)t&nofT=!As;tZ_CJ8k8RtcF4sB(Op2Q0JV;EkTEXT}!dk`3y>Q7(_PPaf&5jP!@StK5gOU`1xS)x#DvO@k6L)wrWPSuOONn=QNvR_q80Tu{ZnXp!Z zWpf~RO&GcR2c4_MkR+L6?N06B#>6@1T*^Jq;DFng#vyNsuaJ~0^jPCZRyTT3jaGJM z|5Of$gYtNpv=hhe*~*O$+!ZQ0Syp7({%ZWa>QR}J@U#9D@lspzwk*Ha8rU|SAyk16 z-gC7yqlF(1%=+|t7`<*HPIugfmd*aXQ?_}}+-pqbOyZhM9oyLT{_>%v^qT{1sOD#} z=J3~hMHM@8coDyZ%K`T=TrM_7LCM<5X_nxYYPI&~9`Sg;;I(h1N?2E)03Ph^hrGPJ z7|Gruw4v#W7vE3Z1w$4c1ZB~c<2P369jIH7DEEtQj1M_A& zKRPmU-ck4#D1Fc=RmEi82W~ax+cNNL+uSetND)@|i~=Z`{zZ_8QJ4B`yPLERTbolz zi3FwdSvh63>%TuaBS#4~Deo4QJ01TnsZmeYF_(=@^RmZG&kUpSBeIzJl_QH`M+K;7 zX47C&bs|(5&=`k4G+Pw_L%qkTJQB;$__?6+iqyKt?9e+_qh^su%kxD2x;UnsbXmsA zud&f7dw1&zV7dyPc+DRaO4ha(Ut%;F7Qy{ny1Lw|*WsYI!K@(^K30ttNoR^y+T|Rw zSD`2N5@3EK>49!}#!s-;R{=h7oi<|>`O|=Q#l>lnq1@c4HrgI5?;Kmpc9z{&>rQWL z$~Y3FBFE793ahk})p*{_X9dGJFX}G}mEFIEYbI zfCpZkUsGs!a$dDQ(-!3V0nc$4)9;wx#2AB3tX{~r27N2riipX}SVz?hB;K2!0E#c> zhnN$qMB~@OmFc+)3WlLOD~=|AiMoDV|dD8`LFO3C{I&@l0t1i0ZXTao2FC? zA@Es2DXb=tA(@>We8eF@VlRg!)u%XXvH2As;qNcl@ZLflg!6=A;vTTfDZfz56`pi0 zpsfIMVMI)fAPIaZgPYT&m#$;Pq(c?aYI>6zXFQw9A3*wZae0P+6mBRG-q);?VHENZ z*S_S=-f{&Ugdcx-TG8eGnjDI&J8U8HfPcrL2Nvd^h!4uMm)J7JJdzW9^a-!wwim=w zc=`>oP}3%xKXr|+#se^QFh3FICU%39Gh~0od0Q(?+L?0g-kRB!1mjtXu-%cyTcmz( zJX7S&G4ZX2!FQ01l|b&1Tv7C=o0S0+MKMY$3#gU2(%{0@V9K*f#F>7@XlUv{4a}KaH0Il94I+IYKIH`jGAS)PgYBl%2J`< zZ0I9Iz_O*nOlSZ%90%9}IC|gI(VT%SLT-8@r0J{_UdtDLd+CndtFmYc0*crV)YR-h zCD5t_q}u066_Pki-2u0HdDz&rM_O%9s=eUd3_OH-wkj28c4TKe^e>Ya?$K8=Z61+W zfx&QBLEcWUBSRE-_`uPeju;|)ge{KTp*SL)02ij`BWa16&3akTR?!pbN7?-Tn6+C3 zTypBb>ZPKk`Pbj>2=eo_6qXlqcZ0I+89mqPd7fcIURE$3!-7=-d4uz~qvzeHempqc zN|Ue8jofF3=d*tB?Ia9}CpW2VQZkR=Qp6^|>TPa#)~{)Z*V9Wnnze@4am9E^Sa!gd z&vjSvFogJvb8kF(NuHcWL8S1vY;ba-dF1Hxb?m2F`;djDem_{)!v+@R@&!#?c_4<1 z5?}m=8$Mk~fiY-Z&wRGvxRkHexKkr||A=61o?CkfpoM>0jHz?zJRUU*^NP8=sKF1jThkFUXg&g+fAo}I&x4H9 zC*yC;!H+*CO!v<8I=!CV&Jv=lF@Zg^XaJD;R?h7fmr5rV!QP$=M15YrpAY9yWYa9D zSkG3CF$6S~TOi^DeJnMnH$eh35g>_r}J)21ot>ny06%}g<>e3^A z`nCP$69ntC8GD9u`H`TWlLi>^M%gu4kB9^a9*E=25TjF-hHV0N{NArP_hYpgBFp`7 zc=-q{4+o`xZBPW|$(0tf>&W$uCJ`G)EE7kahK+vDJnhTehH;N86bTq>mnBa1EWIkP z5~bRHH|1N%D)7QG0oV?7T&bD{Hzg)6pdzGV$`pyIa@H`vKWJF@A}m*5wQ;9lDu3T; zVdGed=>WE6ejyh1V&>Am?k# zmceUQJk@e_5Oo*()E(KWwPLqLz>gk*xWwW=MRW9La(IXh1%%`r#lQe_2dHhXgxYMJ zVoUl%Q;&4=U*a}L9dLozF;uEfe|cN8rKm(Cvdag?TX*MGjoz&$X(xzN&v{OtTOL5~ zg|T={g;J(^M{8p~i3e1+lb*LBJ+pM-n2dYyQi>fqJTguFq^yr#;AT!$C%RA0Epa2U zXYrYg^Go5jn1u`vs8WU6`DtbeS0AEXnx&5>SH*Zc zaq>)J-wh=abfzbq^X60IZ6_B>UOk`f zPB?N`iOi@%6KpaMS7H*PA*Cv2_w*UHRx`=pt7wl(E!ciIx=E8cOBG4vZ$AL!tFhF; zcHawCWDBd|qX~;1s4<&}xF6I|pyJ`6)}s$e3Mel4TYe3P<9*)am6 z2^4GCbyas@QW1Fn8^<==@^pEGWoySA-PTsUh(6=~UImF=^2;7*oqsUOMlWEHt6OrYa>gOQ>2JKYjhzv7lpgxW(be$tK_rosjlx()&6_Lm?&#jbE) z?g=Mx-+gy?#Lt5!#G)WMjwli7@YpP^KQJu~QZh~Hu+frwDi3SL-{_FWm;#cl{dNwf zpzt)i*k}aK^r?5Zt!uCz@9()$N8jR{uFU+px4{4b?As`P4}OWlkP~irHm4N+DKGrC z#X5j6UDxE|My|gRr?ERxLjR-7rJp8i5Y@jbuO>cdnF-9l9a(S?UgYc-<*<$;aILs; z{-4?v<^(m@Rir$=8pg1%By;0>Ngfoi6rM-aedJ@^SMG`MzBlk&ynzGY#)>@ zcuF$Sq=6N|y4`f+hq@m6`w#ZnyCcHr9reA0Qrtrap~%t}2aRW5jE<`D>)Xt%A`mIC zmHe7zPT-btt=KJ?={LK@MQ8_d{g}ldtA9^dk?eFB_3W|o(?scpit1> zRu3(l^2wH9-M+~zXACvkGi326&}$=Nw4iS$0;m~)1relg<=L83PzB5r7{C!?aIUa8 z+rd2?C(_9Ns8um{=619;p0h+&rw#B!LdSN~;4myf7#%d^{-?EFCsvhD-z_&d+|N=} z<*{I6UU>;a_%{ZKfpf0tZ+`BCzc4q$)_y*dJhO)VQvZJ7nBH{9DQ?tW$S(rY*v@f@ zCmaw-!|NvCQHEgf2=hD8Nr>nK33`wQbnrhK$p0>bd8V!CpG601V*Ag#yeT>_I6*>Y zQR_O(*c9Jf`I(#Cy3s`PnnO(6iQ<%@pEkbx&iT%D3_YF)557Jx9(R2sqnOJlL^b+? z!0uJo$U5D{F(1%Df98LMf4iyeKYabTUX1p;b62Rpw4kx>3k69P5NikkaLh1=gYXTe zpdVmPoURG%%0cPSn+(C`WJ2q9C@ZyQ#ao}7i4^r5x#y;o58`WW&iXlw5#CCTizu*< z3CM~#$zxWNtd%;Y2Q4}D{3@Z|ZSX0sz3l1(zf)@-P+w-C?t(yno(W^rrLFQ->POcl zKd2qRShpZ9AZRI=b=h#H$H$6w$yAFtZ$u1TSg3O7eJROurvnH!u$89lQ^KLBZs32Y zDYPdKhA_KEq{P9mc4HtQGs!C1L;daW|3;g@w=xY!fo8~hH%Kd{!lc|Ro5RD8vh_@j zVc^uEmoaajlIJp7s5&}_{Zz8xvtqDaHTwPi=MdAw38zz=MW@(yQ#U;US}-*hIm!!z ze8aliMP~gwwdpV1Z2kwd$Deui;=6W;+fHz81|my$ex5IVi`091NTnj`g*mlW4%<5@ z#FK+0zi4F)>t#EJNMD@zla%@+SDopc>wTZny;{<*qnHI$d6{x2>ACO-vd zls;I~s#w15Uz){wAJ5J>KR#&@KzE_baJOw>WeMJi;a5=P`1GxjU}b8hu70%sfc4-; z$NSE|xO4Z1A`YeDY(~`ZN+jJ%J?~{1~})nY@i{FFqRVCJjoOYpiPU zh)mTyyG9klfo2XKGAs>7u305nzHW07_@hQZ)N*ttCUZt0dXl;CPn20b$lOuWiZnSv zCd*jsi%}21)Q66fZzJaJzg2_zL`%FlqJu7(HwJ(0`^u|%L5I7Ok+zZA^a#_zw-#Im7IEC$DUPJP9Yk$nxCOKs&qjbtof*HG$^4ZCQjs5?nB_S`<`e7RTLO$X zj?u(3Le&|4Mvx?QqpGgdMmBW8UGTtiH^wJNZ01}$#NDaa-)mFIsE1!QE9m;pQr9OH zw1MeEyIT_rWB6%4k6fw!kmeu1fFPzro52b#QaG3dN!GV10_axZ3>(Re@>b{1Ht(F~ z-j8y``TA-wADl%$s`k>wSq6wk-;jduUR3L|dw?QFxYr7`$gr(p&S$$Ty$|rq@~KT$XP5dgH5BXTEpv?a}(G z{d#iK!8V}0@Sh>P(wLfqZ;$x^So@*@aGb~jRW0WzY9Q%yQUk~*QFW^NHtv8WXK_1@ z*ODy9*bAX2y2K$Y7=4PzzAGKLPnDU{f;WwIrX}pxnGljr(BaJi-EuTNm^=;*4R}_Q{Zl)WTJH`j@zibJ7=I ziX89@Ve)XWpSV+rYsmu zKyf9W{NNFAIpk+EM0k6Vjl55SJ!M4O=N#Hx9-jb-O2{0Mtqm+S&0F{Dr8E zq=G{x!Gtr9yjE^Q7`8~X7!6QlZ-E9hv|19DR(yO2GhYRQFp`zLvJy{GS%~Tc-108x z!Z;H<_48q4X0nvX!03~uAk>CrGUbzKrE(4Im!5-k;V#bqh9UWD$@;iEuYN`yUPRN9 z5=;;(5M{82_o2IJ^xa8jusv#3KC76r@MQ_2qkm_{PCJ7D3`(V4YB6vALtmB`%$VwJ z5PQI)k%n5d7ir&@vQ}CF=qDHj-3AAn5S1RsY{JtN(_tf-7`f5BLf#7OC}Ek{crz#T zm#C*!1hEdGS#&9F1+z?Do2;I>EPt_Kwv^r?>!3U?r15+)RFB&v~) z*AAxeW%gj#DSoLQAn7z){n@&7u%6RuSPRu%xvKbc(aiEqxp#YR{#k zcJgLr0*WOT(EL2Ixt3%nz0@(s1AdouJgQED$9a#3Fzc;xs#3EwlwV)DP6fKW_vHR$;H-8mk+xb&CVq)V1y%a|Y~#8O4L|+^6C1Eejxg2yla$BJ2F} zv+>A3bKo@Gcz_=LdxrzSm->cdWVAZh{yh=5X9(!WO;({$<~*NG761WdMn zJzLg!5pW=g8*p9VMcSmFmxW89@jKRfbt3O9I`{8+$75=~{wh(b<)t^1n#}xebf(Wf`N>w)daWQiBU3`n zt@(oBwhhEpV~v}b{+s|xwB1@z)7r%z)6i2ZqjqK+oz8Q=AKsJSq3j!|gkd@~PkBFp>7lT9At_(xPWcq@o04@STz70H6c39r9wG^OFYo@GT=Mb+~GQB)#dHUwGadQ3Tbu ztnRCXH!pmH2>vZv#Z>WDyMAbD)8#TFJz)Z=vF_YT=>A3?LE=2B)#BsI3lh{jpsn`& zQL?;>DgBv?FYCf1>c@G*S=oTi;!P6lAhmIA=b;TPaF7_WF<@B!AP2m^_Bh{-A}8xd z)k5SWXH!5E8%Jyk8G1;y#Xo_aX-CyuXXrY5K@Ed9GnqgfZe%+6lDxFkJHFu#lqP{@ z5-~$d2N=zL^7Zvp93WSfg;hJNj>If?{>*tdogZ>8n!Gc~*|cGZO%8RY923ijX9Q6o zR>HX6U$~`#9&%NS~@f@3zA9Hyk3|(PA0uGC1m6*x7)*b}Q_sFUM4~FoFbY z*S%Ez9Rx{Hb{Wb-uZ;k)7rYfR;yH`@UX~;*W zDw40U*injGDLPU~c5^PYImp(Utl@wnI71FiGh@d`<0<&@E8=%Y_cnb(S)ZSK00x{m(~$vCsKqn@1)u#zdON zxyVB@eAhBrrc_$4e>j*TVUUQQ` z{hQ0L6p+^N3(>M1+%>FxbjR!9@0R^QgaTE4uhI%96 z8Zzk@7Jl(_14Z%pY6>`y1m-K`W+!2vBu}F%Q;;Qw&Cl%ph_o0gVHux9QARYO@NEZ2 znaWt>Bl3YJ>kj#U8aY{0^j(~P!*q?bByv$d76Zb@Cp!n`h{N8V8Jwt z3|`2IkZT#XX%i-hmNEc}SPp~>tL2cGd)d)peS;-9;Gfj|@kJ1uX7;Jg>7)c{m)%w6 zsagCUJE;{yc2#4!CQxEi8Q0ot1GCMPY>SZlL-eQuOH{FUq$6@!IL7LScuFTE%yo<5 z=oNRY>f!zqLs2INtw{msnt<eD=)!=<~85vdZ9-(@GjBCKJo<`xQt{5BTVh?3I=^ zY+0Z?vS(rTI|Vg5ab1Y`G`03*MJ%*A*p+%H`Z*ah+uX@=&Z`q{DBOn|s=#@P%a${= z&IQ%jfrzve4yYMKy-L>xnteki5h5_?omH}dwIOsR7-AAZpzhKVX>hcbw;7cK8Ok1%b7Qe5@yJMAg?1$%5Hb5k;^K|4mI0{ zs?yv9mSi!kEMZ{_EoVIA#;TS)yh)%FqP^%vij|(dLuBg2(gz4q!k44p^B<*#C}euL z__DIC4h9J(J6$JaNc(k7-W!WT>*h8VwDtOtLSv=c+A(;IG1J`0AfGOR!~l@_(jR_5 zEcb&chwX{SI)r_}3i)ms1gYv}n zpAY|o?F`ipDwHd`DKpZ7mMw~ta*HHd&O79C5y=)Z^WSB+d{-OZQmHeS*z2?nMJQgi zwR){wm22DKf=ZR_RQF%1v%L=I`%Wd++r`G87I<6rYXj)P8s)M2`v0VdLouY6Ft)xH zAI}CuU@ld`XXnIWLZWzq^8v3;T6*Z2eJV=`>4CL_=Sb5-sSbJPjOaHR3aA&fQR(wC52(m`1(6Q z-nV$NnK(VRh8ytNtAbhx_~oRagG4NHSC?YGqq(;{V7SJ9e>tVB34q+d<9o(iSIY{5 zrzDp)%MX=2bCG(R5AY&Cw8ghq2WoUkJJTJtNxSVCzISd&ra@d7?-Y%@KGv$*-Ez9f z;{N$;Rh=c}tu4OilcLrDje1GmzQ7+_<9(pHR3_}8^wyZ3TNJp@<%S-WgTpe8Np}M42sAE>g6uiP8xXlhOof7Lc?V?83<}scN`9-x3th!Nh<|aMZ zEd~KbFtgV^9*!A7;R+&q&DP?)gl})dRIzQ?8O_@kQa=LTxqIOHoiF zy9PnzP|h2LR_no-$BkOh#ZY9wvSbp}HY_QwGO9oa0qPgnHsHX8f}&L%5}OG7!NyqWDEQ5%()qXlV@_!K@Rs`BQ&=nP3umEnvVTlB)uU2e+}dg3yi8mK(Da4vsc2n|-_+_3_nc?E#cMi-SsTG8z7I*4M7#YO} z#zJW@k**GeGuAv#M*Dc_VI7gcqN2(yQ)7-PtRj1P(SC6Vyp?y+2{;b79PI)gao|tR&*NM>v|)f8?E#G~-Zo zX%*+Zh0+@EL{oZDYIX;2>M}1dhNwk#Od#mfzde3KN!yA_uF23aAk4LJCPSAzyfUxxV84R7kNPmMs~`lasrW zdC6i~GKAOrueTlg@KbAvDdEV%#dtsxlB-z%k z{JNJY;lZxNn=T<9QLYfL} zm8sud(;G;Lht=iF`P0gkrsC=&65s;uPNZli=PW%424imPvY zm3j2tsV1MRZ}mD6%>{_|5$3W%sgkd`Ii>wHGDj+VLC+<9wU1n&F3YhSA-LUY)Q8Kq zy{bMHT8dca{{$7b-)Tcg(7JJL21`{c?%zkN#1xg+%L?B9HuaLBf&XHMGNA_iq2YVl zuVP53{;Owg#n`*86Y5p8j((^hR=Y(teL;$LOn;`TrEX#sNmn#z+Mo3YWW8vvxy!mr zf49bh^i>>@7)Vf?ZGktv`bqlv2im9!;kNG`brZK^owF>)hCphO$V$mAVml zo5w7GLF2{Y=QuV7FIZTe5_Nt^yH_}MB9zLsZDUj&0b#7FRKzoc5@o8>l7RL?T6tC& zO{8HH)!I^ZhFxx8xO?_jYT}pZsHJX1oMH_ihE7qAoq{N{hC%1zP%~10BM2>>e5(O z|DwK$G}r8!FHRzQ;X$Pp%Nc^Vi`%|_`e&Oqcs!Bz!#I1`JB7B&Uyh(2Y2vBsESze3 zHjJC7X_nGbM{Y@WzgVFc^1eUwae6s}11K0duVHZ3zCA{qE*luO{nI_FrPYf8`}ORw zm|q}TZq;rIa>Oo>9H(8Zt_M`U!K#~lAANh8kPXHMF7t_jgBJeBrhePEuv_1fgC+|b{93D#wEL#d+wTb&e*(6IOzFHMVCDI>d?8&>~=*Vxvc(bYK(%<(4 zW;aXut&L!IV%z>!SUF8aHD2o8XnOnAJ+pcc;g`p zek~*}DRNlLX7Ym`x*LOKIr$oT96kCh9Lua*qvU znjgw?U32Ahvufg!>wdH!)$#XIK}uFQ7t|Lut{2ClE7MjCfX&=)zp>7E2T^#d)YR0zvL-CE3P+OtW{lx z_6+G`>(vLketDoOcaYWN%>6q>$ZL2tnEKzR_pS2VW^*aiigI<~3{kOOJS_`1>!6dK zzX?SLs?AryNY`;lPZ3tp7SQ$u#QwlN?i!=kq@iWO(y{*vV@b-*ui~SbK9U{t@1QfR zD$u8z^C)lDCAZx#55QLCorSE-NGu~wv>?qn+B_?RQQ^!N#YC>=AIK^)ss*$r4 z{xB82@RqGD+WDtaAL$GmsF{%ml}o{CouaI9Id+4C%_nJ)()v*Y3MD@gYJ z$)#h2k2gY$kL89w8x?PY&xxp@pT7DdYDX>5)>C{1+FGKP6j@^Xl z(5fG~v825i2O`NUl;P6!sJ4;%)37iP@KpxezKqBFmM_}=Bi(5*b`s&8_8ggbtYZGs|P+1ooemUc2Cq?Vc6-{$VUpiFzyN4sq%WGnaK*`#Tzgf0FW zFd*oX^;#qU87~fcP@w#e9R=1&z&)o$z2RV-BS3d6!~CON0=l0I z(ZbAR)Db1N{?v&^QDgF}hUUrrwf4gO^wc#4i(e$!K_&g{ z+70;22D{6g(mSy0H}0b_TTn)2q%tTHP0DhuXwCDWbADEhHDE=A81IR0mROfUuVk8Z zg^XMAM5cmO9(F8FW$Ljk&Vv-E#;*=iqfM4`+JTL@8R%HLnV~Xj%Uu6*88#DAG?|*%bGWgJCAA{i}tDjk-=CnpBXOfz^UQ=Jd3o8bV-U62D)Ld zQ+wwO$J-sVZb>?XJ5LT$mH_thXSa(6ytVY3>yVa&OR7r#H|)f5L+aUt$Wl}4<)YUXQQ+UI4{Lq^jR>1T;jmi8h%11Qb z5sCiQ^yapxBjGi-||T~L;hC{ zr5+YbK+{a#xOu31G{|H9Cd+v&&(@B5IrHZ>qGbdx1w2pTO3yBRejrk&z|WD3@32Fw zv?lF+qJ)g|3%jZ*>ry^FyK>1T_P+_QyaJaUkLD^(SpR&}Uy3QPqrUxU6i4Gi9@%Be zITWsf=eM_J@Pf>W_wmk5bR zkRvjyzlGhupbYU^RFcKJ#bSt6@jWrp$73#O)b&MHT8SJ8vk-o6yD!@ikH3~9X(n$W z{rgy6)FS^%ETFbN`wEmY!hCP0no?f$xF2;by*W_d;tBnP%pGekQx({h$Uz?K2D})+ zF=Hm-G4^QYC?|Q=TU>Zue4*@Z7l_cX`1^7S&P#5NV!n$mxyg~hGnuEFxj;TDvuAE5 zl%;O8FPDC~P3BS0!+=gTqOG))4U!KJ1(2e2l_SeP`nxm?81{!%f|oIdzZ)?{gM$GU zWj$ixmy4Ldl4Nqxn{wwUF>6oaXl=0m2W#gLC0fvB>9lRzcD}T2TQ6e(H@M)?-E)D@CE3a{M(-Jr9kL-fIDHON%FLyk4O{Bwnh==bH^P_ z^b=Ap`#Gva9fdIqzpQRy1P_p$4Y91uUAD9ctnz!Zd$1f+f6;>0;IcKsa<<|St9hxl zME;J-(-eSsp(Ug6lU`JZtgA$mwNwIg7|3GCzFKym<R4#K|12p)Dz(EPGPz~>dVS=;^ ziKg2I8Fzq7HtUoj(2#}Y9UjS=V0tXs#*ZdSd*t0A>L!Q1&cxtWAG;01G%&kZUcKft z!X)Hh4+vv>oo`Pd`#^&Pwx%-}5u-wouGyw7MK+InQ-XMTFRH%|R^Cr$PoW~mVtgnD zcVd;MU>AVHiOmYd8lx(Fh_T8c!xlsR_zH+&ivsEteZ;B?sjbo;^h~;|Ty4oJPR;#U z=~tL~UesO2MuE??l~5?@i+rHB1P@5EspRwmTk!VIIb{$kVh=6WU@5=xMO(VPE|$~+ z$9yq1rZ=D7J^iyZ0K#7Gvt%jhvi@NzY9MnZ)z_H|%%K;Q$U7W|Zuezbarc$Z$_wi{ z_hp76B4blD)L)E*t&&3Cya~MgNG?hAZ&AzS$L@TcTuoSAOWV8dNW`;(A2oL3MN0Ow zy#xS|zd*6skZ6SQQ=vU2o8~djZiNch;Sh8*vOFb(0<>6G*q0?rN<9-n32ZOAB9$G5 zZwK{^u2hyupa?tFKv=rF=7#xg8OxEb%55EcjaJMl=6pId{nu8(-c}dP8?1O#lG;1+ z_Dq*Ne7B&8cpFg$Jzk5te@f4*1WU1nN{8GplxJd^cxgO)<^w(WmdGSstPtTt6=!bg zi5-{7)v2xvwgN%HTIUwcUt|UBCU^R9T7gf}VN~lVl11sW{!%b%EKmtv+@PM5h#Q#c zWrNWUa}4Qryc0pFa@Ee9Y`)dC1mr#Brd4-!A1&TnyR{$YAymtz_(OY4*p=G92$ zL}K<{eguzZv)`QHQyB5+a+B~TN0SEoEy}8s2Y);~6fs1P(9p?m!?kKVr}EmGj(-;FmDidcWOz7 z^yY|V>uOWGZ`7ks8q%oGO{jctbPKn_m2SqiC^#NboQ&Mbn*sbR^QxG}9+p$#nPNn;7|B39|00%hS}efYJ~J-yZ4Hq4gG0=g zECL9ar_v}+2jYseW25Zgt%2Xy^_Ij34fma=R78prdXQuZ@J=JgH!w;{Q6WT$#v05K zJ2sLZp%;~pqJ_>+=RKJ$TiwXBv|q5?5eADgpRF>#a@!hGk6cCG@iMC4bUV3mydgi& z8Ac==xmo-jJF$_@X-3Syg3Y75VL%hCVtB>;N#9L`Sl1SI&T3qS&@{;!uWJXEY7v3; zWCw*9S@G9ow_N;kQ$DlN1E#=C&N+PrxzB?Hr1Ows)!+XJy9k*O+*%!qe{d#;>QkBY zx*MY5p{L{WtE&(R=8rID!lp3^@(0zv z(E9hDBL)rMf=Au;dx9+FAB{gYEiPl9E^w}pNL-##2B{lREQSt75(loafkSr7ni12v zG<$pX@T@*KSIIaR534F37s&{}sp240z1@XqUSiM4575VxEWCjmTQc0rtdN2hCC< z2(B{`wF;_ub=$*wRv!RDWtMm$+DvVj>JD$(zf&1aoWr-D z0R@Dx%N#+!G4d9+z+%{b7U3~S)_YrJB?5j(+i&#{iSn8GJ6Q@^CWH`+Po9C|mo(j8 z-Gs4-lGL!G2q4K6qk*QugO?`)mwv;mup%0Qa95>(Az3>4!xsGzUQIuoQbNpsoK{k4 zN6GZWy*&(|PeE}0n%P$v5)Gw{Vv_=@5NQ%op%cOvLdJj`U_=03O$2fj@9(oyq{6O& zOpY7C_D6_8X`YS=DfoaUdMNhAv?nZK{~kkyzb)1Eda|g1>@umx&V6w4*vNNJP?Hw) zTz38Hn7@-ZpsM7l-9f)r*G0-*!_!< zfDi8Po~hZ=TkSEwo|c)EX{mnDPA1YX{}2PJ+N+W$c=A3h{tA;-T7n#UZstxFTZ5qJ-(+!!&r19+)2 zKV^7NK!7M&%+Qb8;hxvwn4;0hU5S@P>~dI?_w2Wzw72C*cqLs{jF<-ZXUJ1yc6d1yI{%xTKL()F5qtsSTvgp%$YaG(w^74R0WzWPg=p! zqwzx;hO0k-F3_N8r{#cerp}B-8d+|c&k`u2Q=)ZM>c;3WurfI8jEv1vhR9R-??eFS zw9hW1e_8piVRj8k!<3Cuv?*-wAO2)q_qGiMBvrjvm-#*i|%FDkJEcz_0jTMScvif+)h zdPTB+t&Mp`m)1;)UnAb=E$6Xln=#8j}y&(2qJYU(o2Z6r4l99#)i{T^s|~znrSN7jeX-x(Ng(5TJQCDR6tjwSx*MSD#Xh*k!w|a94@kctRlpGi{YywGGn`On1jX z*f??KY_{+N&l&Z-BT5ABH?ac*7FdQ}^y<cC*YQR-dNOMPuE!E>}KFp=yOG> z@2OQ9)ROrJac1T$@2S?%eM!{bltVX7Cr3oB|4=lrPW8kDB_7c%bO&ey58G%HAp&`N zwOBi~`_y5ZzChz~0D3Hz3F#5Q2UoiS%o`H+SxvCUz|NtKC7iYLyB$1s_-7-wr~>AA zt=SRE!)iPZ<|kP3P*Z(nZFff7Ly3B+ny$!803-r;c71Vle3(@IWY@%bpZ8ZV&O8m~ z3A<3$`@%oL??Q2VF(a(j8^b49|FJqIr&t;SRIO>0)7g#k941wjgKsfoXAg*j+BazD zUK+=*u`ivh@HhMdNJ!}A9C6NJ_YlXtm2jp#l*O%Irz=sOt30J$DVtX+uIPB<{ zzIYFhSR2x!Y$giOX+(9#(xihVpOYOoZY!r7a0+;#=GyPu7 z(yhx5yzV@?!;(*}qL^jya5BL=Q<5?uTHn?tYc-OQdHljd2<3<^q8+KcI4%J7m2!($ z86?DaJ5UD*QA&$ahZY;3rA3?b);Sw<3MUrz&(hx&ljxAO%aQ0O@+4rhA7%h6!29Yn z`*7NaXAp5MZfSE*#}uHxaEB-WHo~$68)}X6vh&a`vCf2Da5t7 zn6s{S>X2c0L}gwpW3IBVA7CE?^)EiJ!1JF2?5H35U43c9z3f7?U_Wq(Eyy=EH7x1H zc77=U5brpxxNt|vcm8V?N?-asI%{BoO~U44A;|`T50g0Xl_}T+(u{Qp1B;2f?;jyXA#SCHUnCf_^)ggIk!NWq7^C-R&d3Mdf&hd+Noe5qAEyZHHbRQ5PniO zpa>wP+G3{V5RdnCCOq;&1XfkIK$1;E)W9uqL;`$&3?^=@b&ZBp(1)csJ95xuK=T@X zbJZu{QH4EZb=UwAs|JMp(vFRvQ!u7nT;K4+SonGnUk&IkRt zEMiadplR&u638|Ak{Zmrw< z`h$-EqB^t5na@gsj%9e*C)(m~AW{1Be({txOxcXnQur8EiEk?p- z8A+S$6KGlKb?ysJcm#NchNL(*3q%?VfY&(jnQ5#1dVh!uSNa!BI~pL6>DzGW3(E^m zHM!$5@5BoVEbyUo!ZAAiH`kW^d>^cj!VoM(}|D zJNm)<&QQNCdXHEsQQ<501-xtfIC=hO$lZlZ}xu*u-%#J+R87ght&*sID=w-ZHdazCGEDG2-Zj94ZUz2FQ5a>ffOfq!w3Vk#8Xp3dOBkI} zb~rAQ9_-q+UBcvi*2X;mRNLun;BBn86d2fXFuJ0ww>o9$U9i?`1Em}oTM%Xq_#ueW zLu{5Cr%Ik-vhZCAT7eNtkTS(-vyl?Nqi_Pr%5Yo{k4JO;QONE=8nA*sbB?j=J z5?-YfN`uZb%u`i^awkem$3!7wDOo)3uv!apx-@_wacN<%2R!$xP|AkB#2`~$>F~kn z#!{+W_k-)L*z#DMJrf(@h-#N7e}-psg9AEn_RgqfX=PyfLeZKZk@!N_j&)nX`YujO zXxB5yn!N5%yHn=eJ^kq6Zx3eVt~H$sMOWjkFeE3lmGF;5anx~aa2oiEbeXw`FHPi-#HGz~#bc-eQ+#FOu0nWEls#$X+ z^>zS{DR)($>ky)S1Xj`43bQovr``EGbtB(>;4vLr^2D?+l2;UtwYCyU2?2hO=@M3%ufwEX>4Ne}iiU8fB?ZaZE9KM&X(av&+C%h`+QbwX_R> z{IvQ`A8`D%up*U5zR^@UoCsk0(l8_t498wOz?%)YBG&DN@hN?a{nXh$MOaS!wXNMe z2I1y7qs{rCduO0)etDmgt^-lkGk~_SuOgxp+dPZzW#0x6^2&Gzxm3NIJfq8VeT%~M z!D;HmkskBQ?MPW7-~n0M!Nr=kt_#N1^NIihOKd77{48et*cLL$1Q#{eJ;fm?uxnQf z5E*wk5T<3HXZ%#HJ&7kfeq0vE>L0YHRUN8BdJJRJTKncbi*0xx>EmOgRUW?nGMiw` z55g<6W&zIXoSkBzQ_?>78MG`{mB1{wCHLbKmyq3j>Qr0v8KS=2ySd@e>VgKid3R8u zGAH1%o4I7EQ2KufU9OEFGnV9lF->v^_eZ~SCJ~)9c@~9W-_MY{`sEM!%;zjzkKFQs z-lXfGw$XABso>Ch?`4f>>mi;2K?tnfSp$2DRAD`KzkAY#Yc~z+GqV zYB&wkqQ(_$83Avth4Nr>P5451QB8ZqYpsY5ut-yKw;W&wha$neindAsl*neJ-$@lb z5-I6QhCn(@wB7Aq<485L-vTnp>Wean{EFO_A-H%2*$PD>5*V<3m|`mYT5B-IYDA)x zDvzpUO3HP)1W4-Od%8FQLHPDem#n`mcn)=ic|4CU46_<(8OCI<^8tx z&X}!&r=RT`22|7n*zPi0FDNLV(5)+6okib^PXsubkYN zRr3`9Kt5*O@?t{W^;kH0g32iniETqL0xy2bAyA(Z1lq$_a<==Q*n^XMNA+}f$#(WO zS>~De%>1`L$dn_fzd@VJcn5(`9D%b-{?0Ru7IhKdcft{`!q+TUp{SN%m%zMFk3xW5 zcaa=;LNy^_?V|-*SE3FSPA`LG^3b&FL=NLL0aJXnK{{e;MD&zFson*Ok@`XS7fhFc zl5I)X?h$Q#(x)D8#PP47`c@=h$){3Y0^`m*q^{H)MsG+Bt~<+~Wl6_>B4~PLbMM_| z7v{wan3>37e@^c~XM?Fa5v%a3|pxY111ffb8K=Nv;nK|Z9Q3@-P2*GPjmV2M8sxL z@t@hf%sLc3dyj}pr+>KOA9AdcO?D=NELo;uS1=A)aiU|c%UxD@tYjqcSFaO)wM@yW zMi6xZM&5e5uDs+K!MN;UiiBFw`O)l>3>@O5;kxE%6S7K}4;flz9un{QKhJYAc;~H@ zs=Tf0jSC!M3VV!|d2no@-czbawOlKe5>;J-=`^&$p@>d_#YV{_G~aOyl(GqxV)*5r znx=LgN#Mc=En0*eMs}8_gX$5yGof}UJ^gl%JEHfHzP5**1MQ;exW^XLtbTOoa~VSt z{Fo4b1~}S921P#XkcMm%+MKTE?h2lKN|_#Hg2e#$O^DI8jVnH4uAvy z9d?%1jjZ_{9K!vOR98HB8DtoDT^Q!^4{;No0hoONlCxe6@dP#N-XYV%I|(-uxn_w8 zR6b;|V-rROnamapNXew3p+=us242#v58RO+*k;K+AaW^(ue>B*JA2Vd;g6T{hCg$nkgy3MeKSkvG9uav z`Gt1+aVw2+Sm$c9shN3U-FPRAXX841V+Q~8C=2-37IGrm*ktq7utl3s^P!^lYc|1; zD5s!Z%gKYj5v9 zMR)W4EToFUGr2bZRSXdk^vtEiQyuPw&BdgxpCK+3`a_!E@8VsgU7jDVA^NJ(%O& zyzCXeJjGx6R9<7^*}ivUx(o8+ZhRzYc$1+??)l*k1c+Vb9kNZIw7^bzp zCxcB5DW=D^#oFzWfxi~TM4{gIu;yV_kZnMk-CjLrv#1l#XSChd8J?L2V4({u_eGE^41fb3Pzr6# zmw=6UKyKQyG5H)DL3yl!qB;5z%T_3ttn}LAI;|s06PZ=XzxM)v6#8)!Y*d6XuZl-1G!Jes zQMuwQ3Hv#5;6baS(~d7m0{I~oF75-N_!JCgWIND#g@-yKFS(^LLr%wZzZl|eir`wH zjaGXUn6XoGbERbDO_*YGyUmF%8=YW00(cc(xdr$!JNqW_lsANZpp1@Y+ua*a!1eNr z+H1Lvch2-x=r_}XoDw9o>bJ(|7^VW1*H#KF9I)D#=}kYBX-pWP7u>0Xad>8sBmf4% z%gH(`0xV0J4ht6KvGCx&yS+p0C|m``^q{11zLsi~>gjc;xn&t&`msWZE}fdstRdqs zK=HW-%`p@vM*WxAI!H4~m}U~*Uu7P4JV2*u3CO{40Jh#S*@32fJz%C_HIT($Xib== z;$of;(X>62xl8 zCcUF}Sn|+j`c{9|9g=tsp>3P^XcI2!%FE1I2(5{Im#8sZWyCdz>tD3bq-uwqr14gH zRZ4Dba;)ARVAl&MI`pHVl=hCl6oi(;&%@{GxGo9a7-{m%aW9)s#?caPEU^Ua3x|8FYMcPc+BJLBXBVdt_aMBF^ zG@-j2iEYGYjNo_JjVn$5T~t)3>i$)-#RAg|Zt7eKHE)`yz&*dNVt5az?hf`-a{6(; zDh7oi^O@^++k{X@+5RA+*Pl*)G*slNd#)?vT(7a!^>8PV_O^I=LNVK@l$K~f$&8^Z z^ybJFX~GjA6P*fRX1UM}ADLYQubqi)bHz48`YfGMvJ2Dwb$ zs;ap-7CRKg#_mh0cFB+v{CFIIgZbj{hVjU|VoONx7A@C;Y4rUAhTZx1YBF@;z!oE} zdqE)tkFA~oJvr@Lzqbw!+3LY4P?Q7%Z)=7;>M1m-P;Ce3tPIF?1`07*hcwF;*l{!6 zwGyO;%gTt7^ai&j(T6L4D*6wHGW8F1P9#W9VE}Ajdybn6_Vjc-)%kqFZ}tq0 z=5_3(;#Rm~K>6gh+sDkR9U8#d67xRK;fJPqFAYRB?y6#$Gv)CTkUC8w*~vK5yoe0} zU;vQ-FGdORf|~^gca~yPs)tw-OrmvNIdn{dmL$?fWvkQ4R7B>e_pfTkS6xH*E&umOW_lgYN`HVnAxSEv7;J-I&mE& z1Y_EKDu|+AHcN+GKeW7g$eYFRW>f;vhuywE%WlGTs!d-u0q0++{3&871uPnUO~e=L z(I%ycn0-3r2P^-rsB#-ZYc+JGVO>8S4A;siU<8(cZ5I2`G|dj=jDG(8_#n-ELGUd< zAVDq4foSloC}AD6m&n>yLsy7ZQrGL`M%+D%fNq%vN?XpM$Khln#$Q0ULvQ9mO4XnR6?IVIGVtj<+FJz9_rez??IFF4K9Jy3W z{*Q^+Wj-#f>(^7~iZT?@*iA07__QwYb4B<#MUpE{qCnm8e_HI?1hs)9AnB$9h-n|m zx>Q|oIcf+8*dZ^}D1ZB5~+fEuu?56!Xi$1c@pL=I=@wS?7t0Ce& z(bYErBnj~2T!>N-Mx+N#AV0(tZt88?lx^n2Jn#<$RYyQ}r;@q&kA$6pkmp zQ>J)5Vm9Z5hH8AB6Bf23|7{AB;AB9J?JwgeLwTUI z;fTLrp}D8f4h0OiK_CFx@&|R3V*CrTwKr_G=XO<*6TK%k<`8oWG8cmrbl`E!j8q&@ zP!ykJyLN;YK)xcep%On}!r(YAwF{>V5J@ zaP$M36Z2@p#__LpiNZaY6GNV#i+*{#kS0)+1W z+9xO@TJSd4!LM>83H8RNw3e{EyZ?~BxoWlg9aI_}GL6KZ|5cqtpjD}Jwof;~I-HmF z!*s9@z4tpWJ=zNuMooKs)oFfX!eu&vhtdOjyi+t85aq3iT=zw6Pb=QML}}lJb=#Ao z<6p#Vm?GsYynJw~m(f*I%8e`7+Im`**f0oKAf z@en*GB;!a?TN}YA5DITpWBHXHm|^+aH-|TbD7M5@0ADGa%Y>d24X5j0SWIqg-@}Eq zOtQ%cgelCMt)S;)cYTq3pt3pG#zmv@`XUf|4X1MSg)c3)3pA-W zbGT4b-qIU#Z~N;ZL;R$2tCkxYk4$o|)$4n%PHgoJS_&{NV za&P|Gs~lye%$zf%Soqw#Heah!!_D43Ei^uBz${{u*7Bq7mHp19ynI$x=?w88S^i0xZ09N`BzppKdFnfA-&P- zK$JS`HyZL|NQL}(SZyMPyMxUkrLEQX}w83s6*guaoWTIV8vbmLH`nIhL^<5UpT4y*CI zZSUSp%7+xQEac9w&Ti4}P60G!wrLj4!&d1EFs=@&Dp=s{Z7#4WT9RK>Tew=Rt9UUB zE#0+*vpCSi!DwvPT&c@6<7EECQbpk)z3c`wxqtP7m?!#vn}}v9 z#HA=g2xqX>q5cb2#)&T}T8UZWH;zQM<;YX|qb8Y{hU2pCpjdbv7Lls-9Tlb`eafDtx;VWJBrWMluG~ zsvtX&6v7M+C|3a%na>cgkI_sfsMAV)MTWY@@!piKdGrYX7wmH!1CbXVL4SPzvwJb+*&DAT5 zrT3&){#pH=)P*v(1(rcA8?^6;2eYxHRNg554o_T>+;SfQuzEjF-+8 zTqf4cM~KW;!RZ(O%Q^wK$8;=rkk%{JlZgA2^X9N3b^~2D+|Uh)lmaq|ahAhu9I^ir zkpGP9tbL@E@pe9-o99Qs2y_I!E902Gxn$9%kb&ExSuLAI19atysr5;8PTy>WL+_(M2SF476y;ga`rQ zLgriP~5eb2)bJ1 z)1EI)zvDOA4j$&PRvjP@dz=j-Pf|8lYx2~8-bZ+*-F={NUwcI%c%KOcMXZy7V!(f+5?BXzh}Epw%tWM3(Y>xCu`BtdR)1Z(VBe7gWSLEdONY^2{X$fZOM`S@mJtbXXHTQj6d#+lL&lbc_LwWUgU~ zN48|W%drv~eVmj*XaYk`FxmG!HMi^>n#=9&v|dYGQ96kPhDj>r$HfUed|Hj$h%;AW z9_%|yAUAj7{a$`LF1@u8qHTCpXC_bl-tEUTRTb}sC``-e4nZ>|Txd0CRZ^^DZ2U?) zLJau&kbsqN{+NCbnX_%Hz!Kjx{rC{Bv~E|=A9CC{t3qu~bfNcP(i@!GvqulJ^#*Du z9|jS6bjFEJ8h!E2O3rY6NyAR3J0Cl5)Xp~zh3ExwG)DSQ<-+llwC6erW32A{5_7>{ z^J}z+sVWFM9mA9Lgs_*mfj05$J$G>iZV?jX0-_f~$8T2NEL=p9b8{%-ns>N%VDTwA zK0n{prd>AtlLew9i z8)Jv9c$TrX)hE4!R3+Ii@+xOk!=L(=S;Nn41-74+yF_^B@r@Sgzb#E>0ry#3(huF% zm%36$7TL6!VlVHI9EL0Vh;TP6yN`Ek=9cu+J)-C{QE#L|y-K8if9-bYA$AS7XVFik zM>t17od1l{3R;f9o6bMmQPxmJu%cbOT6^B*ff6tpGA5XW;qd)MJ~F$Hxo|SUTa)~! ztbIGJZMdem8U(dHZX)(U^+fR@*#ExvG5;W6(=NN^p$m&*BA@)e%f{RY2;lW-qzas! zXfyb8r7`kntpRWo=^MxkH~wCTi+>x9QL0T=RGg8Mb@(g+3UZr$?RSO2tBSDEfV!P| z_DJ3T^GkfX<)pG10X^WpR%`x#AZj*oQh;A^C{(As>965!tk~c z@aof2_bp5gXJln7{z^8mz#Jh9URN&B8!2n~fk`LY09Z2}!&4>_XL$aK^Dv86^(esP z^sXJ6-OYm^ko^_OZwXp`B(8JV%(4cuO@R_2frjG8Y-UU9O_Z(Ty!P%Cbgz`se~vlx z_0*4(wmsW*BU2-QxMUh;^2KFEijdInQI%N<&9yX>-S__x^#q!0)HHvNWR)#N2-}R@ zlX3Tv1=M$%pSVByvjA1fZXV_;2=lP*PUAj4p>DxUy_uGx&FK3G@5@Yl0(3FMDP$@* z1Pt`fof#gNn6D-I`kB8A9>P;N6tc?thus%0mR~=f8!H%DDzv@QCG$h+G?DYgMo{IS z=3}!X+N77IZKh@z+?2aeJwEFHa2G8t%(C)*)re?EnQyMJX!kXl(N{^RI2pVY;$Mcy zfhcG$L6_C$3Saa3h+w|QmA}?Kewh{C=_F zL>8~ixn7eE5w8ir>eSV~J4iUQf8w<9PFNSRj}*a0+SPW>&JBY@Ih3&z{4B&qZ6+_e z^_?x`ue4-8w)iz}S{4=FC-mMTX;{xaKStk%R5LW1yz6+;cg}W+;KDgt3Pdv*?fz|G z?QKl4OU4HxTL(eKu1YCi%89ugCzEix$=0eKQ5lFM__$B*pg#F>#7?sI=c~1V>% zWQs;M4}rj8O8{0$=Sb&nWnc^0;|fM zSxSc(0CMN9dJ<4v)4N0^g{UwQ6=jYlP%iYWY61B|WnVetmF5n=o$r&Ljd$`dUVIc& zfP62@F~UG25I)ja1Bp#~eiWU@FIB4Vhm!GvqV!G1Y}NF!a0vIUoyzXq)|5`=_UmzK zs3hwFo0Uz zpHXt%y)AU3AY>jN)l#-|j-I2|Xl58eGHv*2-Il9aKjbDfX?Ts+ir14Gl-R9Ut3|kw zQXxEHW-hb(3NUzAU&$fR-S47RHcuI13^8JDNP`GzN9Z%)daKhCmrq5)!codx}C5wJf59Sfzb@Uk2PZNIujRL+4@~IbYtG9WPQ6UxrEhYgb%gO(J5oJM^Btkz@z8pINg=6I*^t zT12xU_mMJVYO9YI`V{XUm*%eV=gbiSP2>JskFPL`-9E1mXIP@(&pS@ztT~xVZ(@N; z&?L2YJpLUtHG=OjUiEe-scE|Wrp(ipNE;E8T#8MEZ|x1jLt5*1)o_oU!6C$AAh=v= zDEG}U1(LUIX}&5MQ|p{HFju#4|2K&}GFnH{tS8 zzNJmDi$@3A?;N7A7bgWXomu_0Zlkw)iSWqVx|VDBx98v$*=D$Z7O9?(Ldu5@R3#9U zse3S%1>CGhX;Wpg388b-0W?^2tIdxz?@iG8Mc%U~b zRWp}?>3zm_g^J(XtY>(F+L0T6L1s`t#*+We5t4V0iUnX`B(GHHN44B2(xK(hrqEAx zl_%vQf=Vgu$eQ?pp%j9^-@!anWpMxHC(rOcxx zgV%JAQvw0gnIAizIb+xA@)0FVvTlL5ByzS8;sB=PCjfcAv5t+sf9kEg+IMzKupff~ z4JI!D2oTTv>;+n@2~=ZSV>*Z348yPTTSe@Qv9@Nn4AI&m)F^jbqOY30x?*FR4RO?P z&(vCP*-fOkG1F`_iPR_QlC#(Oxfzw)ej|X@o*~ZntAH?1B<{eRmI35myx5b7RpUWv zzWC6;g$(Xk^D8fp(I*wk9IjtVhs#&y-7_Wyf1iaubBpMf24@FnTTM@J5D7q|gjj1I*EZB{thF<#JXM}l*v z8MuTF`?I$>n^4C?wzDj=gH1Gq@7X{}R~-ox-M`I}fso|vsU9Izg@<02r&wGpXW%K& zQ0QD$Ih{}2j21hG@Pr~$4dj6p? zoeJW>b8!hubiL~vYpKo_n2M#okHwYUaY(GvVj^Ic^qRp}wRj7b!y|=0`xk5?*da!b zZAeKuo@Eq4L+#UWzL7qUT$M^@x)O!|s+mK@`Gi{)^8{}HbMA|y{^Z|u+>06s*LCp& zEH4ENg7V9gh5%@^$rOd~_s^F9eNz5i{~w^qZvQuE^8chtYbWR>WgMiWXCx|KrKcun z_Wu`Enq+LQZw$hyya)Y6U%Wy=Q7%hsqh^en7Aviqc-Za+Xb~`aoE{^OpJA~Q`hRh< z$I+WI^?n)IW54S^d=u0E7yg-6VOn;RA0cP^Pg)UU&4#3G6jo_XQf`GqSLe%ys5q>@ zgWwV+ygXjU83b*6aFcf!0-<>YFC6>VJ}sb07d7cKj6FpcOHfp(ScDqtJSbv9)Aj3v!Z) zi_3#7TWAI?%iA6^8VJRzyc^XwJ0_*yHM`faSHXg395<5z-Rsvb2(TWW*2390H!sX> zDqRQyzH^)F+am?jI-Y!BsX={_sL zEVjI{0&o))I&7Tu27BD79_tBu z1ERX3d|d$|9Eul+h<~*z0Y&`~1QUEn(a`(1q_e=YV?{A}6G@=6`ce6^Ger7jPQbtP z1*d3?ECq4|F%tCef)9Kk)YxV5@NnC_P85eP0jB#j4VhWNh0Mer)(D~4yG&4cA4w82}@hW}6=7?L^cxORc76gZXu=?0( zfQ2Dd$mCHa6vj}FCGrViN(nK-NkZVyjDFz_Fh0^W9H8PdXFP#F;^Pyf+91&eSr+N7 z63Bfdf(H5qy3EHIQD@TjL{D>2vnaIW>)M1`v^lkYz-(f|6f|I-p38tdyD%Yzz%;Zlb;llJKc07aYCtomQY2!EqJ zHvhZd&;Q={f5c)prgJp^@2(>7i(&t-^}kT;|J_xtlrpk1QZ4?XWo6`KW~d#cr>4yQ z_LDJ_0#ma*Qy?P;Mj%MInS`mWrGtc_Eg)SXWJ6#fLnUS*$N$s1na4x@{BeApA;(Hc zq~t0`b}bd6bySE#wC>FvZPu|Qx=8p5xgz&b!WLqwkQ`m)$dM~Yxua4_C8>V1l&tD& zS@oD6``5gl^P2a(-}9N7_HqbjPg$1aJx%ScncAUjrKoD9NMpvaw+ojXwkxJe>&t`d zN-@XNI8VqOGC3rc;z!^dislkd&$~Jh$z^wghDB3Cv+j^~jW!(7Q>7`f*LU*}>?xuZ z5wJ~mz%Otxt)H9m3%9Vy&=s-mL)@JG+`>_iW~#2L8vdk6d~|#iQ`-4(u<5St|1zF zk0Z$#bI2ZwO^%L?sD{wEyvd=|mU|8HPsT>Kq$M)Wu4&FhVy>G~UObOpW_>i34GvZrc zBg;TI@IE3%tW`PqOTZ|1hT@6qI&>l%s#A7L*ZMOR2`X@ld|_50>|R%YX^I*j+JjZI zK+JP4sB3p|6CDJi!MZHYx&D`P1t@I{@2=IS5au1ly#BY3e(z|@q|r7uX!iVe%MYKs zN)*~464#Q?j(6(vI6x=YUAeP9kjqERS53;GFm;oT^Nad`y(`t#>$_hHC^{k48N6x5 z;-9HzLit|Q)@2jy`vO(ln(Iy=-qv;`Y7d^1dmoK1WA0ymJf@QAsDEKifl63)InBEZC#gy+@>m#h%bT>Gefn7cwX}U5 z^oIi~%xIKPZinoorP47RMk0whQ0x< zLHSL5*d?e{xM9#kPJudk4l!*Z-z%O-_HZ~x^ngreL*#B5Wh)m+&4S3}H0aenm1p<@ zAx~Jx4og@|br8P*?ty^Mcx;Ejdp3JNCz4Ukk2fZqZq}$@!tI4u7*`w@+TRc_EM1~0pUuHOc4n=%C|@WJ`Ist}rjairu=h*`_z&dw`JmCQ zRyX?A#tFz*pS?fs!#1_Sr~hK*_A1@-p~Bt1L#6D-lNz@E>!g@J7ddSwga~GTS4RDK zk?D5lNg4s`yzVWVi-cI9(_{L(K`c|}w?Y&6tA@kso*~`U-fQsYGcZp`Q#IC#C!mJ1 z0_l&tg`)gD9vHl2G#3b_?V|BAWVd?&6VJPT$#Pc+r&O+`?HPCsI*!_JkJ7!+RDN7O zBq*$={YT+h?75YCDspbWv)3ISs*Gkg5`H2n5EO1QEo-8&)v`t6 z!Z*W4v(Wm*AIyvS-*wg84c$EM-#uEPG}yvo|KpL=!4+FyyFPNTeNaaDpRU+J(Z{>a zj(Mla$$Vo*oQ^B<&r$k3FeZwLXtTT*L*`Z!nb4qn0!r8Xai@-)W zhAGPT6|qkK&bFsJtYgJn(^{oAn&5)4Jgl0j2I4(tX9x4A&wC8z9~{im+jirD%dq2w zs=9sKi|UYy0Q{~kFQJ+vu+_mMP9E7h_`0yb%+2=O$f9Hl#y6J_jOzKKgf z+dr|5DPQ2@=c|44%p$8Y;%y&`xj$baHeAnB!sZVv9f5|RMY2k&s;?Xl3E&{jl2c5UD848%5X~C5J|(t_(wmy2+5f z;ba{Jn4N+7W&D+kw(`xBot;xgsoNF!+C2gXc=@{~Kg-_SFI3XuUdiL{zx~w1?n9;y zd}VQB@>nNqRYlAGvS*)4Ot7tPo^76e?!Ds?FQSUi4%j%qdPD!-RV!ACn=1&P|B=7_ zoynM8*JSl846Tvi&G^#LN76m$=uz9wH*cTIfA~^L+=MZK!V7`qfbXQ~A3y(5g;21e zIp$)Iv)O{dS{<3LCvspzLu~UlHNVC4=ZOm~B#w5)S>SA>rh8WdJrmnsdY}0OB=$bI zQ&m6+>@H4%CP`-Rb{MRs1x6AM|1TM{3+0+LaHyZxSTDlkv3830MajpB_r+f3Iw*P= zU5(3R5s4G7ii&ECcysfR*ErP)s-*JtcSbVGPP`rGZzW+boxZEV`uTIGUg|j!nw8mW zQ@5H9Bt5aIGD0P><`g^ED4^bFpNvZgdVdhtWV9wfYUskl61!9xF4<;$CzMGggBjC> zbk=9%X*_Dh)G~5D;9>qiG?$G)S*r6ZGp&1-wUM?NJGm52IF4n!F+X1Cr1nAYlTJE5 zDVFN6ZQaJwP@UDh(Nfz8ebj=ewA~lwRZ$kKirzsUZ%f6Yd|D@+_$$CW;lhmC)7|`| zyuoJ~GYLXfz8E(R6(4BKK2(oK(aEv*|EtEp%z0@OSibB2XyunHjrGm4M?g^G6Wqb) zq&vlG;i2f?6McQ&M2jpm6^>ZzIe~-#(@boCOf?lXKg~=W&sU&>toa$yP{=oB;!kS} zoCSy+ItWO5i|UG#q$}c=%5+zj$E?uyj@C9FX!sVHEwW4MnwO;OkbiYebiuW&e>**2LYPEC9xSxK+A}i-m=v(g-s;zA7AXZMk%}Q#YI>F==giU~ zWw(>a(tvPL*pb~bT=@cXCwOxhy)`{eNIDkT~S#FCcyY5XgaA&`7;cM8;}lc%Kg z^cwgl#NW#TVitn9oFwU&V8PO&OP)kHc4@+kbf2mQ&mjbsiYRExKlMSZ0ry!~ENS3B z^%vnk@%R!b&i>g!Vnf!_FmNswj;+OrCaeQEv7 zAstaU-VLlc0dgmj#Ew(U6xcz8uys5-Z3P(=$?935TLHx$&`2p-SSX}$bSIlaSMpkc z2p|yqgTfCS8Z9|Ah+a6lTa)WWyC2PKBp@&WO^akkMA#^!Kx?^K**MzBz+q_E7Hjf- z(Qkh`DgkJeL7 z=9bbj(KunjVJvysj+O6~iKiL_(&0Lv=9=m<(O6@x$feoQ8bY`BuLU4IfFc^ZWhnsi zffPb4O92Q!Pa(y!6abYViYXRIZof-%v2}_u@tfKBj{?w3A;nS)fNcISJFzGdOEhT# zNDrpWmw7~Fdn9X3D`*I4Y(XPUD=2U#KtZXtLocBK6pknemI0D|U@?=@z%MKl%_0f_ zZurh85pdJB7t~8q8rRTeqanKh5L5*-(ZHLlf6Nqb)H2a5q5$Z~uBlH4Bp~)9-SDoQ&F;(h!o&`0ePjF^uf+t z3ZYdLQT%+qLmma`>x#J)-&RvhLADp9?dmE?QFho1t9xwk*Sjs;E_=bz&U&4Lim zaegcWGWST2jB^swd!V_-o2_49$Ul|l+zNrg{Zn$r<@vV}BC~@do)W#>uz=*8+Yg}i Y*Xs&8dXV=aaPW5$j7D7m)N0880m~T}9RL6T literal 0 HcmV?d00001 diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip new file mode 100644 index 0000000000000000000000000000000000000000..c7bae64bd25007d5afe99a2e09181ae5971017e9 GIT binary patch literal 80917 zcmb?@1ymhPwr+p`2@b(ExVyUr3-0b5+?}AoHMktyT@P---GaNj1b2NT|IC|v|8?)o zd-D>Jm&c-BsWIde>Lg`&1(*3Go)@&9A>Y>lD%7#@}|3Z*boj={xGvD=WQ!10hb# zr>yo`oLphwfW6&=c=P7>PVRrb9qsS88@L$inpqnGTy+`gSm~Jl&FXOex9Y5Ii~tV* zkCit5$wj<>UMa&L)yn)U*Z*8C>A$M>l@2q*zoY(D{jc)BGW+N18UJ6a|IIq>e_{Rq z+Y^p|rzu?rM}0@YzvK?}HO}6>`Q_XbYNoi_Yea>=ee*`>FSPi(-B*7d9Dj%G|3a*` zlC&j`2<9>Yw{vJR7bd%vR;m=+VTow&dAEd19X+DWJJ)Y`Z*N9HI z;xLQ5eC(1yy$^1_YNk(cMXd5^j^K{%M6KUpF17V@!MTd~;^1(xD8c$rJjfu|<80_z zwxYJ!P^qFSYb~vlYli7TVbJCY2`V9Q6vEd%&^{@3Im2)lV>gT=NnYcR9C^s_Fv8Bj zs@K8i0J}G%}f=)Sa)&tebjHZEH^dA@UwiEPpUW8UM)-5I4wmJ2D0<}&b& zVt=r+#CpDLL8}sJ*w^*(ticFz+Rf54Zx=2c+63xbhP|L+gRpw|TW{-J7Y~)q_Ex#e zwObECdEaJ=&tfJSa%xqn%3{gWXYP7upSZ6WI}W8q2F97UrTSzjFfxy{aq7jWGacmO zdz`8mYnWm8V|vZlR{8of z{#9}H3(nh>Dwg94>^id+WtGaJOgUh0K%hT`!m&iN0t!t}=oWL?M+;W(iPj%$>(CFD`TcqGY^Pg8f_6Q1vA7S@rlCG`Rs;aOyHqnIQBpn&ggasGdE@ zy=Ekxc92Iyoew#S`4^mX=f{5S&3nt$m^KL`ptw4#1Z+HiqH# z(^VV_805ak)4jdDwIvsC=@+)%9fGp5mpA_y3jQou%>PcG{&&Tf>)&ha|68=UR+gN2 zUnNZbwS4+ZZT=x!4F5v5s@u|uM>Vu!)M5sGV3LX(QBmUt6~aQ9FD~LxOHsoe1;8Oi zfQ=y{@gKj&WKSrSFF;_&>b75|F5Q`8|CeXe{-+_3=ih_wXs>VWU}z5R3iI@7VYUs_C=b1dmrl5@7#!SpPx9-%;tZvHmkqzr$KSH90*c zSyf@Uy}f<-4b(6e99($HS2&fO+uYAG853JmElMARA%bZjh>u8^o8SJ)b4bl;FosvpSzpV)H49;&|HE@#L#EfMt`WfUpZWdkVemg3a&Dx`07>!F+yxBJVWB8)57B$z|Af_Y-k^x_6(`$Y$IAE@utIeA zzP!-2KE1rS9RDJgLE!6b0X*EF!1gtLucLnj%JSF2GW`2sOA)&lUY=K9UYd`6dbcp( zQxbg`=5ycgD?L+RLghW<}c09UaQxUXMbR25HZoG`x91ys6@x@S4z=d zd+J|{R|B_yjg>aC>NA8OQXmGbNaYQ@?=3hbqQ^U08XDhVB3yA2kxUyp*#7Hs__8lTa8|_ z^C>Z^g$def6p&7#%bx7P)|0H%_1)fj*P|*XF76BXuQA~dDQtZeXsKbLpkL&cy%zqz zBG9@9ZgdW2ztuqHe;z5VA{k}67%&5*b6I|%lO%tQk0wo!PDLeyA|c5Ihky#Fp2f%?g5#~)LGZ7N8 z_?ve-y4Uz8ta)qsS=lRnB%&NL6Sv`+lObzOYpQXqpWQJd;=kg}<|~~u1absY5betA ze{dY&0X&LOEb3XNRY|IH%?S@ORM)p(#My_=SOT?rtH(WyN+r(q!OAAL8|ylQi>j04O(TZ7;6rq zJsc_qwUO5JHWAp&Z(BE)w%i4cENi|vpWnY<@+j?0wE6E(Bk)ak$` z`}4P3&{X!u@FR~gl&I0>w%)zgd2APo!)HbLkzIeFjgKPXFjPYB<0CG5GDmt@L+(3@ z;Xw)kh^|J(bIDkHg|G4oN}Zw~b{%%McJus3ObSBoXwE`7IdNIu7^f+GU0B^)z%U@>I+WB9zdhj<^_Y-yFG>=j=Vv=5G zwEb`1mP)}P$#sDlDnCMlKmQs_1RybUHQ;w3{sdT_wxpKq6)e?%2C&h;41>M@2KW!K z&@5zadOp77vSUtv#;5*vY(n4_o91hg`io(zE9GPeum-R&GUzhU8T~(yd;B-7rbr1% zipd)Ph2CK}xR)#+Fz~JV3qkepaJl3UBqP7Wq7bgj{)QFnPb@#F`D*@R80%H~{=;7U zgYjQl_@|`T?~b?gcQ4jrGI0NJ+3jEv1N3^9tob0}KjGDxZ$$n1ikI*&OSR!^ z`sklZ^>2tre}jnr$HeuQI*h%FL%(shiD{+@xX}Uw_^QZ!)Q;q&6a!--IP?qY(o^b@0Ir%OiPM9y!W0;_< z^u~0-mLf`aB^G+pHu3&83Pw(HT zt&SQ8;+r>nAO9nKIs9L~{)e9Plgr9dtIv+P`C(ac7HIZ8X`%V}7_fUSO^Eo?RLHnF5?^b>L@wH@|>Bl%}Dj!};Z?1HgLu ze3vJd5E8`u)$k=;O)yLyOg^C!>SiKWOQUg>5BoNbx&1mVnq1<<#lfwC< ziDPDuSP314RsviW4eCZLgXZa~J@qAxxsmFO>be-A;5a^_iQ3#1WEM`BE0k9Zq(eJ9 zGk)PHKuBe-1FM9sM2o6I)g#YD!q>yLCD!Av>JJobw{8d5L)4?!3(GBo#)T>c4+OED z>GqefbQJd#vz4fbuoiQc9)uKYIZtCKH^!KMEUrOu^e4tRR2!}WDx!VcatWq#+IAg2 z1Abs2|CHGL$&PkCMkc^wC~?b9d*@4MJkzq?#)5Liv`==WxXHmtg@s7N&5sqDOyWdg zYDch#Yyde)7P2jDCRUOE)D-SnBS@r}$;&}T2}dsMRy-&Lj5>z$^clZV$xrNsW?kau z)N)5TK-i-4J&0Cc00SBSa-8B-c}_@!(i@<93?fuPtn!jYjTz1scdM7O4sCh zZtvu&qAkOO4eP@XXYh0bgnUCbWbsn)_F)`^=c{+c6q{c)U4hIU5qR+7w0@$~D_;U1 zt_5lW%5zdP8u=Re9E7)(2eXoPqsSS@4AW&1x?3vYZJuunE(Pb_rs%&5%!1U=hp6cg zH4Q?b5PHv*C5c}XjSp#>DGVN?Ct$u9*6*<$!Ny5)M|7cWFSPS82sv<0Q>ms`ovC5s zkTEQSZnBQ;UXh$!Dg||X8~1*)`e$5vAe<2)G#N`fml~p*sXrNo0CTti7M0#tc)qul zki~00u=tL`h?hbHmpJt%67$O{xA+!O!P-Si2Hl~^DU%!_iuZ5)E0vjTQEsZ!(Y-#O z0PncZvln7f8Dm1DrNXxiatTFe`o#G}VHhVsq2bd}Gf5yq(S^I6OErk zfXy5Eim;l&y?;?8nm>L;JcS20LGq+%gG84bs1U4)3;y+R*W1ywDrw!DL44 z9ZoSwmu4KxPcm`f+1q*D)3{OO!}YWVnu8Bnv<4Qjt5S5lYh4&b@K8M=E3ogEn2C*r zwSSVaPmS2R=iL$=CSA25iIfD}#Qqt@Qc1P|6OJI-`!&@%c!^F{lSE9@%%5!%U9XJ6 zL3DE3!%@jcAVsEefvsdq)( zXmSn`Bb1rFk|kd!%rhOk#sK5>HUT0oJOdmMYb?w%8tb_bGLxG%DV%0LH}YAv4jLTw zMV49iIA|06tAc>kK^dxC7(NLAd;FL}qaO^VzpUcCB!B>o37ubj8woAc><&UnTskkK z_IwTWd_ik{!z{i!M8akOYu$}GuNrxZrtVWo2n!SSUR`|0OciJr$nZYdWSFYW!lkCYi{2eHyxb9qK z%2X$Nz^Z;cQVL3jy!Iy;9&9irnz>AYFmN=;UeqMHHD|~;R4qB5T#{aY&1B1dr}>_pioA?^@uddT2}AdlPyhdAWRJ z=8LoEKDa0`1bMHe5Y8{9Dn*{OJN{d9AIP9)7TUdA*Gy@wa{y=#ALUqGw{Br-IR9r;_ZSv8>}hFPD~= z8CXDALbcsx44f2^WRnk8C+EOrV{bzn$4PO@ry~cX{gmI5IHL$C`U=zflOp0{5uUt3 zm5h9~ys1nFFrQHcWAGZHkO6!^XT>HXFQ`-9!DT9i*{$# z+_mp@e~15w*ST#OWO-c_qG{Jr0OYx!Dce``{wE7h(DRQx-G@Gkkv2N@RnOUOx;xr1 zkP}}c{ooQMy}qZs ziOv1og|KJ2Equk*gtR~IJ(=H%k;Y|*8ur!aZS|$X#u4Z+Mm5h+Ayol-x+M1H#3EK>~J? zWV(yl*lbJS>sKKqUXuCVtavg@D0kzG2`!|91H-t{Wz2oX1VCLj>w%-gn=M4y@8{o_ zVrlJmJ*mSaFC64=Zhd2U&)za&LMCwVdl5}@oc7KSOcotPt~3a+?sl-#hx4b|L*&IX9 z$L1kCy_^tP7EK5rP8c@S73FG1g__jTz;kdXaO@YW=r6|vqq%TIA}PAcov`kC68#%{ z%YPL{EXLhu-A1w|3>kS<%j$v2}cN!qDRUSa}b)5almD;q9nsUxB5Za&{^ zp|6#HB2x12&f}?F$}kPL207^ zS!8dj+3k#Psu&M3Ay|x=YV>1W8#Sa}jd8B1V3>L;h$z>0qz06VIkKmT9bGQap}rxq zn=Ov<(@sMN$@v0YnuQ3cXdg--M3|ekR6(S^9~Q2(n;ztQMR?gfLz$qt-m~`eZBa$g zVX}(j7?NgYwCy1rdS_%U`pwU`(xZ<_;>TgxdaV>1?_#V%SFk3k9P0Bk44%rHA*#Ix z!dSj532u<)^t4jg@3zytW06Q-AsH-R?NZD|J*wo>_})LY@6<^l4Y%3oodTWW4>VTq(fdX-JOVJ zdDF`~r;-EZ;mEIAo)^^|@g?)ajy2(iztWKCY|@Te@VLJpc0irEYwLvFDia~f8}Y<; zeoG(h`}8`c!lcAD1;%3Nq6TD!B|@0Ug5snzC#K#gF^er$IMP1I&EThofr~e z`MJuj*JJKug=gUKty3!|U%YsV@s z4mUj5{(XY?3Q`yO{NP%q?c{$@;ok9N2l(g~pCZfH$OYF^>84C*MM<1^G z)-j$BK(vuvGdM3)!mr6&e2@b9A$oGUc+3jZeCm9qwdeP$q902WO<7ik?qcIK1Pq$X zbw_pGQi?8h88++}>dYO8Yu7WQUb9PV{j6s+yNg42RL45AD;cbAbIYqcCybgAmun{BBh~Xx+oPkSb!FH~TL5 z;{Yz%$R-*2da7^{tnW?W261+uDa_jz_Rh)Sjn3=z6~zoS^BJq7fjCmtjYM=n(ShRWblWt6)017T;5B-|ajw{&h| zjhZxip(g4l(&!?Kcx=THkbUDQ*H5U$TfRmO57pQV|SW8=sRpkIOtbc8XyZk{4@ z182(Q9Uu9T4>-L~Wh=ZJzdxTvdY>Y`!2kCoGg0#NL;P!Y!vXxw8_EAv_U<)#=V)g8 ze_!7JhabxSNPBHJ`CWfmK-{WR>P(?5IH{1*1&j(8YO7HhuyZ3ZI~!z!z>(c|Q> zHQ~tjakzec0}qeXAuW_%&gBsR)UGAh7yxE>IC&UAH1K^Y{EjPX@-|~gEyj~ojqHN2 z09i_T&0Iz}iD+m)6^eoFqPI^gVS-_F_(BWa7TCM|@sJQVKXFKKMwk%KGJq7C_~K;y2o(r=g(xkgM>h3aSLMx`%^9lPPJg0 z`8AL8S@J&}8UHkAKArl$M!5%g;$7?V)9u6|h8>CC-BL)#24l z(^8kJa)I;D3g@T8hnF@kld4hmF&*s~zTF3Yo*VZ>ZIAK0U0vt1I}Gj9&7skU(MAl2 zdzwa#v!_*EyJ?#WbE~G)y47~r{+VWa7MM-W2NG8OWe7_ z7?l<3W1>}j&G%0v2&JinTQ8>urk_cQl#$DJuX%FH z$(EN2S_-quvm0EL)yHQw4w>S6_CK{9Bwx0tZwKgFln>TStD><_T~(Ox>!#uU*o=97 z4Q1ODv$CEVIYUWKadc?$-!WcJ{b@7)>sZt2*D>2NE`3&`HhZ6>>G6+Wr@4Qhrdy_w zl%qBFP?gNF4r#4p*-x0WkgWb?*6bx5U8yXixLcV`(pJqTIR7&DfBZ6Kew!1t8&@&d zpG`kz)~lgG!hOsxfZ3z|*zI%m!N)xBxC|zjE-#S^C36rot6^jbr{TSSLavuEL=DxT z@aAb>?WSjBiM z*_n8RBW?L9h1Ke#VLnn3T?+{@tE=svvM2Whwi{~cOPUi!uRu%T)j`Y+6=oeHiyZ5S z4pXa%GDgg@ktSBCoLZ6kUcH*SgPEfSwqR!}6Jz&{62 zp=Az2mJ5C5mVomtYgZwcV?&w{&NG6A*)NCzxAC#7{z2p8LgHzx0CscO5Q=q zAdKd{D%s*;p7td1@?1R&V`aInNX6iRqi`fXF#kEe$Ft;1e#(~~r23Sz>;)VrcQ{e^ zSnJ3RQ=6HejF>-Zn|PseT9xbj^lIw&uQ9`~otM*FD};X?yBz#Frd!4rUefPv#fxYn zzFrv5)qi zkiDd9C(&p*o}PA^ILfbea(alM=mo|0Bhsg*+-2c%*5F7!l&5JB>r;LUL|tGy z(8if0huIx12;xgFigfD`EY1N_#~b9J-W4JQUJC_0h}e+(Cj6p=`_Rt&h3w3#@+Mb- zu@&WrR)vrqFCrvfD#bK-psB!Wc4k(SiG<{fB;XW;Vzuo`#N%9Ydc`P(7KW~dg^(LmUn?qV z9>g!%jEI|gsBy1QYL$sFBkWt1NsYokRlSq0ZC{~A4|RhOyXL9Zjf{DoM(cqv9}HW7 zGS2?4j8auwyAS3B2#9Dc3HzctovU%Lf?9W3_PLJjGOqtvnJ-hNmu<@UUUfR&iB%>4 z`@Bkw={~jmQhiOPu9KUAK*bhC9{t1gv?_O^w#&##Me7FDA?Wbs%tdv7$m=DAdU@#i zx!9uOVT849I#JhQWV$1M_VM|=q#{OtR_8?9^HnFw7C{xw&l(b%Jvb;YyCnwK8QOgE zPY*9rV=d>&)Zy^&9eVB(GwMaMup>n8W{p_ba3W|$4NJ!7j9Kb%BJe~F3&!X2l1WY^ zvSV?{_obOqNLU9scw7hK(e z!FQb7sGMFW46;H%Nd#5|N(4;g@zc$x^G`oN?I{KArHH2#CcjHI1@>X2+8W_M=9KLtd+1ou097A zO8a?rK_m3x@j2f1>xU1iB{pj(G|@PyAD^$Pt%D}E5fYW@O0?M;mbO8?fU+U3#5O?d zP#WD$!_pyW91uI?Nq5t1>Fn0NZI16azX6)wCQP)VLu}^R04;CJC-yB_3;DLKu=b=L-bQKc0)Ci3& zNA~U8nL~$kHVrL%pw(^C_eGhSr)z`KexekjkS*H|bGud<#C#{W9y=F}V;Fn{ci~DN zM-H94Nn_A_eV%-mock9cV+DNl58(>9r<{8ip<^X{6?ZMi7lC7h8SI`=&;>YXf$z~o zsRQ4_!DFH`@aTPNZ{|C>!@Y7_HAx@C?6Kmje;B?|KVt4NDIR0V*ym$^4~75Id5CZ0 zihJ%lx%v7Qg>U1Id*<4=sU7LP%^R76f8vgNaofJx&=-VH;O22;*LlcibW7)lM$XH3 zc00An90|z>>xq6x6HRe;+i4hVP&tN_k;T{L5%>@u)ezl(iuIFO^B&E?W9NQ}s{bzh z3Tu~nWfv;rJshsqSzE}^khv8G;KJ(D7afYVH@#Xy51@!TG-{} zN8LToR`tkkhTGgOatK=FIKClI@0aik%o97mKL6uR2u>`|$hJ+<$N+q19^)^==MD3l z=#lgI4m>+w9uqWqcifhC7{?7``ivqe@fCUV-K1_B0#iQ0X|33E9-Y?>At$lI*OpDLut`ks6;nODY;}b)sQL~^yJ$&e7M1nl}cgM@G434%-TUd z+hEk-Do`y1_B1@pg$zQ4m4au2{bx~#!hA4y99y6I2cl$!_~7qow&;3x33dH(-*IA| zf9@|KtO#hqKNs!KifTr)32Z^L=h*rgMND|&--2_l(?1@CLwqbR6`q93$Z7Dodo7p{ zS(rFb1TGR2*@=Kls~xs+aSlF(f!{k9`L({cbp7@8t+zKB$01Eqm+cWH1d z@|c)qge!$Xh=FEz7jon$9=s~@4Pfv#GBuHk=n`_3(jHP3mw|NmLa-C^A@One9MYL( zx5N0pc#wK1IZ=AJE0uw2_i=C^@o{K_W+%tSN^lAq4l$d^5{8|3_f9Z05q|g_N)_+M zc(5jFq{vZJdvwP+n}Kt8YH%ZRz36ak{kc5x#&z%=GAt3gC{3sujVo+6@=y~p9-ZO0 zjWnNGa2-Tf;x)N;Xo$MEmT2=zJgAklYwJE@;C0YE*r#gkUw!<*D&8&?yRzy@ zw=eiOfgOs7K(M`CLbG*g=Upb_9#&k!sbthsZ1;?r0$+)X90v=6SV7<*k`O$zY2 zhe$_sCDhYx4+DFE(1CGPR|qr^YajK|25W}EM|34x8}KOrPe+(@Sn#y|w$Gq@7Ve>v z(4DSl;4AT-j65kuPs{i0y|s9y6eD3rA2K1Fv_?OGkwfskzhzmg_8AKR5T?isbxmk+(gZCLfH=3lMqLrQ z1X@Nh$E5lS9luv6r$Vu}=x>Qy6tYGH5`Y9`;O*(&sngU`B>2<71F@NCjlT3t6EX`K z2M)tDa&GDL&=OV)@AwbHyD=C^_RmBG3DpH0y=z9qTbL_ULkIB~iS!RfaSN#glqQrR zs#B}=(^2CixRDsS_va8s4w!!!7H44YD1FOL-XN=zqUY?(`F@A2fwiOMZ9QoNZ^z?X zSQP25XWjkG1*PT#xwPVv%b}UmG-A9gKgKW3GTftF(NdL%3L0HPJ2Wj+JCx3u8b8r63fB=$mNH%CD4ONtzU=0lSQWR=%42f z$i!-sHA`P7@fd;zw}@0yN*UEHK%+*j18HP9QpZeZ?D0Dz6*38(2u!BCy+CFwbAr?S3ByF5D%mM`1R}avV*?(sM z8Zs>z`45Q4%9G_t_a=q2VwotkyT;;3ktHn=@-Vn|>os@eKt*C5anLdg5+1@Iv&`w) zCUz`C-C!xX2XAkPlZnyVU}U#V3jm#GwW6hRLe1IJH#3X9@CLf z=;s?J>==*GCfg;SCF8JsU|}~WGye?C1(pJ{fyKalU>Pt6SkkD>EUP$Vl}wvlmK?-l z!D7LB$a2W)V$QZV8U2>GLDHdq2Y;kBIU-p)S&Su&C5%;)g;%{;Jzu?CJxjf)QMmeZ zwT45+j+ZoE3M-;1{*>h;Pw70Jp8Aq83!xwl-`rW8Sl|FG$q@jT}-&ezILy87~Zj8t3`&B;}zS3GXld6>snJ{>C7xg78yU3na zbR61y(7*;?*aK~y1g06^4DLirk(;E0+GJ!ss~Ag$={GaJAGos~g>Fh0NycLJ18<+^ zi{|s@%iOds{C6Zq6q5^B-C4+4u2QF3a@}1jwV|Q=PTSKETVRszo6L1$0?89<#ILo>#8lvz1uQ%$3%vn>e(M7~=#%TjJNu z8&oShjvA+obg@OwSz??y*aFuXCiN4#MiA;bhMki(8D`g2g4_zWU!blVM%JB!xbb$l zX>8T@D*N@Ty2OLz5%_ZOdO5(QF*bt5)JjgX3*&mi z4y+b{!`zNl3Nf4P9FN&W+{h%GyJdP6R)00y*!)qg+sY1PiYFWW!co23zz!gVnN!c< z#|~PGs+CT)+wu;4iZENBB_GGnZ^FU>Z#PldbuySUj-79L}6 zEWa(Z(3_A15h4iV1#-fZx?P;PejmO!W6|81O(}D5 z2U=AZk6fGO8TWM?R6A&yI!Dyxyue4Tq(FJbR-2lH8WbBO8#HZ@^}zK=_2~C3h7u1- zxvJ_oh%9SBCKNywg>+FQy`YY5^>#8(H{JZQKXN~sV3L?WU(8Et>@g|*j*<7m@XJo$ z)b>BN%fZO~c!Ei4{(Mz0sguW~l{?1P*NZZ+71fpGlvGgB63GU`V5udCU}H%`vKiu~ z@;;#L7xQA|#Q+Z+skGyZsU)VAl=FwE6pFGZCeA~RM#d86tur3sJkOag2gZJozo2?v z7~b#nU5-TVrw~li^XKd8e~%_us$g90U!6Yb^w^ZGe>qml@m`UAX~}qYHMT0MfaIhLC{V) zn*$9}C_I}&3Ijb4CU#o{2vQp?ZOAX#$&#LS>| zO}zHEcu(6BR!wK)p$e0sBd*d5&~|CID>{Y=W*!Z1=0qY}&v>E9e=1;?C6#D)$yqJsi~m-A#5@w&P}JY8wxfOs=Z8{H8ZMii!QvHitI+JmfVdGsY{+R$9&{28L`b{1IxQx z#}1$yW$#9xULtwsr$k+<>}{eC7*ap<>1zEOQ9tZ}#FHe!^<@ORaHoaq8C9KjimM%_ z>m8;88V_Q$u79#x70v z8Tv=C_O#+txZXJ^U%&t24NTt&0g~#h;;DLuR3@YoZ81_CYh5^_1;tV#8xN-1t@Wrc z#~oP>mga9+D(1723Z``OD#lb&zDtTxkzUWB92h~RkdolZxBY=Szy|q2kz^DVKN+Iu zZ7RJqIPhHfO5oD;luXMRRo*SS*mix`Jy<;8 zK4?C0KX^C*zk;|DcFMYsZW4b|(=L4$ddj;GY5JtC5vwihv@R%z#6BV z;XM-x<@waie>>hN!fTD2vw@~On*DP??gl7P%4aJ$ zJjiK4VD9ciVt@lc=N`aKNAWrzJJ3bf%7X(?mC*pG#WJtsD!|c-#YvUEzR}GMRaRJ&EeWTR zM~T?q@b&FfB2!k;`iYuL6%*3A;m9SQP^!kI93LN_8%s(G$WsP3eNn%-m=YKqd^58*5CjE8kST>gGZt`~W5`uQ}%2a7o zG%!peIue4IfKWv=a+AVjkW@aCvVwxDynMP0MtojQsAHCC!0@FvWVfnSX+tPhl7yH@ zk#A>=QGUZmQX54bkinv*GUa5P@!(J_0H~B-TnZvl$8M9ZdD`Jecz30E#eC&(rFwvI zz;q>fMeWqsB&4mL-cLdVEbopC4Y({KdjDO@EQv}6RYj&%8Z8kGL&@m#5Nu2ypa^6S zY|PCrt}JEQPfL8RpHs7brbLyo@^bF_xg?8nJ4Q470^X5%tLg;O&FbdQz0keI-Y)sx zf~oZBF|;2^PQVw%EsrwRc}bPRNnud@o@s(EstW6K^$An%*0v&z=PFjC67Q0nijusN zdR&M(6QrP-phR|Eu8BmXqqe%4AX)KfoCJP}OadCZ81Fo?Yj7(W0DTFD64~=&segJ>}00hcDFjA(9rI5X(DEd4@ zvbmhtDyvLp5h@8gm|OqjM0rJSxB$XcjG3&amHU&G_?FlC=9G;K<{-x$)}(L`{~$6B4$98VvT@y5Xxo^n;|8dtL=tD` zp@6P?)q2hbdnYyTi*8~I;xk6TOmcffl2O$mByC8P+oU4ZUs>y+9#dL+W z;fTBA>68a)&bYQs&6i91nU}Z!eMJj%Al$^=>nD>4c>mdT3$`}@_#EYbSkdxRP10J4 z5$W=h6v-XkG8##fuH6?8S^zoJ-#5;mv|vqW$rqw>9aBGDn_zMI0|LV+3DE80h)z{C zp4{!_;V>+b;Jf$t#%8yNoz427zQtyG#EfGb0-NKPR`W8E?z`PPmsD>8)o&P2_jh)K zqPVdOw=lGJG;_Tro*LmezCGWlAbRcwbV_vjtxuxvYe1hi5x8o=Y%CMsV4sOL^-Q)0 z{Iu-pUsmg}O{hWsW(tk=6pb{J(#ie}54-cb)OW1m;C!;6WS*7&2WcS7vZOQjeM@0* z?u>vsvApMQpV0i$8&*BFv@rPNJqyfw!AtJO;&oe72pB~h+S0;^Bi^Zb700)TRFna8 zQR|$03h(C+7JQ5Xs1Jg{VtN z*U$>yx57c@RR)YwwA9LoeZ`VhJb;f_gOP7<ir>+@l7Ma0DO{i=@UcPq4FQ z!Wa}oD%9a@D)~bz#qH}oa}p{8QuuxjmRSLh#*y_M12u-|rV)itE6I8+rM3a~JF0Ll zR#zX2lF&@XK_m^G){WJ3_*N)+Nttg5cIf?n@OucYv!bquT34;glFn)8 ztP&h+hhm}`amMJX-OFxsuE3mRJqf8w&OGV5@h$z+J4FoTqn5AXAUg&UB0u@gE@D{7 ze6k2$$1yzRGUqe9Z+STrHa(R7a9?z3Bk8W9wL+1csks@kBk4O029k)u4wH4>Rl?XJ zPX93aOd?q0@^R`drVroTyQ%~#C!R+byn~Z-IV?p_V6Q)U5Yg_bC=tD+PNl*7j&o+T zKq;_!6mpr<1|~eD&Hxd*v&b~{*&j}(*~$3XB>n(~v<@^~6h3gmyN@gRns{*@y>v9( z4u{le3xi~>LsxO3SAzO5NLG%3e0NNF6;)CBItfD6n~iHY;PNqJcg0tO62k{11ws6+ zTu7`j2LS)YfhqTyjr?i*X-|N`LJ>QV6HZ-SlF1nra~R)V@+D8*g{IwuSSwAARYswt zK@$NUF_OAi1dx654q{Pp_0K#d8EbZk zMbU$0X)#7&3 zUKVe5z0%z<1NB{vF?;PWd-XBxaQo~qk?eelzj1sMe}^Iz^-)mhJ&Mpf!Z%IXce!To z4z*^HA0bD19QbdZ?ppXO*asKAcOd=j91KOa!{Uq5$q%q~ioB|dv-Y+RRH<}zbXCu4 zD?B4iqvI`o3!{kQqKp3zYwr-G2^WNGc6D{xwr$(C?Jj&}+tp>;wr$(`%C>EL{u?n9 z6L(@F?nKPpoW)t>BI8};$vkg9lrBx=;>+hoPYxkqBf%x^P5}UfEVw~VXsQF^`;W5X zRA)!WHK?+48DQKuYrsgrl*3^JcH~wC*Y7REbMkr!(+{G0b&ax&V}H- z3Lg0jij|$SP$xE&Th{OmLeec`%vPo$7mIBOZd9W-n1K>Gq27?q=OEmKi}(Y$wu1!| zogrJaz30SP7VRUEQ$5_}Wm5>r@CC*6b^=?-$Gcdl zF@HH&gM;Z$@LDjEZL0cQkh#=jYf#u8rdHO2rM)}@*Q&>1RI@`+xd&iJ9U|dK2=N5# zOLE}q_$dhpFG#0lO4B0VSd+CHJR($6_nxu>d$jI%B!amWZtTj`GjuIiELd4Gvw*+1 zCv=nhv($)i);x@kz6VjQdjf1X2SFZ{w$btxq0>B>Vx7|Z^JBN%%oTdW(9)jEnkK2$~IE z9A1J#UIe6G1jdPe+a^qB}Ya?g)(9*THR$5=FYk!*W0XTH9ZcG{Jj|TnA6+n^x(9s3rRYy<94p}a9}`xgtj=hR zoA_aLSP(=fv{w{qn47hhqwKR5eYHtB>M^icMqrcg@ea#0{7u>t@n{#)TZk)MWCa{+ zPG04QMAlJu<8=){*B&6c-eXhFaKV^047x+F0Rp`b9;mCX>|y(SL%??(rU@4Sw=w-5 zm}h}w_n9zL36%)N$~f(EiBlK%ZiC73Icu?fwDXH^bS$^%GJtI@?0q~(f7#Yq@B=ho z$CdJ;Q1JZn2fR^22JjCMR5FdEGFgatTldJaJ@h4t{ZN2nW#T4!~0hyZ`= z;@ayIn_*&noU>>LIVFQFMB*}NhuOe_9erK*U;z-3wF&%bd$rUkos1xW{NYU)n#>S4 z0^W`+T?M`PRw85JQw*K=p>rlCcT@@cw6xygZ5ZQn?jP?GrTc#`#;so^0@IK76~r%Sk$YL^pAH-+h%WI=U?W}?Xd%Y6JG{4;i8X!pT7t$ zXwQwv!zhnnw%g9@p);mjqD`}~#^_&S0QXTDX5>v-D#_aznMR2Y&C|St4AivLJc0?f z*wu&WS zOT}cv!`zh}-;nd4rVFrOL-W$cJ_5Sw6BD{FG7mws;193{2UuyP`3f{j<)w-@7G`S@ zsNGc_(1EOA4v;Z2jB4y&@_&)RyXjZ^?Uu4B#_zR)aegeT}b zDLlF%F}vtL-HAWAqg>pQ6!A;}CX;%c6PA2eWp^6KugR9LqUqmtZ!p^6WED2e5ipc0=tEx-Zqvtn@mJEJp?1TmaXQR|J!BwvPjiCOP#6>X*jLb2Kg zf^fo;YkStDPRxQfYemWT*s<2=n@)On0x4SS0_Jz^L8S=>}IdBvrPAeRPDo6*ruJHpgFFM6pbrDqiWX{;V8YfbBgI zCu@T>u(EPqD|1L_Virr9)n;gnHz$^1f9)`62>!b-#e@lP>=V~UbfJnhF^5d%FlFpiwX z=}&|51Y_f2l}y?QASF3ngPU^dKC_YcUp)+#nv=J7S>hzmUODCn?UIy!?6yM?ypRp^ z>5E`T*nnreLT-et7fypSu#Z=EEo3UsSluBTLzCjjhu6z$4ff{NJFtPe9dw~7 z6JE(ejsp+*LlcL--neUUP_V#i99UipiCqy@&Abuo9GxE;vDE$&cM5^F` zo7E4rW0AJHDuSPy#Zvc;QysL|BMyAy#pe3rbYu%2-A%2@s{ll3!@L-y>ncxwXr|ZZ zGHj#FH)kZDW#J_H@O)hvnOgM~(+dRhaRMi&j#DZl$n8T?n|a*BQFOTviapWBhqo;* zMdT@<@hX_iyG~wKMlfJb$0>>$67B+wx<-2Z0=4 z3;^K4xqA2%p9o49M%o~yt$Ls>n`L9l1 zonVLWxw25LJ#4HgE=u+FFTWINWMo*bl^EX`!K_UyH7@g6$|mWkbCFfib=GZf$2HxO zrp&lJP?K_u`Bn#ujvrWsCY-#Bw`PMw_#5%W%(h_A~>gQp;O~7Hb+HWap^*u9_mn6-3mchq*Tvn@Bb{N zJr2{g?m0X@=se{F=yQ*`4Lax6elz=>D4Yt3QNT4Ud}1cVj}wG@@Z$sZBD26&aPT3m_?>{tf6V2T_2csy}V z@QKMS6_kW#2YMi4y{&!26g`x*HzfBMN-kwIVTE-|!sLu(hkZ=h;{K}*+}h=^{|&ks z8r2sjL-)nwkjgx{YxFR03-hwcgt-!SFN;C#h{u=JhwnXO<0|F+-&G z#T`>H{%dUJ6r#&vWJv5)X71lE!Y6CHgu`^InX%m$xOT=UH&Udn=pLEl?z=6>*{Y3T z!BTxbVK^YKCG)8Y1PNMD0Axm$c>e9;P9XdZu&9VS^T;qXrE;xXCyZp{|%A z#Q!U?p`nDlmU+c|Y`JyNEt=+qqph0-TzHe6oicC_uYS^pkzlCKKFMS-55Kekxt6}ca*pGQ(AjJBO&m>=tejLvyK8ESJ1r}+4xv3{VwbIV>7V?R8} z;xxv^`9Ds<_lbox^omC2EgMcE6TCyvs-(tnDGk-@#}iqnbU0P3M{1W@CiqN_QW!@Z z>BlLCOj#o-1YrvZiyR#k9UOx3?~ny=k`2!Nj1f&Md1HrnR*WX)XziM^g!%KEK2MN$ zTpyoZ=9g&4F)K&ZA~qb}w`+MymczK17tM5)>NzXgO-G84Ma#^N?Uhpwps2c@#5-6< z!9@|Tg1VJI=M+V2Yu&O#7TcDf)-`S}!uwVod_hUHmm}8)A#Hw$(kX8Bk#s2_LUuT@ zZKm}&IY10E6wMU=;@zBF z#m8lUeXg*I?o#&2!0Dp>zerV@8~>&h#Y$jqu3A!JR{V<06N_^KLSZ1~c+bD}ChJ69 zlS5(U6N0^H{$TziF8(CoeIQrg_&q=Kxs|*_U)nyi6Y5$HExmvx0nPzX#6VyeX>#Gx zDM;03Gkk-^u!}$(o}Qe%p4uk~!@M_c1stwZL}Lv|8>w47AqUJ5;q3;U-vOMdDHks} z!)^1o;dL;b^4$L8=x#V?FP9miyZ{HlDg*UVr|RLAmZXX^3X{0PxoqziV6L7W?meTo}`_qOz75l9R_Wd1G71-J#cCpxMtUOx~QTbblE&Q z*yfe$5OM_x{j*I4@phY8u#XV+$ihTeF2Gf>=IvsQKNfYWr81i zOgyDqIy0Da9T$z3U!hcTwtLS*ylFDQ*ClqXgie5r!W#b1CFF|2V3zQ|n*J#}xI#L6 zl0LtGf9dc#um%*jFl$uu>L$PfAu}HZdJtFb5Pc`Vo74;L7<&WKU<;%|C3ldh447B4 zX14}cxYd=|$fnEpX~*|g>{Ms4D>^rF{_J2aEihAl4yiHD?idwc?TQG79nGv-bq}7c z*Vca%j3;`erP>7ZRVUj=d16*c{d*Lt$$p3QqGG9Hpt7su#6tHG-KD>Z3RugQnl4c` zrGpFE;_M)<1Rux5yC?2mELV7pi4z=+rTz5aeS5-#(5R@9=3f2$8$rUUq-rYd?}$k+ zk#l%vYlPAntF^|Ta;f5uF)}CIYX|D;TiuZUtgT@^z2;S^bdXT%=Jc(CJ!onP`y`gS zeiyXVvi#WpMyZ=?VBBlG4iL_<^7RQsWjJuh79~hB6OJ_rC&-T^aUEq#ipN&FX+tNO zmN^(;y?S~|KulOHz;;xVmKY8*gwm@ZQL@MAo4J65KNTvwBY9nhI3YS*Q1hs*Lt5ke zZuV}Y!|yeHCjnh?Gr~`>m=xYM%T}x@T%{TNh<%YyMh-uORaO-liFSXsBXm;@_Vd7W zvFfAcI(LNu2VLvS03sE29H+)*$IDp_HWq^%lcu-p{AK!9suDxai8d=FT-Rpcjfr7% zuT=ZWoPB^*8}k7zDSt_nL|tyoeZMK@E(K%s;Rj~m=H!?E0s30aUBMcJSNp076|Wq~ z^)3udNBZ0vRIcu{W2{qaW$s?=oEckZ7x$!4n-|fBqs_#!(`>0$8wC5c3{;&B4gG+Q`GG^{_gFpZURg7b2hzUk+!Tt z9+M7|AWm7wu+x83h0!XT4y0zeA)4c2$MQkdlIOcQXZ$C}e3a1b0o~?!5}wZv`U7Cp zKVoT*tzi#8V*XB1YAXm{91MT01Tc6Rma(gq|EGD-H);<{=Se!mT+q=2+_4KMTZhi;ui5DC}wO%4qPk_!wYfAFH9)e zBGt}9sk?>6)^+{sui;4U%UBxU)LgLD53Jb-HCIO4GQvx;01_MsI-qQK z*JDHGBXT%v0?F4&0(jLEdz!&`G#DzgHto>eFa zlvy_8mM(c?rX~l#Oe$Gx;Db{cOi`!lmk%3JLxzO6X|@X`N1o4bvWwm$NOdjO1HLku!dT~j=$sVlUDA-D_FSVsJ$4{+?#9_ z`MzdWc=ZG7FJY6>1q8*$QCF@6U2z)Tx#*n#xh%xB#X^(s!mzJ7WoPt(@h5n_jxpdK zsW4vs`edUMpO&JJOBmrDc&cO7s->Q^w`WQ43uC($!MHetw}MtnHQb!8GL%s#cLYth zTcnc1Q|y)UrWy4Zy%CS&S$VfaVX(D|?uJXCbjYLW3hUs_W_Av!GS1qHQ7mbVO?S?| zB(;mjk|rS24|v-$#KR3S4VZFL#C{$$WnCNgY;P**Q!|^%78}z$;Fi#jKBc_}+j6aa zNA5afvv9iI+120*`ez&U3`sG@7}EiLAO#OP(G3}$ctou6X4Fchs9S^YzkqT$xSqrn zqke&k_cz-AUn@+?l=1F?lwXdQVy}Vq{0hQ;NF4!COEn|&os;Cvxmz(OxE!t@Mb$D> zjNxu%f4(dyC>W<*kYN6@-6EL7BFr(lhF?EHV%(y4jHfkMHleD0x+JK8ADSC_DGTeZ z82*;Oi9oz#Uh|BmZFF=HvOwz0N)n!xr?mYy1RZ*J<1j^~v=1Kf#SoZ-nLp7X_Wp2L zQswKfD}RfK%$h8H{SLFgiKTh`A;xjT7QPtOOB#k{@v%&9SZ4ONUJ2DFn!`$xDV<#< zF>z<~2kg~}UW$PuCAL&cu#sFxT{kdZib=;N1tn#~LzgO>p(gdrO3RuLPdUmS{|c+e zcTJLU(bC11_HPbu&2uH>&9eYH84Z;$vr2{`MGr#~=d=;nq2t!nyDTNZbep@*$DS%) zC3wdsCdOPHK5(WP#W-8^w>;q39nNCKr+Uf(HMiW=twD%m$c-2OXdPBQLiQ#4l`27j zZ3Vyh9)k}jcH_MbFiz*_`nZ_lf{on8v!xM1zPNCYG^&eU>;9L9&y46RLBL2ni@eH| z?F0WV-vWv&IB3L3Wf>#tlZXYvPv@x5$kDi^B#TK{&n_g5YWD76K^KmrP=00J>_F)} z-MtJ3V%vDDhV#Y5Z(YnJj8#~Ql;w4r-KODwFLBC;&3v7RK3En^RfIYx6*DTX?woMaS@f>o-(&RqMo2gH7>& zFbof)YN{iu4qB^}%jjkg>;Hme7$w`>8)ec16U$=dHVYD2jG7y|^nvrk&~FX407CU; zZ55MBHH^&t@Z{=n?T!7U=#bSR(OTLQtN%2Cn`y3x(JAlizDBEs;d{w#)uRVAlDhdi zzF3db>$Dh8>pD!NOAgl&&JD@=`D`P3*3?(Nd!NY=S?AZV9?L;LT75E{_v-u;TlI9> zggz5LA%kEljMS5ImIE|lA`l=LSu6Ad}CG|zWKWgH>5_Yf07J?^g=) z?b|yInqJliUT;!2!3q3VM)VtrGW<}%O+SdzYe|*W(<`>pUqzGnI%$vn%Ye?7Hs|=~ zq}CO`ZQ~zriI(T9!WmLTqpIh30&$z?tSCM$=N^PV-%21eyS)`}Nmb zairJwr*kFWU25{igTZ@UB;EJt{`>do$@@pnMzhCw&41|-scABn>dbIzdcbkZmVWnG z8F}HL+d$L)^!R>%kx22b%~%mh&}bD)X&oXP*YXZ*UKaHQe_Q9r6Ii~YYEVDfvywh+ zZkMi|J^?eI-e;SM9a_e(i4X-N0lV7J%lqp8yHQ$LCpDN7qxL-Jm{wUfN2sRo*bUv zC^C2ix2D?f;CqGVAGvRW|Md6uZ|1nu9Sm^w4$a{bvEm570r8Zv75$`yJ9sNSY`&^9 zxm@jO(fZPPy{b%22O17K-tZQ==((qU;|be7;G&~ZZcEHf9Qh$cekhwb*ACpONrdsr7{7zvHV0pXHQQ#KwPua42n{J1;!{hoLEUc=uXO zxNo)B6#wg0MbugRN=HhNS3Fe#&u{QCl_Gbpkgp1LZkM6)P1Eh3Jp9F?uX(_t9W~K4 z0w)lx(oCzZwPZNytXot|E4Mo+5#dNvl4y z>0q^q@EYrw9e(fb#u)khxLFBKpdh#JiQKWs`NMCQM71YYtTo*e|9SNXeCfkEYWXuj z*-xe=;)1vaTv!!ZhHTGx7uMhj^9Zt+IXkZuOQTVB4Fs}0!eiabG1A9c{ zN93B;K$82;21Q_u{}ZJ2iT90Ry?!TaEaxnBD4bfh+F{vGM0#y1aksiW2JYqb1Z=JA z_AvWvw)aQkyTZM9ByBrZ0jc{Cv_$(yqU-Qc8%=yqK@-s$lkn$V)1zCVAf#=*ueLlo znTBNtP0fx-S*Okvo(u|xKQCzKuDhb;h}HwZrC9XN^y0VSi?Bc;xlU*qYqVrGW?-#X zV(>TheB~2XL9`TJ@en_S^t4r>{|!X6dm>{E^+~26Y_bj-=#U_OmdtU6x7i>18jh$I z)plak;}8U08XoSjLA;26ioTTk34=~X%5YOcbZDtvxIF6BJBc8hufsIt8<60-iqRb$ zA%9&Ci&p70|2qWI^K(UV`KYu9v-P`~P3JL|F4FT8Jc_~1h)HLtM?!(jhaggO>`i+M zk8-=;zk4qs_v1AAWFQawsf_KoEeijuw&@ z=34a;mN|zuxT!jF6-xa+yku>5X=HKcgua}|Vx|qR6tII`s;QKy)2_rW>Yl9-O)~Sh z@}{^Tpq3^D%*SW7>wxZ^Z0bT_RQt}%y#sWHF0yP(!ts8G7-jEAE(0gRcTa8f{$kbs z_hmi7y(79MOhfVvt9Y#x$(aOWwWRww-Zje19!!Nnl>pe&MSiM*uTo?GLd?Nxz3s4} zGKTL+$mz!+fA}K7Aa<4J?cpdb>K$H}0M5SEP?2B-0Mr z#jp8=Tvxe#K||;A8KSwH3FXcuzvE*LR~>=W@Q|Pvhu;Q&2PrgvAe!hk&$fdB?`4iATnmt@T7wfcl zc&1uU%4$XvnVL^+E*?Cuq?+yHEjGiy^jGo6P46;X9E68}|Hd7oTBk!kX4k%UrVy3MkODsG7Gsp*%A@@SV=B)UGJhkR7Vt3GgV0kKAJ$QmFXcc5X zIOLV*QIV635r0q;A6ZCk=w??&dXgN_bv_k%8=o)+{zMq8Dt}_>|7b5>*zMY|mW^9Y z80?6RTaiue8^pJp&#~@z8t&}BO4Pg>@ubKUN>aW{9P;<80e<-@fGDRI{@{Hzak&@^ ziz_zYa^}ctmL-$~yZ=7~cd&u{1fV!=2H0FrlmK zL0QERSQsN9sIq;Ya6CaH_f))nT$HP1J@uAOlhQUrI8~x9P_YWi;nzdkgt?|?W_6BR^*y7LvPzw7hHA31#Xt0`yzFE8%_Buh@6;~gJd|YZaE$?%COi9E z_wV$FEh8zIdR-~YcW?Jr=O>*#hX|T_w{DzK)jHh_ z5?|0vU~9tjFNhC1*57}iMhpb2d824nZ;@nuBo|3}M=5L%BWty34zNhWK;@# za70W2n)qz%J*WppO2 zJfL5XG&Zh^Z8do;*EH$nFfiRZh_@qfCxqV1Eg zdk%@muAU;{@EH|KSU-kETgzmFsKVK7Q1g+`lX*%| zbM$bLf`sqcwkph>_*G*)JS36sNU})~X*!>xZ2r7pmAaEyJR%%&L4hX%+6d*5v5iuRvKned%mAC>bg6g20&76%;tM0HxZf2L{u-K zspDc1ew!NDJT*_9@|U=KXGWn@(CjkWIvxs*YOKdouv)2pWjaS89!k4(yJd=e^c!YB z2bqVGKj2s{dJZ77WP4ANwW*@D$;T{~>GmXrWj%|x1dR;#+|ZdF?HX<;r;_DMdTv^N zbPnhh%nS8?P@Z^i<(#OBVY1VzV^->hfiZeg3OTC0XPK%<3(HFo^Mhh9PcctEDux8I z1BcRvn!IaI>vU4hs^W)4TW*=ACny`)-BQV16C5AapJKzV$3*J&YCj7kf7!! zn&&*_vdAi4iRyIwH3$Q#AnAE%V#-_3#0~)7vKcodwnD198OKC*qC7CgDQYlafQgj? zBR%V8c@&H26(}OJEOMP{HI<-55f~XQ6PUs=bSf=|2~Q{*+K8hB!~7i_jCy^E3e8Y3 z2W9a9k>Yt;H45|Y95D4nCDjRUX1X~OYJ(X$_IC72qtCne7)q}o>1s`M5kSddSQ;Dc za`QZiSWOgT-V{1W4@{gvDI5SHC=1rmI2&OLUlTUl5pK3>A4V# z;o_(QPGeRbno>rK@IPAnn+bK0;Y^N628HfZmgwpiDGr;zgexGMXUi0e=(G`$Xo@K) zRqH#_FqMEhv=X#ZntFNBfP?@lF7Kg{Y8lxC01Yt~MKq*nF@pxpGNqa&-9DOlpp4vj z#bK~g`FNOzdYH^~I7}fpnLEINX(KD-P;#zwEb$|VoqapqJSs6uxS1JrqUaTKn1x!2 zPQvN{mL{r5i7w7~X<4*eIX|AT!X-0WTusr6OiN6YjtE;Fi1mC*c1KfT9HeYr*Y+p5 z2#qhifry`3ndy&95azg%5&nW`S6O(8yLf$3ooJ0V@-c{zsU=`}nN0fLt1#*vjb5&Z zN!UNF7+wpmdCpY50|kYRN;aRhq$4{LsaRvE7s|yM80krZLX4Ubj15E#kdpM&mXb~| z37b%*MI(ho3a1W^>r}+3CQepbY+DpYRxhed#$I5q#vR>%8`Gd93@HjWF^1O~?=o3x z|IY^OV-+5ms5M&Nk7|tMqAi4!-1)a%Fhneq!={K-#}xPMM?s2E<43_;n85?aihuXn zIN1s}p2yn4tFGvnG#4a7PNBEwZzjDw3R1oJ6H3=)jxOSzn`-4lmsE%64GRWQv99n` zLaU+f=PLeZ05K}ePRQ2wxs7q6Hza>{m0ri0gk+ZCkD&IC!=F@h_OhLRi@%1h{V@bY z=i^YHpwA@VsM{#tQ38gOEZD(UY{o9eq_jvd<%ZE;c9gB^tkvG1DBo412lz7m4diyq zkv02Rybr?ZSxGs&Ir$X4b|N<|nrLb&s#+hdpTP#6py=9YN_&(#x)~;8>3zh33tifk z25p?@xel>Sn@*BbjjCTT>ziHR$5CQPa|8uvovVEzyGrgk@-D+yV@V|20~4qFl)8Ol z>z_VMKR1Tg1@fw&zZ7)7CM%a_w(|fKaS7O+-l5^z&<5J?G`K-FmEWUHpDdC$PW|>gbuLV>_>@ zP5`9n_ zSAy9U+tqKZW!`7Uwe_e!nMUnaYs?23d|A!{`&CHI)JgACIPSU2;%*0K?q71BziHm& z>*TdJzzYzv9bH-O3Ti=QE!%B?RNIg8?puiseo zOT^6wp;9MzJ>N^U1!gx%ZU&y9PbpbKe<4+{o9C`I>`7Q3v~1=+tVkyX9}4WklJV+% zXMN*{D6V~nh`>tJ1ZoznHMu-^Wlh}>edAaW3lLW8v3pD3!JtD$@(_uMirPK!OKpW| z)VxDRDx2-zIhq0WD9&I^_BR$mYu(A8jJbo6dSa#hjg(y0`BBgqJepYe&iAlixe z{58PPfA9{!(C=W7`Go?{Q#Cu@ecX}t6LFyyZR5j_wVcV+`b&rsF)p`OOyW{)8H?x2|}uH@?)eC7AzZzQ*mV3elmN_wxL zq<+Mr^;a-p61G8m;bmT{?{ry(!CQyDau|E;v2(vQ=vsRA{fhhW|MOx{!5xHdo{qyH zOX~!Kx_|QdTHBENMeR@5CleSkT%l_FQE;A;&u=iwV!vJR%y<61JZZeZ;}*j)mdL{$ zfJ3BSDMR=5pT6+@9r0hibIW(#8od->b?L+kBqU=`CMd4N6DUYn)g3G-GV{u-4D+DY zk8lU2x|BOFW{5l9`lRC}IPgK(5wm!sy{zaSdxs$q^82&VMCx1bVRI>LLvgQMYd8!Q z<`6;e4NtFCnWN4ZpV(F|{E5?csnxHydgyVL&M`#h6sC{TToK)qBR#)!z$r)UAXadT z)pMutx?Ug$^RP^_m0EU*S0hj!P80b&UjWdG$o-2rvU32>&Uftdxzc3}XXGt+cT>l0 zxdov00=~4>GvYwob_fi%XyEHSyG$!YjqP0=R^j|W3XX!8< zVA2Px3@p=5ydk1)1n1soJ9zsG7^7r{EC>k4C;Wu>B1LEBDAQ>+s9_Q={GOI+k5ChN zoFJsxYymuUu42Y(mZTq>9ccH!*g0P8B$!40u%T}ieByJe)5$7E5t<%Bt`Z+alr$i% zqw>K7Cba&v*7tb)kxA()id2C2GuSxT0SR?!c3aA<8>)S$FKF?NiWpMElk`N;=ep-8 zV>7Gy`c1=%q|anL*K)d>xfr9z==*@K7dr4{Rule0d4lJjN(`1UOpM4jr`9`7nxdGaeHjg=T)BZL?mq9Yj%R0i5 z2YY-L>rlnaukQgi3Hy6%uu!BTEB7w8Ld&Cpgw)FXQhV@Zf_mEa6<=x|q ztdpyAx4+@u;JSs#cN7UfELQ!QhHidPcomsSF8@$GsQzanqn=pK*yNn7rWJ{hsn6TNg7U^~Zd*cktlLo&9PjNFx<`E~Lzrx8)orsO8(3lX;3l>tb7Mnc)=M**I+Tncg{m zMhw<4Tz~9hU`FW`!dff2g8iyrTRbp{zKi?Uv`n)9(FTo=jym~g)O)!JcKhfqLpPMl zX92OC*a%JJil0BhV2f|)t;}=A0b;}-rx0Mq4-KiaQM-psG;Y1x-KJCggZV4dw+80s z1&VxP={DREP8k|TZHiF}FB5F?H_t!Q51dO@Vh#$|L8Lp8A}VcF_><$ZDzuvB8)bFQ za>_Ob6pbf!CgFTQ6r_Tx*8mf$*C>Km(7Y_t<`o8Xm%LMb()m3T#zTkg8_dVA z?Q2eX7b_kRP{c~>Fl?rpGpX^(L+}gei(dAB;U(MFXXzWp@p2#n9cXs5j?e$H(e56} z_v9BOGPzC0J+4sBs3<-2SQAja6xM{6ce@QcL7q5m7dremuFQC=)*YrO7LpBK*aNr7 zdcMUeJ5D@>Bu8!8Pz7|#8-SVR5QlnWG#ATx?cD=7H;A8!Y%q%-By$e;Kh?m=Z+1rD zk9mU|-P7#XiuhxHPlFsb-Hq}>`l)zg06K(??Q={J7N}2Nqu!Y-90TS*PVw(cIlo@% z=M7y!l1nzcpL5Y+y`-E(ZnH}6@iR<%65IMah7uR$yfi(Hd1yoL7-<|8DSJjg)mqbz zIA4Q!U}9|}B0dXcvjxWF>*3MieR-uM%oENVxZ@3$M`MUZwj+HELbzS)7ZL<;SKWm+ zqe=GJwv*p0yqy1(#_>l9LI|fHsH$$-hlZR&2a6-g>u+*}((eT^4&pPepcZfaH9F&* ztf95#0uB*5t5`kw$PSeA06AI+_DqIAj&lOfxj(G-_YeodL%l!Ry-!-|?PvE0pB|8B zVQ2ZFFOSjpXrl|tdK3)R@QJs`mVxq)aI)-6n+e*T_R(-keRqsOb#Q=_wZXFfzunz| z8EpwN#Xh*y+5{uu_(fGp@K>fgOomCtT2`KLxue_!JH!W%EK-#&r^vV)T)Jf`7iiF%AR{&tUB5`aHm zmD=+f2*@iBGs2kNPTyz$hf?W5VSy~Y0VEx&>F)Wt;td7O&WO)>2c?QITnvd}+RcvJ zC+Tm&T5w9SIuEeWu(HcRo?r6CJu|-`Er$fGTaB?d9rSQUJ-Vt3pr!MVf$tekL^@}S z2aR}{ux;4Tk>8kWOROfQ*yX#h8K z7>tjDpz^Po&wz~{M7X%E1bdrpGMx0Vc@zx_ugpI-ikoZw@Oc%*D(;x)?6Drhuor`= z7Pejva6Vma8$*1k@Mb*X4_(abXk*^`FQ)w7b$*MJ2e4CrGdI(2ZI9rKr)&v%c8>$*xW}i4_)CGI4yq8G5QlpCNcDnlD3^@*v$pC$m#n9K%nOvB zh$PQ63^o~rgMtCwY?h%48$8|j&^Lz`2P{i|xJ2(L47zWGH3i&Jo;Mo&;nZ>kJ;pP> zt3#E_nf*%pQu)PgBRPuKi=5r}zAFO=E6m{9X70}!d&g67iL$L*vm{s;5=tO0QfScc zji4kK@S2c^u7K&#HT{&$5GiD{H<*xjdt|ZJU@YtdjoyDqi%VNV0vzTn5t(K8axkHk zYfkX_hI`l;k@}|;!PMoTB~Zt%Q{QYaWi@(tjH{=lZ@i-fBiL|IBN_7KF85HgB3G#J zG!+8FTU$uXCeC$26SB2kcZ}Jmu(=F?6xJKQj=o_NY1)RV0ujvpE?vg{lJdMjw`n%^ zNa!Q93EbnTY!1onA$X$y+MadHYn(BChGMgfz=1bGh<+g#{C1}%3B{XYBPWF3*{*$9 z%K)=LLOYy&)@+579&IhQKS19im=YE+vEY!H6HD!@6C3b(j!({vL zS!Y5~9FE(p-R41>lGK_Pm@F3|KIYbC9u|?}E!dAu0a8aiy%DVrt4nV+IFBo&nLJZw zn8?S$2Tcfk1-cK6iN}9xFB*t^r5(30-3GuiETBuLqV816F2&yr7db*fO`q#F@zv8j zaPqV8O=kR)bI@+YLrfiVZf4}Z7tk*E8t(q-4MAoq2caNzyv_KcX7_t+cWt%(Ib#RT zrfSju_oX1!BH_r_m)PY272e8$;KaI+;2?0qA!UGxF3>#fuZxJuAps%oh$9q2p=N~k zi~|v5=secULf0-;dw|ro#-3&}$gt0RyDS)oR-+ZeQO*hZ&YBc(GRZr5X*Yw|xHpi=qgklO(43&lK#X*N>jKb63 zR`Y78(AXn1c&3^w3M6=3u0e?!fPnQjd`CowE&r5@jV=wvEJN^fp7(Dur$zDVWtx3w zPRUM;37sy?kyW-DnnBKSc_JGI`iNkc;7%rS1QEV#VT-Cu6XPPRT^ybuX* zOIq#2Gc%?<@roKNq3*F$YwnAadSIi}(Qzn>{J`p2){(F)7=j9;X%H0dNrH1`iKd(o z=Y~-hLijx&<*LND(lAI*en$g@;xp7T#3%Z*@|sk`v4faE=~U-bk@T0xsg-O@3RFsq z4mFg-5_^_ty3$*#%=^#BN{^?`?TG^Yr^5g2s^3WwU^xBtzlAr-_`l$dvMC7thlWO! z4NOQukIRFo!EGN}U4=Rt%11+1_6J2MqjkfHGV0XkQTy?KMd;my;RQu0_FC177H4Th z5STu4UZbuftl%hvTfH^mpqEMzqK7OL9eOZDTzTa*2j!P+n9-yaFR~df zWF>env@ZmNxg6^I?@RdbuVvB9LoC zfAZLSBwl;IKjq%3#3O43{^tZQk}Fy_{g>eX4X9DZ{|%_oX@dU?HHvAMrxAyM{0GE? z@+(G+p_Ue7!1OQ}x+Cz9N?yQ4UV!#*`$X7(;>CR~!GdU_`I>48TECKeL5d5t9_N** z32{x0%gDW0`V==5xck?iXz&BO z>3MP~<}ohAq+9z2AC9Wlbyp&-6KTmj8~H(1Zn)qi{`YJso=Oi=Gh30flU2b2nzka#l{{M!#{trd|-`g|(-(apKu5(?W6am5WQ;ZsqYKrx5=Qpy2d8uI^Js5QOZAdN2Jkv>MxcpgJI z5$9cU2~HxmEUlI?c=P)9yc)cM@+BdlyB^ngWeH^Wwzv5@9084>U4U0`H_N7kddjQo#l1vPa*?Bz{}_sMHf`1ejH0HjmKD+`!oy| zdK!Qq``flgbNQTm{ z&SDW?=B@Rvp4(}^h^$LI8mHaeOQQzH?pB1hVes14%Wv3gFEli#c~O~qa=8XsLkIbK z*=0IIhx9sYt-ZJ-=t^?6SXu+k9#zl=VZ-3fZS|sb8z`M;W1az5lk)^Uk`0s=3Mc+G zkhQ%YO2$G#mEDjWa}H-&;>IJI-Muv-xYhsmXAgP?C}}YN^ZM0kl5=+sq0c}Cm_qn< zKXCb*_E*EzKURu!&`WR=-B1v(g7c4=c;e7Y&cn9D*TX-DK@pQ;^gp7suzqw7|)Fd}De zA=)IcqJtDtb|WcgE755j692`ruw6w0;rOTx>lrY{=*h~rgEHv&X9r^dTyz_M8AljH z5F>>9E%Rrdu#U=}(w^#8l#mGWHxi>c>irCI{$sb|Ga@%}(rPGuFdLHe!YoFS>x@DdPW@(sBwSdqNNf&>XbiR;7kA-& z{dmjkUe_;n4=FO7QHt9Fa|>{%dUasklxJg}Dmkm~?G%mBTecM?5N5k1&zKl{_5 z>{*a6y*>bAH(r|n+=IM7;g!GZ*?4c?csz`Tz!^zo9=SkaaECY~pNL?rNFV8o@Fq(I z=fAPO6sk!U1>QFcmYl0*iv)5Bg-#lUhls^oa-4|84Dw^)3Td{K8{u<_&rE%=Kte6{Cpz^bGQd`}OluN4Z5yZ!HQ^NyBckioZ!uO=KR! zxt>ZJS?gCetaze3{DmxKXKmjSjf&IUWvK2LC?||a^%c;a_ZOY~ge4PN+ke(+tP zgSGY0=Gkhwl-+MVSB%)s-4EcuG7AS}mJF8*2SjR0nG(#(GR#TlvYD7`6%MjgqY)Tn z&+HL@78MafH9yp|hhd-E<@@Ml55PRF3)y52PBNUcn@1!_WL-xkwxmdLHBR`mhkJ|< zbQ!B07O$+CBpdJdnhv&yz6NjMAL+mr+|e)F{o(fs zd86a@6u4JkXBi|#1VTVF)PBbX60k8SH01xXpZ`#%g4uQt*YiGGE6Ks`J+(a-a{Mui zGJT#S6k(Y;ZD~CpSKBOY!rNa_DckHVF22N;qULqvR8iYw;vP6L8*{1zD>0nf^_m+w zlr;czN*{B*8e}Y5h~Zfe3ql}7GB@fsm)>W-F_XSAk|xGwz#O~#ego9Tup9s|QcqSN zPit#8-GyH%)DC9|1%7K!2OxFX_BOmzjo){nxECnAGnh3K<|7~C_b^@3(CvofmE#oP zn|A@ur#ocDl>sT#pz@xL)T>;%7_uYEE09se#wn~BD_e3@w@n&*Gj~p>Z?zXW3ICV+ zY#JmDlJUWCY1B*nuAooiJ#K_M{1v7lC+(hoNk5~}%`;*}!tD$mwb>TG3;z@L!YC`c zOoyb9o6a&uoq=e`O>8k^Ka5b6W=<*EO-33kDO{K#_achjKy|<){cmey>3Fi2*>IZh za8h_WTM417vTPaF2Polb;N9HC>0Q&b@sSsVoXXlcl8>HpC4-BlBfjqIrD#iJ;?p=^ z`Qi%0Y*`W#pQ_**s_-uQDW*rJ)f`ce!Y`^S!zLnZ;P;!lWpFK@rxYwy$HfJ^jz_dw zF0l>NP^p%8)$?)H_Y@u{g!sQ?vmJxr@WkmiyQxG~G zQ(Nvwm=u#4)9fX5+KbQEE?1uKaY;rK&&_&9n`ON8_?j6XJ9)+ao);MFS{tSpIG7PJ zJKq^+P`2HEwD!1j`V_I7by()H?;HIxkk1YMmY=tWOA%b?5SOW+4WKBu8DO+MAY=wHG)1osUK#{ zeG%M@DWc0ZJ&>6~g|%*ojz{%*WoqmbrdL8`olBHO*k6v}FBWM(ndr`!I{a5cY0n?t zEBLzdl07H$^heFnyIi61-x5tIG6_^;GM>0F3!S0HXRQ^8xgy=Sj8LD|#`Wvf%HhX6 z!EFe-D()N3;vY%$RTwv;o(ZJ=b1|W#)r>(Gl+T_i%Z$8iFcvkEp;|%2VG?YLWo(T%mO9~D-f5BCW#8YDlnv9>-|T}hjFPF53o&Q-$iD+$Pp=(hgqW$ zipW7fD021ajW~vuyclYWCn&zjA16tcr^@3TRp+4-)9;-_!LCvjcH4(OLp-6B367<& zG?9_NWyiC3e>^bX(?mRFB%B5eUM9U*ruB|2dLWBh-%B(7l6;1Usig^)TBTQ7ON!dB z61X+ULF?pKOn+xkg~6ca$mOB(B(Pk&a#_Bn7VS8NHx5Wn0tLC?KcO*nPwOCuK4UT| zmfu}$<}5gkNLMWm}j3J+la68BM3 z=5&@;rfhp6C>)dMup#-Ywt1<)LJp zp@O4o-tJb^?Wo61C-&#*Y4LS*b9it|E>1w)7`HaKe7!}OElAf!-JNWHzrBRtOcJ%# z6{XL&xbEgPZ-QNy{yf{r-m{XvXpJ7?C-YUNo}Y65aH(Z z{+oEA!AWb8Dod4PWqs;m{8&x@dKc?^x3uC_>Qnrt`>(R=t|k$^{eJ}n{FAmurY8U0 z+OYqO;XKdI(~8&2Ogl);NW#=j)=1Pm%2ZQK&d{9xm&7^#UqL{kMn3@H!=Fe71Q}j5 z${m0LkEnk#{~xar9B*i;d4W|fxS#~#=TwQN+TrGZ+p&HsnC|8CP_K@3 zZjJ^_hR3t#RWxJ?t=8b^auq7UYHzS@aVA;M2HC`hQZf-*?%#i(IBV}*zVEY!{|(;v z{Q&;gs)X|zGWhr5|2OayMKdq$x^e~wpxFuIWxvhZxAK(h%#4@;(2vmfZgb#yo zwiMDu4Z} zU;zL||Ht&?KacT$Pfyrwtg**jZf9n%cW&<8k@;@k2zGEKa)#j(+&+7#HVK3zno)dm zs7F)}h?$9U4S|@?q-8GY!dRz7d0dl^C>n575u!I2qI_zFuEsFiM)XxXf%=cn@HQib3eLb0(&dT~ZJyyKmyBLbA)>YTY z*3z`y>1%CX(B^8cZvX&brx|Y=lf~uc^gG!Z3Ok*{?tOhbeV08WfpPM*F~=7}w_kTpM+|jW+lSZlz)3texJVTTv7h@2 z`Zk{sWVZMQhuvjg+p*|eGkWncx;S~ddJL0eQOnaU%YDW0)E8yWP&Hfgp{&zz;bOWl zf0D{0q7$Y;+vIKgt~87Qp*&t=UKz{UbUEGHZiBE`nlNP@x0#FnzV!kMlY!%q)$T(a zhpzvyzpQqUo4BX(HIN#IK8J%n3jhbe*a}S#&n^kVKZ~K1EYXiGB7Kf_RQ-l92eF4v zg0#o3k5Ny4=hho~Mqf6I2LZ|&7b3U+IN*-%PJQo@-yRGQS`at55X)eEkP|-SWZtk~MldXd z7CuD!>|P!s7!Xmw(NIo7MunVBdU&&GN0NJP-F>bP< zIXysndPv=l8!$QTU(|PkVMA}qc3>+RLb1ff2k_A&^wiYfuaAq67N?o%uTeByi-&h; zsi=+jUCY`Ux|-U%x!PLVQ&crNZ`Te9Yb&gehl|P6gWS&E2GJ?aw#6}bprzVEs;H_{ za|ge;x!&k$*w)2#57b9%NrOq<6OZIBMw<*Y2EK_ul5qdxFkaJdnfUUb^nHhc67Z9; zsO`y_j1|Yd^OeyDcb~AmvrhA2ALije&-``fA$8_qP3Q}NgzC{=2FRlYa7YcHEa!nm z$OY)01o&Y3ZPAgt_U3^|w5@6tG8^X8Bt+juBv4M1Xp|yFbjM_wLU&FKfe+#fa@2#U z&-DB0qx*5o2ic|jeM^|+8n7A$(aaxb&e+4u*7+4Yvf#77TuC=`uLd@BEXtU!oy&4| zYG$Pu&!xD&EzI2hVn}C9QH)A6m_}kqG6IXPE8zDtY`pFnsuLK%1sK`Uvj{MFC`Mm4-=0r^o5Zy|LXhwm%qWQ0f_g9BlDIBfHm%r}xyH4`D{_tg z3u$>&A$T0;h{xin_72h_rifF;H{)+r2E56$j$Z#Zd$gB5@}+q}5U!{@!0lk% z9`o#CssjjLsMtMvZhz8xzmhv$?kLlnLpS{GSkti$0V=o*wmBdQW7SKPmWXOS=_url zfnYoX5u1F~-gMr1hUd3eMr#IZel!6;nLG?9ak)4>(EtHttc1O2pa>*xqP|dI6ry~r zIg`y5UdF2Y9^|y$W5FwU8~yvy68f0t2=#@zKHR2wGY(fh(q}y$ctf;Md)fhefD*C< z)_0BgPR{V1J2oFe5BMEZa<1f;VXm~!)XVJA$ge-nv+<*4R)m^itAH$( z^U(uUr3ubE#U!Vu@~kplS&5@2NmONer9vgoVWtvlwt+V_*8w>yxE*?4LoK)3;iGJ> zGuJFT=iZ=~W>jvD3iGZ4E`z&YM=|l?$Y~3j$hMNCx@Wo)A@*@3c#`kjz=(7{0b;ZC&n0M_-oSL=1;}@Up$_BZTn* zQ70Nxsapr9r;Cu7ibsa=(zb4`h8jI~Ki_l0)g{WLY`X=x#xvu#;}k6wY7)7s4@XKj z^XDxe=qa?!&F9OCpAQ<0CAQ&XhCZ56GLQ6u*v%llmwEsM4kn~6RR8o8>yHM>HPIJ{cif$U@dXk_Ue5hAze76FAb&0@$90 zVr5zt+)*Rn$~XM~mbv!gLsiogs|Eq(Z(V>=r1IY*I-t`Jq>#tj#E~2WA{p^MEnPg{ zMsq(q?YrE9gVIE>d0jCujsTU~%<9~DxjY>?;dNL$$MrUHyaq3P&r{9K=-!kqI5o*V zbrq4ZnbBe02*sHB;qW^WgB19lMGYC6q4Uaf^woFoZ0TX7R%QGdQec-pOa&DezmG#B z7|A>jw0tuME+zW52X0hK-SwZKx8z@Oksau*?}Y?sU8U6yqSz#q{KJ}mW-Un5%sPLw=uG7=q~NE^ zGWKA5*_jI<0jW)tr$=U-`+j|IWWGnTCs1 zKj~gwQ;wzHScb|ol#c#plv;f?hR$ir03YC(3i|f)_{zYmF{`rAKanhG( zx~lcku_zX+GxkX1Q&~qQ?!ZyAYD}ifQ=8$Sq&%ld7&3&8rcaq*Kw8g0dC#J9RquK6 zABP#B(~{0BnAgq$)0|> zN$8&>a3-%})#h?V6pQ`EFu{)Y+-l9cAORC7xpVvm@?=$;6b?62N7AmXY%5VezdXZV;ZkS) zJ*t&1_^YM}R;ly;w--OD;w0PhytcOJFB#bFxXUqgDeg0@xtP~efsc_+rp2=bl255> zf2|_3{58{Os3#N=3-~|Q^MtJh!)jpjNbPiyw;e_>F-(0FVKJ0x}Q1Bgy?Vp0QtE88~b%FP|Vf#K6*O1s5tzTqn+;aJ3pHIT3p_m#0J=7 zXR+(FdtU#lc%QeTrLD{6ejYHH5o9^ghX|KO8I30>yb@(EVzpEe;f)NxSU|Q+m3J+@ zY1AmdA6o>jL|A5D2Gw+GMn^9UZLE|ZA-)-`@P8BOxK-lY{`YgS`Mo%?H}3dwV2icLtck;ep&%X`Tb=|dt<8E$mr?2nwG_*DF@Ktls z_b`c|mJ1FT$IAb>A6W@QJuyo|OH&O`UmnvYq@~XN&}S2N_)4${|GHXczS7muv>8ml zWi?tPtWB6SKa$Ta27>3(3EckGzbae1*@-xC>xYiJ+V;8f=Q!ZR0|3FFA!$J$SB4g(~~EYwUmO2* z2B(Gvhl9kuB;+IT$fAS_MZe*tyO60?g?g z$;IH@zN(m|-EsSKk>q1PI}Ujz4&3z!91__U3`J*P>jZ79Z|EF8yP~;8JH@85nMuY{ zYcY@aBvua#6+D5jqoRi~j*gFDowvx$v)|}AXhNcbCn|Z8gg!+8Gp6+E%*;KUA*pr1 zF4N0J2AI-SE=%Zot=;C*`*TkLYNVdP@ zm@XR?pvS#6J`Dk->avdaVBQ;`L!U8Qfk4vc<{T30OF*|Q2xefJ5C!7k%hHde42^4D2tYB2$msNU=> zuJC1JQ7rm_V{NEAx~FAq|J9*+Io6PS@e9e|dcB_*=&H^cnV8(m^$xkD`HQQD8TT`l zSLxpR+AEawI*ZHEZMS=s;1hP6m%(FiZ@C*ZOkm8k%jYC~;6L&u<#O4C@T=Ls;S{3k z>c#_o`w8at8u~-GRcx?^hT)t=Y74-2n-z4z;Iv>|2e$rO>e1~@bQ@>OI6jR5CX~^= z4u<403Hd;+M^7*}6O?B#<9KsZ*BAl!_`)=O{ioH6PTulif!Rd)kQ7Zx~L8m>mKpRdV{`{g*j z=J(!E2C~0M2K)>iO`bOQ z2;gSet<+~tUe{Voo(^A{-DLVKzHuO(ZNyz^FE#qQ12ouN+cx*K7NM`IuF>+_Dh-C- z^ehdvv2BC{R1=x;U({-wzsCcktB z&J~HJ9RFgFeZgS=gyq0!Mfpgq3!w>{#8#Agr9oU!DSs7>M~b&!Cc|MuIbp?paZlPp z&uE4y9aJys7ZK`}IA8=RpMRQ5bKGJC598# zi7Q)Z^@`tyS@;@L-8MQS5t(WquUb&ze`czE#eM$O4o$B`nXqA8a?o_uns*GL-3yg^+WgsP{+B( ze!JZU&!!6>Nv-4%UK{zpS=7-_v&&hDBF$essPEKJ>c|&0{e{MtUL>_KYSEbO|GYuT z{h@GY!L(UaqhcCE|$g)e7FWFlzt=S z(tVc3!c?|qk`#*xggF$8DTG*x#o2@@$mbd61X)UxsmY63ggz7p#oQ~%=Xzp_xdF+H z@-hiUG{*ELFr~?xij(u4iLp%GvBlFwI24JjF~u^QOA>8tO2$3s&d?hPuDkoC;*(8G z*DX{yW@=Gp!n@Z;H8szQ&#T|HpvRZBWzvy&$VRM7mX|=i0jUNts3dcXn9<|EN7u5N z*(&-&MUV7&D0r-;8cnclluOR(I(@8_{4ym}kBkJPN8!N5Np4soHU1 zo`u@w0y;sPEa{7Tys%Hr9Vnm=nzP>fW{)FyUvqDSo+*R00AwW`68-<2MqZIiv8@88OBiX6fwnUgRnuL|z~C=cL#qH2X%R!UwQ zUm3-@-k1zQS`CrF?pU#VwAe!MLeh6=cW~WEWcL4DO~^Mz;f`oKF|T_)?hsk*F<}m1 zaeKgTzTWk}IISm|xRdzkX}k0Xc0XvMCpLo@x{Cd0yXqaD*KEjxV|Z5kswiYX+?)HV zE-gbfKFS24m%XOQq|1v`(nolC@%j8hRQ8&HAh3*>&gS_VZyHfHC<#a&{&>>&IMxMO4=XpKF z?B`;nJ@)B@TR(NAX3esb#D644j47wmr4&s9xCI4G&-ev7X+d$4Q85x22_$_UmOBZ! zTJAH;bo*V#nzak8hbWN#*ztSM%zu&8ghtSdf?` zat0MUE1ZU_uR*5GwmAEiJW@(l#5R)+e|2i#nNBqF`s~J#&Gh*GyBF!DrG;cl7Ox!h zZ^Mwwly(8=V2&#HnwL8#I+_A`U+v$7V_q7_lTo`|IAUUVnwO&yWP6@A$> z$!@EYK3avGqQm!&7R_lsr#<8eO?IYtt5i%gWSQ)qbqyosY_upcJHDZPB!QU7l0p+- zMRk?$eBjK(YKkXAjQSF58S@&{5^ym+x;4)}*mx<owwITB@_@;fG*x$At*)%l%KnnS_n*YiV*Zl$r#U(ZWktW;viKP=GN~4<_PvENi z1W5VOU6_4`=X+bVYg+^7eT?h+Mf$Ibw9W2@U&j~#Kqcybsz?nzXibfs|6fnQ|A!UZ zCkH$oW##>ME`!(Uc6!5cQ}JfrjpnLn$xL2D9oYzq8q5o^OJ9&$ADoa96FR?~0kec! zm|fz}1>+$H7J7G6F$8*+e=k+nw$#zbvsj{Um z*W)XqEF0ZZkMD4KgDm$Sbw)DFp*QjVgLwopU$cc++@dv()Z0rOfd-&8*vIw_T-j%MI?^4`i`Y zn+LVZEg{P8u>OK%ce;)_MC)y{i}npB+LH%mLOq>OYb)9 zP?pPSHQa7MhQnnw8{q|y#c&$EL6*k|3`=z23=%8H{ycqXn&bYItz?!LFv@;mTj;P_ zQ8sOECV)JfH(9v6y&IjA6~FWZ@XxozN#XK|vGs3E#(!sE45#WI*>o;-D_j#PTNh4L(pFGT z{2t`jM|GC*rWB)X>kPQt+na*UODW@zElHVKDPdOL#LbS7J40)uWX!ojGj+f~op#e%Zxq^OiF zyJq7SIy}y0L6YRaYX=q>rRS*|VwIUwu_AIgt*z>xG{*^x1hLQ!cHJmZiKk)X98JZG zOsw}x64|u3_4O(DiO)nw{-dmk=SZwy*zME((dQ$alH?u#)Y6VFlDEi0Th^_&+s5M$HuCD& z)x=juw;~_+0-J>mujt89?nVZ(gWvkh_W5(cxXIsP4 ztOa~ec!^1S@o^WZomr)fcqnNW7fbRZMP>MMGub4|380nU;I080PA8b2U?Db+#Nrd+ z(VSh$lBc%z8TjNtcI{K^TN&aGu95nsTS*+3uEp1Zi%sbMbD@kQ@O0v ztlsci$vL$Z9Zq35wFK^!AuJ6FUxY*F5`%((sH*#j39odfSBq44@q+^`xLyn-D-5Kt zGnk;kyNQxL=ZWVNA;p^oVzY>Xo#BU}8|sXL72w3pym6LheSICf_|a^>sf+f)OSYB3 ztdc(&|K@`j{avPSIIWnDiA~6G3{!>HJ$d1sr}k@2f2*2a&T?imN8R>(sIN%X+}^Vp zLA-6b_k2TCtD|Wj*d@5a2rjuPBB)dhB7t*gZ!gZN7&{15FcsJe7O9a8KsIn89coNJs)H*F5{0f3_8nc zcP_WPyr4S*Zk)b~shoX=nh`Reg`9bp{#68oa)u0b&WV&`FPDdtsA$s=bo;weV}ju{ zt|#9-T*nd~56k6EHx-@BHGONW@_cIRJ(ao@rTsBn`GxlIK=4W1 ze-o-BrXisfqzgkkcmp#B^#0b)sAy0hG_&_y{XjYK`cwtSjdouniy;KDd^Ae=ohZ9O zpB{Nz&w1Kw&G0I#lRG9NQrZ%=lEYV$(#$6kYfFW;{Rb-iSW9G7w&e(Ax_!O*b`&6H z;MC42o9FUCB!+h8W=KpIQYI_?Y50~CJ^DN_Vg(8dr__n$K1xtv3ohksTizIar6exn zJ&N^|SEHLbJ&H^}<0ehV&+h5avAiuxGCId#Vk+a!WB`$`^02b3#gSXB^8K@FR!q7Z zLdO{1%=(U;)51@B$p#GVmzw^CE#MVlX(7eE%kv4-%nV*2DobPj>b{NPHz~ood5wGU z&zLII!AOF3vk#pfl(Kaz8VLhFQ9e)eWwt26>iTU)jqp! z2+i=5D^oti56(5{RDgw2M$Bf(j-#lnp>s6Ypzu;m8Uru$I$D}w=H#F0bY0?6nU6(t zUU5}h<9j3itSgmRv|$z#KMsDTGb2^E`7QVlOFrF!1guxjE1x$Zct6Fz%`wc}#q#nFh_dboj7z$} zhG8XS5moS<}1lihsi!Ez>iFk@8+G08wrj zfb8&|f82tj7dp?8r2H7fTT(t{YR9#^gAL8~7k+u6=}9u2gI86M>}FW~O~~bYlQEWo z6*MTq4mIN?b|ZYyrWzk?Y73)Mt&l|fYuZ0|D^yLk_UMBtznLWx5^Fl=yS=q-8KZ2@ z9PZ|kf}(%TeWXhdH80?&k-&Y4Pn=TX#7t6w`r!K5y-`ytmYJS!>kSH1EIh$4Pg}~i z*(TEl=PXzsfsV}UfZ=q1gs!#ytze=#$zj$bF@_fQ9{YZd@Vqb&)Aj+Y2Y*LF>aX3C zCS#|5U2tyW+xoCqv;4i@>6`tT&&P*(u&%7$Y0RG>o5-^KXFM>0H$H|fd%bY`?%eS+ z!J0scE7CT7YyxoUV9X`jMay)=*~iRuI3~_qr(=(1^vIroQ%BWTD;kq-<{3UI?ifg? z#%X46fTscySs5VWpn#+*{w~+`TD`oa3S49co{ZrU~~78#B38&4ow*F zQ|2v0O4)wlwb~G`r3E05PBqh6iq2M9Cm}s52vetpg+XU@P&@#Uu)YNi5+7NluvSCt z*&q%NN%|KHD!#(4JBEEILv-o=YD6VDpcYn_gOyF1I%0ybrKvepF= zSad!Ho&9ZHB_+xqcU$+4@5f$4M>D&Kt6fvV>^!Wk_s9xaAly7%U=A9BRz0A6~4ypPf}Iw6^8+E=pLKqfk=QBN)3D`+t408^J{y$!4xf|>$ zSUupASJ~(hNFJ z{_Bv;pA14@elVkqm|2Sya><(OW?b--K#9j-W2hM)NFvncbm2YZNA>bW2oOe7h*<1ob;6%erFXz*>3db_ zyi)=lfXKL-SA9k=bqeJ%R&5yXJie@fQK-N|cOo4_eD3j00Z|hAHzV3XuE83E!hH~q zsX0^@1*%cBP1f8qMP#3IR+fNF9Gp0R3*f20Y8Z~D)w;gQDTAPggN6!XZUWZOd(=mm zLw+1ylKo8#Sp5Q$jWqZSHoE3sUULC~fCXv1`+QC~_f|fKReJnu+*WJ*`MNs$jiCF; z7xjB^{ql?^^oDH* z5Atc>xVw`!r2{TEn@V>F4NOkxk$pWb^2uBGfM={R$HX$G+9*d(UKh{AcW3*6Fjq*J z+UV_0?Vilx(Z302wg0>$N@d$0DzpZzV&gN${K38f)R#PeagTFsgx?|P3=|SjF^=4= z8Q&Ni`K4}uV&eMAC-S}#=HJw;+e<~n_DNadrE(DHPvE*yq5jNa-wX)5A9iU1AH0S zbEgpo01A}(@FU5}2=$uSIe5jJ0pXDb)b$drD*2*@W#Ed-P$639+obsLnmk`T1Owi6 zr~jl1szWy$a*_#j=ii0}BrY@^1N0NV@r6Jo(&XH{WvRYxF<<5S`SWiT5vmEz&JGz3 zoUgo{s^=gz7YOxfko$%1fIQn>Zutf`NL(a8-T{A7g`9+Wpmx5NZ`EP7lJm212Lh>Z zIYYplbjMK!l;)m5$NeLw^g8Ow86?tPBE^s8(Et{#aRzr8EI&=edO170@Fe#Q;L2>A6ozq`ry1%Eo$;C6` zKxhOL6om-Hwx6skKu`zRzqZQPl4K>g!1F|b0G0KPAyfdgKX?LuNc;!HE%Ixg0HS}j z^5HyfZb56Pgrl6ea+`C|4|)q7{Ub3L3ytv#+b66M`RE;)bJ-Z@8z>;QhM=1pU!{g( zaKU(al@AOHf#E@Z_?guT&*vtxQ<Kl{~kIA!>3$jrKgkm*J{bsNEP;T zbM{dSUG_v#JOvYHJ1UGz&T3uHRJ2~o3Gae@BM8VNS%GiPY=%f=9 zs7rer(yLS!9qV+70};X1HYs{q0@5sL(Kw(%TuY^F3BWo=hGlnP{1oLZ66@3VdV(4WyJty)^p721JIsL!!MRnA!HqD%hpx#2K(SuS8!`)4Ls?2%ijR{ z@y#c{P(aVKu_p(L>{AV6bP3-<@60**{TjCW{jnD8_#uiXQ>1OhM>n6Zeb=h zpOm*B0hRqAQt8>XbzFu;4{XBbE@LtaF8$5OjwJ)yK$iawkqKBg*rBdCunu=P=#>7E zx{dO7i5<4JP8q>{71aqy|K7@@e>YMMbTn)`@aDUq_(#sty1Y-2qHHR^P8nGBT4?f* zPgnlGd!wD)^tb?v?pFUNKiB>j=!2~=a17wWD4q9ll2+Lt6`zNj&L0P42(7hUF8D20 z(6^Tz=o~9>LzBw~e2kF4jxbuH4UCc53Wm;vp+HB z0eIhX!jHdFg-f4&;iw?|&AI7+X2xSy*CE(d11T<_07yRpHdBQTY0IR^Ncf5>uKe>Y zWHo24=~aQ=B9|zV0dodD2j_Rww6+0lnO4IZ&~XyNaQSNz;2BtfvQ+N)1jtP2XP>#H z(T0pgKOYE;f=(FET%5h6$%GL`rcmz_+romm$UYsDLZgN_s7U1Gkp_ye?C^bh0^;EI z-o!FCylRi8;{o5NI|8ZV4!a?gO#|Nm==+;!(G_n|WHr=98?F?3e|dBDQlYnX@1Umd znN5^M?@qy|95za@Z?AJ`R0=6P!Z%_h8SiyDd?MUF7LK<(BD9U~EHrR&4gqbY)w!=io@}DVDx~mo-+=>p1SuO z3YDAPVKRY#2atUPjf{Hsu<`NqnbHe{gcS28$QPbiBsF}8DRD7h2){;jAQ&)40t7AZXM;YVS{DH6) zdRn>@6dc&Q^alaRaLo;+Di(CRs8PMxCsx`vW)-~kRqyRUWUmTK3-Xv}dZdqdA+57c zQ-ou!s1;+#zH+&UK9>GOUb3e38Sp*x;#;0@t9^GOF|(7%mZQkfN_Koe$iCG|-hKsD zde^eYg8n)1LC1B(;*N@qVmC$3q7Ktq4n>0@%u7As$K61ScHGM=%o?_!Uq&6!0w70m z0V*e>KTA@coETI9=~z1yLO!FWe`MDOCCRE|VW4&!Abpolt_@3bqCLEDaaBYAE`dl3 z2uuW0OQl!U5wK5#89->#7cS8IWXLo)>!&yigyJmeKc*qoyde%#CN=I*9O2LbT;K4O zM-u{2vS?5bYzt_T_3wBqh)5+Pd6q2iTZ9kriTcl{|8Axw!hp+Tm_o*W`MtY)vh3(Av@C*kvSX_ zv*cf7p=qaL!ydh5axuKX)JI@qJ>Gw4YM8jl?pzMpgb%#3{sV8``*lL!!gSSpstwQ8MSD0&Hot zQNjF=PpHQgvyLu@UxIvJ+Njh;MIRcA#Em*|yj$&{P&KqD%`NE(k zdzV6jQup_e)cI9hceco*k)miY<*#(3O={5@BgDNmrha4K*n+5Waf1ypN*01;cHrYy zmzlN$Y`PC5_A^S34be;4muxtBZ4iZN6j+{y+0@V)LHbu1+GAR{GX)A^grjMby(+faNc9Ch&tjCk4y8=^c6-eV#y6BvCOl}QYbfr>)-;9>= zu<}sr0&lIgwz$n5Wk?E5mGH*~oLcb|`3plJl^-%aKP(LR;QmuuDFc6oO%f%bB#A%Z zCHJ>;VHcwX(@rf=gC*8kTufA^9LHu*^GM~3cYR`Y6N7>#&b)2w?LY)9R3vY>|xkv-~(q5 zW9nar;+kCwWfGr-3o%b($|T$-jU9V0NnGqm&e*E@hiXrdW-mIH*jeTj$YM$zk_@kV zAUxRKf=P$zoHrZ?SvahNsUt-ViUp7e$>8E08=`QJ?50}=6-bZO;moQ5-Y&ftUwv2y zz5q5SUI`QUyk&}W+eyOI%*9i#hRj0B#ZeG~H!Ch3(|?#)#JNfai+tqY;`>%9+J~)f z_8qtwV@7dNE;02&3r`@2>1XJ(`BkRASBGK$CMfx#0=L;F&Uz*z8^S9_1-> zqRLTf{kv2^_W63sTIT46Q$GLX);=(s)&B03exj|dQg`TJQ~P;M{AqL?tYlGSPFl&}8FpuYIoQL|SvZANy zJD~*2&8e&kQ%dox4AhZFHAbiK(%lrUejN`OOE<_PWYugNBTGPWw0b^@)tq^U#*&_%wAOdQ--vaf7D_O~|x^ z9m@e(MF7lsQr+KEs;3+HC}K)#v#;PnQ;2Ow2f$nLlmwvKkJLW&_@_!E3?`|j>rYbi z1PUf&Ev(A0;R9WPJE`Zuh08NBaMmfrcDFom3}*xuaHC58{Zt=t`aMk-C(6XI+8weu z3lB=}%z08hi#*P0l*_t|IB2_4@%t5V1XT}RPN|5qq_PLsJvd9Eat7y6zC3!cga1WPIa)$Eo3U~Ix z;A=`FC0B%4+D!_E;l|xsLG-GCBTVb)ds;1hR&%NVLtHP8Zh z|Ia!sI78&>sJs=&AGcam-{3}+#~cHJgYuA+ob*LEU@exFT6DIsPmo1mVYr|g9EkVu zLO~n{VO~2Jr(8xDUc_abI$J%lgb6_|)VYw+?`hh(l8eKF7&e59y{k;eBwPhY!05;! z`*o-~bHjP$WiCOtT@UOzyrU}#hw3)Gu54t1 zi3VmAJkf8L^1Fy54W{nn;M>SuTngyEUuNpH6*LTmS~=a*_t3y}RXdwv3tY)$OV*?4kI?Cxki{!xB=g-@ixZLz`DPWq6`Bv_LJ2 zDK-}rX#ypkIs}pt0(Nh*nRlz6If)(ZY0iS`*s}u)>)6 z1*Y}l^9Yt;IWU|snVI7T57!Ff7oevzhcQiqQJ7W2r{JN^pDcX?7k`C~0096|Ri=Gg z7L=Zd&%`*r;xO&b;V>we+Y76hJZBfw%Pr`yLm-4-kUsr`q@FByMcG`*K3vI-{1Csc zQ&KaUzd#*`m{G3r-L=ccooTKIc8$N-d;u^u?qaGS>+hWhT${2Yukc?-S;-gjI<(Iz zVn!_X?$i}|<=GVAS_ehNdI{y$(J8w+`&RUR*DB4(tI|rHl;V7~)HU^A5wj@!-x0GY z{QsFO{J%`*&Bx@AsRk>7=_E=TO_gPo=UkZw%%E;bT|jkKsIVwca>nScu~~B)8y&#S zpw6@EfG0}+m7s8X;%1WwCLpQ_z&2{$mY1|qAzB@O7@5NJKx3Zbq!P@rKK@dQ_T@*% z&A^1VXP+UMIYEvkgkXL@*1l7$6 z%EAxHm@TI~*UOxZP5pg%xj6;OZVEBi6sqXQJ=EVz4bT4&iaHx3;VgWGwlDAW&=>aQ z-1WGUuLT(6!P6r~b$~9}odcnm(NQ{{Q^d`xdl0jgloIM!8nHJ)g}Z!}if;0H_~_K9 zl<2nIDN+CHN-12)U^S@7QH!$9*-N-TUMfHHK;9U8V{=2AF7jbhbKrRXwd8qw6X^P_aD?PT7dzI zTL<4%r6#Y@h^M;qL*G#G&5euz6 z`{Sl>LoaxF-QAuNE%#_-%|k(mj)6WW^?0%@Zc*UdK232&1N8J7@615x&LS>;r!#W@ zA&+h_OgFp3beLWT_?Cl*^adr>2MYY_-e$EN-y@5E0+E0*j7mPr80K2KFSlK=H*%@}8ohR6Gcf$wU>>0i-j2?cV4C#4GHS^QW*uU=-M z%isv4s$x!8>_wdVti6oaaI;V71YO}n0Sp?%DWh+MGN*KzQi1`+M;8SRcsfW{zha`H z0^jg8$o+e!x>3!KbN!YF9ZT^aC~-#NL0^X*Ih3l;PnPrBSOacFKqZUlBa*+LLdkhU zb=A|7q#47>UaE>5xd8HivT#*9^!Y(16H6T2x0xEMA21MQ4}1{InYGwZoR2 zJtg%FjP>4vj4cz89_48PEA~T-WwsB`Gu>tH3906_7O9RsJ_lz_MqBuR?xqy%*#Lij zqFRFZqdWfzC|d#9`2v`&eWJL@X@x=# zfp25r{19FHUA;g>xsXwEo(WKAG!jq7?|SNY2h8)$iKNA81t z$#j!z$nPN#9Z_KF2N*>Gv5s<2?jlRQ28eEzZAyZR7phD(l#aBeNci#S5D9KU34{l% zKM=6_bt+Wbq^b`W&_MNopA$ay;gk*xYzstcCRN==Rw+0POzw>;K!**Z*w{g4DSor9 zE6G+2a=u}R4~eH&Kwi;x8A4<_uc7yO$tuM+1FVyqmEHY$iN#bx z?c|9eR-RuG5I2Q<(PS?GsnbCL0}ZO_N#WGwJg9~CX5 zL7E(Q)(#)x25W~|Vj5^}VjEL>aYAqqyf3&s;t;rF>64-sJXU}=NwsUxXjEt{zJxLA z0(zSu!l^bc0gs8hTe#RVk|}#iJ#i3CR88_*;L$MUPI%kF*?7^jDewP;R8G!@#|PKR zO(-yo2}b3!;wR`M%q)mHQ*STgk{w{U>qc^W1p}@nz%}h(3C=Itsm^TPd+Vw0p8{gj zmgRA=zC1j3>(mv!XHfCkK#=X(W&eq5ef^a6T(fo{y0)%={F&yi&MEzQx^vw?tSv`C z3bOT{N$Umvg==xx{ND-A|FSk9_&;j{{3g|)dH+#^Mg{m|W8l^(%5XMfvTNNG`WhPo zt-xoHz=-ODiN(?+&rzZ&rYKehc&lfW+N3BM4ooU|8tZ}B;PoxyQ)5W$BCdB+>31dBhZukmr7Qe#zQ11g+;$>i`<}xzi zeG07!papB4k{i8Dp$f`rzRb(`QzP=bq&&M-k4}(B#~BzW=#oq~<_bNOqJFyr=TKbq z0aS!9C=&}_!;8NTSXimlk|nD1gw<vr&;T>9!?mA+O%S94PxsRB9;Jo778-^F<6j zi8=Xh*?{hy#;D+oRl+4)+y@msk5FZvq|KGwgo?x?QsS6XmFzom9;p;#DdcdtQU*BI zqQezuthAfm-0QKCmT_dHFT zth)Xm(H9qJ0NNuBIAv6gj}zO=5$>NK`4dQc!`+=FH==L?LFQJdljtbC%9{YLqo7UF zqtJ72;+v^KP)fI?XQ&o8AiYMWaim@lhbeiPQW;#=2!lo3#7){u*%q}Y^?_if_&((|Y>@6F z1vKTMbvX9*Xn(a7U8^?I;A0z-K=3nJW$Mc?3&9EisQ+g&&uf%Y)*++mb*jH#$@2J( zI!{8&ynj{gC*PJ!{yeEG4VY;!)ZAn&Y#j_wd%B;6#`|TMadC_x)uAHQ**cnTQJmR& zUT};7E2FdFCR2)%zV?p-82oKK(tE=h$QHwu(lFlMcjN&xE~Y@erpFjPsh1gw3H=JR zx_`C<^NT!l0GQPg2F*o(0MmQ64e7fXaJ5WPTjY-__bkeM&#( zt^@v~*7e;jNBE|hD2OFDi_bTGwm78&5uVGAXzJIrc#9{H4M@jiIg@-cf(_gls1D&J`XlvFdD{c z}{m)>1r& zmJgGkfS#v^m9lfCyoD-8;V*4Aneq`lncyB;K?-yA9J-@ZxY-u7C@g|;Ahi#Ro~_kD z7~p4TctO3;qGM>P8S;QC-lz_o5L5j2USQ#G^nr%tD7?J?Lp-IW*nt+uHgYhdl^d^f z^HqnUtbdTWeOa{xmWC??auAy^48JHcl@CG#xeggof>UG_yg9_NApUAlZjP2*+|l8D z*>p}RiHCt{BOrKpbQEYoO#U9q4^7;m(o&$6I`w1^sT}T0%A*WHr#^!_rQHL>xD7)& zu?-`41y`4QP&^T1z~=AEUmaT zn7W*queaH zanb8C^{_e`ZdMB7qAC~v>|h!OCV3Dq8h!^adJ>3!@!&?5+ybXB?Li+uSmLtM>ShlN zExN&c%JE*w%VB2G>dXTDHFq$6ZkS63+s{ZM#$Ed`J*;|1wq&p~UlQ=%g{g4$Ko#1| zE{LraF5Wk}vbEBc`QJ2XnVR<{7h3KjO$E>`8C)a^Cx22Kp^LlV-F;D(>fR%|`cuWt z*P_s}m=lZIb-1BhDh$LG-%-mxS(-N}GB=nCmhr9ghPYX24Bm1mSWbbqc%9ecORA6) z*Qa9JZ_BKlD0VI0=ukT4r*&T3*P>!GfBph}_cG0S@9pYbyDW#PwX4TpYz4OhyS{6F zGIMGk3EN7cv(MMBSvQcsTt^W0LRVgFwrl3v)xC=cfzwt`CNj(Om%C;h+&>wA%hzXG zVOL-OUmjMhVTXH;{&EKiXxV-8$nszt6tmzHR4 zU>cL@>-K|u@#hMryvos*S6WeugfbQCerQUXS8y{=qfkyE{SYtdO(D3-H;{ORkBp%6 zahm@DtuSql<9X$p=S>}0Nf7VDIVN5yF3UJ6+(WXu1Ak@i1ecRnc`Z3ncBqD?CKJW& zDQTEf@pwG(E;3o?NTv8BxhqXhl4(s5EzqPeSt(x3r>9_|x!Wnp<3?4|Vk-zUVHwgH zREtJ29I!|^8xQX$d!1YPf7wt#N;&%@R4Ap*+>Bzn-#Kv@nC#XA_gfWY-sQ+PI^4C;>q3Nugs)6sv-pS`-JW7}(@#N!vgw&nR^J z(IB}Fj8R|ZjVPjOI;!cqSJr3~4=UlfiSe;PNXu)peYx?eVqXTUbv7~LUtnony%FV= zfe9Q<@w^5pQk7a%k)6jSajgpTe~wfR-oiY+nN~Ycty8jrlYWhfiD;pNf1U*8(HRG; z6-p50>B^6(P`3ti2A8spphDj_52#rb-l!+!z$=Oy!z-qoOC4ifPEXa%ql+Yz-g&Sc zXbEUJD_OPq8WUev^5_B`yBnVH`;A`l#&hEc#h!=ZxnHJq9jm~VFm|;% zw<>OUtHPWmm{z40V^%5kn$5v7sbIBv@7;;_{{B4Jjb*$_D5a5P^$Uu3Vq~}5T-XYcrTQ` zZs2W4${ZIn)~hLabQ3L~U{ZM}DwXgBT;=J`)lg5LkS{nxQT7!b*;g%lVjgA`H&T## ztzJtuQsy%Edi>$C=IIbY%!&qC^2-XWRgrERB)&|NOLGtEwVZ9#s0swS_E)fxmli1f zXl&pV6b`S#AfK;85XyEVOaFn7?a?sJzoC?~J*8T@wMy81gN3I3xO#sX=X`{L2|h=c zdi!akWA6dkZJyg=i6fO9leHOgqnV@>-@=F1{ezt`9@Q_UUdbX)#5t>|(-u&8K{|9H zuZ&$uKa+?Bl8!i{}gvmD?Ao2sY8}vYBKUL@As}|Eg5^k_Shn<@U5qiLek~&y* zgX3Wr!a10N&r8*KCwv!Naoi|gkH4dDfEpSmS*!87y~gi;8n2oxz*$64=T-A+uwV!Rib*b zg|Fr*&{?17JT~77XZ9@{zt}3xd~S8`qA$Q>t9vgV061LNHEs3yd0*fzbTc~YUEHyY z{&w*ooY~(?*Wm(iT<3GUVz{cb-9|G={{!UyOHl2<q-)sM{s`j|=K zu(3%pjg9f5X^`J(lW3&_#Vgr%yoX1x96m4QP5SdvXOK+<`77N4p5^QyLF^1^%a85oiUs*{(xp7#bVpWu zvSylBzLKJgU=ykGss+;KokpCsg7$SInQln5KhFiU?}5yy*y#;k==PGud9rkLl#nV0 z#fe@`8k%0o*{QO071aK(aHnJ~Q5UVHC5;uVX{ro26*)c-vAr0LmADNisUs>rFc1>FCFcBX7AkBG!2H2IfgPa z$v$MD&u?S8Js?XC)ywY78_gz#p8gEW{Ux=8JEI3ErA-^))WavGN_x1GJCiZIVDXv4 zMyCFLVASTBFe@_%jwv+wDi=EKQE?uj+JWk*loG){021%`DW3kMq%LEPCcI+PjUPv^ zecc57d|hcY10wwda7243Q~+5bEEL6s&<}e`qv8l;)?7_{pHV;yyrS(THN1EmeS5x7 z$0Dw+hI==`$|ES^blfAkT6H5@m77ucb9j+!3kr=mIodEOmF#G+g4@hmyh(NbZixG3 z$#$W_6!{L@jX#5b`%kFO?@5fEMtc#t$y;hMH+sQ=Y9&4&qxU&wAAYC9u1Z|nXw{P6 z$}kabf?43Kl`0ubYeF%__!5mdKT&iTw(nAnG9@2|>$+{b$!{!sFLDZ7t4442n6jTz}kjf-c>G={Z(gdtQ)I*wjoABNJOmdbAHdR%I z%TV)0v4K_#JYN`{W}n~#c2LTG8kzAOYQ@_hfWC@SdT-IzVf~D)FUU~e=`&EV!F160 zBmv9RPdnI5S55J2DoEeXsvtix2}Xg!zBr>Agp$}FCSwa(@fV~j)CHz5dWU9z{4KK* zo}{?xTO!|)tDZeBSCYqNH~kD=HfAS!^F>`BE!WbjV$V>-MG`*rOw6OVg_|t~o*xEW z0TJtGwYpgaWAbN*i>o$Pm8?ED3Y{1<%V8FYIDU20Lnpa1e6ETjK5R|KVGkCsov!^_pH!;VWOa~ z%5fKk+}{J*+=@{P_t&%^&>ELlAXZrukKSKn>LktevT{*uE%3b9L2XlTo6d@Tu$q?` zoeFQm>)n0QW3;4ti^gU;azk>xNfdztt%@xzn1UO54J}SX68jY4e^YrJK71csc4UnU zNoCqwc)CYcJCI;{}i6j(KQU^lSbL^M0a_eAghOlaPdPp3TdLdXs|@0L#g#My{w3X z3IxhboTxyYmvUiPmbiqO&nh<(g)R3!(z+IQa@bi z^^%Rt0PrPdU?kU5D3&6m9ZZ8(z@LloIPsWM;I{R(xWU2b$3IrN*jQoZ$@%3E9<4wvnIJT=;&8(X*--EN6z2*#l99|PHqmh-ei z#h-}hjc#|ve4A-MwR(cUi>js@TqM!lgcL>9t{@mCI~iF1AqVsHYOY;~C;pi<#5jDz zp(B|Vv^9d@541(m(WD_Z=vudp7K3uwC!s6OXlL5>8d_SwQ1J{}1~tpnEBKI6c$axl zp3_>G_7W|KnMVZxK^u2|qOvdQ@?&x>7%h`9IF}8pC&vhFsM!(RH^QE4h~C!xI=EmL zOK$igI&t4BC(%#l8jT>-$|P%)h|@kRcLgCjeLFe*+__%s8wPK0{sJ@SHG-;kkQRa3 z%xV3T=e-}!cy2|0)VG@c>B7;X_d^1;`5?P#1ZrXD+SQZ6WHbBv=?E}3na;%J7vq#Z zBDD6c;I+krN;7GTk1xWR1aTCo+m-C*wIKZ8HEUt;|MOZD{72+1io^M^H!;zX5AN>H z2)=9{e;_%vFyfP3h>i|-nK0<8_;ALB<8xEMjJ!Y(922qZl}E)MLa zv7L>H@Ihz*mipUq!fd$=@ahSg%?qiZeLdxtPEg9YT_fj5Gitb)3Li`ov_dJ(5Y^uXXvVAO0GuL}3ubbXu!t7y5+wN#Np5h5=> zR;eVLHMcnp6B!`rY=dsV6v8pgp(o{rSnuJ+G}T@Ub=*hcHjr#3+6el2jxw4#{eYH3 zU}$GE-Ki3N$LZ-ZtUuF#r3~Er1Skogsa0yH%QB$6SOwh`oN07=-VWvjy6Zne>m&n7 z+sr#XbTS-&H(_||;}j^l&3Sm%5_iDm{M5pH+|FvzY}o-Sr<*C`{HOtZcF}|m@8S7U z4&zuws21Lm`hzIkyBVME;{`BgjFhbYg6>9}(JSdFgFC;MwVklU?G%m~VK8xlPUKAu zMe#h0pno;jVPcZ>A{~M@0XFbCT{~83B-uqeti^B&vRBr~MPHDm+Q4`a9{3 zE%9!FJNKFfEZ;~OM4I6$jp3ne!lNgE1$pXzFs@@fXFl~znZ{G&3r+m&+A35A(P zKndD{BGRl%7U?-`(54ja`avzb4}c+fHmp;OKfePgn6}XFTyopLZuE*PgjP!;vbhSF$dUil*Lr~nE0y&8;|#Zp!rTKdl#}H@xK0nk8mZ2zmlnn^KoYb(l>_oYv0aV=*nPHoSBv1WuwkyPc^U zimvx&Yf;=V4A)$vX-8NC$7cZ>p-l};pIwmQHlV>5 z(Zhaqcf-#F91XD8PPN0^qB*O8qIa8YYyy`|2bOd6#Z_>2yW5zS3~{}+RI+zZ2NPix zhNVIc1#u=w>=uh@n=7t=+3=$HQbBV5q-&07_)W|`v2|F^<>hFQrR6e`&3#mkj`IV?OrfIdz(!i0Ij}% z{X>DVt|_Z&`?Hzr`o}FE6auY!7t#XF6%+%T*|&nCWdle?oHm|tZAyP0P@9$?bpgT_ zebK&^;%XFZ!*%q}zoTWo_dj>14YL2Ti1YuzmzOUX0O6S3UeY%JRbv9$w$<;;cdQ-w zE71=99Kd#_=ZidH$*Z(27_{rZ%G1Ja6#CkhNB(J-vp&|H(n?UAT?G?;h0t&#C#~1* z(@vylh8=Zi+v2OBKQXzq{8tSQSVC(K^NOv!1iFlqJAL?-Km@2mX=rmBtHki${Ebx7 zglosro9FHunxHk@gwP_Oc*1sOY~(=Z?i8RyO=IGaYU3twymr)=Cuyny88Wi{8_9=< z8Bf+p4L_sL2;-GN5)W-)HGsM+S_wJN(WOX#IiPbvo+ci&0$ ztCI7aG|EdS@E$)1=$&GUEhPOgsaI>ALAXk_mqIlKBV>weFOVpTWD(xs?mJj+mo~v} z(E^&y8hY5n_XeA27dX;jchC+D6ze;V>dWt-Rf;=-5XUL5f2>K7Jw7(e?ocY>N_GH+ z+5+9iz#e15=C9My39kPpZj})vWi5)5>? zO*pv83i@RPLg*aW)r}fX^Yf6_0#HX-m$hf4Ml-yUmO}Q>vP=wo`aQ{QJ|q)$KutEK za>P_j)rW{)D2N32u$oQe#_guFl^=E{vQyNU`ENgna` z75^$g;c2v$wMy(-_MVe^{0ROCy5)BSlhZH@z)`0gx$g@s*R%`th98kNi+za=jR{Iu z5?B@T__A24F@2iqMvuTJzd(}24#3Crv^PItfp>wPo9n6%)3=HCjUe|Uc|@*c6SYQ^ z62y2?z9g4fU4FD(wXf7%?^oz~|4mIh#%%a;gSK*Hk~5y`6X13KincOas{`QAhrlwG z0yG25+~5jm+63Cyb0Z*t1}iv4uF%33^jcBSPL|uL4p*h)g6N~t!8%T8!cX096~yVZ zCH=3IYPd6x6N}BXl5!`e<}grDpyPWdK~=+d(|#w{AkWSg%}qnfgohG=UMsDmCAI=t`J)$s$u)2`&WF#y-CHBKoC= z3A_>j0@ABE27^5Kk+Kd@O3Upq%s&p_SZYjF>3=FK3+vSVIX7F2OT-pfULHG+5RQZxz2B57r` z$4E@k+{tCh*`!hAi1bcG6E$jqyLg7-ChIA}-VQnFg^PJZi|f7LQgr=mw07;4O2dm@ zDO%!WAkV!HQ*T$gyy*URdxR`Bd`6;Kq7JO4)5I-+SB>mA>7S2 zYGlsD^X$iPFvbRW!Ec|3Stu6fFU)eU0WD99tqN#RAAZ#k|>mlZVDHc;^OLw z=*=hFY+n+^4i3kb>laD`ZV&9T*n%rY0X4(4wIJSUp;Di_6zwfAVNk-n`%@8L4jd?4j1LK(r%h<%X#Rr2zt?N-%JYj8AdBYHaylx7s;9K0MNIfb5g?Ct$bc zG=@ZHv=3`H(aKO@ST)!UWmM!hGyza~%_f@sH>i654G#87!Cou#ebPW$d952*Dw~!s z6qrUa9(bfMY=t_U)kcCpSk*&y2$s(ZdPCWBd|ct>|a`~AlEC>6Yt8(d4-N7_J?;!w1x z91XzTm9$l=ekJH{IBmgj9GQ+gmAvnR1%!HVQk-cpg(KufK$&1Sd51P8i1kVW-#mz( z1NiupX;Wq(oBtg2E9rV-xM(x4qW6u-nq5-dm_ksd^Iw0Xu zTJw1^%==s14PjO2)9YzjsYI7ZH8j|hz_))^2cG7sFz91oi>}kqwL?t(JcBM8u*G?{ zQPXh7Xo?<$w8A?MPD;kT%bFu4eNXNCf8`L3pY zwLqKTRnX@NSyt~tNgpLQBA@Kdt#CNwdzDI1QEE{jIj`t$K;xG@E>mhG`WR|(-!YkfnO;MKGm*RgLcB3<$+U|kp@Gad7PHo_hi-frRhC`DtNs>h$W1r;{7gvZIK3lYR<989IWP*AoK$+SyA7{pwBUHTG7D~tf~5bp(FlL!J#1Z-z?^wDpx5M z?tMZfYsbRg%rqfKpxc}9cK+2g7ao>r(olnV0($y380XXF8U)rsUkpdjodx9&EOVQX z;XIuQMo=edT`?MLuzE~WJ9xjKjl(xKdokP;Do!udE1?JAz5zF$(jqy>D%wdVPWUy| zlZ`acFOM0NH*>wKiEp0AS1;6=_Ke+#^O)6HMjoHh*GsNP_i*4|A8ui;oCs5Rl35Vf zXjIjLai+xH4b=aXBpO$QL2bu9Hqx@VveI6OF2)^36E(j`I_OYUn~P^eAw^%W;dy#X zha^XjgOTo~)YbKx{w^rmzeK0h5fh)5nQDuU`1*p4XXk4sa7{VfiU(Xz{YY-W^#%As zk3t4L*w}9f$(gKG!jWmsny%vcpvmeL_PNV8oID-*1|^(`+SMqStyRRlKx`RrK>3JBGy6{{|VcE zs$9hUVuGn$x!1vMAESM2;hi{}c*TQ{kT<08MTz<=-f)>(plHBGVvG%J(F8`~r6N3E zK=DIzC4}ee9j)ZSE0ExGI@hMEZhpoE5*N33P>NGrJ9rqJN2VQ!Pix+dp?V8?ZZm~Y z95x6V@oY#hrdhnkqV3fbS`=b6k|4v=#!)5c{Yn{6#TyR8SaG$YBuNn8XF9i9;(DOv-$gk*jBYr)Ms+k81~)#KG37EHZ3mILAvwf^ zD902a7$JkJD9L^ht>zi#CP!LD@l%V5W|dOx@Cp7*`w2V+-DtTm6PN35a-Llf!!ZhX zb};R40;l|36vf&$CQcbi@d0kOcQ^ntRpnHOsots3B}LbxTtNgqy^@l-MR9N$V%E>J zxkg9wbITqA$=%GE?NxWLy)5po6Qju2v=zMzzX-Oj%lU|Ci3qH(fAaj@*~CGmfAbhF z_bNR|KWc@ zuQ>V7$XAxq+RK#6#BvH6C=a6_g`5VmN~Yx%U?3~6pX!WhRV7gD07@zLGpfR%g#d(H zW4vr?UT0}GLfWI2&|n*bTTR(QT0fSTJY_!B1yN{hprXfvZ=G)BaM(A}uC&<>+JIvs zuZHy8tGG#}H)9hhB?@(Q2F>lvSs>M$v{nE>q5;{0e}%v5(J(lSO{wAg+hAQ+0{k+d zWgZ!-#}jxSEu#j? z`{l)*qy2%{ApM9T9zHg~N9e~WoqoDbvf&(t^g>iP<8S1Y;-h^?(19@|sgf+^jTA?w zn-Nomw<}C*fj{X>IsDaQUmBf7SMwTLoHhktx|2BsNU#<8NmSY9&6e{(z`+!oC9aGP zR;v*O{f<{m;gfD%ng6bpaUm6Ty6H~Jy^NGKNv)+jRPyMq%pp)kOCxJijExYF=7Qd) z)UutbL$R;b>Oh9(I5Z8Xl4{qds3JaB+pIOR;B?u9b%V##rm#wIhf>2wQ>Ajg>N38H zQ)Q3~le{~{8Xl)qa-a^@!3IR+9bcnW@UNRtkzGbHsDoAgIFpVCLQwVW7$DqpwDs3U zj&?FfcpwE+#nrSO|4xe{FX(FM*3D!ncTgy8okV86mTWFK!{t2944|&2S8Ia2)0pB^ zi7)BXp*F}T%M+6$WbLA2BW?CW7(7C;bce1!&_f zwAbb&twC*gBeE)Y!iGkV89iw;B>xy^IFC>nHLiO^hS-UGZ;`vqDx?$NGZ2fZXP%^u zq>GZTtCqL~*ame}P9K$V>ldJVSJOiH)oMAu+r&wEZDtDhRUFvDG;7SCIebjU zjj1W^l=C#SItDGtWv!Mt$H>D}+GlDbN=r)aZ#h$#{tlk-JhbOsRK2zl{q9M|4J|hK zuSAPCcBvK2d^g^u=D05o(6}~27N0|VN7rifv#kYnwTh=cVHaqb1$S;%D=-*twhOO% zEzC`eLQ9d{s6p$VNxKBTuAPV}b+@^D-IZ%6OZBCb0&z zlns-$lG7D6hspL%M)6H?Nrh>jijJt&%oA}ZIGB$|D@+|rzXowNnOrx()E86p;Hev( z4r^XFB*&L<*}$P!r~LMK|B0wCJ=^hV^3r<$`L3ykp!RlB7`Raix_gK_a> z6`$B!z$xTt1(bWd2KWd6(Di!pjh-YqP2K2cLIamA*e$i^RB^#nssdOCZLqY!w3BOH zkvBr7^xtQKJDar%>^Q}*(EFU?eG}lIJeva%~4gOfWk|(!l zM~QNBm(vj%B^zjyE0Q}+radqeCmjsV@jn&JsoPZ;vHfFn^4uF-2d|gFW9jyqimG{V z(UZw7g}maKBHiUAknq$)EN3-_2Gs0)lJ5gC$1W1Pi@otgDC$O-wnGJcPW!B`gow?S zyQsyc4!xR}v|R#c>{Ts@m*{sXFV!pYBrf(FTy!HKz4&UGY3~(q#?NT61?AYQFc_E7 zFP5Z`ona#|11(GYTgeT3a8357++WeE;X7Oq-@Oi8i&il$YsD4&;EGZmp5r^*5+Ae_ z#ibH`30J-lZb-*R((0?fTY4|A#jmzHHAU=YFz13H+IQMJ!uMP3vt_Y*6c;P-QeSr) zVSE;z@SsUmfND9|3u6!ubEtQNGv$Hk>IYCbXX2#w5a?kJtlz%6_(usi{y0uJV<-&! zkhJJ*PH{PB%h8XgR*bYcjiiH{ z;dKGzz2~7hwubr4=lA1SncKdvDjMY|U_NOEQ2RGy3K)qf+efRu9Df8W*T1MemcW+q}Q=5K&zzQ??h{hmsUoZFY`ZQq)Ltk zuMd!X?JyA>u;Z=vnSo8@K52AXa?-mqAywn!FWmZMAH}!sVaJkXza+ENko9{Se9P`~ zxyb8rciYJ(bnRv1?yrkFdaJ$>jY!hJ0%5LITd?M3DJ@Tua}yB{6(7$V;xZA|V$z_x zdUb?qn%7M1bYlZSdHtZ-^-3H~MNvnjNd+FUSY3R~jfCmRz)Jl({%l4YIJ@DaUfmUc zQgg+_y*0R>7vIi!->AP9ao@BDahth7$f{gc*D4(;Ty$}TjV^y*%+rE!T`HO5z)aF} z=+Frot?}5AK8WHa9k~wA7?-hu0diNfj>znc+F-r?p>UFM%XwCO36@~WrNW$ZiZv`n z3EqY)u_Xux^;EHQ)vFv`DUXz?CCQ1)!tp+2r1(kX36tOBd|KpM|LTBl0@Qe%i|k_$ zjiS_~LRfgFA$NYyBYFzK(AU&rVL2L0y!oMr&T;qgd1zE7C1}_25=ZAuojzWZPHEGI zufuCMCc#CruSES~)H9Fl=d7$uPfTt>KH=WvByO^0=9S~@v8VRAc-dmrkOfx6fF%2D zWM^~cWwYP<-7%=wiL)wX`7$lHi}>K|7hI1Q=Yxa;^XxS`%^$><>M=kA*(E-7xR0M* z+IPF-K;`#I+ZN^9= z*jIV82<4jP-LG@h9wyK(5JG%)hH*Sq)X|C~R6-UGw#Rc=8Y>u=)h8^>$Paj%TUByM zS@??`$gjI{COwIk??dJnf<^=mT$|LW{DL$IC zaMGre+a)pfLl7RxG*xIuur4fIpzjnr1AG5imBp-EfNz}1eZFy631$0e&`>J#+S#k- zC6=^Dbq8nk?`C-0;>YBVJYhT~m))w(M6j=<%nP@>NOU${=B&ar_0!K(ug=|IqZ1;R zpwVUca=Z`rnVlq?a1r=5*lPZbjFqhF(H_>8!|Z*%-mG<7PG(3EhTru*26UPK?*lZ(;V4P9Af%A#Z&hJ zl@uRMJq@XMLUT8{sUQvLh~EvoMH~h9OEkiou+QEMmibRCaAbx(d8R8;Whdzg=K2k+ zWJ@398Q%0$cvHqJ$%L;;?juy7!|#ArXuAPxn36O{isqU6!p!Jf|t_Qty10?99G8 zcM0*p(zRuJtvrUdue|wKPg~WE5b4I;Z>sfqdO@z4|3GP5$ipI$CsV&#GSEBPJY-^L z9$tpee8MHAV8@>lDASosjb8d1KYxf;2?uO&xW}P}v|9;3_&sHVg>O+ra>`VSq4;xm z?GpVb9alqCGFL-fYNd%_)!#&lIXh>03W~VpwGfC#PsP z1KjO(#4JH1WN!>`LNZLw>uM&_2t@NL_=B70(myT8cp|Rq;6KQjY^0I#6KGg~+E~EEyJkKo*3~6JkiN}Bmve_D z=iz~^ubQiuEFtueiU)FbOo#X*9*NioN9%ZgmVR`Sc`xK9}UVh zN)z{WHYFj%IjwvaqE=mq<2av-Jdnu-{%9?!=NnvxY|?re!?%9Z%iKx|e&Oy~(pcU| z2C^xNVBHZR*|Ek64_MC^mLpp4g5A{?q-C599JPzKY!nqwPu5|I%{R)_9q8+-$sGzo zMxCckKhH*$*QcH2u;2_?gKLwg&6GUXHtruX8Y$IzC#&9jEVkWL8PeNeyT)^eJe^Bh zELm9DK7NHW{j*PM?`P75!p86ct&|D%3K?kHh^}!kWnr4eg8A|3NjztTxz4(1B7BwU z*jX3B)32YL8dd)NG>?Vha= z7bI21R3WOena#4*vu8eb**C5^<^OpssDHXQQ4kU0VN_@+@+$n3#N_(PJ zZj6@L%@=3snFj~rc=`*Tn$)jT*>xU;WEPiWF39tZmyVBEe9?@f)dPhQ_jV5_*@z5$ zO}WFa%8N8GGmo`dG4TAHj<2YFx?b=77lnjQPY(Xb+s&qh#V%_|Y2_hjf>3yvl=Q`} zq1pM)h5WkMhwqmuXAXksw4t5@q80RuWF`ANg%Ga;jQ&3F^nAkBH23<-HKd&c;&FMv zK#2dNr~PnQt2t*+%4hA8WK33hDXYHT3LpxeD}hcIn&rLwz0B|0pQMH4feS5Ct8=3e ziIoTT*->Z>MDw`rVUg6fS$-R&XDwY1QCZg zPL&)9lw4Gj$~EnUwW+S?nlrFB!U<=pV}d`q2t_2^Nx zI+>OR4{!QjoU0vRy6o(gb`c#AY8=D>H3)PcmZ7q7@$cJ7Y{uc|1@(pqxVqb)*%*q> zu@lCq=Hq_=go8hY2Lf%p{V?9J%{beij9bIKZM`Ajjo!_Go>6Uw-l>7X5P?v=Ll1f$ zQNsa&3V{a3uy-d{cRO2Geo+DV=7VPb!_eutE{$e+^0v1*v_T+nmX|?J4!2Pb?fTms zW}?2oQtoIM_5hS%MKP0yh`vtWG!!cIU!;CizG^fY; z#YR{`LJa&fCoj(&TTsdc8E|Se9U_XUk{x7Wja^1Cg|J0W@(sIO7^1UG3W(=yB1}?H zGAF^cIJ25Sp%N~N7k0nsmTiXc$+cxKALuCt*;iA~5sM#j`w_3CVuwEIejc?d8|xq? zUDfg9xkyw$-_;lCZ91(EUhe}>h6)pXQ;>hkt}MXyuG4jXu2}N&!dJxg^`By~a$7iX z{K;Xc-1-r2YCtfc+AjftgUf`s8SUFG5P<0ZjeBy?ssieABZrNR2tatd?1~1hE4lAo zZ7&CV>6!tp>*)XLdb84CVqkmHA3-zl_V)$^f%F69*)g$yD81bTJDLe;ARRHPmU}TJ zgk~xYQ&T?|=DnDeL^F$zi{S`=HoH~cey8tdaYO05N}a7{22|B3d=N-+%Q^^De$f{T z>l-B&n4nPA?{^$K&mSUN9H@-a*tZ82_7-5$XojoyW_Xtlkbd*pM;6EyeV{3A*+)5m zmvB!5|JVMe3j{HA!S-2HW{U*nf{DG6czfBx;I8gYo5{Z8Jo_WrQ3S=Gu8vXwI-r`t z76D2NOQJn#u|@Fxf?r5fL{LaXOk6}zTtsvuI5FS?aJF7jTU;n*kbC3WNZ3Ef@?!x~ zz@c733jnYIO?k^aN*34sdy-|-JRIWW27!6Q1)LzT-z)J0rEe}TIS{~52Pz_ZwBiD} zj8QrMxmbN)ynaWq78k>mgutQ?M-U(p0~&haTET|I+Flq~bUQ#?G39U_w7V$;Fo^%c zQFH(cMXc?GcE|Ck=3ylmU=aO!`_wZ45z8;BQ;ijoJ+LsvICUgX+;go2Agllg^q^%rgpDi! zguNpK({O37uriYb>ctfEb_yL}Ww3YWY*4^4rC?3(TTTLWfCIM%nrJ}_;>3o+E?_G} zfSC1$dSD7fVp=#Z0!X5vxgCg}`~L}qad8m|CFwx|MfTv4?La;1|34rPcZ^+j@Vb6xd*GnE%)#9rQXh zOIV!B!iHkQAsC0n@-H7Td z?Emf>+<#+R7@Io%e{k`?V=(`pnE!H>mXVHyj`ja168nEbGW@?nQUAFI`CljT!Vvr& z2L=FG{fi~l{|x-&+i1@?nT7 zG^Uq@7+&yc#>|^u0pu-|wgw1`hh_;i;1%6ZitiQOkvVyE^t63o?KE_KdOBn3NY?Xv zCzk~nS?VRWRj&I~L(>vZZ=3Lg5w2CE8&mj^dH+I9cwor%&2O{)^CpIY&X(2GRx_{h zNGcFGy;0gjutlOe_dDUIQ#@_#R*;|GSIyiT5KFA`vq>vz_BfuR$9m>-wXyn|HlT6n zob?*JaTonM%vw`W#lVX@m0kh8IA83%+8&NpA=x=A>ZJmKunroVK0XwVax!@FCZc{2 zIOpjShw&jMmV~fL4gC5mcLS8Tni%9EXxh-q47S(!h_Y~ro$kGER}p99V6Od9o?-)n z1fwfWUe6`?AdAs(9$+w8Z~j%snT89qNOj`YgOC%t!p)sy5IeE844MpO%(GX_aS1W1 z;&?nhc$aG1;B$<*fpPp|Vn3an4E@FPImh3B@-=cM4ERcv6fpr6h)fmkp@|3ciKo$z zXC&(TM`nB}0dl*#l99K%WhKMIN!$Lvi@JgeAGN=OHGT!P<7AaL?OW!;vx{*h{8CPB z1ZDFy-I{S~L=P!v5kX;V*W*l65!{J&ZRVVl`K28zNo!jj~lW`F*h>TXyJB#l~%Ej;z@xoo&E{{Y$H7mYyDgpQVI9NHP?SnV)1^Dka zfl)AcfK%Bs(xB}s>9rZAvEiIeTa0to1-w$GV5)?nhs9Aq+=f^$%oBbY;S{Xeubc0o zXeDANGI}UXQky0tHX5oY{fHou*$gF*xn(q+4l+EIUTU@rTf3B zb1HwDJbOLg#EEH)ThvaRn8GX-tG+5!x(I24`m%G5hQ(blImXb|xa(S@de911i!l#2 zGxrQaCzS zp~S!*{5Yt8ul%FU@^>#Bgh*wC4-WRtsq1V=t#i?d3#&Wm2tpxASm+{Mc~(7ms!W~7 zmoAisd4zl6nM!fvd~CfXunIXMt1ZoPIQlKbk66gR&q`xo7uQEJDGj)ZUb;)d)6G!{ z+}t`EU*3HD`nHt_(h*2DAiw@YS%8bK;e?Ptw_teg<>n_ISF z;f$aNB@5J|>Kw^tGe7<=Q zHm{miD$bWVZz6b~LW(STx%1rj2E_4kAi8F_PhNu%GF@H=#BrSm;UT8E1rkZ#s+-bj zaz;otG&*-I@cBOeOpd%ZoPBY=-ivnMDyzF1G&*^dT+ziRG&uDhy z`gXr8AH>HB@LWa7ve$f0xIXB4K9`oI!Q(%TT;0@tPTWSguE|oT&1&3^l)k9b;^)3B zmS(wX@^~+&?VvCov3zQ@cyt%zJ5L8_gN3vlR(wI=Ivu#ha=HJY#B%!T{kR%Oc5iH& z$^5v*a-LA&;5zmzlH)`V6~%JC&-+>wVUclg82-5nJGAd&{Bl_Ld&jXzt2e!35I<-t zxi>(bO&vwA*-+B2*SKFAN#ssPbs{ltSVpHy4NWwPP_YGVDzSu>V$sHmWa6TYJ&{;J zH6Ov36m}3QhV{EW;9NW!&h+5JmRRImO!gSddnG6P3OVyLS~=CKpzl$`8rSZU@Xn^$ z@*9nL;8fCAW9VyFqk1_2e&^_*&!b&wa$x#tBH?T|c^A)c0-I}1xs;Q-$FINMBc6y8 zXpBpHGm^9%9-BIBGHN9WMLRTEn;Vln@5O+iAHKn`lGj z2OgO_nXt9HCvM5KnR`!!x)+G;*h6QiV6Ucl7k9VT`H&v5DLy>5scb{r6BT z@yUO>%~%PtgWDFNt?z3}}KT(62Ppl3Uy*~08&REKNhC{sRHq452Vm945d%BnI zZg;BPW>V)vT#1xV+Bkh@cS4DGYz|*?(Eww&T4nJq`c&RB-%tIdOJcfLY*BbMZMWh> zalC$*onn!f>7!xzW^`g8Df&mlNKQEQPtp<8FQm5;DmNBUeWTX_<4hLBQ34z~X7eH? zxYmWwJyGRGF<5&vE0Vc-DTiblKiFK(xVHh1>dS57i!OitRBD%MLcIXiKr7NO;fO4a zz*q9-?>XiQeKY)nTa0cMK2cis1E3miPJx$0W77%WREN*sCaFrYbSiFgTC3F)mb_ci z`nETI*9=^rd7u+!v2#k^X`F<&QRN zTCM&90^zmF;yz1ao>0jhi4SXmyry%#9tnRJ|D$#ir&2#hBWia#(;Q+k)oH!fAygxF zcw`nyYkX65%g=aX4G|G&HTrUj%-=Pf!wdU0LdV~$lyV7!p&y9pgN^+zwIY9g8ULEV z<%ZWrwZLoYT6pJDfJ$~t-s0DQSEVi) zTY}Wf`DXsH`TFBCePwrb{UT|ptKY`lsGD%+CsZ0#pZc@VcxN%$T@q~@J62H@Zf&0E zItXR3Au)&EGePSc{(zcH9m^;vPhsH*yn(H`!Xdc zK9#d!FPjOznyk8!#;<&`FWCgiVm+P2OUh_{P&)1{BlS#Z72N{KXReEhsYXJ{&vvA*3>r!5dj8GV{Z1NlEmHj&WY+i zOmE7Ft}Zl-0MWGVjp0Q}-CEv!FOE4opO#_!AV*^E`P;@Q!S-m8g=?HFRkn>b9MhU% zJHP`i>l-TB))dK?$_~9;3MO52%x&Q@T36;L0^LyF)U0q3)tOm}Z2i{(pLU~3Ql5yT zVc|3#3DAvsnZc|^5O>_!XaTso%JhY>kR%tqt9np2YtyYzl-=&s)kC^(*giV`AuUOP zVz$qi6uJehG4!S9$8U5Y0Y_g0)xb(Z#Fw+`S)Z~ zWPW{PZ=&EyKkCalRvB}BPsYP0=~|}v1O$ZNH@4x16!5u#~!ubsQR3}zSirK@FcPzpbt zqWf&b|JLZp+24I}VnBI!SiVIsIs6${`gFf$`0Q5&zXc(-!`t8yWP&6Jtr_0j{}Bs! zc+5jOFe8=z0V9O=Ux&D4Lu~dx2JD#$pU?RLy54XmQY~XV zItClW@{y6+knqz!(y$NqBaMYDW+YL4LI`;e#wMj6_1Su^qU^xOz5t}ZxG9Jqctz}5 z5xV7(^etZ4-ZmDq(x(Vd|~G65{l*l7rQlN2QYzd4X4*NrJ`JgdHg!^)-+kYCVWt zPwdw!2|816OsMs#^TJGl>sdsvx!R z0=1!^&$3Du#E^U*QO6^WA($)-wA`Y#v-5PmEu|`r?|p)?KE7M83dP{gUS4eI`!5+8 z8DYZx1xP)k6?g7Jta&{~O*jRimD3Lg9|Et)l~2#>t4N?wuG=Df_atiKsZXIi}w=P;9J%`YMIy_fUOSn5W!d_oXG|+ zbRQ}rV)jYM7BG$fIeFPc?H5)R*~il1&yI99nJCtJPlo0d!4f z9vK3J=6A)U`JT_g#I!US$b^h@c-BhG`uhIiI<+Fiv% zZDY%N!^$)M3g3ok)WMlbUnq)wHih9R+J^6W#dn03eFoc}(Q0MWY%0zdiuZ+4h4||d zCLYy|4ylKGHvs)j!1#O40Dpppg~%eE;gBHKpW@Y}7VUOh?F|N1{*cis^e}28l;Tc@ z!2L39;WuBCJ26*OvkM;Fwcaw&{@dhf{jfq^suf531=>;*{hA0H^z2Ivb*mW`FZBoQ ztx1D0pt4LogB$eX4%UmVl!AO#^k~jG2H)_%?{InyoZX;^%{uq@#|p4TXc6qkks(0( zmAqnfYo%G9*J#mq`o9I2@t@S$QR%M|R*HLy#R|s5pg#gkY zhNs9g^EiX|yp^fhD^l9Q2Xod&?$)*jJS&zjXgF%{bR~JA9RJiFp{M(i_2i1O_5MI= z6}8Q|N2SX;cAd-qG@#2HDvt%cN_?Ei2!HE?3j zQ-07HK5_I$ph3yqT^#s9KwJ;28Tq#eQ-R41Aofo=bVyI0CfsFg0YNuN+N5`o*$QWh z`k@ssVqR!uBp)Gc5UsQQgu9kq=!9)0{%T62GFuF@;Wi-UwV*WBCmbv25Z24MgLVY` z7|W*U!NzO_6^I9CX-dxV^OgjHy(@S=?1*RAv>O`gub?N=tGn`C(m+le)LqmD8(ZMPio=e2u!IBI+M@-76Cv8GI9{6_rH5eM z{#csehh5x9HLceGF%z!zGl`r~p|iOz1Ze?M5+ks=i2T6()L ze0Bc`eSlX!O`Ic$=GQ%&zN40&KZy7ASvCMJ9x_H-+t$PHOd%^EZ>3VMQ_j zGBfHh{$a^tUK%8T1BMl39u&2A@?=(53N9-p4%}!)9J~1kr>%I``dvXN5e`ZCH*!+O zb`hi!E|JD%e3=*~eV6aOb~XkE)rorRi&7707cD2Awv}Spg*EZTF3sD-l}p5pWUFf! zx?cdyjlYNe`|u#?1J2<{W_u*Q4cr!M)?hTgrmrLY>xqPD^=6$EaEs8H#IsaRU*y_7 z92Nkwb1Z}z|(*2rboxi=?f zm;A*0YXirH-sOxpY#Sl1+}T|cv$*6ls5pUPd7b4Q=lU%r!Fozjdz1RmI+idOA>%IS z(xuiaHX1)qLDroc7vYQj2#`3=mL+Cp1gA7jj;76IOE02;`0|sf4YXf=7PtTSjVpYJ z5W&?l%+UFKB9v~+dfKZg>$Oac>Vq=w=T|6G)9l)tFBR;|LS(IN$LWZ1uzTe7O|99k z0^_xCV^nWJ>?(gStZY}nLGP&@Qh%VRCW9O@|T9p{dItD z>TK+zDd73{xY7QFc8B|`^F?e#)vv?u8597}TnpPyvtxxlBVSMV6}%3Ycj40|1o0Fj za=OFAEA2XcL`}vI`?J*-`6>IadHV3570z`(E=0EFPXVoFAGan#cyno!VmY}Q+}f1T zLhsgPZa$D+%h6W|$KSCknaO}*52QWA_0TW?-;oIBGyy7kaquQ!*ZcE^?I22nHmuYe zlZzYI?07(;e1p_KTe`S_MN_(WB%Z(^bcrDRJb1*Qf%QQ0KA2-nHlNo(qa<#!d+}Xi z6+g)esve=4xC}cu4S-=w&7P#Ddsk%W2rNh>tUwxsO69xgy9wE0n;b}0l z2`}mlPCir1{D@edw8QveM3O7n|KV!M5GNND%_!|3YuTGq(*HD{pc*Ggy5u~6ZGHlN z6hP-R;!m9H8L5fjR5uhOVPw6cTrpINsyO)1Nm~Mn0@Cv)Z z1iyAdC{rMBkvxzY$1=v!c$FM$w$c7v7?#Org;84Zl!`^Dp`lzJ1lugj4SRs z?X*}M8>a}b!HO?0?v)6JA?qyX+`ud{Pk()~ZUn<(DpTAjV9!~Hgc2)Lci?NPv`NN8vZB<{n(Aq1fU6zeb2c%u=L zgfS7>H^j<}KQ#V~n#)bJ%LJ{Da$m#41;169SrG!-Kq`N<*{?x`nqy!!WT=r7aP#X&uT; zFLtqx$M97%zG@Ur960PO3}(#`Rj7Qu5l}IlRay0l;6?`YIR2{d)CyPT&~!-;4D~VQ zSGvwynp9jv;r)p`M8mw4^D()!$C;mvWKSY%tqwYwbfF&d4GT!n z&#{Pdm{M z2~WyLP!`Sb^Kk02#+{aIDKSyPosw084jZ9^SEfp=+CTDA94P?Y4Gj57o5TeS(#wVO!{j!hutzJnALnlAsdrmP}w_N0_@O zN6|2AQA-$h&PlT9OqJ{$f(zwMxy@;p9%70d zc68pM)zr1r=pgtOK`K9h3O6kmKx&6*dckNn={-{ENjRjdOQHz`qA*D0Cii?QtAhtg zkb0qwDxzzyOiW+M-HF+bc@foU}dsoct z7rO-lmsfeF75tcoeKcWd8<6t&P&057wk0pS3Ik3Py~#TxxEv2G0pn|`e0YAy(bdHnSwe+?( z`H=GWV%|Kx!22XbS&YzVB>3|^Gc>Q8pDQ5igpd;PHPF}hkj!_e57JflmrqSl2fRigdC0B9$Rrmg)+i72j+^V7mre znjiwADpJ0Yfvs4HQFd0^w_pxxRdx~AXC4jYH;LV=uKE7m9ZtS z_Y+KDo&fwQBV@rQ{l*ac_6qAu`1Fr7CWf*Tt|%5`6!JY}wT|DhR+l&>tI>5Y z2YBv{jNk<}G`0FVCQ&`_d#7f}`cQ9rzwka&ufgOm;-9pqzGee_LSY8niGT4O*`B$D z!s`W4q`hYFqIrqWNYgiey$0zWJq9DaTu7Ctyf^ev8D zsTWkN8tU>XXQ zSde!BU<%K+S9GpwK}p0O)LUXt%pd}!377~vlJB2#wPYBT5yR$eoO}4qIX!m$S4B8n z=@{^Vn34K?{NOuwD8#sp2c+i|t6c`d=HTX`oI4@M;HULEcg_X{#piEIQ+`Ao9pZnu z6QvJ-;3kd-`UpBy2s-%o{{(2VqyuF~)8Ed`BF*nGSr9~}M%-R1Suh|nuKgO}NRtAg z^ew2smKiuXTna2~0t;!&7n@vcOLqe$&b-wCoB#y?X~L2-K46O2z(~FT0}O~KU#l~0{zYAq;Y*$D zsTY1ir<8zLun}xK5Vw$D0qDf)``-ry7~&V7MsC7V7Eog#80tGyzJuTM?#iPZT6!?V z_7$llmH9IbqL_BdZv`+;-Wskj-(vvhbdOtZh+wu>^k3@?vh~PR{VIwnAn}AQHQJVC zJW0y{vPLy5Xt!}>!kuAuzK)jxJ#twEZZDHo;qML7@u&k99Pq3Qk(JXkY>L=_lUM*}{JX+6e(W`;zs+eD>*UO71pI>A>Jd#M?Do(%esHd@2 zC@`~CAay!VWq6_}QOhoUIrgf0_RGMTvdE*th8vO3Z6menB)dNgB;1L53B3Q@236Q9 zjX|uh##4}F4v9aAn=&=`Iw^^p;`@e(cF%}*!vMrh>9If_V0N0oV8EvG$RPRu7HG}; zcV7-(Mnuzdq~EMoF@I8bA(ap z_2b;YFlr#SI&?+?@hUOa?+sK8*1ALhtt#q|cA5P)0dyh(4=z)%k7l`Gp14>6^kKE$ zIT_d_-c+Vr(JWLl%gb4q6=I*N0WAEBOI`U*rndEjab z$hfY3^?#ecJ~a45U$2VennC4Qp0hip;O?swB41kkFsx3?=|ZLd?v` zi0}wY`gUAv^3xn7cR{Pw`5k=ux9=kLb*>7{3#7Y)71!UuD^v<1r7_p zcc8GZ08{twnK?ePcy9@mA7XxYZz3H$NEC{@D)!1j85JHWpLSi?&&og?8wo_eWh-bA z)@#fn8wI^}Gms`{GFV;w%cofWJ(#YlRP;n(UdB;D1%Oy3iFa~HGhX=;Nz9M9SFpy6 z+Y;k}F2gX{2TZ}sPae%^aE|P;|Cg)m{uQ6->hrdcRKS)$eKr)YXA*)bp*?nA$?#R3 z#=BnzM=6DGd&?!)ZcVy$!js>1;S_y*r0nWIe5zM9hvK%05Y4}uiKa3^)FgXyZVT-bplODK-+L0Q0 zn+S#V%xt054oKG5lQ0k2D^1PYm^4LaIC7>hyQsYpvruFl2rs7e0xdHs(xmAbzuZFc z{m`4df316Xbo>e$({mYPD-83j9Pnt(OZmXW#~LY+q$q)*xQCwc&2BTtc=@qQG7iR# zwQ$-`(%FR*6Jn7h%X8oG6+aI<|H~lEai*!$5&&Xp9-D<4F|GRf1j+Kmf zVx%bc9)%Gux0R?V9&bJCNR^3*0naa@`%l88oUG#ChIwR})g%Py+K7V62Rd2sKoJLA zip0Bc1KT-K97(k+B04}A{&5HhHPO4^?P-dQ z*jw=Ua&(-6*Lu=|vDHLSPSIcQ;F}$Iy<(gVO7#9_XqJT;2cgN4(5MV^f!BD2A!IV9A4gu7_z)EbUhG$rfq&66?tR_>Rd<7q{LWZu zUc-<&bU}2>%lIUvKqaUR6q%yXm?(>aGy%C$2|~RjW?-H@TgrU5#|nb^az^I8EOOj( zfYiJqpIULWz*mNVfEcjK#LoeK>yDGu5#V%eCO8t55XS^I2CGx-+(5CZPs4{l^nb)v z{PL#4yX{XDw3Xp{08i1cm4y4WeWaQp2syj;Cl*nJKV;@WX(ijE4$<0etDI0A^`)d~ z&_v+yl}h~zniZMkI;>Z`1I&jH)f)q;2^pYx;qO;j8#JjV&CL|B43~4spaFhekEmPt za+JZ2yhXUXepptCZINwC*O;R-Bb{`FBS}383)o1TGzsCQ%6uWNBTPu`bSjfb$w>`1 z*#s$3-uV@!)2=L{WAHDfzG6iw6+L~3qT->vX@?0HUwne4X-3ib@{z%oqCWDRBn8T* zxjK3=F|Q8z^Cj5d#->VmcTPMS2tjIPHRQJTcoRXQCs|q3x{oqY-bo^!F91aX5V=#H zmY!a^azy$1@6ko z`0*g!Y#{7Nw5Lv(*>=$g9IYB?(!uCGK^FTV_1t2YQE9iW?0>qU68N!I&) z!IZ)mr;zhYqqv}0Hkqrn7K$EQR;5}6uBSB@_rc=Me1#nLywevKNg$fA*Jpm2+WaOv zbNcJa^JFzmzKaau#A3vNFt2WfWUtUHhLZUSzLZb6g~;${$vMZ#lB-z!!ZGSLc|#6@ zOL?tMJxlS{YACltF(b*PTxq7q_VU2K$YQ6!;M){?t8T3yH9)mAN=N6f#842L#3I_( z&%*P?Kp@n$66nmV2vlG=*WsM+yS=(LYFe-2B3z1J&A=tX)ZiGhF3|Y9c3B4C_BJ(k zW_z(}|N7iMwXHSvVq&B9wrTKWMJ_?G7@8Tw#q_U#sQFNXe3oUPg3BJKA1K$k=Zl{k44eqUH{A1)#&hSZ-w z{Up5$XPa?Tqia~c-#v=Rd4O_zSxp22!H3#JlU=p_rGC9NmdEQ!1vLQp1~&IA_PTPq zKP(xEgmF%g*oC9`$DFS_$*~oVjmn{FyMzPvNvnkOzTRiYhFCJ#mBDWQnA3BOlJz~C zqZHQmcZ<>t5m!yYBeyt(DscE)!p;@W=o;4(<+TD{JDG>-)a(N9V>ais&jjCSzCyQ? zM%%ZC4+lbFle1T(1||1A>-aX$YwbXF3KxF7CVesZC0lQh7@f$q%=sdIiUV>qO*!{k z$6^&Q*4tabPIN6y%dv`>dXQau`y{OFHq2%_yJnHannr#DQ`0EtmCS;2J9_O12}7ec z^cF2I9f^wPd5Se;2+HSiiy5att8h95fKaf`O6Z) zK}D2tCpOOSJf;X*f7|%}wO~Z-CWJr>0;x^08-lkvG6mYJqvcGfZSfVj_+}T1Fepj6 zC%QZd5W&uNxC^FiHaeRai6Kovzx@1)bRz@SN%VXh8KFKAFzl62p-_hrqis5LK;p)3 zT1B**Q?i^NCPjn>^9*1aT9@GAxi#KE$Q(D(a$iHNq3p62`)mgvO}@cYzqYy)(g+AE1)mx5|v^Q=@GRKldmv_O0*t7|X;N;Y${EWk^Vz zh~1^o&1ggFeAZi!J$We9h2=4&U?N??@kkU!-W)(P@DsMe>D$xK3aQU*eVI~L5jZbB>h?(A*wK{HV6&ROk0H6XPK^$ zDj^n3-_TB9ugGTM86e*$u5breGCT4%N&a>n3HrcIAB;TdHcTIMA@`Q!XQluj{{!?Q zxHS)D6iPhJsCq5P;;UpC8)&^Qw5|C|_r_s`7&qEJQN-v<&v8Q2+m`~kDB=n(A%c3# zM_rH;p9``5xgpS((bP)Tafn;K4i*10?D#3xQ>I=R6NyLA6728B;xab5sM*zu=D@~W zD+P*Ar<8g^f*}=|Ql8{e`N`oakxK>w0#;Zrz`Z}!MPy1;i>(gz(^TfZVbOss8;gt7oay?9y75K1I#&i2WL`X&e^Z>U*B?>1rc6eXF~!?^cb4LUFx zRyS4W{EMMWJm0_qHoL8GvFdkDO(03~1$_r76~BaJw9^B2{)u{tQ9#RCAPgx0+mzfd z2bHw3Re!WC%;|b|G=tpp*HMpUg=@CRr>=-lPH8Tb)Kv`4!>IEIjQY~If!6b9n>sBj zb@r-FiQYxv2@Y{>rd%g zIWwpjJzVdJso%LbRTvOeuvw@(4*Q#jG+`3QJ*81R!Z#dMcM)!NK?u>;s*6IpBdom2 zj>J(i3#o4@IY2Mf(>}cV$T#wcw%1TJz)v#y;zqth25}>Bt{j<)8PoJ5w&=D!rR&Vd zrwV$56qR-qRlV6!33~|1;NlBz&K1Ptzi}c{31<$%T1W5PzWlXH9ylFO{%w#khcNz9c@aWB zH51H%-#9~NrYXHBbxA_Dz(|f%KsHZtclJr zpf)nWrHMMj^2K;`Xmu6XWKd~l6pIgj`-2P1G=+*JW-lYot05?7dE%XS;kL*Ff7U-b z5gs#V>%@cL2lk`@wc~2{pG-R+;Ir=Shmn;Z>=ZqdM1tAJPk*ZtEG9Iidx}^;1+e z^wBwO)Q}+>C>SK(s8?cqdR)Au&bYq22N7g^1PCT#snsUW} z=a$dMEy}T4NTu-rAG;WUq}zMxxt=_GE*!6WLG1JXS@j_H)H4$$Qq8v@XNcy>+ zSnW$ex^uRadPZ49Y~7pct19ME++W@t^NQlC+Ue>vXl2Tr7Ox@LB0 z2e2caGM~2U`jY!#!qbqgaJe(2PK!jm+(v5}hiZ?+am_*)Q5{F8I5*BiAy-gGi(KTB zX4FpTemMi3K)q7Yy89Wbc)_nS9h1tV`q`8tjS}h9wY}mH+OTmVZNr;OArl+Xsi@PS zgBzaENw99ispcV4EYxJSSWw6xSTvxC)S`@_gJKgvw~pVIy2mw}HfV9K9bd~LEF`{w zDiYLtL7f7yr~Q-aFvMA<>x}u3kFOkaj& z+R&zqu2&std1nJFK7dz^G4$=`Bd%dr|I+#Qd*33nV?3KUB_~}QLK`0C&e=SFw+=kv zwu~p#UuCiiLbQ%abPl(QGLLe=E4&T!yk~%1os5zWP0dms%ov}QQ^`#^bs{zD(@w2d znX5xS>ss2RMPhYO>WiVoH3MFe8dpLTXG)ZLvUyPgrO1{e^b4_yr$0Skzru7*V784e zY<8GWLzpi0BSkA1s(+;oHQ73T`l6X^nm|KvNwXs@A93mP6NqQt9mW4xrzmyKzgpT% z;LAwp%0sFqfA_CqU6=#3pIT}*L<-(CWnP6a>cSz-qSt|wN&Rf}U2x81GEQk->Q}pH z9YbF8w07qk?)|eq_=d)It&t78)d^PxrnvbOY7%x8z^;1Ua^Ee#dae|7)S8piJ2%0r zkX=(B2YZ+R2i+NUCOpPWO=fJ+e$TkSr^43_|SpFz)}Jna*mJA2=HynHLFs^cIG#{~Ez)B8V1aAw}J z588aW-&RX-AtMNS8bIxb{@e5Tg8-% zTj#@aON>qvG7rDbWvpibX8+WRFa=-)%EYA_q3YKGhn)0k3&4sX0rm^^41pG@cF`2k z3NeS=nPeH04D@KU;^cElv?~#2wRbJJYO566lr1ws)1h7QW~$;S*4UIXfe(gTY@5?` z5!aVNrh((6qZV{bwDj}-NG==IbFUh$ht%`Xu7Vf1oKd(EnB^HzC4Ma%$p_J*Q6q2C zN@Rv7^R&=qn0fws8puO8+)hh)W15<9@#lD7Srx+5eQ@_-B2N3-EyM9uV7CAAp;y>@e24`rKiS+4@Xj7Eo++TrXG<)jt$DHlzN)iZ&(n4TlB%8 zi)_urT;#0#IRRol^9|0rK{Dt0e;UigmOAQpf1Y*4DgMM)^>N-$BvP@-=N>ascSZG7 z37#qmwYWw21HXJt_Nq^a&T&z)*E@@}W)&stmAIQq$>%z8#ZFCPZSs}GB!=U8<4;2k zvyaXcilvl_YWhvdAXatP!pb`eR1quTGztaB+Uh0;QV*#1-`dkdvcKHUBh$X~bKnhj z1tZEEeRAo(Mu?#5iPOq;q5_9AxygQii8f!f1O0lWsfj5tH2QVLNA3HK3z=K4&Ui$} z-?NLTh#%GwJZ)EuoZNHA(NrSk*-j*H_IU=*nMoM<4tGd9Ge;5bp)=vqyL1*poAXmm zrj5?%+c=#)W)kEQ0rXtA$#@yXxwWfO%J97dZx+f$3d@B$(sYetDe zCaKf^!Pq+mi57HQx@Ft8ZQHh8yKLLGZ5z96+qP|+ySmQ39UXD{q5t!+p7V>$j1`e{ zj5#LX)x(1k3?GBsQ;Y-Hh{t*nQyz!hiBIU5s^P8MIJ)rPJUig;xWgHI7}sX$$0uNH zsSDK+{L#Z_Ofk#P@#npdq{^-Ki4a5hlwXU`!kQ!?>r@;D;&N*q$PQ9x;G3G8cTXWVa4n;Ld3dH~YuF%Hu{l zz!PG#oxIL+a6RlM#RQQ~;Yb1{95>XI(a0N06&=ClW_(+`blAdemqkm0(T`GiwS-Oh z)9=N3b&{XBTndZJA3t$@w68|;359vrr(vh!`y-W2&Y*u_StE@lO1vAQnFu4D0Pnic z3~2G#bX{th3h{1#O|IRqe-QV#a|J0GeZ1KCX2mv!(Y}NhoTUk2=#7(%9Kl``Skg9P zOOn=G7mI(K$FnJBA%Q09k(ZlF`bh={e2EY{N)cpUd>rfg^m>CUKuYLBzVzuMK!5?} zrCcLn7W3(W<0Z3D8nR}{&}z=3sVvbO0#F)45k3u4XAFP^t<;qalA+qF>H@)x@+YJ) z#-I><5eD%NRuyk>Yjp+_@VkVU0sH{ECjYc2ekl?<9pFw_Ymq!H7$DMwv8_>r``+<{ z5dDN!$bF6~Q%7UW!Y^xB8p8vm>jMfG+wl!HM#AKv0SV<#cN;w zS|NW&=W7W(vi?gPKRqroO|~ z$p(R=B$~#!^U<&Zbk?!zkHDzV>CZdwP0hGo{{&dpv}J28i^RWNlD0kt63Q~5#%!PgwTrJJ zZk?n&88VSuG?QB83Sl9?b%P1#V*_@XV_UKdYp zfn&ZH8`Gc9=$`&r8USIh@LjT!a$Wy06El>(lJ4uw1Lo8ZPU0JmN4Nj7s=WKkVC92# zoBJ|H5tX$q9_lYa!d6WsZ{7r6ek7M7`nRZU`eT2-POdJZp{?UncO>dnDS#R`@ggnv z*Y(zA|_^H^QnnUv#Z@)r?>v#w{8dZ@RN TC*sEvEv=D-pbWMbQ<=t& z!oP!hMpq`sBv_1{W+)=lU3@I)$a9W8^(qa79Ni>_%cm1Vs)I_ifyre-RHwiZ= z%i9*cG0hDz@EX=sdjr0?fJYucvbqHjpiX}}P5~rgKtsUt*5bqL!amn5LXOrsHQc+A z%9+Icz2XQS&33;z(YGk_(e)0by*ZC)x67ikWZMBi&zAi(Zw8h%St8eD!@nF#);o;np z0_npU$JW)Re&481ojjyjpO;wm-sm1-jVsfPZCQ9cqBI$`lRpFaS?*mqjXf-{$~(`9 z-ESG|Lc^__3n6%tECtDPP<`-G#a0S-IYQsCy)crAc>YB!?YvlswS8t%>em`5$$>-6 zmLduWn6KI>K?mZ7vtz5`=%b0>*Y%dn4-NO7uUt%u5_XVm1@KNI&p$9qN>M3HiN+ej z88p%m+Di6 z^tv0O;i0GF@~f)|35G)iGjY>|1o?w%Uut2~cvO3rZP>%fi#npF5=06I_3?6}m&L0t zIC%Yg&k2Kuf5Efv`aMw&l0%cjw#9Yq(-qDQ5{cU@+AwVcip9v$Sn|LvE@;SpSu1in zk7jSL9-h?~=PCu~;$cyhEzQLsP`bXI_AkNdDE;Jkw`$7r_OPhCeVRc`=D7` z6v1sK@~@&AUfuSvzV!!yaJdy;s18#briSC2&Tm(Sd=hp-b`Z1NU%Oli@8E5YOqYo5 zXFx$=>~bg2Z;bqfEwEU&pG9~KlJ(wJImy5u()L?@M4|#_flijfmI-0Rl9Okkge5Ka zS9cLCqGWZfXaYzwr5K=T@Q~$+prsz8KUk3s!MLl^z>qAR0^y7P2(M-z&Z(gmKh7&@ zw4-GD5dmC#cB_ z`mQ^FkSPMUr?gb;V$dtYpx1-rHwKGqDw3Zolf5406!&+<%PC6=H#s!Po=IM*ecEmW zRCLI2!n*r0hVzY`zielh$9}}ku-x~G^L-6E6Ziuv6-THB6|r(i;YIf3=F=!N2YGlF z6hPW2U`-g`1&cDK))c|d!8AGmDqqCL-9sZi15&CL(CD6=jG@)T31ze{$LX8&7Iyz& zB;tbycw}j{^wxOJucv1vXFG~U1;1j;^orufHmFJ0mKklHqn!f{Qpb)OnXKLU4zSqD z8}YuoDB%+{>SxPhGJ#h#??Fl2h4p`sD?|(lTFMJ-g61>FZ|l)Sat7Ulsx(GU?f_n@ z&QBTL6A&Oul`!<9c6j7>IHhVf@>Jnv6T2Q3=Rf-|DDQ1K5njoZmmsFYaSVBB&W;ZO z#@J^ckiuta9J#sd!-X7yFv-oN`V=k&T#GzA+6Vrr1&d*Gg*o%#T-x(qm###U<4rGI zdNg@R$8ZY(&;uG2>$Dp1%hH{(Oef1L_gw--bWXCVPTLqA237%wosqR&$`pO7`0WVb zn)cmg3@ESIHOi?aX_&HAjxmGn#Ff zZ=v80a~7PvK_8A_4Z%X65(1`7G(pP0Qv+}KWW|oOGzZBUFp)Zj- z>a8kdxl>v1T7EuU)!f{c#xg^XQBwIfjv!n2G5P0&S~`{0kru-kc9`#yJg{of^%S_e z-P?{$_CeRCQ8$6W1OhbgE)DKLxOPwp<>p(45w~o=72zhCmp~{?V6LNjtG;3Of$8BC z1RF2GoWmAz;5DPMcSMQ6^Co_PzyizAi(XT?7FhaOM<^R zU+Ury+Dx5tWc&T6W5})C@782!de7}9Q*fLd>$FzeGwJ-YLKE!Hzq=nzG2V>pi4c=M zQ+3I+%gT!#Ren;B2i6*??`z*qt*v|MMzUY!lH<^?aRP41_l-s4^>mFj-hKv7l|E05 z`kq>~K|Mua5NBr2>Yi#1-H$~5O(ks8Y;r`*hJ&Jkb*d*eIO&LHp*v6qc-U5l2ocE3 zyT!)2-M0?g>;)Q+6VP+9Tv(p~KBUGCVBUzZ&w7G27IqG8Eb**W!2RH{BOnK{MGY|5 zd(ECu0ao*IuprTzmzwG;d%H8n0ZPnU&1^+M5+Dhv=e)m?apq|- zU&NKF-Vgo>eiw?zn;BuX-UL3;hQs=poMLGRP_4F6UUxUzYnW6`9=^q#ojoufYTvM( zXK5V2)}d^&GQj8yAThC*Ys4j&-BSYdR?>y`P_{$r+(3F)5+5pvH$wexitWX;@UWv} z`rGW>mRZA<&=*v_95XN+zBN~x}!RB3ouEcFZD@KM~cNL=bK%#3?+ zEB7vY@VfJq4l923$`Y2r!^uRSEGf#s7y~=o?A0hn=J5+pVU#1b$abWPlK4Q>SIR9u z6_8NB?I2wsL}_hGU0Q5>mKGhZTbCTnDV#V|j-@{R2+1 z?E=yOAU^Th@exjt?*iAVlz#O2bT+_(n}p3J!cq-_AExo(D^swEq?zlIDmjb^S3(!M zGRvAv1j9Pi&!ScpY=*R|312y?^6o*h#Vb@Gtl&x$^?pH_v{NBKMAeWc>JW99p#r4t zK#@Soe@mE_Lp|R!nDEGp5LngRgGe@wP=mH45DD-DFqn9-)-@Z_Kp&Rk?a4t?04-__ zEYzNWM-}&wHDDL)!Lrhu<_njUo7|EBZ0?PjPyuE9Ex)MVb`CVvoXF`?wIv)(; zvx&Vdf~T>sOCi@3N_)_o-C-XOHwhwBlUPzHI#5R~e!wctc2cTy-cibZ#Y%%iD+JN> zX2!T7yc>0@P~FOX$)ZtIkvB>7;T3`R(I70l6pO5b`$;^Z={o?GUAj%(lxri6cmKM- zuRr(-B5E+3p82jM>RLsFf1)k^1`=gF@0U#Jz|<|4NvyOGb?T~!tuCw@4(mZV{l!S! zEGKD`djc&lyUu&TiHHQx)RdCoVSz|z0q`CtJ~L}oSnm&Y<<9tmX-5MDGJ6{?dtrIO zsUde-=9_pyfdxKvNgSu;%TlTz1)2`tRf?H(t~RfaPsnIT5MtN(i`g5t&J(tffe|tg z@Q!})zBAM_^P>KqB}3I+k>m(p#EEqoF@iLb=y{l0{TGapKWad6i368{-cva2yccej zEA=aSl7yZ&Yb0A1BV(XgP*NXS5Qyw(l)~1INo;=CV9*c|fY7Asc%C;o*Z=48+>=Qt zf1?&T6%v6hehZ^Z#FO)~0IRqyiJ^Q`;$&ka95!hjOdqW3D6vuR5h^`@zaGl?sXo#k zf*jYYcoTAj+*Ez3{*@nVedF5 zj2(`fqzC)&+Ad*A0c+zP0IJ<|4)8YCTPh6fI2c`V_FJ6_^e$NIwV`q@j2#HGCj1aY z*&#N|jdK<6Fj>T|B(2~GCCHm}rRkLN*PC2aWiw0762#xtXu9P%_~JhX{e<5(xJfhw z;8!a>DH2CRXMbe8~Ke=3dS2Jf{sPHn7!R@k?4ol|G{oe4lp&rQ7S{MjK+ zyA3f(k|t^!b5@h&i1h~V@B8WzN=K=uIkXFw8?A5xNE@7BooK2f6nT+$%d)szt4_L8 z^C0S9p`P0ajPSA(1omHpPqC8+4u9o@^VlcJWW6-Mq|O(aZnT=YuctOwXqr1q;-PLn z=g`6xYFq&1MG?e`qD|ugCA*2goX|?n z?4`JHe~1+YxI+)w{}ycTJ4T~_v)&*sZF>RGXZw44W}gDC1Hf=Z<%)F*xDGpVAl@^2 z#sH|o_AbWAbA#ny?G0DeC>D4nhaAkrS$~6DB^qU!Z%J$kdS=m@EVJvtR%n2X7`2Qm zfWoxKP9Jc>w1^UwXMyol1)L~g#?mk(5De#DJHVSQxDwXwg~=&>i^J5}K1FzL!nK|K zJO<(BIHT?PphstrTR}yivYsPR^)rBuiJuapG}}Cj-eung5c0}+2f1{;y8@%@bA5~A z^ucM`#F0Mp%k4;c65s(@`oYDTj-D&V)$@uV0!v&PCHyRA!q^rv$pkkw_dUfSD6m^s z3lJGk1Q4cGpI5@vUk4H|cKrBk&Q%Vyr&V35LwXDov%e0_dzRbqzB0$h#;d&i{pGeH zm>+~!=FNg!HMu(_K&PaA9y4g!ZmL1q?n@rWC$6Ep1=MMF7BfVBd3STeVKs#faPuCZ z!sX7uV>fdt(qZ)f61&_QL1wJT0b`rw5$=zE`dB+6~X;GYC!t4IOGFk&atEP zsIe$d^8$(FR3bHkdWE3!9`!K~+#I^Qdto4Xw z8C5=2>6EnFaw(AX!S{4YB7(^6nI2hxdFT}(cnuf>gZdMFKjXQ1pz*L}yP5#o@Z<(( z?^6lfqkH&cePnqh*;Nc`$r_f#)JN<_Xl`qzo_7l)4mp6BD?!6G*6@9)qGFaL$mBWY z17Z$g5~idLAIW99YoQ{C`egy$gYvk%I-+f?n0K|NwA#dU@QQQEJY9Y4Oz^~85#{~1 z&CZyeqL;tj8wOPL0@&^{TQ4XmzwoUaT%Bd#i*F=2nXpkb(49xNQq(LXNKG!@99)Ol zC{^LE~{G6dSgS4xftN8G{5y^}_Uhg3Uz zn;i2@LRP_BA7ttg)SuwZWxRu+C(fYRr2v;1M$5X$?>mvmSCMO$t1wh6uuEXRr$=GH zuDd8sJmK2V@b=Nd>?<)x3g?$WG6iVbbt1=cn!qXkzrniV>O}OE!D&8)N>K*E_!mr< zfKqMA*B+5={4%GWZ^Q|&pa#|?U@524-h$&UJEU&ZoW^fR4Q@NjUggQh9Fa7=a(Vae zGYq>fXI9pcd9Hiiv((E_drvAMYyXrZIQl(uF#U$~cl&{R)eE9u7jCxF_utOvS>|Oe z&4#3$N^bu3&+KzpIk*r|x!MN@F|VsKbHb#fbB#?-gEn9ly{)GxuzNY~^l2^snTXsB zF5#Hn%c?`sckqm?a{h-a@gdJD)nsof#FA|meg)&09WOTKw%lcn$4W*5fAu=?N85~? zY6MX?aOAD0>&jb!5scdbrdYTIT>#BK+0Zdw2Ci$4HZi-D`H-Pi_95wB;PX5;lW*Qy zx!T9N-lWh8rl`k6g%`&T>OHk)RNJjeIZ4emgicdC0*dGqSbUUBQtKVZP&tQCIaWaa zscCB0i3Bd3(6U9?ab#y{I=CLeCktwa(#wDMxFco{>1%t~CCEO8j%RE!-TFswK94ap z(Vq$NXMnR^bWrre9%;xfvCa8$sEa^=Xdy8>svUG>&^QM{9-Y>&kJ(^~Ojp+jjM> zFgD4ymE@nai!;38Ud@;&x0-{ro7;IoH2+Qj^@Yw+HBOm^uA2U*^{VaoNd1I;b}QGC z=faGH^G;Q!TN~d|v@yLS1?L)GKi94}F0NfOg^L9{c1y-pz+V9(V^o|~*R>&L42zdj zX(axVk?T<|Lp%;5C8xg!Ig38JfVE*b{%}YB0H0ipV{%RKDl=%900R7)4D|~f z6i=a3zQt@S_CXY$S2QQDD2GzGDiTTV-?ZT+F^5`X8pqH?hHC;hKsy2P%q(kuZ}7`6 zyP`k)<|fo+z&10vT7_^M0twX+x9RDlF(ANVt83Q5~g(l_%AZeyaY zP;~xTe;(yAPMbU(Hg$7vtQ()i@f=(iAIy+{p5=kxI>OFG8=Gu?ns#XO>AqCd{>`TN zQJlJPt`ulYk#lMW)0w%)QG+3ex@9XSwKi|V?&c|ZtGXI=xFfY2_BzJjQ21s_cVt;7!`(1*ojO+8mHAG(xnz=nu z-}ZUTjqoO?ci416JRR;d*Y)Jt$uhM_>fYGH-vmE<)lJkk%k%2Rnp3jc0aoljzpPO^ zL2n1eFE5E#e%0651h((p*zUso_#0nI8om^0l6(I6xCZTVZ$n3d3BI#Ov*Ugc28Lml z9$W~#N+dh6_3Re6{&xNQ!qtqoHyd7k(#Vc0QUf{jZgV=1!B%gv=t`dROO$hqCd&5i zUQ6#Z5)2i4+=;pzW!XHS&oC&e6o&|KccijzF`sCMj78&Cyk{WoUmwq(7O)}rVkII0 zPQtj>ln9XVn8_cZlxxCYy@-%K>d8%tS6|{JQKyW+gHPKEO(Q!12$>Q^|HEy0@?=t2 z5^%~$pZpJ7vjJc9hD%>EWJtR_L$`O2`7G+h^BHaTb*5LAAz0YL%6&293IpJPCzN6v z^Ce&-9+10^Tx(N!Xn`kO$fvYs!S^Tc|(fMQT0N~KyT&_kag0j;HWSS_CRs6u+Y3j9dpgpU6;0)FqD$X6Wgd-WNlHZ82O6 zwDD?>A~SYsUY@k9f+1oY`aI}3Albi zar>ZOGm8sWqPv@6&h0p=mihz5FFkaBuRil z@Cvfd%0R0!ro+Mo1uQ(c?{1$kdkQzfF?}c*oUf%?<$8KuY92Ypmwv1;qD$xIGaJZ+ z3s8LSK?@AUiP3;1whq!P5~i8N_gC469Z%3{S^{z~9DuENOm?6tKTnt`SWRS!7g|%M zY5ChyYkn#0fCOV(-Iv9*-=?>hcAqM(sPWyT0P#q;9SS{`=&*pz1$8N7rj#7w&aS%@ zBsG}KrIGNOI-J>w=5!_;Ikrzgu<6nKO)_C0{lm5yI#eH;6Sjhf2qyC04%jcQibeS{ z!L)Fq!PayKSQcMPw?iBLR#0<{oATq->$0Q1^|-;Ek625-e1#LT6_7=%YbC~;kcXK{ z6)#_@j6B)toSq8|`D8)wq9BZ(EAQnBRQNyRC=&h%Quc(Vd#=mgm1Ghxxh-{>_SaZ4 z+WeP=LOT{sMvSs$t6$!D&vu(Kw87|1J~*a?KSe|Ylpb>}H!N|gZy|VAX5h{kiv)36 zamnwf9ag-wS$;L2b%!KgLulKkzB+_UdJ3|0mcnb|-=*pdSDEn*5(XFTGif^EC+U3E z-c?c?o1Ck62iWz3N{;<#C}q9lFNI+h@bmEbx^9ai09-x_j2g@G?5g!yuPxgPJS7{u zS5MGaa{Sy7UHQL}r3|5qMBm{PEbTcMrZD?&II00xwGnS*b^SLQAJ3}mfz7(AB})2f z_h>tn@s++v6Lc=n36;MdU`J^CL6_X57~z_N>J+5bKBkU*<1f*{-HIeMP8?GJ;#}HG zc1z!uo{6h4k+*zA9&0RqR|Xp23KjX>gz?2KZ5&g~u<~T(sp{78mD7`BJY1<4$pI#N z+m-07^6K2KI$bJqT9wJ%r1xQfthtzh?9RXpV%0+XFMHRE8qqd0w#fUW=}1`QBb;sYNdj09-M?+=4hS$0Z&h;8wT@Oz(X>W_S7ZkItYFVi!lgAGcZ-nV)IO+zSvMV3)1(=HZ5Rl81 zt?Jr~WAQ^FZ0x?&8rMvDp^wJ_IG8U^9~jU4E4IXpZm|k&m_|PiFzn7hSCe522X+|o z-3yALcx?3y=qc&n2EBD~$kq?WL1H8rcv~~%(NAH?Md~|1XXQX{Gf;>rx}@25z)qVP zZdD+)+}6gFq&K)NnIF{3s<*Dmy{%gL&4XeP_k+6ce|ue!f{$I{48A-i9Xx}($If6Rj7ZUbE80VivnT$+H>7qv8SgKsLtmTf3s(3 zw65bOm9`?30xKrB-9Khl?a=_vmYDZ>4?naldTAhPa95SeT_}&2fHY{5$WF$Y=0$A@ z00W5x`7la}7u+p5d9sz7(mchJArEAVGdU zAsEx=(?FE`b67g$`=J#qLf3x&P@io_EGYzlL%jOB7Fvb`Zvhn2EIC# z|E744FK|z_XZ^}B1ZB63-%cJ$>ZbiVi#f71n0se(^|79At0m$) z(K9dwBnkBAT8LH@L8J#wBtOIxY3gm-lxyb4JO~H^R<$2q-Q>k$Q}Z;}qB?@!6iFby zQ=xc0Vz%Id6gUmO?2l>+5CSTz+te|u2Syk>hH8AB6A`f||NSXUf|CI?w!e&@4CRB; zMj-mi_|83rbtq!E4*~(mRXk{zmf&BIt-WEhJ-4fgp6EZZF^5`MlDQh5paYL%W~SkQ zf};4Q*x%D%H8kewLi?oLCRYj%xQJm5G3^Y z*FHfR*@CyZ4t|v@MW{bErM-mZ)6GHp=BC~1e^6z7$TSjn{zq*RfmXH7#UaBK>u_Gq zAJfqx?B4&p>}W4c1U3EfRk!(#376>v9!ekR@lMHfK#Z?4YTXa9J-uY}5~Y0?)_qT! zPGAwUVIF9Ux50|~)N@nE;%&XXq(wAV8jJe{&IFV6_87J53TB*l|BVS;kc=}`eQgAvKscgNo#j`0V20&y-yGf$rq~i!1$?D!E*E}IGMcV`VKKe2dyf#& zHq9X;5TUSWwuYXM+X>>N{t4pAluf`k=Ldxm z&b#^Npn8;@I&;pDYUz9L)_ko-kxP{k=KL;-ZvrT5^JpqlBmZ>(Pj>IN{@$q?CEnI| ze-I?7p{r58Bz()xoA~V|$`Y3YTq1S>c;Hm2=St-RW+Je-;bun~9Z*%({G=h;hV(|S z3sL4`&}bxpAszbTX}yUU;U0E%ZtPc;1K}K1`6Gopq1~%=E?o`ptW?;;D|R2Lua*DO zzvRAQ?zi4kh$SBlbUn#c>&jyPu^5_$XB6mY8umtlYLkcj(Tzi$ZiZZ!gHuWHcUWD( zeS7z2QX#a2Wg%~Vb#{w(cM6~>t4*tT9=2Lfka2ZTP0Z*zf7$%_1<#?sAlUDcae zc#;s`;=@z;9_x9!7J!_DVyx87J!}jw%`l>18*t$>XaR#3IS>+f*!D zF+NoZLL`%|4)tG%3Qj_4@k;EHfJqdx9cR8Whq_c!I*#kQqf*gzu=9Mfs0i(+f6$jb z7qxn%oid>qY^iu-O5&}Z#pfc*zUN-m8}bS_D|00MI)czoRn_!; zU;|X*WS@zdDl_mF<>p)9*Bhwm$U|2pmYou`MT&*!^KWe=apCoJYS>fStz~(NYwlh- zEd3|_iqD$&#JeZ9o<%&*G`ne>BGAi?yF?`b*!pgL+!BD7+!vk^< z%12(-ziom=sbm-}0=BI=^Ue1T_Mq5wj%xu#b8?jN>=wL)WE646x!7Af0Ks_U%28eU zeOG$gRCu{5q8Ijxztu3$y0Z>>G(!Q%H1*3w364&^qBzZW=PUAL4Mm*40NgB5;e2#% z;IeV%zQSa7iq47BibnQ!Ln@g5$ikY}AS~YShx8>?rz{^x`epD2}P2}lc zIGV}Y>QS>cSmKmm>gQV??J6M;IanwaXRLt{nk3mY%f> zE@WTknmU(gjjr+{mR`l`i*|%3k|YXfo+KUXv_jfL!Y1Vi+>U8<;}^kE4kg@riJ)sV zKOOkf4LW|4?ciY!|7rl_V~?{zZd_qq#Je>Hyt6tK)u)!`-|BsWK^< z+VR1Tj24-4>$7Ijw=KSJ)hb>LCYg8=V~D;|^Cn|PCRvx`SbLr-F19rd(&y4NdcMtq z#84Xu0vOO;8FXp8NKcYp+30!TdoHY_b_pNI$;vUb3U*Y44X{%J45_LJhFnj8wD={*&DdQsuz`*^iXs-5x<{#LzLufTtD~tV<%l z@2>1h*sk{AL;jv19uD=ZcD0hZT^*T#>cJ0C4-ql4V}O%72u)z51t#~NukN0cOLMuso!)DOD@G@o$S_Hz^0+vGhfk|{8+qnN z%!_?z1?28Qyx%K8$F09MLbMI9=ECHK-@E;Irl#uC5RGZ|+#zJHj0>&qqDG2!jE!Gq zPly3u9~!t4ArRZ|DSNhU9aQRfW{?odo!;%{^+S#uZ(XFrg)aOaLVAPq_w3Qre7%9% z*_T0-9-VQblg2<|vx+MMU&^S{`Oeqg2etD}Q!!>i0*#UWQ>AD;HT}6x(gdr!pwvR> z*ZdmeX{H8(PRHm=de2jmiCc^Wxq#@+(D9pDxaJd~6I60a zjxWG}wP~LN|76J}*pE4GHE@5!RrFbTzJsL*7xwfco5M{`c>>SN&)KSs1rg;&InaNl z-Nx8qCy{MpWBp0*C|yOii@eGe-M~@bGHdjiqsaENa+d_}GQQCw^QWcBJn%kyOXi{5 z=2B1E*fNJUOZ?>>lGA8q9}(_mW%u!J&BBU)xf3JIWfWC{~QCcWckP0#G7GL*@jN2poQZ=toxfF*i;Ycx!UN zl#O4fjV<>ScY~0Q=S}23sJ<9J1pA-YKIR|fYubgm%zt*ET|bN)otkhzs2neOJyA_g z)b*w}Lf_1$(lHT|beT-I68I+)txYzXL>nb?BrvOKfK$gR7Epb_{^|msax=ei+(As8 zv6igGf{w+q#ea{Ls0G430|WscmG=6`d_oN=Rl%AHtNa$yL`-@fB;^fM!L|& znKn}(PX?o4)&>AKiN1lnXyea?gv7V;7^V7TW#t(;S%>cupb(Gw*M3(xyqYK*4XFE> zSC91lKmVksTP`Zw5zqskYxU;u2cl+EXGQoG$07~N%bq~SC*JiTb&2$CEu&_tZ=N;<2$=j9V-c%dK=^&pV#W32xrw5&m15f~U5WsdZWB3wTqIS& zX#qAnqHRWb`es_D;Z21r)#IbV4^Q#Z!YnKQSFNZ{w8iEMi%wsYIeoRHsqB^k?8mYK^cjZs$Aa41k^o|d=w3(-d);AoEH$Tk zezPxlwRS`u0}N(v0fMWv^y|!Kvm;2sA1hK@1Jklp5%8ungk^6vGU(U^i!Hq|UmJSZ zI?h<*A82xjW5r;1E4LAUSzb$ZD+PBIqw6>F)cVcS7{6a^ zxsWBQaq9G@yYN++|KvO%qBPolq^1q zD?+}P=Ne<65r`aVu7SiQKR=4imRqcC(HAJs~(bB><#ukp+iqO)4CmZvq9)hSn}{1tu>z)H7K!riFS)f z5v5{8;>=uD%@ttCu7R>+kca<8t6aVcRAt4O?aA7X_SB-MX9_V9*Y3^A=(9h@R*>|e znu5BV@zQ{;s>(v7jQuI}uC@9KlW&lkD<0Fdu+G1BdTsVZ*~n?`ooZD2)HmAgsecXg zsav-UycPQc^I>rBLb_fmX%U30PfVXFa0w{7Q_7b8UY^V&!0YI-sGp_*f|`Z=7baiF zid&egMYl`Rp9_0aTSUZcpdm|~1>OZsQQKohR zl(dNELmngLCe+p+FZ8KCKd#MPb2;Vvzgom^??`jdAJA*@rB|va_ z)KDIq;ff@0+cNysvSv29YhZ5f=_|c{GVBol9!H(oD@C8|zBNyz#6dZqy>g*ZWS_AF zf{7OlaDYpWmy$(QR7kF4sZzf9AWv-L&UOXyNT_x}8|B8v#l`Q&bLZjU6$R3;2A%=M zVMynh0E8?V7nGaoHw7dj@KCr)>}1l_1rn74{e*Cr>yCrbVsB$woI4wuJQo0AJxzGS1*)Wg(ux%Q@QXO!$CJV_72%zY# z{EjTspNFhMgvV{svEW0;nRry{g~@D98TEAe=$m(VQ|0G-z<6OHw#;1@O>&3t+oLdb>y7br02l` z3v-N=0#%}O%iz}mm&~+LAg5Ys629FL_V#^pwu-yo^;yy{eR%>H9%Uw)J;Roj*4#xZ zM){XE!7d&hX}@!c!e5*f&2?uD*1C<~>LnwhZtGgE;oqJ^R^*!D{#mAZJqoKFI#QKF zP>uuZJXFiT`a|^Dj8o>Pt`D`U5}Z0jhUO4I7bVFmFQ&kpT!)5<|S@x@CI?s)~*z-A4G9 zY@N2_=E!EH_CxZG0;Tu*6ru0;S8J*jzu^iJR*7Ze*bm6@w}kpJ z7|>wy1AzeXe9m5=wVOaSw>778+08NhtG-pm&lvyK?v^9kScVzrZAOtT@5 z8ts|c=r6mA_BLjjZzhxaCSP*(x;!_d@;Gb+vN|xt`+XG>=8MK3SkN+n+)I>r5wU7M zD9@K12DFgD9cz8%=QH}IL0Q1{OY3s`$-aAK<Bgjxw8;TS5`AgkSo)ub8##`+uD1BYCaetdM~0(XHM!o})RJkVxGkR9U%zjh?L zWSN6Y>asukSg;9qJmk2@F+1AEK=_>vly=pTFwyT>XwKfw0vy}G?zMAD*h&&!C^x3}ewf6ho5sl9F3=drsH1JD7dbR zAK?E!5*Z>OAV&cJ06^vU`;Q}$o&H}VksEt+iRsRjGqW?(EdOBWBc4RO0l-z0GSX7@r~d1n4I}AiW(**V-QhzC z1U3Lxh#Hsxz7LaTc$^R0zKga;ka}7eM6x!o^(_FTWgaXvHBCW@0l&N*d@U3nsfd9Y z7h!4~N>f2AWt7|Di+gKYDms|@;xPjgH>^7{_|UzN8%A)JRFsE02?Y($0h9@)!){T& zmq>C}{4es}Ix4Pc*&BqQ!3o+}aCf)h?(PoVSVQpO5ZtYC39i9igS)$v;4Z->z>xRm zdo%aWH}~H8dwSJ5XaCObwRUy&u2WTeRh=#3hm%yT@)L$p_ThoAe2yoDgGQKz_{56) zh;Ty7Kn)ffWQm>Q>Ci{Bx8tDl)8N}ZXB^nu$jsh`jpjyt$A4f44@Ko-N1Be}pQHwb z%k&=jCbixFCe?AJRS}GD>%6H2Y?Ui=N>K#G zm5JG9PVH^CD^fD3U!BC}=+IT*637d}N@RI8W$M3ZygN!O_^k5#UD6|LZm3aY^Aw?| zpa{a;r>NTuz&OuOo+{rjQ70=KT3$`3HPNC?i}n>~^#T-!d(SVNS zQ#TA$PcJ)(th2LQj%Lkv3{k(S)#depym3P>VR#xB^AdPdlr*_tgDS1ak&HK1udEGnzm$yKuK1KI%%tgR}F z4T-e2c#H-*g`6d`tZFT$j+#n!9wr*LH>~8>3N2FFnm$B!^pJwShi5rg(HrNoLYjKY zKv$ie(s@_JwBv69FKII_(fFCFG#Jv9c+Yu{AK&4`&dY>{f6i&ccKU-gZ|?80r`C|I zgZyXi5>%tH8(yC!=`q zDezThc!bITHrh1PCap>KhM)>NlVT9=3}@y2jd)ME!P}C%_O&f3AYDWHm&6wklGOeb znQLZ!-Zr4RhhTbEnsT(*9dqaQnBx(9NYI$v#&JQ+)Wp+J~frTGbOj`Q)=ES_^O4uL<0rQemg!QXEKdHNquWEw+ zy9(t7xF)-w(;L&v^Xs#1&f3Pbj1T(HM#7abKF@w(?y&C?Rm8kWeAVgQzX=X}+T^Nr z3s!74542sI-Y;%E@|YI2{D|DRsB9--ndZT`REw3*N3-B&e>R`_waUWRki0ZQB1{UM zmNPw1HM&GY6IQdku`E6n!#qJJY5<39f**@skus>jQv@@C4NRlXSvs94#VKm;#3a{{ zm{W*Rv@Nz$)YH11`Q$q1rHTY@5zEDR*iFf4he(QDha{1p^kVNJM z*{U5i?UXQ_xu+mLh?VY4YL9yY7cTZ}_i<6ljI&uH9u}kITsIhzixunU)Hsie<>AXT zZPbPtvuon2^uFi7P%XCBC*moU({)aSRCuxXxShahf2k(u@qG8uzymT-v`&k)k%TvuHV{lk%%s~EZX~r4vLZpRy zY<|I$Ch7P0`)4sMPhgC#w|v#8uFg?=;D2I*|f$J+U%)nzOYt3+#gE8)TKJO z2Yel9XSEWr-}SLjk_GOxUE{&D@^5)^U+& z(Z>F=#f^tFoVcG>Pc3^La$P%iXmy2zb>t+H{NDV7mq<~Q?h__A8FG8#22x&7a4W0G zNBB^SEi@NfO&{pil|eYvY<;1N{B=a7>{<^SI(MvoIh9 zkcdkH9qUrY z7jSER5Vb|l3%c}1{3^n^!MM;8Omsth?Pm!CS&mDv@G8pfowN>rzgC4BPj$c3q5ZwM zjP=s5(U_jFGD+qG47np#?C2v0_ezyAC20*)um|6?2H>^-!x7+1FfB&|Kz`V==?`h@&C!Cw)t14;=dxe|DU+j5*2iY{BItq z@PFc!TK`+A{ZIV#zm0qQhPt|ja*+B^1a#1?6nwj(;BY7P%Kynz<+6aK(0Stq^q+8K z_3t>Ez}ifNWM>~Ir>Q6C_5BTsHvdn@8F7b|ofK-copvDTPZA+o9X@!==-y+%IE7Xm zdVLj{yS#PDpd&tf6Iun8L1yxAL~fG)a=U^4?w|N=v&hx^D?PpcgkQ^l$1n5Q|B8Vz z{yPBvzhd$+6qMIof9Q8Rz+4n5DOzz<%27WN^GoCssKDyJhcExcR#-ec(Gocnl=It) z#=mv=LH~-axr{IF28(GMes>$rUR0v*NTC8Kb-IyI;JZW;s8(w;!6)OM9V=6})C{W$ z3%@HDoG6!vi)tfaz0N!CHva3>wj|)pT;O=QBfGVOE{x8b={3o-?|U_od6#+jJR#Wb zzJEcZ+tB&c+v$FX@Ui`M`)=du*Yln4pMAfZ?z@+h^*iX_G41J3y+3{Ke}5KI!hJoM zo$S27`Rse+u{!Dd+&`Gz@%r2H^}c0Hb(L;7D?+URr-4~5qG4-6(#A2KHo2;PE05fe z#8J71wz4$=qd&Zv3A3NdF$24QxS0v3ztB;iI%0II(EvMT&`u7!c&kwiTf3fFjyoT% zP>#AHjJBW=>}<`nJ#a}IU$v?te!EzZ2G$?WhXzpoWTLfOXrA~oN&xyXP0vElRIcBa zr)heoU*ViDv7LEvn$X(t6AV$$uV*a=S=Y4_>{eXb)LUIZ^2-vYwyM%;KZLs@hMUEQ zg~rK*Noe|o%k+uMP)d@Mh~;TeZjn)rNc?RY{B5d{W1^F(l6`POlncl0{9YiF*jGZ; zpGPUhBpa51-%ucts9~ra@wcV%w+HyQhYpX5%|=%4FUHA}$a^UWBD#Dp8%Onin3C?j zHQ9FZ&a_Op75T6g<*|*!9EiYhiCYEJa~@vN?%g(xP6|!YHv0C zG0l!(cb<{iDp7NTy$Yn->&f*KtWINPsKP@r&D*pmK=lCFtuTv#=m`#4#LEm=P)16! zQ&Xg245vm}g#X15a@{0t`UEz>FnfH|uuO8Kg@P12)nAMhA4SJJzg0d%u1b)fj2k(F z-@r*GXFfOc;V-6^w2so#qnG=BIV$nD#r(G?kv3XYoo%gSXE`z;c2 zB5PBlYJ+GahiMrwfouSvtST-}dhcQwOmQ@5O$&X`y} z?6TQ-)X>JNj$-8rN-?ou{>5sEgC)C#|An#s;uhK}Pd~Z8H*xX=i+ITnt;pz@6WLi3 z=NjV@d1%TKT&?9XPvD4rFLWwe@VzC0V2nJ3MErtjQ$tXQl zMtZZkh@g@%RR+1gI362^%|^2HFCO`?e&)aZiGMM@grtI_v#cYrHTHPHlKI=4aa+x1 zt>_PJ3U*{$YppBhQtB;bXNTezk4GCa)FnMy3fgirBi!|n65aW03r%-aC>?#>s>{Xe zG_LkM6pt)I3&+_5kz`#ZGyX(<`Jh;vpqODnL(nwlm9H6i(zH@vZoJZNq$LY^1keHS zc)$r)72MIao%%*`x@)XWQe~3s1d^!E`4*;-6D>GY=O2cd(~_7U|7<~&$IyjdufJsb z+}FHx98AAas*5q`eLy31GgyBk#&O194SG4;DV{OqfV z1IYIKoyMro^-=2exKk?SXGTe;#>;sq$o8YpW9}pC2aXrdyvvRoB)ND!%Itm{RWT-d z)yM=J)lxl;BHX0!l5gqFVsyzkD#?Pnrxv*oE~R81-DL|^$c;iWI~7@EL^(}u;t)MK za#%TWEqEw`EHbzp3vT42kGIY;cXoD7mqqR{Ii#OjULiT1kwe$Mw#TAy_-FNqMZ+)w zq=EWaVIc#|x@=WDY*9DdxtB9c2I-}iR)|Rlvn|`yj$5=1qg}Px;uK2Y((C41cqAmr z;!^3xTF@nE%HWdfdLB)D-Hcn(cp>N~Dw4r6Bs3K`_Y6OCx?RQPi^ zg%7=EE)$&&U1oL)I%RDkmDH$;o@w=LhbA-aiHX>qa~+B88g1CEbIplO8jaWub9HKD zKt>$Keg;AWrz_qMA0+jn(febX0o7j7z*;0 z+Ha5)(oGFf7|2jy1(>qWSR@WxLBgp!&BBINAg0v4ik0{VT+JYd^~ih+KkOo6NL zUr>IqH`_W){`%1k6Br5~8+Akx;OX6E9!LYPjQR~A$aCVt|n03x9wcjBU6W5TjRZm0DrS8Cfy0;9M{2mBgHD;eNOYFCb zWWzOHaj7}9QMaBQ-%IWfU2AMM|DDut80m$3$3&tdSnqD}dugk&2P?7|carg&etWgM z_3x3qEJfoC-`nG&fC)7?LTu0F>&8R#*^Qz=Md=JB?kv03)$8izlBwCE&R(d{uV{fp z@B&dT_`9qtrcu4r!7*rAMEnBYhg1Ez-^B~Xc|vQ@wu!hI-gb1{Vx2Qh?R5ynM8hJQ zSjueRv((0-`eO)OeFUrWes=EO%NwW480FRmSFRY;wb(oNg1+}Cw&;NESJ!GDM)oSl zKV>{|qk~CS!0PT6S4(^0>@jOYQX@z}bmXT(kR<+EcgSTQp)KYJQh52b)u~ zxhdkGLxs?Mh}~$fotL(&7frK!eM4c;5Qx$E%AIshtJO`Td-XzH(T<6^dBKU_yEm(Q zF+=kwj%h;;kp_uk1Tvj?Pgy5TfAqc!4MvM3!WB5t(%Gt>GfnGt3dKQV*p+kRWms(K z#g$F%(F(OgTcrw4pbRy|CQ41)n}+l%gzBQz5%2Mix$^F7eB|Ra z$?ka{3PD3BCgBrwbKknqI9e-=l-EvqMoMBf=38@Z-go%9`L&lLbRUg{$cTSU?O^Y6 zBsWXmD+P^tk=UMhEir}2UYn3_O|^M@YPt|2Z<%t1hDXHAZ{XB?;?Td@+M^k2>jYfN zY+SaeTLwjP!qRzUoX!woyDeo_^I3EJ1brPB%aG)bv`b!M^kMqms~8OA9<|dN`e4G* z1^XWs1>>|C)7<9Zft9E_0L!pSstLz5C}G?;1BM&H4sDsJ zkQoC$dHwda47s|-z9t6jXdZ*MOeK*i*do#{Q?Bl@JQIwQ4<~jpV+q@*& zKK5Som}}ZT`_IBQdGwTT%pC@vZP$)`Ia7;;+46oV(9AG~@GU5AfGzepVCtx7@Fp{b zfmCah6X2A609ZcCoAR3(Yi;o@mlT`2RlzCkq_zu~J4&9?&n$0{+{)<8S6;ma3?7xU zj!vMI4ICv~K6mQdEfE}j+;Upm4H}hAQD-iuiX?gd1n+)*!AswyPtqayty$33WAAcp zH+WRuI*<9Z{fHtogh*ZIxOM{AF$$j&&75Jt-3E4CKB`rWQ8#THMN7$IZZmLeJ8@h- zu2oJ(kF6xx2R>XJ>`kw4M`?tP@*Q?C;~6lw8kBK6HRm`i??#Mjrr6d7wX8X`&^7#; z9?Y*Alx)ekJoq&|oez<=jAi*`-?HY`LUEi=6xm~#;>CPtNYX6m;<0n7!@gIuY#w>X zKeAc7cH|DeS~_zaGjA9qh;1M_7F_TGUo_`cPn(AhD#aR-w0*jDKe@x|1U?m;VHSdn&l6lS`X>2!%xL}AU$`!s*^v(s-9$nN0 z{vq3ldHo=MYz@h_;DDFHMJnxJ5$mvd?I3n+DaouLt|#NA&Ph$5c_~o~V-v~fCoT8# zYn@%kwe2uYIN7*KaA)g~X>N~2Hl2{VPwS(`qs!Id*=%odu`)}xBvOaZ=pOAY-E&L;n3Cs3L*z9m2h&SkQ>@w|`W89B;w_3uqbrL1pRH)ZS>OSfk zu*1if6Hf*gEeFnr4T-({!N)?OJ~8)1H+*}}29-M55TRmkC3zw2iTlhXYRoB? z9KWt9e^Y3fcku8iNU4=mpK_!{CjE_|J76AR*DhKPok#Z@=hFX7?_DKL8fJS5-T9E* z3toHdTrKT~8~f~BpmE@QdlbC#IJ**CG3ko)pMW-ocX@w6z^Z!|{H_(Q{n}%9mN~~q zB-B3!I_sU5q(Zz?m=0ErkeOItUzjW=1nAc{7iO*M8sDpN7>LP14ljukga3hz(@dxD zA`BCgVpm%eABTBat*J~(TqRi&*}7P3=r!UYV1~iMV`x7L8Ss&v z%))G_Aa0mUU2HFC3^ff;3z_RrUhOH~7`iS7JC}vt&~+Suj6)n0Jcb6qy%Y!ujY0$f z*yWT=hw%AsE8mK@s1nz4+1wlBa#kLHKj=@XQ%%>^xKQg;LDNkX%-L|k#$I_$xVP= z7yg*g6fjb7QE2zEQSb2yS(#0|yMp|~p&t z0wbahz5~~e-q20l>sWML23eDseW)Oc2lgfJj_lBETpihKurt}dICGEzN(;6V-;Uf+ zW!yTMS6L{cukP|~XDyF~@z6^g3>ktrS}+k>3&oHOP&tY&PA$KM z;?PQ*E7>XbLMRw<8NZg!!g(k;u7Ye;tTFTi(U=FP4=%T(XCjV*Oj&#I}pO z8?&>M-}oC!`I+n(-a!Y@U88 zGl#sclWUA5C8|wty_&na#C^}N>e&rrz|^Mr5z$QO$g<_z7Z_%Xxws22wkg#&9;T1k zL~$koj#wh9p)#}W<8JAQgM6%^FthBd3|q(aBEOY7iCp^VC}J;!jV47D!As;wxn5t5GqRO~SOKhc%cFiFkKHIk&F-8Oad4l^&5Er*+u) z9nvRk&k@5!vCqLmP8DW?GpwMr<(kwE?N(2g-vpJNtN_T z^y?ZZt>3RzJW4O*j`RvU|2;$4H^)Ja( z<@@fW1gshPkW@%Ns*(J?TqS;22beTslf*@DtGQ5GuDzU7s|tifa1tjm(d%nAmV&jH zb8FRsDF#RMRdb6K?1N9U1og94RnwyR`BF5|69gTcp0?N12j5j8z68%)@Kz{uyj4!3 zyLls2Nu%`I8tlzwPKvv&BdAFk^i3M}-`C>Fr}4{GBgqKFxk9W_7V&f0)VAfLnF$QJ z=o{Wb3b4(rVt$(xdh|B}9P8(b+?^lp( zc`F>Hw#x_jWRnKqq9X}XcC6sCBwR7O30-hqFkG+=7Mk-MHMi3TT%!pIc(^upk~7J> z#iOMOV!6Iq3C=fHI7-*%{l@*mb!NS`=$`XiVY_7jIXZ>Fi}Ti|Dj)I3s8IAX0Rm^E zHF$9;w?^GOrJg@Jkl@@&X1jU-Ga840flJG}d_gC_M$0@)iek@t?C*w~D(3M6X3xJ!wqbQ3;L`q}P zAx))ArAlQ=rATE+rNO7er?Q%`>c;QZ>E`Wr?}qJ`5Y=uG$8y1{&uPmE%_+`F$!W@o z&8f=CHc2q4btjgbN}vlU4X_S43;+ZK2Sf)b28;(V1!#eBz*u0s6O2p@q7Ov4M3_W4 zC)o9%6q6>CSd%J~Y?FAC8k2C7Qj@e_UW|C<80FaIAIfpdG0SnvvC8ql81vGweBK$) z0Wbl90pv@NqIe6uk5DO{xvqZR=999qSxj{MuM)CUT4X$2g* zmXHZh78Ad|>j&`ca;39`W#I&U);zzh%feCltZr3j3^}c7;Dl5b3_pS^+96Rghb`-> zNldpvAmtUx9wC@x$*g+CB)VHK&=vj|6&F9TcFH8a+boa@ULG|Wz{sOxT0LWu)NP5v zg%4s~GYRUJ2~>s8M;*YA;=uV)XdI7BRSCP=f;_=#Ql4_y`>)b@Q^4_xL;=MD>J^^0SbjI+A=#dl_Z zRq>TME9^Hy~pu@_qsBYar zXLvkRX2L)ISMVu2?o4R9M2);=j&ob7{f?1XxWYRH{dAGDxR#6)dMDLO)-|JMP5mU1 z!j@J2LXpq72#gcDnN7_mj?-HS{lM>!VL8mN2@JT}OqIF@O**zU6J{O#@R8BD8H_Ih zPvDYb&A4UWX9HF<21h0I*bZQp{`PRAD{sck8C1&#Np@Ym)TPiT85^?a^JN4`o3}Zt|sfB^*!!8g2HHRW7ZHzn3oPE*a z+pu??L+FwB^_jz0ppAm($FrmcwsrF3Lu&G%V}tJnQOr z8Q(H9)480k*ss|0)@aKN^Fh%B08=ecViMl`km3E+pOceEIFFlSi+xMbo(0Gfv{-7$ zQ{|@gTt0_2KB8>CTReSS=ymi)#a>5?$_ReV*H@nd;14tZhXP!bMEQ$Jv()1ztW*DQ zlsIAiKa@FP{onLQQOO9DHI}Z+WeX4B;$XMkwcME-nHU-yF4Gq1G)*+Tn;LXI-kUE? zNzxT4RnHZ#m&l^S%>NY3)hn>|9$VO-;<(B>HwX>Hg?h5eyF^`)|ow=N~Q$GtkJWA|1U%yJ^Q~U_UPIF zp-aL{Ev_uuDy=L%t~>%&kgpp^ODbyxu)M6>(37JjrR8Si_FVqRx17dhBeBfaVAJ&D z1--@eG+?cDSy#8A&WUhuf6oGMXJ==Be}5sN?wfIwXh%p3+|BX+Vr7i?c}k-goG`rxtq#-M zLhLqX1}5PeI{ZWa;#~^ICO2Zz4kMgEzf;! zg@%E=NwWR6IG;7j*VLDb+>5t5B9fZuLvcQ;{s}YNZ69IlAH>Oy%beUz*}7C`4P{CG zG2wFAh(;;LA*N_#>C%4?Ay;KIAv?CFF}pCF!Otg4v+Bw&OM}yDpOC}snE`tueqEk;!h zvW(PhqGn9t)*l39i6|X|`cyyA=eP!50}3Wdec%r{WU@scQD!XTg#I75Xno4Jcbz&Q zE^f)PaYqyU@3{gOz;HL7ppmiT&f7P6ab*t=S(dTyN_9fDWa3w{hVbw9KvwyCPcw40 zLNd6;8&JMjg$xfm;DHXHL!Rh+VJ({5*j(UxvqDWju$M2R`1Di{uqjAzfBZUuS*OJR z_zN!8uBeSUd5P8+3dE)#Byv~VhLJi92E2P7(<0R(*CJ#6zHHM+(1-mFrfY$0fr6Wq zb#|G$57gIz7=^ML(2QP#^L2UhOd2eGB4RSM#N21!M*^z_+lXHC(GluvjRiU>i3E|D zNfQ2#!pr+rc?aZO#|JN}1`-=RW^1vT`hvg&2fBHSmJ(6L1UkAnN`YSgg2saWgdLsK z#Cok6DcQ?>lZ=9_gj>0UY?M$c;`r+@%EE)xg6#yQq4_KB42sSxp6rke>iYxn`)o2L zLIXt75F(_5U?F^eGW?K-sgPh+q%U3gQN84kLTGDLpm~7L>rwepvM}_QUbSX~G;j= zU~>letXLfyXQ-C zdk8$;9kg7TC!!X*9CuSn0eN*p%2chJSlkw9N=Ou3zGcSg>pmlEWeki-@_W?tkJpc? zUc=y_1b&~`b;u`ce?I)+iUOXnULngr zn;9&KOQ>EZEGL1_4p3-4Fc+jm0llUaa;Me3w~`k8J$GOl6sAQ-#=%JA2W)K_GN(vs z0P_?hO0X5^E8;sAR_YGFg@M(RN43=VKaiT05O`dDZXQr+I4j}@(aS<Iim6v1rlIyp$g?9Vtay}IYQ!vUd>mPHuyZ_n z&*A9A6Mz8{>u91d|8R~-m8yjZ>9s!f`a zBCO!RAqVqJC96m(|7flqmW%}MV4ZA}$ulV87}wu4od2o=D$+Qcx6y>XDec$~+L}PA zqk6IFq30swd!~(Eae*&vmquVz%3NLdUfB>WDbQx9R>dt9=S1R2{^#17Y}?3*FDx(X zEu5aF*JZi^jr@b-ta6hvosMXg=`P$WV`P?8v{^>v9I!jNzz-{$kPfe+@-xl3-juh! z-D@ml3;SMpr9YR15(y_SJ!y~pEblp@yOUs$`(`bft01d9%Uu0?Cc`;!|O3 zWn`XT^Q#VCd92jDQM0ho&sGBuCUAsuOYc@wN`{ zhn~waQv~Z$J~_mb8%*%!`KIbwOigellfa}i_}~J5$O*YT7%7nyRlHAA8XtjzZYAF* z+75}>wKgkP;nj6SQL#2-ibTC8EhlsEUQMbD3CLxt$V<{!mKs14?E&Cj(Kj z>~*P{a6&-Oq=n5ah0UKc;d5yA1({^>ZxoF4HpC7GUcjm1skv6QU=PmYd7<@lH13A> zMPD`jjjIz6Dl!GWYb`MUum)Ypv$YC#xhaffMwd-YZl*qN+8(+uNqnUx+#gzr^%1}B zuRW}7xdZ)~Pm!QM(C)#ySVBlf4PWtZW zDC6`he&+yQosZT#YWi^2K7DI#ZD_+qa z;*?JkL<;z)eCpEg2Al(}od1A3B;PABhxB2`AP_GJUGa&XRyG})sVaY9j52M*h64tx zF*$(lp{t8sE5}jt7ds-a(P=rhIBSkq)YxIF!wGqEY{*-kmH<3#y@iOD{P|0$bPtY< z`e|$#oQ*%qN5VKFC*I?PY^y>SW>PgHh!7WZcgW8A-l}Q7UKa$u<-YifIC%InK61eU z@}Ba0p{|zkBbXx^x|EHQv3R9dEI1S#zgr+&PzUyA(vZttfq*whq$yIwRB^C{;L@(u zW^y7=ER%53X>;lTkM3zS{d3JC2z0QU-Bnh!7FA>NMG-&(Y4f%lIgi@qovB3l_^oVi zV>ML2{U1{0M5S@O{0OE4k*+YD3HFjdlABP$FyC-fv*hBJPDufu7Yvtlv28+T!-iHx z1(gs;2K$x-0G~c$vp!*JI%di^8X~TEBy+H2bcCMSqXx87GF_tsott&^K!`H~djq{9 z33C~m{#$+bVf4S6JdpZ)1`MnLqUD} zPfC3({?(K0zY<~hcDBz?h(CVVIU@MDMX*Cibbh$MMew(d(F{Ei}3-Z&u zu;bo#x9w?P3k$Yp7luJ(WzQ)y<32U-VK!Y3F$@R>UQ&ZX-Yc9W^kAjoFKMT<{p1Fj zrSoTxs-^qn1Fj6ssRx8(TNo)#GigR!#x^r&t6Ii>WsV=WC~RixX16H($}BoH7)Xdx zG1~jAL@}&zy9*jyj81#U;!GfC1Tse2(>dGcjN92&gDNw#;D1bAS0N~ius*eChLJ6y z-}`COB+3`tCTbt(q~wFpf;zH%jlaW9Yq{VNx0;h#v`*S2%BxV746{DK{WJ40?>*LR z!yCzm_Gfi}&4`Z%PI7{nt&@|8q8O#Q z>j+CYPPxr2T3%AKWYc#KDM2Kz@b)0?f$s}ZjGtGQVfr+P@Fh_2b$IW}YBCee&C)#5 z!y%&--$57-945CI*P}WeuLl{=8#-I0+q@uK`(sX}3?`dL)&ySA#E@VD@e~)FW zFN`<$`?At;H!|&xeG=F*{0`U5q>Mj@?Ewx3yPn8q9^s$TK{b}=&KTb^R7Kw=#=kXh z=Kp?D{I4w4f2`DGMXL7yTP@nPbk-zc^xrjTNN=M;LDk!5NFfbr{(3=xg8F;>TOQ2* z6%WFTU_2BZG4SWlc2d@07ogMI2=5RTT+M;@Ku)$d@b)YJ=X00jzjV?S-2X-O=w@uP zyT5kdtcK1_r*mdRWu?dycyEvSDe+Lki8(qC-$Wn}CeToA1t^x8-^S9z_ zQM&p%jtazDcAo)P#Ds>Fb;X0sLgcl}rwxsr4y)sImYb2=Yjp&{?q~ah z9ejtjiFhZzC5uk@Yxug?Yyr}ju~P;FQx$_YrP_=~ao;91^M4e!0&G5N7Su6rNBxc8>LJ-lS#t0E-etH@`_B%UO6`-Ov>t3KN8SvAuvn zC{uGkT#K7IRy(-V-H5+}r;!xG|5Tco@gu3s8{*ttH(bPOQs=Zr)J|)L7Zn+hLRlDP zY^r`Lk^+fBsxra`!L^hnY9QPdHEL@wzk_s|gs+qo2gvZ-@x*|&=BcFX%y$(j(Lj4Z zq`J>HSn^yYR)%`iLpbF-C71z*;qCJ8lMXv(Sais0zKllT~~z)I*uc5Dw`fM%cRf3_;QmNOSpQ zD#-q15wc-AP6w>R$UPh*BAq9^C90d_Va}$yj3y3`D%w?SFShR!((kH^p!qtO)y|yAUEPPDXd`imh z=QcaQN(Fh03rpFa*MY{EqA=0PofE;_CwM}4RbAi4R3=TiFHSa`gSV+|R0RqDfLRiB zz&Rt3o2;|T#12w(dN<>#JMip@O*}guS+5=f@CY&q)u$PVAe$^rQfCOo>tF=>xth0db0>0wVgVR|| z%8bVnVkh>Jo?fSlc0xSDA4_a}s#@=N2Fwlg5fm}5EV@XcrO5iE!McU=6jy3>0v#~a zzk<>&ZARF2d%sFGDkU$u=USOPCP&G*w$np;>s$h5ssMDgipq+n0FS3tDjP8e%X~O* zCcwAQOX)2-KtK{3Uiblis|SlPU^2})6VF4K-PdG+)9v0F3;Ggs5$%}B zx{?6S2($_-g34}=yArTW7!R+Q_AK9H)o4VMt?M#oy~}*xDTT$*tv^KdON+Lg985YK z8eF9FEFW=uumjDgG8IXlCs3hvNWkZOHV^Vfz} zJ<=e?3o8h#;;ywi6YMlhNBX{a+_>X$^}Yv3l|)<%yFGg1Hs-t>=+IV%iklKq_L@*m zPzIg9OrB|ku37h~kD5cbqy9yF?`WxIchbnvQi4DuYR6J&3uU+S&4Ry`{z5@_(LxUX zmo@K~=nMHrcNdN5Sl?3u3ToB)KkzC{|CLv9+To`IIZV4-KlfPwVlReB_MR;W!yJ)q z0H19@Au0@?f;|i+a7ZP@6yE|r)Ra~n5K@z<7orwYn@A311kx5J<|gYc=*b%{>J92G z>e-gM`WV<69=9&MU5yuA4Q+iUdF$FHjVFJ<-2IsJ3L}ESN_hI=>pgk?!1v&2#J6w6 zqtD$iH|JaC7W}$Au*Y*O)KJlKGVA(z!PnEre4Iw;_xn6JG$*-Qxp4JC+Yj~Ot&|cX={Foxqir z0bgwDJPb5a7pCVwjH$l)#hBIaZ{V9g;EqT$RIdz6BWbFsaoW08`_Rtl*pI93+V(Qt+jwpuKQGPwGs`jzVaV0e3$pw4C zcP$76^$EZ51ag5=LcQ4vTP4s`hK@5$sPZw?`vV`yC$LC^H};|@w&#mjqHg_H?o$}6 zMpV){n;LivyLzy2A6cdd=#XNgU;U!wJIfX&ec5ef?-r@jEKF&rSn&Z0d)p>cYrqVV zGQ!y}Zer5UR!BI(gBYcPh^|^CSOLatQckY%0&VD~=VbKNH#0CR!TCn_?8Yv znH08_Y|=GDS~t8diaqfh0W)#KEOYbd9XPfY@rBx0X zg7cXvdo}Y4sWNepba&p;h9AKlkGd=orxI!+ADH61eG#a!qK}P zJuZp~GGQKh?RE6Fj*1@N_mB42oWZqwR#Bc9?aWONBw>NqP$XB?&B}u{p?AFANG}m? z4b$s%X|Y|lT&{@5qoV1O{TMw>7|dxwO)cH{?2^hOx2mns6+S!!A5GHj!$O8UqQdB` z+lB`&>vbz2i0~n~tG$HYNTGa`m*7=*DguQ{G+qgw=pI!Jw4>E4a1mo{#}W<(M>SpG z)W^6ZOV;QCZ@&jcN8B03w`z=22*Z z0bkBFiE*my-^2klrRq*XBBo6Rrv(F8%~WXHVcuEW&+(kHFhe#PUSmM&L138!-Llwz ziQNj7UDYB^n@^(&&m0xH>6f9#6JPhpr=Uotp<-=9JD}x-ip9nUpCvNo84Mi5tpmZx zg>9FR{9D9#;N;y6`$W&V=cWEH90cCoiq?Ma#@<*)8R{{gD?T z{HfX{m3hWPflP<)%Vo8q(K3_t{C>$F=07V%Xy2s2j_NHzJ4R-1?9H%60As@}1NMd% zp(X7`rekvd55~?SC=Mp<*7-tkg1fr~cPGf;?oM!b_XKBf9o*gBCAho0+u#m^-7N3j z-2dOai>~VGUUXGg_j}HBG?7QRXX!0g1G$ylcrVHBk1aC<&(I_}fo27xLWn=k=5Wx* znwi@A1j4!oz@6}qA-BA#vWySfeck)S0y<{jP;whrB|^-xV}79|wd3g(9A%z}1S5py z5sC$QWkL?6pQ+K9>O+0vRJlXbO%{le)}eYg;fQl*c#|}zm7{f^rp0}DS3_uw*y>l9 zb4_ExUx@Z2Hh&%(of1d8{3b56u?@aB5;YNiG3_Y^MX{Z+{Nl>W&zKq4>AoUjyVN{9 z!g*3^mL3J3S|Sx$SalrJKkl{5xMW!m`YH4GQ~MAQIn%UBDyi#eu0lQVp6K#DD_@}L z4O24jvZm^U7<3@wdm-3q`>Y_=BOW{A4j27LtjWCUrI@@)Xirsg8eiLhoTcAbfYO!) z*QLPAyFU`q+BmdHJICF@ZzTK=--3(@oM*Y5`JOM9IS`{->s4&zl+N70=YS{jlZcVO zFEZZaOM$Um67jItiN&maX3*M&0Llw?=%H~;lC-5AfGe#^lQ*{5XjU=Bj}}Z;wS>n9 zZD$q1HmO2)L0M&w33(&d(4*0YkgjjcTCe}7%sCdf(=Arew>76FqyX(PVAkeDa_Ky< z5i-T9;N`43pkIaErB?Tf1o6BE1R;dD`-P^+YBTG!=kx7A>aVkZW@y7+(aXum(PDT`t(qU6d zY;EFlTBSMYH11r0zPuC4^!|Io2^Lg2lZyMfk<}Hp5H?NZc(w;}vqHDCxaH95W%FdN zRl?iK0}0++1#LIN*RphkF+>|zG{bR!_m$Ym#N zw+sC;#1?owG=pe2aq|G-e976Z^i6ZkY0*t8;GOTTor{2#2ds0tYnG_sAKUING>$PJ zC6@7Km;aueP{ry!G|Jm$ONxYIdh5qWI+2d?y0>VDct;mOV|Hu4X3Uv}m0%^~*Gct+hCP>u^`qH_6Mpju}_h*e@EJ2-mp zSLVp3;59D`XGYcoefA{KE`kGmnA@Cf-m4+~O75)irf$7jI};K3>zy*%T?Hf}c~<)U z__T9ehQDtJ&w;pR3bfgxvY>?!0f1k^5sT>1+RXP;^u^5GjeS1AyJIHCk?kvIr$`hR zmyg0A>3S7)I&-=61Xu`6VY3SrUci#}V5BN*zfU%&rjfSRe|&=OBjU%s=G!%6?Qt#Kxj2vx}-JJ`gM4}^QsCD6WFYyaAluRY!v*GH~Ukq+t}?VcmjZ*bDMS*x{(dYujq2D7KJpRxDu+P0@+U_#Ok5N!3H7r9)?;HNbKumQ= z-R7<22j!N|>-3)))CAOO%6s5b2k**gngNagJp(N7nv39Zh{SQdH`9D=f|c=~I=ebk z55nVF&V`W9^%{|H@5)6*iC#{@!&ml#Qpa&t{|Z$A&zukriJ4TZtU-WTN|#dW6WjYu zc)q;8B^YCiy;R*)mB42Q*(7C~aD+b;$UAB`j^sj&us!VOq-L8@KzKosS8LCF&9pJV zcv2&6q7=6A_?vf5aZTbAp!Pl9&~Cf0D5I{`jyZ~VO~z%4Xl2~DS!_n@E_xo$t5S!` z5vpTAyETietH?7j$dwRUbM)Lj$#q^DT(oC&bf)WE3Sw*ooPGCZ=S@pnr0B#K9B}tT ztotf61qxx0+n*zr*rFf=7$|V#WhKwf3v|)@%w&@|PvnXuYLKM2X zK#zNdBTt>u5Z62swGTj0*JE5Vqvqg-Wgj_}G0ZHc8R+Pa$!7;bp5ZEqd;^XcXSkAD_;!9P$6OV4B%eY0n{!94q zvN6*oY4j(LMLC7#QDA4zAqzQujFLELFv&@t=LvBVI=wg$N{=!^WNj*WhIc86*BLck zq*in3A11q2On;~u8@#i^v>98BZfG`75!|4|`+&pKwsd#3%jTYt^i`LjfKpvIY=Frr zTadzbtfpSBz}{z)Lm6A{5Oe)v)<>^2ZT;&5&I(RrUYx?CskZm-CW?jDd04m7>_b8Q z3Xz+*&QwLCt!&H%wMl8Oo;v<@%J(tY(Hggw)`y`{EuuL(rid;zRtacgLsHaPZL^SN z2ORtH;$oXx@3itUJN=Qdxg&wVk|IkSnT;bR1gk635&%)H<#_P`@t9Byd!e9$WTBCD zdJ0VNCBhM{Z=kw{NmT`rgmO?&@+zYItO%b1{!3ozSUJMFKvDg6qM%5S+9{_O6jjTB zJ#~x?z1;)TY3?USK{;y5SnnoV!i^+Bc=MDT|l7!)P zJyfR`*K~=n0;Y`w-bv=ISp#P%jJ;Y{-OCbPbj0^BlIePj)w}hT3NDHAf~D1(nC1%% zn|Mo>6W&)16IJoIcGgtWMYG|A9;8Yt#?AbM{}~p0`6xFQO3doETuDS%H5JQdij?U& zWe}_Er@~zxR)pE_qur3}nz{LC+;q zvpKw2ohr8H&>ojREKyGLLyh-2_q!yEnukU}hUePSP+f5jyYsSfs~K^bg!4Patbc$S zxex)+TpCHL2ZfwXG4Vt-XE2n<^*v-4`wF8p!k%WTw+S@fW)b1=U=kUi{m@d3ub?$f zKg_2oO)H2DWG@;@<|NaPM;Tx5++$k~G_j>n&CJi@T)#VD%3F`jv@^(J9XhmrrSd$K z*^WsW!PU9^EeqOrBrcMyf2X|K|7J1iGZD~02#9UP<#xX*MW`ZPS9j#h`>4Z89u!QHrc9-G0&R-YHNoOPa05(zU)ll~m+akJpGC%en zzKN^cEL}7dv2sLOdfeQDY0M{WAgLQ$+O}>?f%cZRo5ezK=rYXMo_0DT&LdRx)20IH ziMXKL(eESp{$bsSns=9y1W>pTc~yrenmWBMxMhF!ke`>HGxH7+>wm=%m{{K-{`bVH z>j)A#LzQLRF503BY*rd|1DlhyhY03-{{*8yS;nazQjJu*6h5XB!p5II8HzSkK&{&~ z?^)}|uyE3=_AU9tD8#PLafMVa5gRnTBUx`^K4dIXF$&Je-3UUAw+HYfbcMaD_#aEX zX;pj-79;nQu3|0eX?95p!Ur!2e{Y?5m{~KLw{*RrUd{W{W0w3H?fDHRf)>gHE%IuH zC|h6_e;qs*B&V;*wGyr6-ONuEZGg>#oij4f7~<4lhd0Gi$j`4;*_LO&UfVFMuFoj7 zyF9YC5WYaScPXm*xo=e`rxKy3e0FzV`7B}ykvbGs!2hrOn^|dcqrM7rbF`3EdhrWvuMaI2@!URWg zZkERRh{Jjp1+6IahKs2ztO^9xRtiea)6ybdkxGQ2_a+@~Mo;*8erq{~!|!AYJ3?o0E-U$=@s$>BUOlf(#><1}R6*G(h0FK9 zhpag5(yCi1SA-%%Lh;(1vlya|ojr6z`=z=vGyE*}OlBC*voKV5Bt$CJ>IEB=zn)T_ zS$)+67UbN#y5yjdqbH(5_Re2h@LbV%tqeIo<)^ikVsiO7_nhSKHyrpu*lsWraIMsY$1~yJx5rhP{MU^ z0N!RSQ21~w9Z?}5#pQRvYul)-I93MXf`CC@-s{ATYFk*Ec`bGolht=5MzW&|^5F`4 z6anLIjVI#@wYmL!wgcNdOr3j7m2lwB`1H- zf@HjRZ{YrMO;`X#nkzJqaF4M|@S|?oX(g@+Om(I7Hk(s@awa7MX}tpUjYUK}qK|!6 zd@u%f)ku=9!B+>4mDUSdi;S6P^Iln(Z2zS!T6(W|P&gY%D-=fJ_+YxrMhM6){SfF4 zb>c7`?nYcvOVssFZT0M$T6C$?%i|4W2R>p)t8Cg6OL(FVzU&gLTp}p+VY0qnk#_0^ zU)4<#qxrESux#cd=zi!MBnx?^jF=I?6|M^e7-P;OJl#vPgQ_xv( zh*i~9yT19JDSS~4YbpSfW%4C^-u0(S&XLu389iE0#XqCVhp9zUwTfydG^u%;IB7Y& zPi^%4QgzltrBeOPe}^<&GiX-F3DwozBa?V(J8eUvs`prMLt7~?jkC`%AJ(5#cD?cl z3w-us1{P&$MC&b(j&dhw!yW_YjxY$RHwD;DlvwNTE|j(yCNnx=`zr&R{hiXu=VwY8i@@{cqAru)5Rdws3-uw} zG#@pB|6A`Tezsjq2PV``5jmUwSgmvmfGv?Qxgo>#a9SE6{Drsxbb{Wfgn<0;Y<&BosLK1%=~zY zzXfz77iKLUIby3;q0?s*lpUUS75!m@v_!@?1~3li+G9DStN=k4&R~@KBdo42@86=x zPppLKADP#j5|l;VGdu(0`UsOV3C{qGag%HrBVf*(<#-on6 z8P=mXX{{p`YHNTR$G1%6E@$Ob{#8S^4ymjL*@elS-?YkMi7P>nooikRlc*GK*^(2s zl2u38dM^r>YEh(yiLnC;IB4%eVu^Oa(t5%`kEqD6v2iH9`NBZRZ}d1nZX*Wx?;-`B zq6PnwU9;zo$PJtNM@5E3YQ^HiP&=vYrQ#b#I!XHyG3geQV^t$61Xd_SfqtRLwOz|6 z`5;zO=W5mDA@n9M>!6R^VNae=VFMY{3ZSge!6^eErgwxFQ&KNJm~nGDz# zO3Ql>%Z|HOb6I8^2pq?TWH5p*XK2V*iKs8!2Z#%fa4~PqeP~X<Sv zVWP%A0E~POX5*lCi<`mnM1-03{E9s98i(O5QkSE9NyG!&(1byzN3nRo+Dm$|UA-UMxO=iR^ zxpRWK?}O3DPH{lpc5#&BRG`4w7gy0{Q(oIcmyt8_;^vG_pL~MD_4J2$YnPEC(*~!| zrZL8WF`YX%!q8K6ii0m>#U}{bG0LRJTIa(6n$nT|>e?LaKl{em%_f#p9u0VhC$Br1 zzpOH}wImvJZ9NpP&pA?jqYEahYnUzT0fOCZE^QQHPrNyxQA=e*fLLA?h-pr&_|D{( zLy#1Z$SWd=+QrOZUelHH)G;()NG$3auJ45%J@w9lZP{kPPqFdm&3BWjKE_z`MH-mQ zotP>HaP6>w;~psO>>@FfJp8o<9a2JKa#kOY+jIzN(5s#x79U6CQ)2lup-;O$$ej68 z_{Oc_!cD$gMVA{#Mv|^$Q>&QkV&?23rC#jZ$7L@zK!B5UMsL0W zNyBA%a>6wfW1>MZ+K#jXYo$D(cfKCU&Lc6qws!PekYU}e*{SRZ6_|+yRoF3v zJ*)Vl$|DI~ogmrD*tj~z(u&U?sxamEUdTUvVvDR;pmsdINwusC6Xy}v883JCWii7+ zg#SOcTo9=DyeX++GHY*D!n>?12Q5zaWJli!L4)oQsH!qzP`t{aEC4dOJt%a`Yhk z|1O_1qwrQ1qmY_M+l?1SP~Kn88a|WbBR#Q~d43reb= zE2>dQ_@AD*%2+T}LI3Uy|C@wgm{|AE%OtN(v2c_&04*=ntIi5?-<%kZD-ns z!`#0u!qv!y_W#>(F)R|yySKkcy-~v!Y0=r!%{9^+o4>fj9`wJkZ5UN=C)+kIf_s6L zJ=&Vcpq}h(BS)|o@BH`9^#3Fu%q3??Au8T_q0u#!BZk6eqwiqx4>jK*AHy+DOm2qH zxjs!r({qO1Y4+}R*8Q4efzuo>?_&@g`e)FWGm!BWN!Oqd8t_?){adNj9!lWnU^bI6 z%8jZJgzrj}CI`162)nmNHmo4dC zPqtikK=3l|U+XqIbsI${ zb}@xN4NFzye-`X;tg#J@Zn^{FT%C_TPYp5Sd8ca}%4RY3kY$y`o!1=6Qd!gvBrMmB z5q4%&3Z;k+34CPIBz0<&aN_5)67iM4k5il?o3w8+*GVA7n1aN57XOkjop}j9i~1km zx*DJ*_8PIY-G8`A1Z|~FblB~%uvG7SS<^8-Vd1-H0wralWmNYLpzU3A*STk2)v^&X z)c!W?X-mB_)|#3=NaLgUadhKC2w8zfLJLzR@ll(5#j!tZoqdH8sE}84G<-ZWc~Qk2ie%UN>q~gI&*SZ~Hjom_9-GV#NXH7_ej?mx@sV^Prje)^ z=O@A@e$c86(cacvf5OM}UwbGW5CPwYO`ruglW-hT8vJ8ggh1VHc{h<-zm&HMG9NDW zwQj^1Qn0y`ANE`-wuU3KYW?7QTQ`!&%xpJm39eLYD>;(l>kqcCkI46d>WV#YOqfPd z>$UlpPpCa&N9;N5#D8mYb+L4Av05+p4QbUr(yl~?n8k%PD*a{`rI{-|2!ron7lQh5 zP&fz=KBegGVFUsvP#9Z4e2dLoxULQ~}OEH7Hn0*K3c zMB1)ul&dCvWc|J0L99JoFZy+>GnQ<{7WUV?I)!FAVG@TNxCRfh6Xc`b5~S9$6jlTG~{+*`>ti{Vm=VY z)|;<#NzPP1lk1NK52&mZm)_6=qSKEFHKm`-8cSVuMK8o!|FPLR{Sks1`MUj#Xv+9d zy{WDCUA9jZh~5Ur zSHAU_$(}&H_@s7&O4{|X+D&CE{Z?sPKbbo$!jj25u97yvtB9vMH@opUR=)5WQV+~Y zRjq;-CJmYZNTM+2ElORchqIUV*1B*JvfRu(qy7vQ1C}8m(gGni_mvk3;_(j_BX}$C zJNMF8iO5_|TP`K?H>RYq)G`>xpvk}Yd z-GP5U-Mb2pZ1YJ#P|vX9wQ+XAZbNbpDTd)=4GVx^>$cm-U(68n)V6X}WG~~FcPaTy z&^-e>rS{U)kP5BoL)YPpQncEKIT3^8#Gx3EF5eX2+q_{N)jBOIIzffrZ~rX*3HF&| zkMXd~Qk6>dY(fDaF1>R=ecHRm?Lr+C$rrHVN10ELtCz*hh6+qS&2~e$&_D2I57Pu! zS_LdZPX3N><5c*0k5>#WG0zD4!pSiHwtaB8srn!uw9@dsWlUaQTDfH#e8)ilgm(hh zzG7bFJrA@hnm_y0?Xc}V?pr)Ff`yx+C!A{h`EFibpks(&-P(xV$VI6 zIE|CTbw5woa=aP+QO7fdF`$d?9?57w(Y|k$a8D{e^H+KVtHvscOLs*Dk6Fwoex^a| zJ1oWQ`6MMY|C;nL{U^6F>`kg)cw80UW&67QNB;q?FK&4zs6}W0Q90g}6O4_;KiE)B z^3z1_*3}qYU#s!dZzJ+O?qpD%UbTF*O_V1pY&!YV0DI zX0>$sglu^qH~bhRAGhaW7Ob#0{S_r1#4}mhlrAU>gHyWa?g(h$g zU($rj={&tkcEu+AuFOQt2i$mL&tK`z4-Fk(33Jg?;G+M&$93Ak2ml!?EJ1}OCN#(R z&)73esSa9s(YQvc3N}_~G}W4r?c0|~Hydt$EIMI~BlY8(wa)=?2}fEaBlQ(LJ^(n@ zI)+=aHA?-sfcDr@)4Pn4{DidC|CYX~B)^k!kE6va9spHz$}iR6d1*9`caBkR*2O_R zQIs>Ly>FG)kNeK=W|E?&Yv2hOu+nt6p)r3E7T+|!eNP1t@vN%WrQ-pr^ zc9HsjYb(3;xE9X)5&ymUzjk>LElZ4KVTEAO-lUxy7UDzLO_cX$amd=ifFeKlkL|!K z)p5{PQOy`#@yltkMZi}(S1-N!J?6i)xo+280>U-!$NEfI!f6rtUkC$@=!oU@z(f7Ob$BC*GV%p zcr6pLpFO0XU#4C?y-*`<%Ns*~1*^!`m%hUdIGwny*iuVf1qb@!vOWFYC7W^AHovOCIwga(hZxv>%4Dg@ltexjLPX88eK8eHv3P?~d5WHJt+>_nKevbf>Jtcthz_ zt-aq-5#=$Q&|d;W$+fi?NXtdeP%*LRDOJTGVx!Pi+r)y;iXx%``?mO+0)tj1%d97o ztV#e(#mq$?;_2n!Bj;8U!~*ojUPd&QQmOxp4Q%GgnQFG#rhTNgH83j!4C${0l^Dp4 zjTm?$^kB?pwj4{hzl;a|Q-S51e�!NSFgn^t0FO^{Nd(?dB0j7Z&4&zIaF936@>S zU)wJ9l^ESlKECp+1xT_a<7InUE1x_lGw3{!)vPSvGV@fO5|MP zGiWOH9^Cl(UY0yVeP<1XTNU^XA`B1>94Z{wy;9ur#OcXj^x7Iv0(a77YbtNxcp`xN;^(}XM%Zy4TVEu9kJ>T zqVgAG3}B1LP_x^ss~}zoOC+ZZj_O7T|%1I&099WKdj;J3CQo{ za>IFff}UX8Gwxbd-0q;y|2DsZ$o-`e_aBXmRI+A63J2Vx@D%?(z_$whw@kAG`KFii z`wYX;?`!q{e(=-q>oQ>V((os@VJWWSDQnkSCRK&XLqNoR3AX=#r+@6kr>SXeh|pbZ zgStPU&rNwY32VG=B$e1ajAw;9+l*u(Chqb#OdQ{e*hZE1Ez}l$@sw-0n6i7-UVW(N z&9;GgWg^(Z{u5Pr(>%Ch4H}eeh1P+yu4kP}`7(#VAL zu-$aggC^>$P}0X#2CluRCYy| zcvSIasxqv}m~5KGd~A(LYD81ZJ(l#@G$k*F&URYo2b&FA7x7f%QGvV-luU=bbgagn z6Y0k43)4T-Ds(FSE~qXPe!L` znFncIg6`EvBwl*r*}}al(hdA3ht$$)`4r9MW&^@^urJ=yH$Z6&!oRIoy+dW|{&@-> zaG3#mUo}c9AH_QKE{y;*>{kYFa3RTx8 zs|L7J4#n~MB@I1IYQJx>h;zcll2mc|EHu=Vx(0GO3U@~l`C7-{hX%RKr7Mivzc@+k+Dpk@w*ie&8cC_1{O!6~JzB#TUD$wnuj0d3?0 zm8@;rC1rd`1kDMkT(gD}Qu$}kG_*5rD3f6vemV*FK0zy8i2=Kiih3}9tZnANOI{&} zUcEE{-8Dv>Y=h8>@|VmJD1lyxsW%HfK`JK|#DLBzEo~LK4F4l8H3=PR!uIYBe_pgf zsR$3cVe?4TBb%^A595lxJsYIXccvp&u5Cdk?L4YfHoup6QTWKnHDR%8*$5s10`4@a zan*`lDF>*$K^Yg_;_E8v?Ma#CQ;A|rlpPucw-x1m3EZn0akvhwq*UruTjbc-6D6z5c3Z z7qJR8a4yOs$PFo})FT`G!EyZ|oSe6uHj0~q!Qr%eBi*QBH}@mL=}rxiDiMKhlzKAo zWrp?+17lyPz*62?B?Ycwmj0+{OnO=?1T&UO7RPE2t6(t!m%56MQxE`xu}eU2;TX*z zH&`s8KEt6Lp-xCCuWZLsEUHn6J1R4yQYeC>Q|C$K9G{3XlT`yhhs?;{Ex~26*kI4A z-l@SKK?D0w60m|~I=;>FAfi*H6~*N)ij`*FJ{Uh%N;gjBAj5igu z1i<}CUC&hmC~-5b0<6<%ac_1jC^DVo5zM+x`p<$iGs(cuQ>c~BM--`Y?L;%#rQ}?V zgW zdpE80p5OLSqUDT!_v6P^-e2_*s_Vcjph)36a-W>+zMAA{p`uFYtmM@m(aFc}fD(2` ztq%mF_xc2Zs-hbLmNNuKf*P0VL%xB$7c#Z}`?ewqt&~SE&kvo{6QaiH*Yh{&!sY;y z#f#9I-u;`*pX)BG5jM^)S7Pa(D)Z}3_xcG2rFehSE~+093cZk|63Y+rSC zHt&hwaj#t5S{;9s^iJ>Oh>UAo6x@Hyy0=AUL&VXY-n+8wqMGKv8eb9U!PqVi5F3P4 z%~*Z@C#kd!WmV?2iF(8@bO&dJe0X^NQfxte%?rAuJaEF~vcoJ2`3#R<+nZ61c!88B zT(ld`_3UrQb8lmK9o~&VP%7U9@ESp_ZypS8OwU3<(1N+Y6dvzb{V~^z!r!Jjhza2^ zzO>yr>7*?6#kR>J?Uvd3yd#idn33Iucpa+Dae?|cCpeVnR%#CUGga)>B58-l}VsP9R2Xn3{2I^S z37r0CG6b?qd7j~JZM^Y6s~o-7F>E!xkDJgvISMOt?0x z94RC_{0DG6?AULCZW;Q8)m6SdhT70qu+2JGPZN$AL)VZ^2j=Fvol zXA0hn0@^i=1wfhEAS>xt%Xit z+E_bIE{oPPY*cr>kU_e#zU(|mi?ffvV$Fa_8@)Sv(#Bx*z@Pp!G90UXyZ88%EWzTs zC43u^o86(F{-I+fwTeC|0m1DdK&&_Cmrb=%c0HZdpvwa36A)e9yz5rXJFF40ywG;> zmYLM;&midO{x0B$HHP()o9X~-@m{UhgjG%V2RZ~| zJ=>O>C)fasr-=tey+)fCSeZ!QPk)TLJ4-R?V8RkjaBU82v3WEKU{}so%QZeCAUcdT@z{S$m@MXh@hc&BDn~OD>M2J zzUl%6f9hR7wi86q=YLph6O~l-PG~A#8jvF|ymaw;{b5*BFC0V)A}b2d*f%Jx z^D7x?TK0hur>Gq271ms@bvk5;&UkgcqD+h?6Ytj*P9T1 zOCiBniXc^2zg#`I>8dwT2~=oxUeY9I4LB7ZOjrCsuwm2KsfhY==C5$G={bml76~*o zAZ_rWwkQ`G_|8!-XBRs2gZaW#L{R+jTi9Js?^f)>c?h`RIe&(`>ltjtpg1f`*OK?b zn>7He-_Cm;JVrO*zR_~R zt#>tnRrQLV$W^Y*OxHd`kHOOF8eOS&O5F+$7XVe3v!aJ?h2X7c8l5e`+TS zd^+p5r$Jv&-RzUSq|J?Bk;`R{U;tO0ALl=hp-M=OAZEZ-1oa&2Ok$9n=d4#WJj zP+~DUH)|l0iOoi+l_fUE^l_I>TWGVFA|svOk1~N>AFbBY9K zUsS^BqjyHaLE*rcs=%BCi2UXmRO3fvQd&;oI5?<L zMs%}%Dx)pS_+*E`V;8Zm=3q?f7h36Lx+DZv^@zYY6jFg+`ZY3bHmVJdPesx%#WIt# z6~BM}?v4#f)2+;I6e`4HxDz&N~Vc6g> z%^+cNy$t^h_`C?7ckNHQ)q%e&%Y@rTihZvOY{{r;PjqM4%BLZ0w;q}{yQgo;I0b8P zm|i8IWQ6NS{t&;ZO_n%qMXW-pLVd+St1kcHr%-@dEZnz)y9Gu*G=A4ZriCpgG=7c4 zRP^+|#!}yKB^qQCg@u8CrxKfYiuZBc2a})du+-C6NJfA53qpk)iuj9tW&%CT>6Za$ z+xdxEG};C^^23%HU-ir)7mg_HbW;Z4!l}0xI4G$HG5V>Hi7h5VZa$MEa$}FK9qCEr z=9wj-yrR!NuEEUahQf$AcLj}-D*5#k)f#z^xE_HdhlOp3Dd0LXVv_zkO{6+{w(+U!1^4*2 zL>LzNBh_0Lj^Q-u!<91lXuT9Q>}Zy^=SW-4om=ZzX|wkg)<9$}S&srsICV`+9bi_( zY#8w0a{mm%KBb*I1@la6!hGk(y0FBhXO~h!)h*G*4J0|bPBNTS7g!W4@<%Qs&9Oki zPI0?tqjm)kz-semtMtG5PWiAXo<#h{!onbgVPQTwQG`@^SjyqHXQ$JfQM3-Pcp2vK zil}bol-(U2|dF_bEF8y(>y^4hKHk;YL zXyD@g){)c&8s1n^cgH^aHc7c1!?birT0VsV z>VuMl`s$U30W?z)8&Kh#_rk!=eikm*zdR9I+@CA)B)rGUu0a+qm|d+RF@ojAk|X#M zmAnB)Lb}oy6X!TjCs(mpJh-ZW<)lv%y}KJ9FPvLVP+hUDvtyyJ=3L5BfpzKMfGE-F{&f_PkRa z(baxJ!tQ*1?Bss1z|hoS3qxl!hkDp^tvMZy^zFpvnDw!f}ZxO!SlJ0gAo^88UyuPl1SYFvvHvS0Uy z0bvc!xkp7kX(zfk_cDY)X_~w*qs*a(Toe&;5V?flIpMuXS_UeNL-15$cjE6`?Y#ZE z&k;C;?9=>Z*e*{J&qZGUiw{uJGw*q9@fa1vbia&r<0mOzr`<_@ogO;m2AshyLS!j? zf?GhyGy4jMTVyF4WJbQ>|Hb3keB1KIo7xZOBp{J!v1Hu_#pZg2FFGZLumgGd7>V<5 z_EUE2=AqNgPibpxDp5Qs@4hrPoRmn<0iAY%LZlXpWR044L{CbUr!9=G%5OFA9n8+( z^d#?v`8^zj83iv_HY3Gs+go1xN~}ntvWIEdvI>XwTTynLFfI`tA9``=PpCC@4fSv$ zUut&lBRv-oRUHZrYA5ZsWG|VkY}Knt34>ZV z@4*#lDzW6_+e7%Ttdg>$+?nLaf;r3~Cv7#%&0?4bX=N3!xDr0eYGn7Ot{H$_v`=u= zJClIK}gFC|AZ4b`jA$-nTJAYU2Y=pfA3(BE+Bz)5+*R*KLfvYXcA z8=}F@bDQlSG6%ptW#T=vRf3IwRt68)_3HY0E*M4}W1{eyeFi*}@BbuW_QVCTKJeR= z^U%A3VOlu5{%axmpI}no_!_jc@4mn9il&ZE#aWiHJOj@{k(28*`3712zw=_a1_oL6 z+_$zk2&-Fy`K1Jl*m%9g#kP#l&SrfG*6QsKDeq|<<+>44dI9mg)LFysnml}zS=d3f zhT@eK_HB36=QO4HyyWZd$|+Agp>%7~B?D0>SSIxUVDYyM+l$(ZBWWWqPV?)b#2e`R z?JH8Ebp|acCRO&Y#uk@kzR)iijhk4Vv7J`!3^BCFPKIMoH|&6t!s?0FQVctv6@L0v zUU9No)OCme&OKZQ6K%(VI_XDm;vOhHjzQ=!xT*ZIpX^CtkAsZy6zLKm0R$K1gfW0k6UPMyP%zw~OB*oW%+F&0gzD$X?Am z6K|BHbuh&Z6&xtUHcjfA(pR!-?d{dX0gch9nTp>BLbYM@&KTPu$^_F$w@FQKcy8!4 z>TV=Gk)v{MCkt7lduvu9pD8Nkne&IP@E!EvM zwko)#l&`~?>koP*$kK?*bH^y#JOUOBBww`7shlfUkL*mbNl0aBUcz>>3q@vqwI84A z($^#w{qhk9Umm8&+%xI2AK<3qW|}ZFQM-Li`N+xpcxz}ii%?l#V0q3PP{N+Z(s&`_Z4L*(`#@}g31|jmq`!w#g zn+mm?K0~|RBI)4Rc1hx3Mtq9b%ynD7KNUQY7h*h5%90~zw6($sL-7HrR_=QEXdb+^ zQ#($eVR*iXKpWR^>7yi`u?#Ki3{|t1wbR9aA)saf$?8O>B1LLG&BcSyyOybFAR$JY*M&ehDRqpPfxBFNL?T=r z)4E}s_F zmR%JC7TFgi{a0}bc)gF#`%caGH};Gh>OAdFeeYIALtws?Qf%;f{WIIxG+@2_xTv~# zm?Y4x$a=WYD47LsZ0zB-N8a^NKw^ZC@+{tJ)n63%0#v7% zl!NO5+FNx@gcAC_<09`|@E^xQuqHS+4hYV+y z>_U3!5Oz8BG2;pj)VP(#>bZ73sbA@3YM~R)nwn`*NmTUv#b&`YieUQIv)&Sb;KjL~ z4wLoiP@_iT?|z*Wgq`2bfJy&6qYI}dH34jz((-=D0lGHdr3E2N&8rch`cI+>c0m*izSjqXe5P@b?gddZ^T$qQucj}HM}N!WrVC{%dR0LLS!ps zC(BscC`HyJNtA4<|5g3o*XVi8{lC|98J@@ae9!rQ&v!ZZx#ze{R8}0R35<~OV|i1a zppWkx=r2*f)H~uDGio@=E8~#aoXeZRo6S4gthdX2cBOI3s#y*+hc7<57(GROKVS3?MAp7;ZEA(~tKzTu=7vsD*EK3}6 zS)9foDbPL1jmB?z&Vv5i6%nhV&_^XI6VS;b{qFDQO_8LLghC0$H!`;ha2>vmeNt=Y z{od{a?0uy@?5i$Mp?>|H6%MVwjAsh0F7Nd1Gb{;t=+sBk1H5gwYFT>7`|Fv0?-m-) zdp7$97Vxtvz6ql*;kj+TM+$tCpU+@j?i5V%Y8O|nr3I_>rS=O7rzO@JE+}?mpqQ69 z-5XyZK9BUwdju`S&jyEcD}>dU$Vl4=$lx{gAIsPth-fvEd>H?AujJx&#y#N-$>x|x z4C&%!*0-(R?Nf6~@zo|Obm&d^ z(5V1j`m|lLsAAq?kxv!%a)gk2YJvfw6DG-S#%kzyvD9B9>#6s6)5if}?Xhonm%|s| zA77E2^da7!_wU(vL;YOsH~uPN{A5D)s4_Z32tl6>ORjfWMyVIw``2%GLOZ=W0c;u!R3 z=zev!vFRlm(NDCK(xM|qH35lp;rr8UHKML@3?07e|577_!mxE>HC8NxOM|4}JFS2%gUMaWIfpUEJzcb6$)Z@cBUsw2u$Aodn{%A!* zsP|#3@Z!R{^FZ+Yd{JVK^592e)T+z!tniObiJ%|6Kf+TFq?^(OlPObntJxQP`qY<3w^GF=b?j^)P<5Kq zns$%Pv|sS??jDJbPHk$g?`drnsJCo$X}(2DASLu1biXQyW-uG%*9I9#^=W4y>bFE!D09_lUsRGft;>65+Sr&=RqJqN@!}m(s+ImpsAFmv6 zVJ12Hr1sW0L{0-W;dFzxNr*e{>rMI=2&GpPcrC}UDTCxyX99DlhOA||> z%#c#n$;91!x%X~O#P7xx!*^grFx{3qBpnHxTR*>MN_%o?~snEkR6CvSmbH!3cD*>?|6G7}tSgJwVF0q~F>h&YeeZn( zE!SWLS4c%RDroTqavoLVD_t7|YlPZ}#U;DSKfQQ(cKJaiijG;tRIxN(%IYKf0IE~o zO)Y?H8_LP}!eM_;d&%yyh8n{R@c@C)1p9}9({eZ=tXC3So--Bh)a7u&bWL}52n*!h zmAy(X?5EY8^U_@ocC5(GVrX%1CLE1~8q-AGpHNw$%{fR@l3(O!p#9hQW;yd+IQKmq zrQa#2Pjv>;dQy7UKqW`s?^Mz_ka0R+Rmhp$;&W{}`)Y_c!;5P*vkI@gh|dk~I|?u}#8altkQRu#_a4?hmFsxk@;V{( z({zJ}Q-rA+7-}y%`Hk?^Ld3~0UHnH+xW8Dup|SYd&FiE>c<Q4DVz~6+x_?*;M7XLYE zza#UxU;aibeX!0lAdvq=&-q`8jadNJwx5ZOUxvUxC8BWP z@Ocsg!;j1WC~UlrPT43jQf~mk+jU9VW`6r^2DsM1Bcn}Iww?@IZ!XX#PB59kB`CIQ z>UX=?qAg5glM~H3%;Sw>jmKk&Bw*rh&dzAdCN*}nb~m)QbTxH865|r!6*(YuP?TGc z55*(Qf48H%gVavyE+mNx6$o_PZR^PI$|K#mBO*lVtPiD)#K54;JUn~1^GGrCFf;3M zZ$BNH921rtmK+lt91%m*Nm7Z7N)JxHZWa)Rj#ls1!%o^Bvco-B{FU<>U!W^Uy-EGI zrLAA<0rg$MOnu^%)lVauL|ddJjwU3>vHwy%JA42#*q*yNGO(D$c*JG{hU!W@;<1SB(tqle?Bf#s9z;&IrJm) zwt|0w9`!-4&P;X5t}FCaJV%fRml;$`)z?>YSbp?~>oF^-O?J`1s-KYphC~f;#uR{X zK>otIxr#ZQvvsh+daVD%dZQ#nrA1}LJkD9UdDy$+QFyEe-eSuE5~Z(mZ9p8cCZ>ox z*?8RKKKDYgv4LN{Z=R5o294YFi##Pu!O=P|XMS!g6%;bw$JK6#bd;E^$6Bkv8n#zP98+(*>2J-L5PiyqpI! z{9^ga{uDcqB(Q$or`Ax^PyJYE;~n0tq@H=}ilt~V9A~aVryob<_ipiNhVp$7mtobq4X+~+& zKp@qHIXb6*LX83d=8KDBt+r>#p{`%!_Jvw1oY3c=!Hv_^sDF z8!Wu#--kxsIv3%WD`(cwTK-q`oP5)5sr_PJ9!~iBN*I=vzdGThc}E$~{8!#w=lzjt zPPCQ$=!GyvwP*IT15FEh$m%y9WswIQA6AFV5wy$>@5*WzGdqWTL7c#R(Ejnpsp3F( zNS|9Qd!udz7m5aD|Cpog)r$aVZj_GGK4n`t(*hmZwM2cwS9gaii%ZSeuIHl84l!?* zK8zfF2)sD+q)LQ|<(Wo7c&^85#dz-zR-N^iUW;Z9e%_1cy30S-EPOfB-Df6;+2za7 z5zX&Qn&qbkBh3nN!%HVvoA)f(TdBr-4%$7D7fs~c0~f6K>|WhYK7%{M=+f5!XDtAn zmF0hf(glmRvbDlnff#^Ty-r}IM0`VXo^)i2&lfl=>LzlHS+%ck2(ex1fKvYHm>>II zX$C`xt)F(!Tm~PsxM6!FM$f*fb z5$Ed474tAfbM~IZ*^CGXVC8~D(C*!60zS?T5!iVTHS_L9b_mt8xWE8|$rURnIjKH6 z(psfbl#lFi7ZLJgTUuV&O3>Apgk`3*Pc6CL_DQ$OKP&i|9(3+x_rz5o9<^N;%Ukx&i)omfyTs?>$#=F@ zi&dk#gEha_mE4q^Z<(IP0j6vN?!Qg>|L|q()lT}$;w;C0VEgiTYwMl0>XfU5b_Is9 z6_<>gYpHx&%&&ej=oaErkYR5j-Ocduz(DR58b9kSu1{aX8aZ^_dNB$29N7s7vv&$A zN{mxV$m`dQf^`F4%*VLS_D>hOxW_h!G@ZQDp_+d)-y<)@wEp-aZajbYaPo|;eOdbq zkE58;%7tqo6IU1yX=}+}!oT`Tt)~Tr?T7zUN&wL8?~5A3_WLsevZk{B@#lnQaGDwd zKgOeIaNc`61WNS|4uSmqwpo;uam1B%0qZyYZ&AP)gp?7K{wM0^oCsDP@K1kVJII=r z3jCdHuKh0}_mGJk-7IqB^!;DN?y56zBt}?;}p!CER3Rzg-!RoY)KuFgE#GVFj zaz=Cj1%?M$2Kh;HqaLyZ3XH$NGGxl8nAUgA>nB!nJz6O-+`%$R)p+tO0QtB6U}ywV z(8CQZLo%-I%Mc*nAMkT>J$_JPxPoOscBAO90Au10J!FC?=y47#gZw13vCrozFJqI3-f@6?}{WmgF7%4D-hXCLha{9RtkCmK^<@m@?hFVhV5<&3?Q@yjzJ!n`o#ba2*6RyAGrnNKb{iEBMuvBOhEAd p=PBWj_yagS$PeHfC7bU3Eg5v1sc8TMK=^^LC159|u>L3q@_+Ww=I;Oi literal 0 HcmV?d00001 diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip new file mode 100644 index 0000000000000000000000000000000000000000..affcfb6e9ed3d7f078f7219d98d9203aaeece53b GIT binary patch literal 75603 zcmbT7V~{R9x2D^8+O~G@wr%gWZ5z97+qP}nwr$(Cd*1KNOwBp-;~S(ZtMX@ES#>A1 zk_zM`LBRk(|0=!K86yAD_#Xxw2p7oS*w%(lSqT~lRE?Nd`Crw^6$S_x>=YCT=s%3y ze{r|ehzP;_$NgUw@IT$nt&NOb{~g2QmxHywt%I43qr)#w&0mhj4vxAEzqGynEBil( z2KS%X=7z=&{|_PlXAIW=j`@#JX&7i(Xc_-MkvRVjN&kO@qW%*e)PIb`9}!A=U~nLy znt!yU{P)oRsUFRLSMJ|QeWj*plOciV&DZm5+_EB3n>LvE33$XA8N= zaXdC`YWdF>bzxnVYm{O6LP@2OdIuBNx3lwc2Hwx>#r{4cEAh$w{!PThq<8~I>!%vs zd)-yy!=`$XH}Z-CRGCPu$uVVy>GuPE?<{JrdUPJ)0LwiF^v>kIlMzRS+y1mRpX2;6 z;zwnQkqNu(*ZCTJp?SDDnC6fVt@rL5Z|_^C#q3@anN7kwl;$dQ)s*si&!`ZYj*$+H zUhb_WoEyAG@kOaTB*jhv*0FGp+MD|E;Hnt0aW#-r6Pk#O$utks-%IUn7-vpOw6$!? zJTx8!iPAs_usk#$9L>16h$lHG2|#cVYN@OLsjP5UYUEW0J(CHbNLGMhHaJl$tVoMT zuuVhmepjZxI3Ws{jO|@1kNW*uV|kDcou=l>tX_bAlEy|&zvGqIWLz&zf2)ZBhwp5N zU3BOVYYDaOAJ(s+zb!U(LkA0N(#*Dr?w3Jt;rI(7CpA)?~@-E zvVJB~SCgsrsFc@?>NGtUOalo9#DD7UjRcNmy!Ff|PCLS%$zra(rIUp|tHw+x)xkK*xCm<_j!?E?a%Qb>&7mnYxG6aIJZWpEfD=)wXKDg1M&b^fZb zf}@m69#4ybLO4?_l;IH!`DJp3y>I%!Z+x=EYG0hhxH=@LC{5#+^A>mmTZS)cO!s%c3`_x$uutdhuj?p|D zcB_q+mFPN69X;z2_szoe=WR=qq6S;yrF+!{a}YVr1#2yAJ2)OIOucw8E?=!Hdz(G; z9c1deFH+5%y#1$=O z*;yxa-+~jL$0s_erNX#n=JyHKo^WygtG~kTrmwnlr@QJj8Mlf?0DVR`GZEqQ^ZrKg zuQla=C0Vq_GpLJyQZ5lN5D@;qPqMnYHvcZMvd9X^4A3J=1;h)&h|PkkMr#?Y4MIN- z0z*T~@AiU*+qLwOH&-+%DtiW?Ajj-sc>0;@{c=Izs+cn~c|I@UNt@(^hH(7?i-I_? znVKgRXB_3wPrS9Q7f>tUsbOZPdp!5Oi?6RQpOBQ98Nsgg!y)P#N}P*xGJ-ozWDzCN zC&>fPL6;WeCsDkpPh;FVuDud!8Ba~(+Q+5I4f4su z^~D>)f0eMs=9*vI2na|y;{QTI>whPqR~BR{+S1$4Od7Al&E%T>hQjr%3(aNMf~lOk z8mb|H8o~p)Lr;)e5Aqi!HcW0Q19lPhABL&H?Rc|D9(g%tbZS%>m2k6s^Pd|o$@{~X z&xgYo?zo-IEca=T?U#pS2kRNhhVrJGY`4#_l1xl@UA}|GHL`5_w?h=uEPB%)!x{Eu z22vWeUXo^CCUIQ`>+O>Z8}$yWO{^TBdXsU$XT$e$8C!J5%Qo2!Z$!b%@{Fs=+N=5_ zn}t@731ybsNqWr|yY(`aOLkz-oMXcrb`f!Ao7E>YyTet~0Y-OA#lzat0Uayf z8mH6H>zj)C@VYkAf(-l9Yn-?NKE2DJT}d{l-&= zBUrR7`_trsNtWwlrlM(%-!S{Rb-vwlS;?fCsQ~J9&UpUfrVk~K%kU*r2|ZhCzZ%{T z){z_gcygT}kJj+8$m@?t@R-uFScYQS(f*Gi99`Bp?8--M+Lm~28R4^dQD?fc@y`)k`I6+jJUNd~<;Y5OJ$nzx)`fv^Axnx)02YELop4}zHbP| zox~LP0O?ThQtR6u%xo(<%@QUuJ`ew8ee|u13Y(RkqmxMzxwG~Nr zGM%kHjzOmyX@6M7<+5w$d`?MaTtw2EU88;z69MlcFF~U3r40vy(*48*xx&<;Pyw}+ z)>;KI#eU2@PBeIpT_-|B{Bh7IOGDv29p|l_L?-2Rb#=mZ>?7Wu|1cwAMHSWAp&wyas_9Z^1W7uiRA`nZb*DWV$9(`@7 z14GLb%YArJ@{U-6=DQ-2FMPl;@{zIFThe-P?xl@{*Qhfac0uCqDIDhqZtG-k_~{U@ zC~@02xwx%^P$=aYeV~)@rL44d+Y}5&`J*}7)3y@-QvLHWHP(mm* zl}WG|16k@0?C7K6bb##&6k_9uFFXbv&f1YEdTeQ(LP+#y*E(T7E>|!!>Jy4T%il;n zWo~jdb8xq@U*( zQE?YG=8?+uVxH_Ow!g0l-;IT0iG>n!3LDUWJ65#oIQDcbq;Ng=+cYe1d+>hXnmR3S z2{e8qXOyK;Pfyz>b~ux7;=DEgf^Eq^qX;oA08EiV_VeZ;pMAi@pm{KnXA0YW^vySH zBD^yjZaMKmdF6*GcjzK1xA4$Osw5tv?y(?e9Fyog&^xP)0wVF!*{TJn%eHZ0PHiB#MFZE2WTm+0&^dxa+g$VHW9{<60%BGQh}XT zs=WZULTWEZQLSQ?Tk<6?X~*`N9N-R2*iYvvg$w-7Dk*{lFx{58@);soh>|AYf%?;SzoI%#&Q_dm1`WVVF`_eJ^T& zsZ87Y4G2Bb6dsmoIz*dnU2VJ>28!xCu`$f#x!4zuqMf=P5Y>T}&PaV6yy3)*Jo67* z0^r~kJCNK(2nuY%C!KD|89^=;#iYGOu%7U$cQU6&km;per)c}wJRaDWwnRuoX6cVj zq`eyVA@h|Vl$SKwbE}rWeN;?~N_9eM8zGol-I8;f`$#SPg~0fsrhjJhdqG;5OLFaS zf5bL5MG%O{P@lcLYhn0Fh_h;3;qFHqQ331^#c4$fLuL{dCQWiz8~A8CFYIyR#Cumi zM&TU0Jz(4dYk(?vfox*)-9$WVeyBW7e|V~rMWgdJd0!-c-$n5L7*S!{0c5@A@tbwI6fGWb^#Eh z=b(x#AHBBf1YErjlj2^J->LfCv+$KTcz+btmVQDxCe&S!=}d*b%H*^^?t>u*OqTNu zJ9zX|Ie@)^t#a?$g}5v5yZWj2*mZ(v1|OZ7a-qKQuD~b!%oWq3Hj1|GMVt*BBO&_# zEJUR+@G`GrqzI;uBTlC3{2rEmpEu(bQ?WL>Gvv>>RF1|NWHI*P;Ac8DRB@TzM0mI0 z)9H)D$p)orhqOAplk?j~>x>hbu#@pWiJ9t;QD5UiGUUx32=tKxxN{TW6;h?ur5tjD zYiRnfXcr}ZbsUMLTZ0aXEHDVw<5x?oVw*oR!+J1ca1ME3I~=pXGexKLSJ1OheO(jzcW3Av<+ZRgH5HVo$< zgvGgrM~P4lUKK&I>p`_wA*Zhm#%Kms@PIHI^t9*bwb1^*m4pxzn^@(l`6ODOlfK!T z!KyOVhwn_ejV$5NIFnglt<5cqSS2%N@YfF%6um31Lmj&4Iey>uM6L^bVwB>?rV{eh z`&UP<^%|1V%=CPluK;Y(&^VtQElKOfzv+MRP6PFj=*T?w8BX?w=$cz!^Trwz?55q~ zqiEsoaPMY*o#p3XTi@e!5pK&%uG>6nFt+Q}1ZLO2t`2%M%H8RnyxN|6y}z3U>d5Gx zL?H(J4KK-kB7hKh{YHFF(`&onmS(13+JVcs;Llkv(le({;w6I;(rkb`9l+rxLH}${#h1T%%diJ9z?9mnL{^jqY2uZm z426e%v2lEY0rla7^g`d(W4!xK{*1FI$b;&Ho~ot@Auts?=UB}@@{K8!Me7u(#-Kv*_SDf&;my12Xui zmBe+BsZcmOwc|TAJhHrS1=vRN3V7?*-ZO{-2DhzVgsSgBlfL`=h=2=Wtd_SIBI^r3 z-N)YwUVH_fmS?C2U581l=Nc8pBm->d2|!c}87Y<-2zjRq1^j+tBPsst2dP4Q;t&y5 z^Bt}oJ`9zqKrHNxmmdG|WdF(B3Go=H=J&y?WVjAcv8-mu^e~R6UI}=XBbePXfs8{w zqrzmra>`~4)y(zKgKkW6V*7wXV1+Z=4=&bTIyT7?+9l@yPKZf+?%o=$N6|cg+U0M+ zodL0?YseeN_tUy&2rR3ajR;T~TzBrh3QGS@`{m6KVVD+G=G9d;ZPo`Z+K0lO2TB$h zlKckz+X{N@`Ne*u2l+=1&C6K;S2Z#7Mtog1QDfDV3t=22{s>|OJ?$MunA(gkw2S<( zR;~aE%5VZ1hn=kE&j+CR7W6cAw<48yLZA&86<_1B$MCsEzBJ0R1?!E+o7F!89aQL6 zxNU&XHMYSoLR{~9NGrfOP`&?850rg!7L|FPN(60#757vD*~g5f1uzo_C*FD<0`+GV z!{MZA$7d;J0L);(Kwi|f-wI}z+7NTlx7~B1udzO>k6)spI-mYp$ISCfHZTaNAdP2_ z*D>er(#N23mv5EJa&<3XM|-a!Ob_`SUjwi1kByin1N_e}cf=-V$>9&s$#4$v za7MiqF71tH@HNQy1{#818o|&HQ#d;Fj}I-(_dgByyH8$plH+eLH`KqBW?{F~Aiz;F zdJx)}@)a8)@6`kAe%*RAOb*COsBTn314p(z#kJF|(7!ffTt@I^0*_Vr1aW9vUW9Rm z#uRWP!{-syiJzfCn1GadzM&c}mA69!yjs_8Z>3D=Kub+0QeDCQ6XUvMUXBX9a#r0C z7^}>%ag3LM6#nK5~+1{bdSW+4ctVt-veT_>8c> zaj${(B+i~)W9;h@wu##Pg#?t1!gs1h*G7hZsOu!gWXpFg*wp=?!6j85S3FZ{c1)kA>5LDKKPC0H2&FY)dDm%M3EZYdxg&ygw; z&#E~3&iD*vA~oI(3imIGvxNh&kR7*rk1F8Wbkjk{=^(ehEjS=zLX%M--=S-tNL0cN zj*XiZYFp;B70w^uF=%k;#x&d8WHj*JayBaN{nT7w)F%P1=Q@3IY`58^YusQl;rs;q z{0U{U;%5F@xtiV;2bGGBPeyG>q<=~o{AQ%u4$}Y{y8>-@57<(x=u4;2D7*0#-xh~` zIB-U3+$C`QG-0cy?CeToS+r_+I8eI*feDYG;YB>+f`W)AOZ=R&64NY{LCnJKv~ zkK%HgVqzc%x)R<|veGqdM7AxxY&yVk-(YfAs(I2UsJq=+`6>HkMG(BOGcV{5&QB@A zo{IcJcu4$Fl(xn;oca)r$}+KYYGokzC~3;JcUPNdv66G@;F-t@2>mS;WFZtqusj0N z$!dqU?LQEe_vY`{3G*|Fnfj7*VqH`g)A3g}SckZ_rd=u$bPYfS4Wkygei&-TOivz| zMzILjGRWzHh(Y*~7M+MM{f)vk-y$aCIGt3HKExY5wayN^P;5Rm7rmW|y_Yt&A~)j_ z-gpNnHT3%0Cg=>(Eeux%a7>4t{uJ;EW1@^pF3Q-HlAClVfyLX(m(T}?)dn&FPCuk7 z(7JmDJf+}xbk&Zj&(fV=6P@H@X)$0lf^iB$M50@dR%Kx5ee9o`r7H+V=?9%5#+ay zSt0Y$+cRggG0xUeKy41dG}gaJ4n*NY@bW6{8{~r`fPM2btLC50jAbS>O~yhs3?no3 zMab^`{R{dksc2F)Jd_7e)jeXt-2f_3r%~Y1oTNJJ%?l!YWtkq#682;{HkkKRWC=6O z%1B*5a12C9y39yTCG)M;l&+G@@8#y~p%%L6ilBH5{GDkpKPoY;c{Nqmd?EX12kZ;A z?>k#1uMn7Dd=r#xPf@5M~pcxeph*mtLw_96x&&Ye};|boU)8d9GjOTXI zh)-F&lJ%3i7O@8mDG{80sGcY{Y|7MI;$0sf1N&t;XHZ@RA8_7POg8Jd)KeuspCpY`-v{APs`I*(HaRh1 z-|IBeS-lTjz@g!97^{7HG5wQbXMY@?@SP&;kU2>6tAw(YAY`y59(gRAQj?~`7fgAx z8i=S7j^a%$*XgZ=A}j=wE?=m*spM=z&R!TG^Io{zy<_vJ1cx5fn9Wt%cp6gbi;*2i z8oZ7y_YEo?xTe2NO`&fU{-EC>^*wnD?ezjTWOJ1=jQcX89hm;DnMdz-s1oFG(7Nx{ zdrkpS*21c^N06dqBDY2fROL!&9MP*I_n&>EjqK#80E^CM?*~8E-Y59I^&ikE;JINs z&!Ys*5=3RM`|I{^J5(sml^rgGO;+&N=WUoQOGpFbi+e(>pmlp#O_4gr@Jx9FM@nFv z+rMkBoDXYggN|9C@=YwQWWv+mQD%MwU$TE5ekAi3KKMe>!T1}qQxT^|qn1~p*j4-~ zE*^nMKY%upg$`&-q{vA43d$~hbIoNmrmg5zKwiTaC=!9Q`rZ3ycTzOBfUTL9L+UW` z;zIEGtKtwCSV1zBZ}|ksjOnMJxTP=#j6^=}i3|gd8Bd)YJ*3F~APr5R-^I6t1agsm z*e3)>3~*49$jYJg72wzqdUg55!0*0_rmcBYA5O*szfHFJQ^g!~LMfT}zXH+sHqc@! z+@Q&*tBKTI%6I?pX6q)yZ0XzrChwSyl|*h&ASdkBig0hPvS^g^DcnNWq9hpabU3`i zT;Au7Hr>LsjBd@<@$n9Tt)*l)LQeF7o?P3GTPEPUDb^qkdSIDdlNZkdl&-v#*$QtJ zKN)LYQJD%u3AbT&K`)=u_8}fSckS|(8eL)2L4W#Cy#x&nyLNF23G|p!^Mr&Ha;Fj! z?O9bF^ufq~=gXls;BJwu%<==rTJiZF{?h5|G2kPgdt{MN_a3Cg$A13xIiwB6fHmYN zXmK|k@D9UTx{lV#^q8Gr?c{|BWi9l$a4RU-w|n6W29)NU9ZXdy=yG1Aa=!Olam$ER z@WxxMyA7GWA|xfiZIH~Gbiq@;o`_zMPam=Oq z&4I+!Mm$rNB26>V{vIjwMl*5i1zhn>(-sHjd*2Ha-yVlMB07TI1T}*?L~}711NK)= z@;*QQ8b+l3Zccv2pgH{_dY>i`IhqqlDH;7~g3|a{zdTsm%7GB-DK-5AyB;`6Mhy!C zwM!rAn_OabNQwjP!MT&OD&|)aREnQ}Jcw#Cy^6MgZ3=83QiI-~Jlzk5bp6v_ic^08 zXHoAF4XMWU?+_(Yqc(*h4sGDoHE%f#As|KbIyL_mzXloKw%5F{WHOQ`2{gD8G-^9S zEgODAj?G?6v)oo~clf7!ZF>qlU@p{eq{?IY(S1GeYcT2?0t#LBoP2WvCa33r`fY|qfyyC|w>aF9Umem7-6`>-xvFA29_ zQAJM#PX6KtA-C?ui9*n8@3V9by??0Q>BNdEjW`9+8|`h-Ik43r@2ujXm}79u;HNpc z8DcDydTc`9iu_E%!pwOUD?s{_v6RmaPMot!~qiF zN~1P-7j0k*H;tnw_12$D>X9Pouki8`yZEN8<=mw!ph8 z=DMLqD-$?CKgfVzIEy3>*VLIs{Q2%9^@;NqCPwgnh|Y3Dfe*~*cG;tmi+3pB9< z#rD%oXDMqq4aXt{|DDL>yV`rX3lN*CrsmcuJYz9{bkgkzM5e|a>Klv@VG;o&4<9w` z>+ZLr1_h{3urxAB?(ZpP2~MJ`&?vzm$Zm zaehfDV81ilnqoumVwSD#ywI~-VqkX4S1z$@QY;aSupz?5sN#rV|654OX^R5XOc-Tf z2>Wu5_A(6571d#3T~wHSpif+l(X6;n*B(~KUY+=M=m$vbskKNCJV2Kk{iXh@=`N@#g5kMt$bPyuz!*6v2!=t{VEp$cX%wCZow(Me*z{`odx(W> ztO*@w@~ZkV>ZpMPUOa4#2O;erJKN}S--hxl6 z+F+XmM1<$z&i;oA#d=7U^)p^8rlE+PJT1eMAdD5y)Q6yF{?u z^_WH8$JY`GVp!6$;tN@Y2QUDsAx-6Y?H;JMA6MB)Z_or2gY7cKpmPhiEvzXRx^02P z_m>iZ?K_V&2HE&v?K)mPI}C!v^*_DvL~u`=Cr-3SvYT}%+d
    E49g-n9s!QuNRqc#HvtuK@%*HNZhESA^#bL-D#}z9x(aWXH9q6Vc znOtxHBATnPfVSEQZAdx|iG7Fn)qG~V6#*G*9yCaD3Hi1V8Zf*DR#Iq`kRP9u8IX1t zbMchIGT+Qf6jdNu4nV4e{J7hK1GkHx93kE7MI@O~1FuU*#{2+=cuB^FE<^*3?d5~Z zPHr@qlO~5oraNX1@nvZ`MHyHtLu7)|OK9FM6aKJM6XxF&NxBC%=C6-nUByRvM&}jh zz@u8qZ%T)Q6C+ZyYY-eRLMmdTLY>avrZz=D$3E0e%QuhnUd(pcbZu8SjpnWpyDgzL zWnpGdP{JgmMencU=^xcBR$#`Vm=9fanX3wR{!@p(VpJZ;txa!-)5wwA zias;{H`MBuF!ao_ikc1uY~!d-hV8w2dQtZxu|KHZ7fny7&t{M^s`0JRXg)+yTbm<2 zx1E2!A1Gry{29lSS-5ia6oTnc!nO97nFvCNysyk$_tL%yvF{($jv`mSsWA~$&kD14 zED|@iGFyZcIs4%ITY$@R|E{G3F^OuR+PX9#cyD-1L*TL8msslJ)I)>DOU0oaNJxt= z+S-qfPh1+A;Dj?B1WVt*09@zy+G2CEAiflOov|~?_llJ%4$4ID)T}t5Q4HC0Di~;% z#Q{)xNKo$hyC_*pULMz&kCx(PLR(yq+>J6Z-&(ej$*DM!iVn!8w?}XK>Da6WTG!ZV zt*B7}{8>>8h;>bJUy#D#2xLlHB?)H#8rc^2APsfl&- zsM#Q{e3U#^J{lpgj>td@8i)&o`;(Hh&qnaH1Qr#clu~4qZ*;I@%8JOaqB}_E?!`&3 zxf^^h`=4`iR}GX6Ky82O8+h^4{sUC99tV9UeOlT91i}hnA0xMfDTF~fKt5HuzhpOW zI)~Z|6vHPd3J4fTUb)yy6Fo6IbY_sfYoGpVdXJtwbpxg%mD?0!af0TwMu;#_HJzc2T2X;1y@ww!G*!O(ZmVfxILo{m(mASU)f1SW<=UW}@Zu&5 zQY)kWSIw=isQMVjJ!j(#g@Fk5^YtQ{+tC{8t_Gy=kUR9z@*G76=Rdc6-Yb+-qZRWK zS&93eO^Yl~x=0P=A3Oh;&!05v&mO7csgc8&G6*Wav)>Zf(n-dNCt7KcleyO7+Wbx` zrM2X;_QR*ub?Oc{IC#BNxzi2jzGM>LNx8P}`z8rcq=ehQLm}&hF z{D#^4g~*e2yx$7p&qtyoQ@xpueiu=xAl=WXa$5W(7K-F)v8vwNU|Rq%sS^5f<5t-6 zV}>%pyc*a)b627Sa-lg<^e4qpbpQZaO->b*VDz4#M9<23i+Q42@0(-J(e;+o^BpC>XTD80fyK^E>o`51TaMc zw36~OQr@m8Y`2nZqT7c&4bPHr5ZS~z>oVF0in3|;@8tgIzzwyq^7Yxk1PDb(GXM#? zcFuKH{1hwds+po)hNvJxHZ>EQE6_L;dKX71ehkinrb@mkODT;RHbVI-L3x6G2$|S# zv)N1d#(GGg7lK6^4L{&;O~@Ukl5`e92u1=o8ThTx@7H)}?m|++Rt@I%d`AWsh_TywY~U zppTE(HWKWMEqcmj~=lHGzQ<$bP!6P5Rem53_XS#Ey z(BSp~WJQ2O^-nc{8Xc3oiCw6|aswIFOX1A-%C5*2+XmTFFzr1dU|8%yiUONJ#uCRg z1%KcnQ4DZ6S57gN9)tWmkeyqW(JznUcKN9ohp%KvJjpu(ayyYd0o@Qhj>PgA+2n@} zHBf6|ev*h&;`p8%!7(=>r=^AXLac!?o$_$pHlTZ-dRt|#$2S2|6iM98cA;T)Ocjq} zYMsD4Vbhe(&qnO)rh(*49+D^6$fs@pNE(z_{3CNtfW)H6S1Fd=3BC05Z9-$a$EvdY{yopMD zNA7B*4@c)!dZS|iYIvjGJU(m?z-)-Xk9M)7$lrRb zGyZfVl=W73y54wF+f)vz&*ot9<|wCO@#EgTqSTg$Dg%yR%2vUjH1gngHu8X#pz5XTH1rchU^jE?^0;61t zB{90ONbL3Q6cO5506Gw|k`J-=&52TJqp%zSW0wEPHwT6HX)h5T)(Ym21*!6uhzbQe ztkiWBC4UH9!Ro4j359n<71fvutbMnBC_-3?D!OGbV`O_>Z@5NJgkN`$h{cydmMVR!;Wc?L zZex@~Y@n418A)4TZec@8f>(!OL@L?3NQi7fo^wWYKxGR8z@TvDXX1N{huo$9Zc;(q z`?&JGrw(^$={^aIB9}IX!UE~IF3a!)dk~f(>IT?m)&@9=?skH4J( zeQU0B4HP#&fl4yd!6T3>W76dGkt)kc+a4EX*Et{t3&}@zb=}7RO5!dE^^&`;8(T(=+j01@z4 zL&M!=@^M>MgL=y?)OTzgkg8tO*xu0dfUu$Y-!t<06vgB!5Uv%biR*>rSnyJmuGklT ztGiD$`-+BQ7r&}q6wiF{=tVB(>N9SA>HqQ1kb`I8cQExoN!Yd{hg&C6K84;s`qX70 zp1huLGsPUR;3hiCORtmvWLThLq<5Go$ISP!oZ|}=^|^{35=*_s z3W?W?!R8f7K3|mWg9q24!Oc={kqt4MogjU@MD+E#DSI z;8>+J;gq~{JOik@AXj?6VAjEAKch@QPGW2j>=OygPu`svR`iUcZ)qXH`0E6Mw>k9$ z?kAt#tM>Ap&TRKurPT?CJi-k;@<))|v=p6%%=%eyMlFo*AOS3|CUMJ_7#r_wv#OR&0R(XmugP8xM@Ufg$FC^(Vi1D{9&AG zXBSDdl=o|CjWgX^K6A}o$2Jm}s-R(yR53ih`#A;#^YbtFyvG36DEdIsJpJ~z%ONlm zZ83jW@dzeQSqfG-^cSG9onMi=it33aBhxko{oZ?&9B}^ApPMuxt1z^c4L*(`kI&HI zJMO`2OSs^zkikYiCBKTB9^Hs}P4PFgj85UG3CB{rM(-qd#RE#J(p*q#yVuc`b7T9^LutZid zmjT0wMAiUtUIbP>dhB6~LxsE=CE*=OWDgiA?ePkpT*b!zC-T4--)Rc_;+X|7yz4-v zUIs-1GXYb`8->F?0%1~W%nB>g;6u#ZCTo66nL%-~yuAukb|R=S;74fAjuiClc(s}u zK@lRT8ct!hc8#L(Wd-h#WQ2qq-|Ui*q?5ZVx%w1DcSAnkCd_%Kxtb1R|?NazmM8(f%9lbr9N59}h^3Kj5 zA?~80RX_9*R(tw_B%B(D2j!6bN}t$PFZCv{*!>y&AC0#HAfkWun<3UVnSCJM^rNbsc-yo1nJGZd4=30&L<;;GTdv) z@d8+Z65ar=qAk$DRyZM!Iyhn%aMhG@xg`V)saXW*!>>{21`od?q_%ysTu}FQY^7x?4X}`tPd@5{ z8Lv`KMRz`OZOa7@fc5@euPqm!b*8e;;%%{ZoxzxjkAaz5e!}f-xb4L#KzS|W^ z-UKP3yAmMaQ`@hfKQ}>urD^Z;9KX{Uo6t9zJD&vkVsWAOwoa9V*q9OfHLU$X&r|6x z+H6$Iq>FsJ9aLuf#E-TJW~YX#l{($Q_{&FEl(54|Cg*D$kEqs|uFqa}*?EU2nu)ogjZzSI=~>9?RF zOUgL$kkVm-yUB;;GqGZ-p|7g@b2#K%`7*~N2!=iLdQxO03iSye+mJdhFX+}lhdUDcLJwm?pX z=nIkVOq!^5oGsQhC?+!Z!7lv=xQhO7^nu$^`iV52nmFRk00*fK1?>bo#imewJE)KG zHCRf5sv8Z5ymM)AF)7N>6@_`K;+-q^9WA5!iCEF=?=~uFnRs()B%0YmdUXn&$&wPi z{I(5>u>%q1c>^jaCVi}VETsq$2HaXP4jhCVQjaXnH=i{*ehH2bVkPpTsm(}(_MK$W zx7;L=W2B;2xwtJIhkrgo%t&OV{p4EN$ZsJAZ>2$AHGsi$Df@gh+)hGWBI}GlP>mQ7I-hKyD)?ABWgT*!g>uf_8WB~dH< zGc-^k%Q06E&BZtPDQCzXKjsMA;3MUAkD|!)EjY{1+Etg$P!K5hTEp(@1jHsR8@$@? z$_#WNDx6pu_I35lwY!jxlKh}?rSCc+C=`-3l;W~RNbITr4?fXK@Z0^|U1=&@a#f1; zehJ=DVPR%@0Y2GaM{`(^6=)KOM`z68XM^i*W z=$!r%Y^RA3FnAEiTx^~1;kk~{ZYD{TGf}ExozP$XZAcDuv%o+Y;*x`#PwIW5TH1Ox z?@)+Fpx=6w08nmO!7eg_+oCFH%(|!cQNSyP6;l}m{SC96;+|6WtA+ogmf&G@|9cU1 zvKkg*Gn*>RFd}pQxyXv)W{fcg8K$xQHzjtFE%D@E5q-V#V~t#8`o26i8hb%{dsOPL z=*Wov4`Y<}1g;kHqS~@H`UT+?$lRkM;}8mVl5a@*BZq>*rrvL(8+og!UDl5%ox5R& z1E`_wki9nDVn@7+-XwbGCe9X0YuAeYOp;WUB6LF)il_9cKE^VWk=^MsPLC;hTU#(R zXCD5ip%x5v%qded%|o?DlPHdWLx#qA5wT}}l@qgox{D3e(ohEe7mixh9XZU8w0tSU ze8rYQir46hS2MmXV6Mgh=d!oeL8$7aVK6M2HTaLt#0IUEvoER3-n-oV!_8u)Pny_f z0TKDh;fhyc(QD>?3wM#g7R z?QXg9w=00xVH{#H8?!52Vz>3~U(s<~45sA@3Rg!InjAd^H!{kkTX3zTv*{%{*_>E9 zb|Lv^G`uxc8V>_}6I3>ZSobO&Nmhkp#uxX`z53%bM1je02=)63K&5h=HO}vDsdz~T zg!87%zl)^+u=Z!w+PD;S@-H*1T~t)~ntsBZ8X?4SXJW}8znH5z`l+70#0H)sjrJyq zRj+`c;;&YBAFLD4KtE_+(G>R&^8&*g;I+Tm8wO#-Nw^73SoQ^Jg{V9IA{e*eWl2<_)p@&n~gMf!L{pJdoy*W;c=uq*j{PLCslT}UgC3FppW}mY3?!yv#KmeIEB$o69xESG;ak5 z4i%o$4ovU@!uK<7KfDVWzfQv0*J=k5cC|az{4X~3TldC91Ri^tv*?t|RLGg|N;HuN zjN(na~-341u1^ zK&DV+zt*oARhaemMeZ%KICc8jnm1IwcW32*>>kY27HShQEsNdn}pkT)7N&n-8b> ziwR&$!T|{OwAiPnVE#A~-MQ9QyQw^0qcMkj`7>YiyT52f%`A3j!@_>N}%D}R# zT>I3vbZ~+|Iiou(Kpu-1Z z*IHru`}FFjyG!fH#Cfj0Sr_r`d`FnMwj;T+2V$#r;?QBxTHd%N!X)+sdiX6Vchz&q zAK(+9g#*K+!A!ptGX?sdD!x}+C(alZb(e1*yJMbs6vCY1z6p<15!n*cGgpU4_)&t;Z{K=+^Bd<^Rz^ z#6uzWQ6|xuqUyygyqF07Xp0*?-KmVaa{+Gg_t;hZbe943mH1|>a85d<&Brp8>)*4D#H=l4Ug^!$p$$7O^ zcbCO7zCAZd4c}&qY|Y-};us^;s)uUu#gjaV+?L=MchW2eDS+S{IN&yDx^n#--r(#d zdBnTf%`G*N24W=2KprRCU}B@H*$sX9r@^Vhj9wYDu8cemebsykyv?{-ejdW>nT94t zzX-+N=>=9CVoy){aizS)K;ECNi3MFHtDZAdZhQ|2w?PzpCL{WYUAfGQJD(B%+SlTC zdyNe~Jb^o$g=Ip(tZ>fJw3DAm7GY)TW(s$PA}s$0Zt9!7C6u=WlB!G|cYk?^vP{OO zJ()L!x!dSIYfz&6C%|a=B#Vib6)K*Z?TrSL=UUmBplCDbqB0d_KmZ8k_KiCKr?iy8 zG>)R%WGRkNvcZ1`?8{HznhliX8_Y?}mr4ytn}iN+By1khSEl3;%4}6n-nT$m2)4WU znw-7c3)kPbS8dWd2f;l*&$eazc!5qTh3O5dE^Zxj$2UK4XgE5C60xhF#NL9zH7pf2_!*? z&{2KkjO)w334$8X1#4me9PNjH!z{d<{0}Y@h!af>Wj%3{O!@pBg?(QM@Tg1D4MaE6<`}AJju>G5{y*9^YnUi||ixw2F`8!m9CiY#rmc z(%w0%QqzFj{2LkY)UqP)x}2u`y)BK@-fT6IATzW?&|oiD2~KG8Iw6HAtoA)-Da|r? zIipX6Mc^0xUzGgP_H$j5y`-9aeg@6tF>}suI0Fs~N}oq@eM0)R#cGo|rD1UtfhW1? zlD4oMdWd{N2t8orOUsr{ZE({E%ZQj7Zqsnmgk;+d_w2ZY1o()kzdBg~$ccAIusSyU zV3!(Lo2<^-s6^iAAFPqjGH91AcBNNe;E`XLe*dt&mZj|(vSAMS{e(Tj^#*>o?eNS# z+}1l93Q-p+;f=<3qR%BF7BTd2-S8vb1i2g%U<7=6O8IF#z4~1GRSre?-c~TCJ4Myk zy6_3Hkt*}HCIS5LYrL>h9l#ts9kRxGt!z1~ZiqInBhWyT!uDXPkc+W>YLJ2k4c1k= z$Lz1Q^Zv%gg$bgndfE~Wjp%2?sZ{% zRi~WUX#d9S46HmD$Dzv||3yjxI;M-%ahFx2 zSg~l;Giy(6WC#q{q+D*LP;Q2cu~vF}=p@+!vLiw=`wBQ#Khm~*3k1!_v5-8TA?+gM zit3Qb)TD}_-q~|<+!W9qxrGtgnkrQ~Nh!Wuh(>=w%pKQ#rs{s{b4&Zcz}c9fM#s9A z@$r$t2>fnvNDop7RFz}e8gb!Ua+}VEGXA8l9v(u*bpRjxW+AR_JJyavlqJ+? z;SC|^MqK(MKxCvK@-)uDbHp#lTCl|@xId=499NHQVLKE}{6BgW7CA4r)(n;N+!KcY zLrr&LBqK2?B)h=~r)EjANkQRJg|Z>DIkh0!oyp%c68zfW&Ob4m2%A306=F~Q`|?K1cGwLtuUl^Zx!^Tc z9WyOzNg~K3i~<@}WI60@-0-N_L}zv(4*h6@%+p1b$KhJH2cv8&2C(kik_Q}#cI2B- zAKvjS+1%fM(nXD|ELnG|Q2fWv&sk~(mjcV+wm-qBpbM(hZ7QZ%3cIzychGaK-F$Wt zPQW@Z1w}5&K&7{)dHLN~@B%rFy)OTuQFY9Cd#$2)_8)&O5BhD-+AP>@0Mk_DQ2+a? z1orb-2t~L(cvnIArzKvAv77u6Lbct6U2yXcoXH|Kwe~0YMj~f{_M9q&w=bh5uW|Sq zEgsvL$vH^jNFHANo-ejAkbTYAB>9O1KbQlUyMCF@B3y3>h;|8H#eK1oqy%Uksj44{h zH)Sko|KG*3JN!uP>mTZIVZj}E{tEng-!Z{4Ma_vhMdq> z2}|2nn#(+NSeAqk^ky*0Vyz6uIk(+>IokDyFnt%|A7U?#TGd#=K2XWt*xd^N_m{PB zL_x5IZ%07ng+-oF_z~xnLUiSWY#)66#jFh+?#b&E5dG`&)nvRowlFSpLohPFAv_DK|8GL z=#Uc(V(!4UCs4M39e&`8g5aJvYEomI-A4=A73s$Ud=aCzYq-M zDXB18g$NU^bLYQA;SK|sB{137+f^H(#{C%AZwrdYroIv;5+KSL4*u}6fgy5%mi42t zc~$)RRFb+!QMzbb7z%Lr9GU~egKu0{Xc^in=C1iu?3rU{2a+KNneO^DqVD9;wUwC; z=iJkx*6gVWiE4}QlH9U;rnn{0O92iH?(WfW1o*%>_|hzXbUL%qr9ue$rfmZ;m`ZP+f1}w`fpPJ#~%s=ijW$k1=`zG>|$U%teqHbTrVMjke%ob5JRXT#kGv zdznB2rJQs^ci=$`|ApSPLmM=^A@`@bz7q4^S3%DT_~zf{$j9IQj0{FBpo7dEgGEjt zbyJYd)|J~S&szL9+69Dfync`J(-=PEi5dVs7T~gPa%s>W?PD`t^2a*kufS^FqKs0m zJ~*m>*t`=dW!>8Kl0vCfIKI2p)?rPdAae)cOA{VJOT-r8F}kiBtTQ%+*Z(a|2&_`n z+R7ETX83ORD^6`O)OKt!=kx|QyO4c5be^QpYtX~O?nvn52CYz3-t;ih<{yPqx5sh@ zrFAthHcRt2?nFccw&JQZykN;3)W0+#6hwO?_CHhLVCH{dIS19&I0kNwXuwIZ^lM(L z*ztJtQ*&_U=PJ22hnW`&NXdM#Tp;#KGQwCI858etlnnY(fbE&k+!Q!sFPJ85T4=f$ zWzWh~6loxFJ@=%aYLXvw*(PNv?+65;&hkv6>B+vCsx)o$gxh6WGDO(HNzI~d8560Z z&5(BwZg{mEGB^et7C^H9Gw)a*dWFcb!sJ>s@|;J2L%ZaSznHo$RFCEjLK>s453H3= z;uqkUx>2GzknsQt+XUGe?I$oDJZV^3fUf_ga7aTYGpCPU9qQ`FB8As_TvN7d)>Ms{ z;5PtKVcQASBHk{olILolfcY>76FI|MUx~v}czjG;1yPdJ%4o4Hm0;~oP8WHZZ0ZOoQG8oe(6n|ejs5aPdlZIiGkrxz_;9Fa#| zeKHxx)D;bbDot?zg8oY*bYKlgYmvFqdC5=l`GWd^>OA%YOIc!q?N_eA?YM=dTLZv8 zg%c*R$M{FV$3e?zqG+K%yftf8Gy7*>NF?-A_6a2qBlM3w0yqVYgFJ)sb1th9m)fFU z;)q^Jy~XOyP+KHYQHH%#ddV`h^#cI*ohCo^+Jw71Fn=5H!pF1)r5H z4)7>g4Jz(9pbwt|$iEv>ZQ5BAP-A8Q$u5-TCrnC8h5vjhh1G{VmjWo)h~{Q7X5>XD zLnBav3~1|0$h#ryG@4oCBr*kTGED*}&*@T^d8Zi!`DcllkucFFKv(^d&SWyZCP-9u zpc%lHMg@9bNj}OVpu16kKo)DbAVT=wl9~~eWTvi}rUfyOEZ5b1?*0KhVvqb%XP$Ir z939UX&P z8~NJZb5$boOmP`FGc16Q5IO{*f(SSaLu1cY4;lAozA$w17p-d+9>^~(G<*D&iIQvz zX<3NTB=M7G(WZ@NXoprj;8!dTON*#XTt$e0e-`-rBune?1y*X6nM=Du6FK2E=;RPC z=nS-LPrlS}cxW)aHY9_UskAl*=#Z{y-m>l2J<}$CEgv~Z4Zt&18{9bJ0O%Z=Y<|?M zFRWg+ET>JdhkzKFxMZF8sck>y34?F;u6u{rUJ6HI**YWIoy-u8Hk%q~VyHO_?@o#P zum^iys<1(ikyx7p#-!vHwkOOdHcFL`BA*n*P}yyQy1c>Jn@jK8#M?6*HfFQKM7&p$)WU?g zoq=GOCsOtG6%g&+!Od(~kKL-tfVoupRPy%+M zJ>Zi^_CITERLKq}X^r~ML#+0)SKOhi3c8!Z=4-U6FeWJ5e~8)3^>wkh2%s0U)r>a5 zlc`dp9bhoi4-YS*&SogL>=FFEM369T2aY*Mn&#fZ2xtEkwK4MJfedXxH>i z#!7?TpQ^})2!yMG%E*EZm`r7VYAkWmykIdKhHA9hAy&a2h{MPcs{7xl+zWsIp5yhM=x$w}0RaXE?#O2<^k%by9nw)e-v`h(R0Ak~GoM^`$dgZ- z%2Xj7d}i6j}?8pyTAtUOia+QN=l7cCz{ms(pLeC_=PLy(B$FKEFp_snENV zarI<3BsYap50wCwc<}u@X#n-=0P-_OF8cu|ysUWMwC>vw@K7Hom0WA4Fw!Flp)4a# z?+6Zx|Moy2dwh}kpl@#})~XgnIlp*MwC6r1?N+P}#jkM_+4?HmnIalbxIkzdh#`Dm zB9_QrMtJ&eJVXb|I*;DW)1SKA@W$$AOPAK#Rbm-Z>}=~P{cV97kohN7OlPJ>G4)IH zEuwPYd?5@tjU(Qz%1ch8R?!5ZfC*AMtdR_fQ*A`7nmQf!_305az6ud?;?u?OgSN^z zrVc0{%Wx3;_xf`G7NMMd_4!k1ZH-$IX+m;Nl7g$5Iw!7wzG*T_$~MYG+zn?ALjN-O z7*uV@KCo;&NbFVGu&LsWBmJ5i6o*g>goDJH9r_OTs@=)8jXnRBV76}3Tl%C zXrwxYSQ7?=gXtx@%R%Y;4mzGrF*+|8n7B=f&dvMPK|;>h5}lMUC4D*+&rBW1A8HnI znnh_@nV?yMLZp3?Ur_(iZfJi8!2g+A;MJa_5j}x2Xe;lH7S1s{>MRT(*H(Q56p{WY z4-^qS?a`g&(r8j+-1v)-+l~$HPO}k97H;L-dHk%fVG^WUFDiw2f%trIV4W@<<$)g~(z)g&9`NNsODIuV>oeonHTuY@@_O|Z#tm_D_%d&!(I_x3(-uMQ_X2a~ zFOqPN(QHanD7>++q{Ok!GbUiCa=P}^L^l@tsTAGCoJ)|JILe8@frz9Nc>MkDjxMgyX%ytQ)tyM zVG;{(w*V$_SVqEUD0DCCLLPd0iC=d#K%2Pl%ClBp?GP{uj)K|dsT8fM$LocaD_M>o zsc5?!R}|0?79)(Xln{Zo3$OCcujGLeP9QUH-y0jl8zD0b9UTyU#UX1{7gku95o46D zJU;^q97P$#*z1%P&_*&_i4MG#47!4Ev<^C%Zy0R(>(qEY`ZOk}nt6%nF`^r?u?a0* zyMOqs{+7Ua?!X$RgvSAeb8jcbSgzeYT712Nrh{g}o=h~fG^$K~_AP^*9Fcy5>}`ik zA*&C1@{`jza&Q*MYIdD7ID(5Xq~+XFOeT%AXUWs1bLdleyeH1h7wf>)CE?D71;Hth z$$ZR$Zx5N&LCifIN;QT?ghd=uLp2eARCj`XLLsU1pDa5j$|6nW>JIux?h~31e8;MT z2{q%=!6jcGX^0ud(}U^S5v1Zv4{cb3*%%fs3w*?4c=N*qZ$9x)(oBU%!Wx;g5a2|5&)!s|9@Twp0ZMJiFk0av!ND=Z!ham;f# za$4tHtOsl$Z2mDNX$XJXu4$Jp<1|I3B8PEzhnTsZ)+SoX0cZdT6$5Vs2Zr@sEHH7! z%2yqHZW-xof_$+Q{>$l4p|y7WYsUNZCZj_oQ~xQGUSXL_&_2b_C+LyFH${=kj64s4 zU`~KtJ%;KUB{mKz;Usvse7QNVTC}NQ?)1c#tP&7}}wbpS%_2;Z~xiUokn=2(A zQbg(b3X5pl74WHU-&l9ugNw;I*8an2i zfh(*p*R&1pUdlXFi())qD&iQYoI*_@HM=f0A+>)I#o`lba*NT3_w~kPM`lu!%hy#W zpI`8(FlnPb1h@J}4_G@Ps}r{OB5kJe91NF8z~;ARCHuv)%WcGAv>FyGE0f>9vqE4gO#^tTS&y(PLrz!=ji z3*6H|gO&-)p%u2lhkjZ~eD+P$sAcQ4(Euc3dh#ylPbdkWMu zs08a@M`cRegKgGU0n9h@&~tuK-INgoLs8hbte4`2_fr>Y%G7oidB)!oE7~x@!`$xJ z$#GBqE18}MIGyi482rz|*nzvjk!=6?$3KIA7&eCDl1<~=9E7PdXlw6q!lC*O8vV9QF z>{)u8{mYoDkNZ`)c?h-!oMdAeP8wxaD`wEJZ*w=-266PSu%wr5FT`@iPVpJCr znXRJWehQzp^`Vh7>0Om0Ts=2ZqMkRKEbCiz zr_nj9Ch+6+8YZla1;+H!#5`kCCQy3WPu&T4UnlvOifzM%>H}Uqq`e*ly%-H{QQ5s+bDzpw2?9te`w8KBnPQ6$;FGBK3yNihLMI=9(M67bEeZ^;!V#@-oaPh ztH0(=K4_SVIsw*I`YO5M3)hLs*Z4&MqL8xGWD#pOMat5o-5( zJKrRsxqi45xJ;?wK9lB!`Woq+�^l1#yluTlPmS_p=#?T)VxImIADcj8)5s0-%8n zbc`?BDi0{?sLI=z4*Kn3sz6>TpiW0b+uIu_W5qj(WnM(2>1ZE`H$*8GbpA8J;#VS< z$X&_OjB0Vz&(YjpvS{*_tL^g?7#Vhv%W3i;x?zVGvryTFg?pLMU|Kh!ZFj$RI4PML zuza&rYCPfYboXKznSwz6SlF|u{q#)J$_)GcD?|6`C|L4dkkmYmc%o>^pVaZoX5l8e z%NCuc9BEF}CG~7bOI!y?6SQaL2kyJ*U-T*B|Hy2lQ=d6|@4& zim+L{YqWsEuy-dX6sMe@`n4)|(--0yxst1nEh&C^!Rv@%FLi=Ey#Bj(EAGpS?$BJU zZy+jXHURJ|8f3q-R0e&KVUCOM zkYJjkKreGLJUL5Qkp5CmSK9S$!78R$s~=M#UM-@FaGwfCMCGbdmo1LrATxz;qp?R6 z$-L2)g=6?quh2Qkw5FxVchkbe7oF+zc4ng(je;rAnjCUc!9mnY2gS}f70#285KL)Q zo8N*|QNj!w6VVD%E}Ejb2Zic>s}eg@;=Ut3sF@hqZVq|=HXmBM2>3`KmhlZYRMNSU z|FPq(DFb5hD>apw)#*vDQ^K|OBj>!Fa6I;TgJ>9TOgbNXS8O^R`oY!y4BJ32>tjEw z8$3p%$p)GGM%xg_C3vNSUyv#PMm?bwjd`mc$vd_oAA~jE3oXWlp}LEhJl?mh#--Pc zNsSFo4(~+7AJMBfxf*#tE4|_G91?OYpyPOk`aq(KN;sPm1MEL_$NL3iv>i!y%TzmrMy;}fS-tA|ObxN)uAoa1H;mQc1og{Li8I3iB~4pH0W z0L*ja^4mvYN#et1brNf+AoF{?%>XBm;eQ%6C~dUMM#m#$ zhut(wug$nkfu=F%<${udc2?$_?HZhX%f)d?_nEr9M%#KD@C~al?_W<8Ht28Nsr8VO z{9@Wf*4rf z1xmlJM`d(+##x3c$GoF_t@6}FT$n`ecbH8V50pdu2Wt)G>>W*cCAE-Tef@y=hGpXY zqn(1XEW8JW;psilu7fr#^>{Z7u|FK*t_pBijz)l@Y$`fKJgu+?hYRR>bkQT?B9Fn);M=gtye8_sP)^ zoh8Zl8xxSSQLl|JHrJ!8-^kWb`iN?|f05)?$6N34vlz+_(y{45jGWE!#k+*&;kR94iqQRVW?D-7$sp;y z0((@Ud`7@ukyV3aqX_#?Y3*+9=^kZ&$CC8riM4S|5XMv@?^o&*ar<|P$-Xe298e)d zp}KoOL8#w3hh}*uOuRe41Cw~Y0XB~8bOnOM zGPPXA^_98KlAW;k*Z|Dz05a6l75Ox?XDjm`S9zqY>)LsF^|c+f{W^sse(5FB#Boh= z^QC^{ADTv_Y$|#7bQW>bZ7Bfr_sO0%v9!?5}LxXtwUsbBhHJDiJu-}8pEgNW&`TmR|nYT!oFDn2 zv;2l6m;xh$hnO(-9>8xiP89XL*~>JYnJ;IB-A)yVx}gbwgy@dzj`ww+WRozJfFfX) z7WUGNSl3@N8p$PT-&M495{A)J}(x4qR?11#rHj5SLNzVhBp zIO`_?-s@prW7BJ{{08?{&L}hET!;h)r5WEIZo)vwG?9Lme};tI)?9R&O+_)yOBpM4 zGuumI>r-1{Axx0vc1qOvbVg+a2&|P3W1APDO7yC!EuZU00lw1`I0lO6>N;YH)*9Rw zY%ygS3s>}n5w(hI@%ma}H^q*`1G8~jFRA2w`y^&g5@ zv$tl%HML${ATudtkAyd!7PMR6fqjN;m*>Bb|2wpVOW?0v>wmc~2LGSj7yJJ!wB&DJ zJ}JX_Zh>y1VRpu0dR7XKL8^AL&QZ3eN@|wQbbo*UI6cy>YIbgRhK-38l4D_pO8OuW z@PpE3G+{jum0@xgEP6ty(wFVR5iHRmh#gWkbC@_IJz}*q6RZfUA}~yOst^~+FA*E9 zV6-&#`vj6RwE@%&3*k_?Hmqe^+Cu#XyS-5eVpv=>HMnV)uVljBoe4Lazy89m|byBzQ?QVsbHkvrI#+6p7fJsfb_!5cvRGD~2Dc-ASSe?{; zLccv8_)G#vk{LL=2^#(3q{n5k&{eTvpx&0yKf*BBF@-#UmHLFcJsMQSpvBR`($dn?(&BaI%KCE)b|x$@CV{Wh>uY`cXd&#qDChRb#1Gc%$Fl>ufy^bpY`+gW|RGI;M(ro zbxziLkEufMUZ1us&o zS?drp1RNI;x`g9JQA}waBpyU9NO&{Hz2SHb+d%b5HAjtxID0jB?L}0zp2rHNdPlMi zQ8fU?EtC}VQfs;?OBFr-$TO10{n%^zJ`0tJQY@oO%$fcmSs~1JD&xpm|HNl7jb+E^ za8tcu=l;y=tgROY`>-;WVA!&vvtsC%Xu{kAK5wa6(A$C>h@-p6iN3(b}OMt6_ z8m>~zn^Z&&Fl{pR-48*BT8QK?-GTJ$o!yCgnB*l!zFy)f%v>6}V((FBw~&{Lra(pZ zU0JVR%)9!5toeKV9lmtN$36dn?0Z1}*Su~`*t)6K5&Kv8M-pFK;6bV0gOJ%11-HFU z1ad=@Cdp;#oRdOpL?i*dBh`l`aPrGTvYtOS^Zue?NHkRG28ea5&@7LW)_$^p7E&aU z3A8v83LHJgCS7gwJaW;pZlA-Crsi02UkzK!=KbE_sq$Ki zi_0=EW(UK&y)HmuR?>z;Gu_Ni)@;ivX1r|aWIZzwW@P$mP{hQ-`~Ge5;J<~@u_j?2 zChJ(jEV(5?j-ie~rlmbju+KY&6T)mfcTUjOD}1jyWGz4Ja4Z>FFzCr2mA#U7cI5=*R(NeM{6FfTSo z$F3xN+g1|Fo6zlsk{2?qM$Dyjj7VIyV$IISI=2 zE+m#P$q$l=9d>~@GzejSG@9naVnrOC&N0cCXPzt1v~=Y=JNfUW^QJ0?8|ewR0hzMx z39sVRq#A(g*_=f1)4Zjqa2c0gO-X@u`BrerjXV0_Ka!Xx-*Y5OkOlsSbDhZ6NU;eM ziy$!?l0vCho~M!3Da{ z#;TxmPztKB^be53ZT;s}xv0fehpVm1uE+%!eNk{v*gSVqfu25zRcddFjKc)hVHoAbh!A0d3De%q zi8lTaEB>ECZoj@8)y?SKA-)&oZkVqVOocP!&CvIocQ@|sKnVYuPKOjk6*W^?|2GUD4zL>CdZADZ2s-#E zTBg#z2Y4!)p3AH56B3!x#f^Zz1arjrw&Gb!^p zlU8+`;@I{SI&X&=@P_3}f05}@;6b)UKKkH2#e6b&1S4M${k@gu zlE!>P|KeZ_ZSvB5_T`-cMc>Lk?YvqCi3d$J@DHJIa`s?BwwX%tX z_11zmqEmh07C%2HekNXeJ>{7Pul6FxMwKV?T%WxI;nI+YS^RhO$<0mvi~u3Lw=r>h z+O5D+!9XKds-Z?TmrbQob|6zF&QkSC6;itSqku-RQUR%~ceFYp2>hXJbXX+?7D9#5 zf#V@@FX?|LoFsWC%PE8HkSeoNTVH2l3u*_^_g*k@TSZn(Ir%rc1g;q87}6qDj$1h& zIV|s5`AGVnDy%99rstX}j;IP>*`!H9`LD8-!a!Czvj*nm3up(2b0ifT=uE(aQyqAHYa*i6&{Y`tLx>eekj?nF zt$*{+e3{|r7}ZQ4gZ7^P+AzaAeN3~vKnLUGHZ}M}Hy+vE{V->BZl3Z2)}Ri??mJJH zv*;Wxr!|$KN#dHjT_}_N`Ob&2zp|y{$NHnFf4viY!}FqiDBcG}$gY{r(|{JvvrKjw zD^F=hz#ODqbz=5UVjTM!Z-HeRgz+~lExX>*=RpCJl|tE3P+GZa*A3$H5MkqD%aWEC zalVA}*KV5YTg4l+K!wyF(`O!mfX>%Fv(x7mT13Z(v#-JJ8&gS)lAL(8)PMbKUw{`W zlh5TZm0=9S1UUu97g1?Kc_^2v#5 zyreqTF$=;hLk@?DtIO9TM8)2R-t826=Tm>$7}-Zx<=4016#jcK7A){|F2Mkj6|TF{ zjfthIi-g^FA@4xh~6&-CJ0YSA3m3WC~2EnUhpFBq-Wdm_!Mg#;D81ea8JA}{r_A_ zx6$CX2GE|kXVL81V)IUk77lKLjOR8${R7+)*|sc+KW~{1wN;e>vm%{Zc_1Zti?}XZ zO^i=-!bv>1;t5nk_3qdod>Nq~qxILBSk9F_Arf~~R+-@zw|qytBM2f9HKCkaw=6PXf2F%B=gNhl&eO6rO?Ro~LQ555}5Taar6QZW(4>P=8q%{Ef!e>r< zX5eEc=6jtWRgr90ADV@nfYGMCwt|5xZ>;+s+C z07)Uq6^kf_-9yj&+?nVD??3TQkT2@jh}43o8~9pJ3M|t-5#W~z;z{d^7jBt?Wcoclg{5Io2=g(QY}ThY8=-h zv^D2^!{1?|1?bQ+!u)79lH(0+n>P<3q%tJENo}F~j2?wR#A&wDs4%_=HSZUg9nS|M z!Y?TrMZD%}G-i0qzC~FGz-Ia?klu(qE5b=n$duO*TPJGp51)Q>@4*sClgd%QD6tM}%!myTlFboR zD9Ta@`f=|GyWn;)j@aJhvk@xcg$F*r@SnyIV@{;KJy^F=Tna)8?`tS4S>PFr2=$<* zPy4Wx7%koA?9wgRRC%~UeI7f&{JChRPZ(Oo{iP8#H1Q^AR-wGa$j+Ba-euXy-Dq_1 z?`^7@-(Ku2vN!dWT%aRl(%osjY8{qS^=vJ07?ffC;Aq19Mb z(p0yc`EODduH4@aHKs*)_~Yf@^xW}!jx#=b7pP@6G9^U}T^M+Y;@r*DaZ|%!QRbyY z$@D4Ps@SBfHQrxAn(h^Mc^)W_yPFpmz;Ow_;9C}`&iB3Ad(4ZVjw|1aq-|Hi%#0ub zz9Nfmgh3YIce(QR=W4|8N6f>~V1`-E;J)KMZKGo|VZ+ZpTujj!5N?#K?wDM6k{#uL z{(gMcc9VT{#{k2>_P4<0OQA{(`RF$2$R95AVd#ufq#B6E z8y|_Imf`-V06e=8brlKss+2G0F3@hjxotR2qQppGP;M zaY)8Snr{NacAltK!^XT{t?^mXg0ChXQ`ktp8MZ|tq*T(BR4S+fklq%Q-tU)A0t(8J z68}Unv@t3K(JA!he6`qJ>wP_$na;`iIXzan-@h12s5Q{i&ehd% z*zIctEa>q98k>NCxai0KjwulE@dTdi4n>^K;q|`0oxhV(BH#TDe zhfM@+c@DW4D=<#Wv+%pHtinEd+L#lJV?1cMXC#BUtM4Q1dEgM2`Kd3)in(UC?n6z#>B8M&Vg4kY zUtB*zo8hmY!@KG*GL+hQomF)_$KT88)^>a3#qy*nyM(QLtoQ8~aJVdd=bUzbvIH!n zhlBrAX!*(e+Ft|d30QOZc(XtVKrF4WOo&`EU_!Ils;Scbc;a&Bm`Am5$a7HpSmdbt zTt?W9RCk`e;b%-0v-mDayn{w#2k}e`(Cwt%|3N;n={nXmllmFGh%Scv&O!V&ONa68 zNN@KWX!v}nxKpP&hq6OGY%-#3=wmO4*{LE{CitnWYfmPz^UECZ&y3QkM zMB#p)?SouAF<~OZ2%so`5r{u$pTLrbhN^{BCcaX0gYMx1uM<8ey3=hPjDC0cHz^U@ znBkvABqYdMd`QSJb_7uQ{l`IfjCWf5k3vomz_20&|6SLN#|L>3!%pT+ie^M2!Wa<4 zG4&23!>1K)06Io9j45d4Vj|7t_YnoLV^t zCd~E5PQ$k@W_n{h+Q}GC8k~5icCpxJVKNI&Aju${bWOiz-m(f7J{biJgCr59;?g-$ zv6`uj`xUBT4emYR`Q@Az!avL-fS>)*pNH0;hc{;`0ugP*d>NpM6~?D9hOwCk6{iwr zd=eIf>vzCH>)M|OA=k65SI%yl&ybdQ7nepqO`%tf7B?7EU=80rF#$bHEXvagp*u6` zXNv77s2t>y>klYnRcyj-8pO1EoH^r)uv`~X_Rc}f{qmsL%D)=e(6_E&y>_d}+pU|G zTRfNL{kFFB+{Bj4nxY<+W44IKmSGl6D`ibKkx-e=PwmJ~uBNWaPJSsb_1DcyW!p^h zP^nswOrF)6rK&wIdWt!>YVx}sg zY5YBM)l)YKQ8!2uif3tJug?C10DXbNW>P3!N^uMTdoKiQGPKcHY%W2O(s?PuI#bch zpzAbSIL39e4rKs$tD4?NF(Y564?^QCZ6_Xy`ya~Q0;rB{T^9`=Jh($}clY4#?(Xgm z2?Po5FmZRCxVu|$cbI5!*T-7ty!Y#;8%%qrd)pR{yRDUOEt)jAKC_ z%M&FATR=aUVjTPIsB+=)PO7ri5eU~(fIVlq{yPq7l+bz)Me4gSAU9SA?y+D^>QDZh zdFi^`A3|%QeD}x|SnIt~;r$qAB6equN5}>R<@924|NG4qz)Cp)+3Z%&pnIJw(#H|~ zWjf$12Ob$rwaS z=Tvuui?h&k2%Pvk4K8E{=TS{EotQ=4l*A%S3{%t;yGnfPn^7msQ*LM@xfyBS?i*-5 zgGbSx#B;0aD91Kc>9i2Z%@8N{c`?R>m4?EgF@5|p2DQ96+Jt6)@wKAxu?nF(DVA%* z`c+;MNEhH&hTT+v5^F6QobFyAV97J(_l)mYe3qsJ#8`)SN+Wtr{ofmJM{l!98Ott{ zXj@dpDAkW}-P&7fYp5LBW2LIBFA8l}+8!AAgVotzQ%*F3(aw7Jyc^qG)cLKPK z-3r1+2xMJ_@V$*Cigil)A_iVn&%W)j6?A@ut6(Bi4+76wJBFf8{&tD#gw52GL=|lp zL%t7&W+HGidwhEy$#?6r?RE|WMgNP_=aiXc0KCX>NR&kY!8d0z$Xx%!h(2QrYym~?uA26ZH3OXFa=@29 zB`(?BWGKlGcLo;mK-y8D^@Alu5$UTV1W+Y;^ZN*s^|yI9`JT?2PFNU@1qR(9>Qxfy zFC00Cw!-xF96hPd$Hj&-dD$JE9k5xIQyV{fDO?-7V8z}c6Eiq-UdJq39h7s?TQzbe zd8F+juBN^pb%iJvWh;MkxW5T{M@qxdBNyK!6+Z^NC0M53CG=yWrmLBlJ0iNylGvF& zQ_mG?E~sA3a_z4qRM>Wbt*Di>^r)051S8?tK+S5_TO7VFb~EK$^$T59S+^Jrrs1Jf z$^Ha#mY_>{Oq9RF;|3G4u#E^R(uN=T>5O3b|Il=&m1b^+edbGFdu|7ZN*CG|3_J3W z8gGFWpD25W6@GS`!|!b_K6C>krm%~!p2zZ>xDG45pplv&QKmPHClKF;F1_Oy$D*4R zECG#w-_UV4uRUG}knPkr=br71W-dF#Xz8xUs4~=GZXLJ&;Rg{_%G6wv7-T`MkCn8s zWzYVOXukr30@s!2?O$z-8lF152y_c5+B1Nd^@-RxjZZORg!uz)&RF-NqxhRuGn&wX zyc#UHYZ5y+KDu|qdW&D45Q@x{r;AMIv?&LB(s%+sIAJ>m2TE}Yy>AF`^x7ED+UMZN zlpm1a^q!#nkZh0sk-Q)qjbXJg;fOXomcD1^2@x@=!D>D?wi*US!GDl|BS+$Fev<|b zu5$~XeJid|{+J#6w9Cx|5%c5e**4q3k3E4>>97 zxqa#exzP?@qIW=60`@aGlG(jPg{4A1^-OmG!cS*MK8^ZK$R8sZSySJP6e-JBsh!To z_GHhQmC^hXDe^u(wde7Vc*KMksHuVjBA1_8|Im8>;G@-@5MstwmL+l)Hwqii&%w%U zVl5Jk;GWHWT>GOpH26b$Phh20sr`nt+V8S!nb0aGx@l8sb;$tsK%ni8G^2sh}F{H>RPy zldUrUWJ>26CO*Akg_P4WCMT(2L>yU8i5h%lGzEFoGk`p>3_MloKr`x5>@QJ`z zW8oMp^32LIlT$y=vOAj;ee^UBySCKAU>(|nD1+B6WSt?qgZIOzckutIm}x;y&3yZC z8|!?WgrD#~Ih#2;Fk0E0TA7%+{Pk<*Nzcr{&cMRR#Ask({qGdBzum@X|DRhpIaMW1 z`!roUJ~du5e)tpQI0C|Z?sqWoy{2m+t;k5lG&Hh_fNv&O@FfU+&iCv%|Kh+6`P5&& z_OYi#h<~sztN*yKz5m`9C>fBJumea*Hu%er-9Lb8kOt`eGJHV<_bm_zRw`y_2KG2W z4)8h}uzwV5`90TxDp}I(`C`}yv1UP(GAs&ZLE_ivt$4ukOs%$R!Of^1A z^$nu0MdeB+!CCo-lCo;AINE^=Xz!pfXu_iS*CXBKFK%vpHdy?u2ew#SUwvbHYi3G0 z-M=}5rt;jVKVkkC&h(dC(EogJM*L4g04)C_XQ%(o8D>UWR(hJoU#w02<@`6#v#`vu z05@G>0*7Uoj2~Z{+llXA1J@TpGl38>QDHTKM}r_eTbs+fQElqqi+>@Z5P_(AG4A+p zf`m-?MNIf1^w4D1U2_n{03))G0~G=QuR;u8ZbKO(nq2o=CxeK1?@yIPkpM?Vk{+XEggzETxf1qOO{Z3DK)1m8zXs_V&z{+vYga`R33^xjq^cK7UY*k<> zN+#Yk7QUAX1#A)~H!4QQuDfZ*Rspb;cnRF$44kBZt_kre)efc3H#rzV`fXz|#ch=F zTbM4Khb%|E^mmf}nwC6?%_0(GmQopX)fv7$!6?UJFQ-IbQW{Fb<>UJePt}Fn>t%ZX6TV;9 z8`%H+qn@Ksw5|W3@E^tZnEpEo|3Q4u1kwK+--GRtqY;aY3JK;#^%^b4SWSo3YrYo@ z*BS(=k`s866Q~XDn1JwIys+CX_#1|3uBKYN)~BQ{u)=(;n;B)AulVLBB@{kva*85= zh_7r8^x?kJ{w$)piHK$PG{e79KTB-!|1?LD@SvGvM6Jth)c)kQFH{-djKFR^`j>F$ zz+oq!6kGqRbi35Hd%Pli`sDFXtcfQ7j*CB9dLPj}ZUP5uEgtu0&H??|>*Ul|%4A|Q ziNhTMJ^?e@1(I>K_{-Yb_2ff#mMgl;;8aQ=g>4e4dF-jX#I=y_M44=)TSG*tZ@F`8AYd``AqyN3W4`5}r08RS3Opbem zst9V*rI`C zhA@A=K)uy|`zmbT&mB9UL`Y3z3d{1#pGwZ`9+-QYuQjm%l*W|s0t_=S9Y=p1jrA=$ zg(ekSkXB3UJAZh*TMAx8dzBE>U5TyxV?D{?>uC47HwY0mv7dwuuUKmJl6H-svz;3q%rA@tV%=s3@>#7(KWJr~S(#Mo3 zHZk64MqF+gTBY2PnDvSzht;qJ2M}X^gGNMmuzIG7Da#l!g!&4veS+06aQ?fyGRX5( zn&q>}@e*smkASGIeylW`NPpV;gRb}WCj0PTr0F&%h)5ZO$D@NiDzs4+U};L9+6qPd zS=LwDJ1-^z!ZS_@XkE6pPK|*~9Zks11JKn?r*8;nJ{TA-GomuJ6mmdWBPaP<*#&wd zCqRv@)^_YZTsehW44r{yrz!+c*eG~yL%jfC2di^u!rSX^dK|Aux{B6F=_0TUw!Ga* z#gzX|Wh*4hlG9a|q;8*fYkOG;afzUYtj{}8NrUCP&!;xitjl8_P7R3?CC?c*ox!)n3G~S5+llq~y#= zqqcSiv4>g{&ig7QbkBh+!DF}KisTQM`V?Kr0#QPGo+%01MtRN(u4CCaZ!5mB)YC$Y$m-bzTPkNU)dsawg)gqpto#V(!= z6D*&q14TJgiC$xm#1O~IehCGP^SL^#v)2TxGb7gl)}Zyf1H8fHoX5!h2=Xwp*jGew z8OWKhHPnt&j?@>TUx`11qcB;bUrnLr-gGG5ec>TVTnc3fW=D~p{f(6?N{e7|hCVi1=u%-z-2Tj|#1+}uR=NB>p5IG&WTPm{qouV`Fiz^EiS~~S+nui;t{-t7b{(mRJBm9> zz=_3i)^uS@FdH(GRi{VV;qo6zc9LnFP--KnS*?{u3jHY-oe3!(h3~-2m-oA7xZ!@Q z?H#{^0v*XD$zvM5KqyrHFc38{7kvFUgpU!pd(T&d9wrCh1Sar&Cg}TauA5%@!ytbq z!%r;jL`_0SSBl?BPQSY!4tI79$0BG69g>G|W&6cdgX>!P?5U1cxjncO8w zq8(*d5_qs;&pfTSildTJYNc1Y`Zbe9Meu6|K(#MgD$AL4CVC{5xFLEZ6A44)KqDeG zawdv{Bbit*FKNt|wiHRBX7cMY6+rdta_OkZSz*T7SBIKH+PIyvbS2@Yj?$|?%UfkR zed*MWgGhYiTONDyVbiM=P6RMX>BzXZh%ft&)>+_W{0J$xsx2gu- zGD9Parv0Jglm*|l{R-w?ZhD8(n)zgUhg3r)O^QubjxE7bJ`H!V)JdLp@D~o{E!VFv zGs-9-+E1Dp{fIZsid~HIJJ8;?d7N@PAb@K|{eTp?y!)WknhXV@7U)|>e~0OgK1;dN z%&9G_H1Kk(ZfCvkrS}~9nvu#o%XZLMJA9~cD;HKkNmI2TwgFqx>07vnGM)6Lgfibm zptQ22$%d)cl5(@Mr}A*)O{ns?Bs1+mJNkLjjsW$sGM(gQ82V6wr!3qf!qZ1fj^CD# z>%G@Wn|N7*OFOe9tbmbU-$j_ej;xLd19Y5e@EUX5r~Z0 zQsP^g#Np#mYbma99lg;cL)*9a*9hF&swg0C-89|iaeuLhFu$E5 z5o4b?XlOhd)>tj7Bif!v+nt1qM4{UoYK^6 zzKJ}aryIr`0`btC`ia7A-Pia+J$BoT`cf$G!f4V$RFJCgo0s{lmVP^ufC7(@;BPmG z9L8M^LOIAhEn45naD%e><30z{>|8l@e1g2nq2gI*P5XqQ2TRvfrbb6GP}oNppm~ri zB;bkp#H5q#QOS_pci4om|0PsQLDn<%glR&noqxcFoX-^|VznuD6ZtLljzwO4juA~M zE0ukS4uERRM`ktQ*pHH*Vo4+3PDv3hEt&^VxD&^(rQP9|{XR%iG@R&T(VrsPpAeSH zSxDlpDqoEE1VwTXcrkT+a8WmIy65v*L2dB}&Cfu!jM>fF`K$i@iFiYJ{LQdn$;>?S zWN`wTpt|rPrsyX20j^h?%@k>e(mSR)^C~K0;Om2?_0K9nZy7|mmg8eCJ+DZOEHXQ| zz9Jpp^4tCL*D)dh>c%Lkfcu?g1)2HJ#-OYo2< zD8pepQg_%jb&SlzHZ*PD|AA=^-0zHOEx^~yEgvbQkJmiF4osZS0xZ%Ij3s2^`QYb? zeh9-tvheKO0i(!D$%t`Ggs%HT%GUPp`WpyjA$v^9AVW+_I(t6j(6wSeuv@9n27z;? z$T`i#%!t^o?+H~S!7q#Z?cpl9NB)GY2}YGjuW#T5j6ZzE2TT4)_;>bAt;Lu*fH_Z@ z%(GOFIdz7&LD*m-T42^CB;VpKABCaK<#t1b+&Y2xb@k)n#Q=4E5M36HSX>Q$zHOl< zt_2i?vj%ykL~bpvX?cRlycDq?J4U>KqjyTAR*NiW_c$|E)bQXeEfRMT$-J@QZi^8D z#z;?^3?L`+los1Rw_I!fR;9xSvEGxY>YZTBAii_=zp%>&$VIl^Re!xFk@bEPI7O~5 zDco|gOug0~yvPz6elOI9qm;rlrR0wZwbJVY-s;SMp32vM0EGB00GIwOl=WX{3$K4> zEayApDR`5{UO@0L=@>!VK9UgGTLARBVLW$?Sp%{&pxHIZ`>Ie$@6jsvUPa57yyA8- zuJl!nNIp-Rq~)?w+d#T4*GZKcltZJv+iqXuUJc9Kc|os|`OmO1h7ZW7T}_${P>1&h zz)@*NF2^u8WJl3i-M|TszPU)V-<0iVs=M}q%wIQlh1-hMxa_aGjEO{&M!08RQp5&U ziuWfkzIfq2rigjVNxAeG-HrONj~nb+b-?B~J{Em+n)2%-qmw0`ZIqpFEX;2{jpNgz zf~r=WH-DeN6orPHqEdv*mcnyy%VK{Sn{nU~UEU!F1q$;bKf~kbA5_EkJx8Tc&po=? zD;|~kMV1LYKytz^^CED-{9%W73n3VbLx$rKE9F2-x{!UB-`+Q7JC)DcTZH1>>?*05 zeG-`G2K6F0*~4c)x8}ccE@r@ZuEuNdK(tv(mT1#dS^(`@d~=)gJM!zwRjXX@Z_Z9n zI5C||btPI({0LmkgznFjUDprQ-S^KX+}CE~#`e9PjE$+v3D=lYCbZc@^y$a7_DPpZ zH;~q;>YOwM@?1acbK6k}`{}Rm>F-2LLt(H=qLFins7~VMVzTAIc{_+JWL-2gnXN@- zN$cJyDna7*7I*6hK38pXb6?e!TY$qDz(1N_kFoM{W*m{jrSAyU&K2R_%WIU}&3K*j zxtv6;5T!keo;as5?(W!mhKzg@verBu0<>4@eVdr6RvS21y{3=+FXk>EvM~UMm@t^y z=bNR~8=5iW@!i=5I)W|j++KXrGb5kROdD(6ULJl~Ov_e9T!2(AURNjXh_b6ZHUVmV*A4uRN;!bw~Ih z2afKCc%kaIrQTCTAs_T-_^XKAD|^L92db;T(n^eesg|fr1|rb^a1^!u$`(PxfrO*KOm4x=sfTujoY+&)!u(RAa`xS^Fg2$`YdThiL=%r3r_62F4NR} zE#oXfT)HsjWRQv%V)&dd+J9~SW$}T_5<)<-j^8X$WpFHf3bG>;yBn9V5DrO2d}uO( zAZKwh%cYkqH@KPcIQ6@B;iQF(-Ri5>Y%pP>pZb|z1RQYE2|J3c>|Qp_jl789p6;Hp z@+jh|-jI^WnG%_SRQ&9-UebPn#h;b!0b1$#?)kOtgo5YVn9_!T%{wO7!Io6d(N2e4PK$1jYUD`1@O)4*!P<>c4EtFl-cEhTj7z6{g(& z)#5~wHvgRTVZw6({|8s+`j1x!?d_hQ5)h&89pe)1;O^n!pC29U;P!kN^^B1d&E4UV zz-n=@p?(HvU)XW%dfN21uZ0C$u?RwZW@5=HGv@eX(#v3U`MC`e{`!%096B(Tb1w3cCwBHB zdrULlk6n}ETA&z3R&C2#UL=)d>?i}8iQ{^3NH=+gX>$Bp7t;|ixc>Eo7{4fGOlU!b z8;dfiBGlRaMM86`ev5tBo2q!1Cnc_c82)<1V0`p=#P-pa+7=t^;unt3@0yCVq4BN3 zk-nYDt?r58!Izd8fJ~YhISlFb`Ti;Nu`F*Cw$@Ws9jjE`_}kEU{$%&W_@L)}TAbOm z&)p|x3>at>2x!d74r@Bf{nS)Oe30^&Z=3=5WaiN13=HCC(9rhK9Hzs=gI9~`UWF*T zYEtIO4KZFA{ff8Fb=m9|Z`Qs2zyo%yxy!36r+vXx3g!S~{ULBnW5X-K9Og$T2y`~D z2v)FvF)t!$6z{Ws{q#v%<{vz4&VTo?=QCjI)dm3P0Sz91yv|)hS=YS2k9jigWQMw; zMnPnb^kcW>OYu-3t&iC7f=Z9%?`1q#r%39)gXfJWMDBbF52PC1`Wa3FCr_0}k{3Ap ze8-(}gSX;-dvd$by4I(mR_y+#+Wqz9`MrzBqIObiN?$Kt;NV%1@7^ax&v*Lqz`*_T z5lio4cWm-`vIPs`iLOQa@^#a|Y2KmQ+OG8??WF2`!r>j%nyvesep20fw^LsA+IC_Z z$*kFMrdl>_i@#KoHeU22N?ax1KHkGz#U!5`^PcUf=rqZEeGY$-zO|IRO2q1-T05}6pmd9JNvnbOVqSjdk4q8gs1UGv>DFaSq8? zp8fu7n)mN%hIKj_MS62z4VirVUmcBXAH8R6Wa|Ec^@qu)x9aPtK6X~K^tFr0?tj6f z-+w{*zu^r1&TTxdwB^^FCQS?|gs*wUaEG*C`@i3!eYY+=tAs0HC`_S2%^3wQYMa_3 zXa^jfQyM0Z(Ly&Xe|kOAyf2xrE-ET$$>rjz^WZtuQB(iT{FMN)LlqLm#wzd@{N(6GMoc;U2}V;whWdZkLogh`v%)doS_ zrueV5e?Hds+nMu~RVpYO%IYW)SZ|AuOR!4+2>ci%Sqh>k|L!^R~Wo#MaY zQ6u)w2C9r3xO6ZLM$RZymGBQ%ERJCNIQTG>YIQ$Ei{|*11ZTkzl{XJX!H$1As-th;QXy{fYo#s(AEJH1^ zG4V7hf!d+@QC(gCEP2CWO4`Opi+_vSphkin<=ml0ZW<|C`&q7`dxI7;+zUeDj<3!j zCjM<6V*tW>G-3tXJg-v?wYH(*2+YkaII5#O!dqj$K>JA@z44|}x{>21arjJ4AV+sXoWR&CB>(^&V9r^{eeCV6%%=Kzg1u`##lU4^z9(t%kin54iRKIdd_TPglZ6kG$Xj-fLPY}55Nkw$@rzr;|s_hC6(h$iKV{9`;h&S z98{{zTOwJ=n}8zC9W>nG_gldIP-)j-$iS9et>Q=e1&sm7+ZS3jh|aHEk9 z{mM?no8icHbn&aN7%7|E_v|9L&oBj+A)t>d1y5JJInotppJioVJmr!h6o13@f!%={ z-`#U{JFJfv>71wiz;SNdFVtkRP(&0*3`6XT*hhCFF)*!>LrMA61!>C6kZQN8wk44||<*o0xPY>3uP2;3L#hVl_Nz6EPa$l!Ehx6^x2v zl^?ja$~j*)-$F{{g8chnkcJ2ZTpxywBF8jS+VqlJ>0GqVt~NEdfiwF^DQXPmdK}Gb z`@li7$}yglF0+oYOosdBwG-g9S;Ckf!+o2ryI1$VHKFVB4sd>-IK_?uxs7iJxW2EH zGPGtd9N4wNcE}s4cIvo%1ROJK8rx-1Xy!Y*N*tSI@F7dam}=%bxyl&RPdU)jcU(TW z3Lh(HsFo_AMQUk3b?)BJ89QNcXl_3QZtj!AmgeYOY>mnViBpL~wePzu9oS`)3YyvM=IlAa35ZWPn8Q)KenG;hfEP? zbNWHU6ysrpz+#BghCm`9Vq-G#8H#qd37kLT-+FCYWKCfY*a$;CxySM2JU!`|(R4!I$EB`~q;qI(A^5t>b%kv%GZ; zb9U9TdO(=%^;GaDqAmjHIXC^S_r~XiJ)6!>=G-aL5E@0!g@K}t8wo`DS zP;u`yZP7L5o-N@=c)h$`H+vwJUF`b?qwsusxx8Pudf=2z>6>-$)nk!8B_j~9hm&pL zmvy~-a{YZONI=K8@riG*ZsH)@Yv};x7e>r9p$XrhcjPtpxl_>4&$C_#ZXCauu3hn% zU_w?tbMNu1=H*??m}Np2K9KiIvJOAUYYoIaZ4y6Z8beK}!e8Vid*2+AE{dSL;mmz{ z)i{Qd;-S0Y<~6l%*~E?W8hx)9-Qv6DytXfr5~C~N&b{qCvyYbYK1mV>o-(B?;PKdN zRzvlDNHNBUu#dmW?B|eV%ql-9Lfez>&U19vDu$nM%CndhwrUqD#*dKE`}E#(VV66m zgK)|R|HgA**IWN~X}@-L*C~dQFY88gr*_T+FytJwS=Vb)I7C2L&Zq8EajAJyJ89A} zlt~yc1s&7IcXro2w5uPJ!l!@7wN*Q9qT)gPgDmL$I%h{9=9!O>Fly($b^uW&mFD-Vh}PC5gW^~%h> zh-ICzceflEE4VrII9xOLR`T~{Fc)wTG+$0LyPRKAQ<9BL zMQ7$VmhRsQBSsM+4G}|#!A5Z-;?eB}9~cf(Ldho`6dey+!U58ndJOLpEBvHW<-@sB z9iAoj5zd13;@E46?I4~CZT|w4tYzFAhp+>3u#NJhGX7<()8B^@Bg#imOSuCG+ee`#Q5RoBsZ~AvQp;m3*S`|x zhH^rB7P<80(zf4a`baWVE1Z%fE7Ft3Sfl?eY>4zMyjiE0YiA=Y9RrV)Lu?JpDWD$| z217y^xrAEFzcU@CgB~Mx8rvP$bH!oo-k%ZHg3=^D{!!mhiFD^K>=6Z?1XG+YT$9cd zJ`ZKA6@`GoBzhG9$affsMo3$XWmP`(8v3p6?-JmRFnqWdn%zIX z2Z2>Xu9bPR8_IRBe0Kvo5fg*pfLz0H^yubar{En|S;MPgHdN{Mi=PAENQ{{V3x(J~ z;v$g}y0q#LiQv!H0X{1dxp@5^VCufF_!3_{H_Pq20@7INwPKay%;1{5S3S8u%g{Jb3YLO0HQrD+@DkQuvPPDfxMv83l0;tI7xmJr-vT9D!k1V% z+pxv=O8!E)zcUPr^iHB3{nEMrM_3R_by$YvchPptOYeR>lu(k5NKe|Gn!PZ{KGgM4 zq{<3a1O+CNo?*y*)JSCNSu|O>i^h_?9m#%PlymWREGM>|*DwU|A#_0(UGPTeZlzadIubtrC0(@MtLCFOGa$ecxmD&=9xc%Nm~%D0~L1MM;C|Y%|;*yDDst3?Kr2lSg!B_^d_OM3wxL4iSLM zLT~ClEJw^LY#uTW*TVhLoQs~gP6YIG9MOx(RAzV~HdMGV`1ErdGQrAHi6$nH&s1!9 zG?rIbJ-8yd5?PB@bC`ja5XpAefkQ)|6(ahe{ z4%tNB%-{0@36Cn*_hxXEv!dE|te9C=elxakktr86NQ+6KnP}(~NDjL|rkQT27nlTl zCRrm*CSS|b^91QZp-JOR8JKJ+76==Tx35{AT98`NI#Q%tYxR5C^&-Q#ddF)3-Wgr3 zy=1AHi>%mnKqp+DFg_^LHqy6B#a4rl1_o0~FFZ3`FPy=hmN3O67T*;^fjUe5G>+}- zk-}x?F(&wJxm-}^AaA$_xd)ZLN_&;3$Ys_}$qxCzSh(Qs_nn*_i~;7yuzaqn2gQC3 zQyC}WOE-BM0VUtE2U&q_s!l}&4s2tlIm1+^)>4(1%0W(Sd!}B)f*z_MJOm2tHA|*x z!}=cVAR?v(!;Bu!ASvhs#3F22_EdAG^(~4ZBImU&k)Rj^0S+#si=RCS&={eT>KFXMGWAS$!n62K+2IBPmTkyX5rj5Vx9maOUSlt44mI1PUzaAPAHbUOKZ2f zN**yvXp)OgZMVKk8!2qJzX};4PMDGtV0#?iGf6xjDL&>CnQmDrpdAaOj(vq!$(vTtjs-#dl*ygofAfs6U->a(d`zDGhHX9h&@nitT=c9$ z#3zVnY)giYDLw1Z_tIOQ#RtOj-8>J(Y^ zToTMf&KSt3jEc;YLFNOZ!ikQws5H4W3APBf2zD7ZeyuXCBCRT| zT&>a;kvi!*ZI^72zZ^k2JF+F=ob4=M#WI7T)|xr6*)o8I ziSfa$ab3T8=>&LsFo%iV;BsVBOHWZeEC=U`Y(2o#C4J%#$F-gL$~drPS%5%Fae$tq z&JJ^#t`WzYb=d>j{R2+$uab*isG-z7XZtTcMAYyuG zt=T!n40K@P(LVu4b_61x=^Lf6tppbEXWC1>WLz|7W183{*s?97tptAKFZz~vsa~g= zOEir@zAl$87cN(N>0bW?NlmDv6|?)WQL^1;%(drxX}L1`Lni0x%w za@VaO#0l0k3-&3iJL{~)c1LaVK(N!<_LAjlFR5$bmeFPX0AYj$T~@>Q1G+nHK8MS| z0f7iCdVw0xshyI6XUuRDOk^ECf*xu6bhH zJ(QOK#7pO>c~~=SRNE&Rs)QtvPcX<8>4ts4w6P5x3X8_<~`f+B8N56zvLIU_)yb0|ijp~vH9w{V~!rE%jJe#)xeM+%}_ z%e>i-G2xV73<;EQgP6RxQqO9mmx2X@l1W2rAu=gK_1w$!i2; zmd?s;=<*wck*;By*t#z5IcE30BBhJDWH|U%ApCMHFLeSXsjJM+8EDE)kED$eHFPL#KTCwSX z7Sk(Td;oTJWfON+h2}&3#&s^bmhMsY`R|Ca8|lz~33Zkh;l^dgX~wNP6axqYvI9l~ ztKp=hvYs0HE@JE2P|3y6r4hG^79G3xxUNktW4iW@XUux?uPr>A!yYaEohTr@U%vTF zn^dEm;nz{7opGa_GmqjFn^Ys5((iLz&#L)!n<~lY)Km9~2R<}c{1}f}qrM7eoy9bp zDsE-P?FDQ!FK?B@xZi3KnVj%bvfaZagw~d@RbwRnbyaR8>dENTC>wfTxuigHIs; zmB*AMTZo2nRK|~07!NphrO`_&qmi0dRVx~!Q7+A!nYju#otR2qw$FY+@VjEY8JYS` z`Ht>)ZSn*fx|xVMN++6S6f80@>Wm{=t7hID-kd-0_1#rydOuUm57!-*nR-6TzMmC5FBbSe9K`3v|GUHD+s4%Q_G~aOL5M4V=!`2D@glCwU%jG6u?*7W z%0ft|EONTD-@M=b;Kln?HqiTbtG|#7v03Qp`#(v`BLPN{3 zs>NODAN*8zBvRfcSKcNPUd~`UEKu3nmo{dNeesnKsKRNgVawa{oU!I7e+Y2yqW~Zc2{coo; zr*15cn1&~C4t0~#c|N80Ob0c1R8$7YLc@^_LynSb+6qqLKCP_%ttev zwg$A;6Ho0%D~k4PRf^cjh0=TZRpM)?Kc~j4%WY>Jfv@;JXbB<_y0TqK^Y5vdCpC>jeI1njp+BMqgdm77d~Yi@F$_*HP@%PQ>o4V z^@}hyfcy@=cC6o&cEaG79)o*us%8K8_eid++WnY!SUWzO%(y7%l$B;KUIvE92!p|3 z$auVn;PSO*s>q0$<}jFYf2V0|tV}1A&GN~J2#2A8`&|jClnubjlqHH z-JRp+VR=_S-4|kpH|(KF@aQy^1&C*TSb?t)hC`33>`{eqC9Wx3}yKekZuk z?1^gG0ShtEey?qU57RAT{Py_yI~Zz@{+&T3UeS&!zs6M{Ojtww%3lYp{D(TEAWCScnKLwHR*9!f~+G@=|hAk`i)K zQlrVFp@U-))0Yy$HVaq5ptLAS1IiY$s3|LJC@E#hVlhr8xl29MtaK=o_bRWyg= zq)JJMl?L|4n-(>HC3jHK2O6*1s!`7-nvaepm;qFa$|`_lTDVAh%zz;M(8; zcfY)|9a~lOC4Bgc;O ztSTv`Qfh^8uVp(A@G0**STqK=a)aFDz%0*(0boc$JBgNq@Mu9r@z=IV#j0Y{btxgz zH?Vk;7X4I$xB}g(>@K^8l{HP3_~>{cDIqzhYFRHJ3n0=vsSpiNLPMRap_nd(g&sVT z!=k37nRKc)qpFdftQ%KCB`YyMa%@_C7Bu}nSKCmWFBXoCW8KIvGYrp7#{yJJtlc&InnW9cLQKHr zAVZi>ef2_}Vx_3>;1#u#-1EQMopbQO9_3oXnH3oj97VyyLj^6Yn>S8{cTHKk?f}cn zW$-}H#SHzM_Dc@9po~IqgOqmUH>}`=yvh`wE)L4g=W{(YVQr1aP)_Uo+verk*^xMv zEak27sK>MU^k+Hl#I9YP_Z!BAcgX)4^@8NnXY>(D^v}|CIR0HI!vA#?)qkbwd>$Ac z%1cRB2H!svwoJwZYo>t6sY(Q*he4i*m)Oh1R>75Q(U#=!D>5-+^@65<{2o`)ytx|9 z30zairCC?P|0{t91!VoRl@v-$fLsuVWU>g>%>XjZzvS@+<||t^a_e(0+@5C+QW$s( z+%a_vTS?Bs->EvbA!taoD*qCh@!@BZRN?vb>9gTKh|FOBcXa$|5710J$b4VDKk(1; z!VWR=G{+uv!X7ljcETTW!v5kENE*!*E%_N$IQFZMFf6L@XW~RnVJh;Uio&s9j1kCx zYRXU*4t|uLJl0-pSkOjpSg5wqv#Gp2S=?={I%!>Sd)+XZ>6qkRBhtmYhD4FEv}Ik}jK?MEb)W>FsF>X-W0mgOGqA8XJU=o@HUHdOmXS;i&V z2A0Hy8!P!HK*2T5dw`hIRW2IKiUN7##R-|HHWE?pODX);hL2w-vw1tBnZOF+M9L#V)(1?{} zSJCZ;%B>z#g~on8zPJ)B?c*J^TsZ=-ni+!5(~B_V6oE+kmEc=#Q5Is202R^K6S4`J z;?(d*wj?c}SGa1*_H9N`r`FY`L@&@v6FwN=PAZr4BIDNhe`4Ex$5Sb*r z5WjLE$c<2=FAp9O!bw+O*l4|`EJHS22H~PcAjRcEw9v^NAZp3@!4IEs;Lg~sCwd?N z5^AD|tJP?l2)%?QkcE=nWuctmz2v3;NJp-R(ll3PwKnjFEKwm4+mZmgDOari5t%#!HTgnPF zEG?Q!(LM@Ayf#ZY>oKxhhvQOg^9{%V4HMTzyjq0xW@Gc`*vvqhV}Ei(B5J5Q2-c^qOGk zDtFoV{m6$u{OZ-k!g48VuDiC(-vF*EHudAP!G7>}V?mKwK-@M36P{ z|6%N$f-4K+bw4wi*c02fZQIraJGLgy#I|kQwr$(CcXpCH=ic*j&%-&Vs#ia(hpw-> zy1IJxe|_KYWfn<5H;259_<)tAj3Lhwf=)t;N-g>~f!4#&<1lptQOm4uHWA>bU2I!j zLKAGXw{s@#0HL-SOxKm#=`HTNKbZMIk1 zC1ANYK--Xnr$0lbpy8||?+A6WKpKXM4$>^75psLsqJS1gD9oX|i z?aJDfdI8S^D{k}3=vs^-V$Th5!ra2PGH16dk5{zdl9xwX&pHr(mlffD!B(*te5r_R zXo$O_{S#^)WV!$cJ~%IJ>@A?1Ha@QFBJ&V93-JJFaDbIslBYnU^p6X0V_~)iiP}}^ z4imr%?f?}n!>GpYDgXNyL>K*PpWRXx#n`=8p#1#`OuOeEdeMX`V1n_qhVTS^Cz(e# zIC>ZTt1ICPcZ7>uk|K`D-(*6MbKH{es`O6d_%+FLtmYW+67?k3qHJBp2{Wj;d@WI< zD2``z)$(rns=(M}dZ&ibc@L^1G{5}sdIj$=uVd*diYmZaiGYCGtPBH~4+QgCxaqU1 zqh9i&a0z1bEXz9cZ~#B}fngF-1uKVr2cvuP+8;dV8f2C#n~n`AqNK=hjV-1=VWd~o zV!#E>VZ;@22NrwRY5@*-pB01hfQ-&O62$P{g)KKC6MX67#b!OTm9!ZWh(&4}h{Ewp zu5FnUI?)T-tik14xG40BrWOQ)A5e`AOxr-}jy29eL+;Rx$!j$O1dZXpyOWj$zyEzx zuu6ip!ldl+r$`P^?7|qEVg;$*G2 zp022v*UA_a8lT0IcFu+-t%%ZEdJ?L`J1bH~699!9y>Y@)ModGc7xb$=LWwvB$SL8# z4|rgyS(-uWI?7-Fq!%P35EDw0Z^Ov9W*DnD6%?x)oHtn!3ME51wC=_?SsN=$NeJ6i53$SI9umJRM8U;`Fm# zd7QD~uu>*<7?7Nlrol})d7sh1`?n4jOU=nkyEI{fXRi!%n084@KW5t@5JAX>`SeAw zJ+$8=P9Y~;))S}RdAgTZb}e`^*I3;l3qzCQ$mHqHVpAqzkgVJRsr`6@bc)ay^K2n8 z!wNC7zBks~wRM3m%}IBcFE3rb6|mhQLd)Qex^%L4zTdl0k1>60r0xxN`{+AmXK9JOIaY?_JOav zZ&Acqhd1KklJo|r#FBxv{MVX+s+Q?EEw0!CX}W1#9U{2d+&|vrPdE{%Blmv2*sTz$ zb&=&m@3=E@Ps8S+iw5+=)d1IurU;tQkSo7yk^KBg{%&4Rt2MZrTd#n6>Nc>2#tZ}{ z3pozFU+m7HFM%E*b5@ovSw)@44x`@z zh}KI^rK4viB&|euA?Ou{lMN=LXJXOoHc61R2F%jB+(|w@qV=9XhqWJ&2AN0|9B?!H zrtMgyt*#0YCTFqKePUGy?DdEP-gvROJ~$oOf<|^zs&mT$;o7h-#^}1rQ}3E-H8~91 zDDzF}NoScj3En&(S4O5*y+!l_0eqa(6O+fu72)Le!6{8VZeb|8+y_M-Xk$a$7MCLO z6fk(@Oy->@uZ7IGgeUxqj&b9eL-K#(m=Aft^{#3e#|S)robT(iM$OyuV*>-fA7AtX z5Fog^`4pe>+fo$jE!;$9JUH-nbxtL54sp2nJPV0@uQYsG5W<2|*>Ba>{`* zD=^bKg{SP8Qq^d{=jQ|-unb=Qh{z}DQ7Th?66Qg8jj^Iz+E1UKk+0KsaUNCk zha8~@NsCgEGxULTHbTLg2l|j@nI*@G%$!}$-=^~C*nEO}+jvpEHRQgnpbT@2!t+`1 zRVM^N^(+xVUPv5);znEh2cmzLRIDVD(hdX`flCWVz~n>Q^;zLoKOFfG`}KEKzp2v+`-i4#S$1zp0j%$Ph-6zyOK%&0e4; zHP^Hwbi*{XpF?@YMohd_BckOyYj}tfU#N)cG|dx~HFXN<(*i)>RevmZdb(x@alrPP zw9s0unHx#9+|%$SIki~J)k=y-7Ep|GiOY3dtH3F#5z8e+eZm}mBB2R{+G*=xY`;fT9NnxB*&;;{TMQDp{Lb41@GM)kxD*%Iu?%p*6gT#Ef8reH z6O&uYFAm8Ha7V&=Tl<79d?;?KPwF<5TuN`m3hk1F%^uDQeV??&{aX{Twaa1u18g%S zvNxEvY|aEVY5*741@Im?IUdk?8|?t7n66gT0=8O$<7pU9P}`o&>u-+DEE_mu221UW zJ0@fNyFAG*K$pWvm)NVw*uPywNYZu*gY8f=W4q6H?TA)xph#WOJu=7L|Fa-xt2T@U zM+JJqaQL;B#HT6{C}=?ekQq_p`MZldj`-W(qCE1Afh%JoKQ0jYD@_K;6pcIrMj zjHiHW`xAJ)@35^P;kLGuw*hr%I&R1>^YY?Ec-_aBoVnof!{@q)j7cG3lW`_oT`^sV z|64+ReKC0r^NRWCa?8M$L! zA14_yWR0W{g)Ja0vUN~&a0tdegBQF=HaPdwhc&I_jU9lj7>&wN+SR4;^XE6c9^e1v ze0c9NzeG8XS~;Q?vf=Q)UCUFl9L7e!Xr`%D%~{cII#RqZT4uCwubgsBi>m8M0KwDq zFA8}T)UEtDCn;K5YL^`{*|r3=u5oh^fm^i*`NdJ5j$H4Aw0Xfwr?^!|(j|a!*`b8i znU>@9^PdxO($cHmqvA8})ed{n%&#ocj{~=+jSqM!UdfaAN;?y4&ArJK`ogv4Jo^wy zrYpJ_8hhgQ}>yjP}j0)=>;qaa1N$L3Yc(3v7Rx#|Wqy)SG_Z_v7&Yu$>qwlei2tzfH5N_wKh`3^OIO%#7pliuph*BIV7ytW1|uIXUJIq^Cmqw_# zOhtu8E(5>k9kJ8&Aez!xp)3evPtcB6#CK`D4uLhvf!iF`95}W1U$bjFT~yK!xNIIB zZ1YOB3%Py|`MFI6`F5L;zwWW^*f5H<*OMwdp6R4m)FAt(>jDbx*d?wQqgwa4(gZ)` zn0QjRWM&}yIyMR|uUx74Z1chUW9$tmgRSr7Dmep0rPH~^ zYj$h!1zVj74Q#r6AUnRdBBxq|UD3JW^JfQZX@Qxtb1025cE`xLDpw?M>?mf{%6o_` zz1F^)AUx3{E!9TYk6PJY$`i8+>K`M>jrKdF7v)Rk{S}?0by>_6ky44Nt%iJ2$(`#ClO8XvS-ITVKzXwAtVV}rS z+vkFoQkoa@uUxaa2F|_4>j3E-BVQL!REh(CY*CCXGwxU&fAZ^*B(}YDN%7cfH+AqN z!!jEKyhl$@X&Ms_3$Pv8s3nHO45{=gNR;G0@@6g|;YWqa?nqvnE>4IJAJ{Zv>yX+2 z+|Al;aQLyN??4KuCl7gNVNH}9if|Qu%8E{ ziB%mf*Sab6JLp4;%=PlE>P!$_;j<;GNg?%-Q=}wK5;jlJXZvO4Q~=-}jkf?ou#D9lm1*Y)*Xm9iXpO-{r4CdbX{aQ1Qxr zzutv~ZBLt9gU->Na*T0msmR%jnKNVS=;WRdYV{=AaI_g;cA72GYK3GUz3#;vLD6?t z8Le#5jwyITPY{1|v{oaRX%{;wo0hDZvW8Y~mzhYTQ#F}ng`DqT?00eeiDq<-%sBy~ zm8qJ%Jl>~UOnzqcU1OSCo#H7-Nm_C`-VUhjPhiaaYl^xZ%ipz~*+pQfe$J-%4}Y6k zz+=)*638j*7<&4bsvt^b(}C11Cs=b#>{vdqO7eU+`;7nOn2!>s&A-bWPr~EbLBAiI z`b#YJu_g53OU%zHQf&p%lY`;6l>i3s2;vWw>EQyLESN74EB z)mK~8b=5I8A}KR_R=1-pir+6}5yjkE3qo{lOHm<26S#%Eh>Gc(5&aj7LkL1#@(bfi zw#YTJ(CU9eV`@A9_EmEvb!4^c&oj;F@k8L6g}ONKkUfi(zoF0>Tv)9sS};BvU4&CAdV_ZdkAr5g3(ezDKa;u25zLSUsyy z5Gb>3#x7m*Mo&)kgPT;aRwD$ZFqooF(Jvo1pau^LZ_{isUOfM%VuR~_qsTvG*|k0T z#+=~Y66>03aEiZ0T>XVt=<0CP(TDfRHZ9%K+Rhqc2{#7B*(a^oM^LbE#Zh}Pq`5cQ zD)f2HDEI6G)Lp_Qp$iC#jiIhw3A*Cc1G(s&|GF&1w#LAa@4~XLIc25yg7e3FzK$~B z9;q;1eFL%4iBCz<$Houy_CM9KYSmCr*xR$j`-ZYzi(p(FB3Qwwr5J8bRT|2ulRJW? z*)3AZ;VJe=dC`oxkKBmI@~i+YQ5bBkqPpPYDIIcYIz!uevzVRzD~&U^q7{o!M9v% zfWJD=*eslGcXlpuA6!c?t)-ft0puikbM8{i4l0B1Ls7NN z5M#I--k&ec4h+I+6C{|wY_kaBun2Wbs^-^^ml(6?8Rcook&Ulxn<@^>=ZE2jS<1wE zD?+&CZzK?JpVvI2X&o6EfXbITvywz$fgC z;^^2j`UCc=1W(0);bL2=CAbK#qs|*xPsPMzllI zW4k6vxM*o&OZztmx8^w#^5&TUo%H&Omsusl;KGMNiF4X;?2s{Q>Rp!N=`@?Wj>qmw zUL^#_MkdA_9X<%A8O2yz^tasU(L0>Qa*%rR0X4VW)vZCWWAKe9|41!X9%9xd`jsj{ zzHK?b_#T5dCw9ZV&2+5J(e-f=#RVI=i$`+ifhjBK+SJzuf5g)`4*(M#tF%M5U{us+TOk>{?7dG=9O0Uz16B@#`rp>@6%z{i1!S-}Mu^s-TL1k=|)L5_eL4?(+Q<9a+~=HEJjWBo%+-BLojdkwg5u) zWo;Fc3N?(3{jj8}Fzt=~#HiraLD3r86RUq~0yo2452Hig$8C*P3&ZD<+p1e{+DPi= zr=q`wP@c@~o+^d!CMCc^C=5dk02TGngtM{C2~g$k||c%!~XE5&t%Ki;cjKN7kDJ!^?oUBD{^ z`L^wydQDI31J5_9o1l39D9xd)s;L!Q>5sw*e4W(CzGXm1bE|XQ zb7ISi@3!%mmqhdP)!_?pEnaUZDJ+kIe@be-$zmwxjV0!8MfYy|pr$jY$3(+Nu>&yn zj%N44c^LX}TZA~?kN^w}dBNxwt4PWzOx~$ymq}r$Y0*9~QnB`Oa#q)eaO|d-Yfrt& zrK^n==+~jsY0*SMS#kfRb@Z;2^VI<=REX(my3m}D5jfKvz-U}5*lD^;2uBm3bi4j| zD~j;k202&o-K8XLJQx6LBj`Rs`@qlB6X1LHMw9zk^}p#5sctlu>PUBLe8BmWB@J|6 z8GhlQ+d$I>xqkv*B$B;q(pN;{HCn`yTL#I-w7deEmPLIa-q!i?1eUL;>eWy7tfUW{ z+N5iyPQcBl_St4)2AA=x!$rYK!0)ypkY8|rlS+eT;6Pb-(Iy5iV^`Y~6Rgsc5Z8+d~!&~U0=a%w`Cv5wGi;hOQEipHKbu7JAamtSCCLoIxr zU!+!EBl9LM)-{*dY@bndM#5{Q)*XTm#8(YE%PuXCi35G-P})FuUU;6GpD%I%dalOb zx7cfn|MsjT>L_}pBPGZ!nk+})H+Y{+mOEF-Q-waaOV{|M>2gaN`e4!5JYdm|9Pb>4 z7YI^mqSaP%4ZiZi=z?-sncdgit|c12XK*91YWYadBoL8(i%q6{MX2RKuE>X~0sL5_ zvFqRcumgPq&V`uQR3wnx?GOr*U7j%@#chXa^6R zSZyS{#yVz4*t@$i{&jxbqy#UJpVRw9?pWyj?z>B(+8ra-lIDT`y!r*P^zIzF4Dwg@ zm1z#YBIza2V)&zUwMoUerg_w?YKo)1%&g!9nts_ zxu({WA35@Z-0+m2`pBUEbce2KE&QgcMDW$9JmVHE|*QOG8tIMMho=#8GEwx?l zX1~q$eoB0nyY&pGZpSDfcO8NiYkx^}9zJTLiSH?BB3WY+egQSzyA<++Ti1JQ%A%5J zSa#6V?1+?gYE2QypkeuQ19$Ga%A1d9-2q&Rg+Qhk-whwc1q#V^LdzJVC9_cjYrSHF z-zn!SAUFll5(LFV{AAM87KOgI@1kAf>1(J@GWnqswJ_5T@#1Gm9A|i&eIc)5NNSO- zCq~^4fiNXuVGbL_i}S7c-K{ouAp%F z>oPdB3a9xW!B8Hc70KnJl5WhF&n7mV#~8W@4-iBo!yh9ioxyGi1u}1f2+h$q?JYdY z?LGtZg%!0J3bKbTPiBV$!WJv0cAq-NeroGjl@|9_3!i<2frW{|&_Dt~0OC4YaB8S) z<$Gwx9NNI9>hM(vHE?Lj+U(NE;>-zsIhVyu8$ro`2fIX5DM6=AiCxq!OCgG6=6A(S zQNDi-O)|K*_iE<>9guADLSRJu&djYHY=$nPbW6hVeuo%k?@KNnC){UGZRGx9)&9q2 z9l^aLx+QFV(hRG3jTFh51Y?z?+d1Af%FG^Yxk04>_|rvRih++(L*GL5!D^lDkfAb$ z&v5YR`yqeWBEbN5rRMG72rcRzUbVLL3x$d1R}{NZnUssRr-ST7js;hQw;>eM4%x-G z`Gp);xjaEb=du~1xtnq2jwRpYV-8mxft0Y|z!!%f2EPX=G`}Dl={3)`g8;zNOM2d- z@Ue(=v`>`vQn}+CCykD`3p?X0l>2(uM4il|av?^JBy?|_35r^A?Jv!4F7JzV+B-Z` zttVwQqwx&Qr&bqto>x-Mwy|cLp>O)D_+zGb=`Ie!gVTS<93xw%g5PJ?K6WONtlck~ zkZ}qoJSlI?(g=ar)K)6Eh`~0@K9KNvYhl|AXHFzaEdAwAfNDddL zHloFst0H9)zu>Gibn^Lsb=9Pk4aJA+@2KT^a69bU@buDeyKtVf6nDH&|5$Vd;NqFJ9Q~+OU?6S&bX) zh>cm1P3{}SwVBVc?spjO?7vD>zZ&r*%M?gb0woUl`_us6d=Ll zHr;Y&%W9U!7YDh0mA6<-8RU9AWdzLyDB??OjWkA&FMEe3Rg5dP>wEjd&c39ccihtI zvq!XF_J{$gI*mN)9N%wlA&hncI+;iEWYt1t48}hAZqar+cflB0_Wd^Qa8x5oP!@^fj$95 z#}Q83LoKmLU59-7EIW+0bH(MFpAR~&B+rceg1MJz4ut@9axZz8uzb`PI(GkFTvI_m zJ0au{phcprq0__59{$KZpbCFwS=efST}-btdHL?`-s=3Mqx%q1Q}6ZK-6R${g$o&#d~X~P zQ^4l%ig@12CyQf8t|{`o!7UNj&ORWp7k=EiG=ASF%9N;|=FnEr4$q%YvyXM>-^k}* zg1`kom?Tn~W!e<^c<&*Pqj}LN2Bs#+f=A?4#DO8DbKR{7^QMANl6AM#9h2gHV;{t_ zoO~adOUo*2-q2Zy<~^HcAX>~g*007St}keq?Ov9h#~SP21gkR=M~2iPIM4Ks_>~9r z>*0omRk5u`_l2C;yMGB&sLR6zxxR(_a;!2l?e~?HZfT7$J%i$TE_3{#Kdxx|B?!jCeLqTo&uJ0!t=t0O->szQ#hdgfC9<4qg zjBix=j}r&|UX3K}DNeKMo~rZtjL>vPg{aErKZR`Oe4qsc`hLg@a9^A+w!$s-i&?FR z+Ah+l(?>8D+i<9!M$T~!hmijCxF&k0%$6SaP%eqnD&N*J*&r%+HXG1<*SRU5MSp26J>2GX>IZ_i)6YyNa0w|;w-@;f;={KW=A@Q+Q_M7`4XQSmmi(| zdjxYsydIRtfi0ZlmC;OgTD8ndU9hl5PfEc@756NY<*A{$@nXKv>}AR3Nk>Ic)9llO zse_GPHK(;YDQ1;%gQCs1OjF~O4Q%pRG#y-*-V3dgwQDKW;iRLu`s^3w{niIMIE-Pi z2Na4$QJc5>$KN1v=if!qi?Lj^W~-?UkdpLzIOPE=@y|GM*c;Ji94lvy>EOuFbK^~O z9&(vv<*!7wx_ugi0aQ@*JT%c|EoWi}0593}8xmU~Ro(PsqFPZN*rH@LSa87jO1_bv zb(1`bMbrv3ky$3WPL-NUV1fv&jFt&(!6-VF7Q?s)G!1R|QM_T^4h}}0zC^iZ2)Ki? zxW7ozJgpjqc~>^LdV-SbxEC|soC&qTj2wF#dW8|_E-sqVGf=up6I}#Qd>ER_M!VcJ zPa;+w$=G|QNbRB?gjqBnH)tpv{;$ke87{VZE2pMwE!^CX+d-&KCr})wTPQsjY%x?6 zna^p=szX!4Xc6{PYkxDo7AlO%G0~vFZPF55{UX_6^P6xvRMTv!Vj-P25;9E@1*K|T zdn%^Vv<|HVt(2x-ZWJKipNh+CaJWiFHXcAjj71RzC0fLwL9aHFtGZh9~072#kaA4ZV3_g^c>ljUV4`gTGPBV{8$P{j31{*JY#T;UxR-%)z zI)I~zEL5V4HC|d4?NZK*BP@5xh!R&*v?9|I)1)K9R-eXtJ|(-ODK`#OwytgcnN*0z z7gkTi&#cV!(LC zFSQ6k3%+U2RJ|Pqg^fx!kF~fxD+0MlW3UI>#d$iyg9L>bH9ZJ>8p&Tu(nDKHI^HC7 zT$L7$6bdNLe4MF|v!c5OPw-?>50;u?!BILQ)-5+_NtQDMF1e1utO+cUUX_-Dl$@ zE8I99YYWfX!ei2$@8NO^J>9=E=;cw6>%^Z>Iw!Jqk?#JeRy=e{wR_yKU=S7Q3Qxwj z80vyn@j?E?sIWW1TifS0#tB|f{8^QH?Pn5_nTB72+FuSoQ_R^*cls=T8@l#I6A+z` zL4&}aNj_1xQ9dID3@2EygRa<&U5rU-kzvaWqdx2?Thv*ryg(?Ql_LlEGJW;rcFPgf z`&hgW!fBa_*}K_!6ufpKH_e)8YAUK)?=7Gp0}n8CZ8W7lN*&#FlhL$Z;(&!t?Fxfd z&hs3Hn8r;f$;k%Q57_n1PKe`3G2}Ud{IibL-r!v&w`_Trp{vnElI{NS(|teD(IVn&c{T>(#&=)fFd>?yTdCaOdG~P8%Tp2XjAby(g_6i`$AZYpfO6Rz=a)tQ~X5l{`4MqH1!fR zTMf3sfmtSt5?T@alwmxA+f#Fm1dN~s>l?fZAGFv*t=oyTRQcSs#4!bA)|R=@hWSjt z*vMyuU7=dw{HXGkhJxU!#fiYUoD5}Hf++9_7)K;rC$=Ksk@ASz|8GK4)lZOO}|$F;biN*3hF|;)@lqE8}r zJ`j~Usq^_>sx=_1QF1fj1btG;66OQBoZUQUt$t6!`k;9;=V3)UG3ZcW7mkcq=QHyY zM?`V$Ggt&pqB=k`f34Bw!83F6hUgQ=idcZKN{`)3`VJNyI)aBtOjOkFfnRDXRHGUQ z6`^dld*^62tw(W|6J2WnMzQ*WUnP+zHu6#OJ30 zj{b#r_0`-@*F;5@R7Z zATT!&pO3W-sc+=|R9zB*5yKU#wl4+eDf#>cqb&B@1XlM-AHS&!pC94>&O5h!*DX;?ag~=&tkZ;K>`4Sg6?g*q@vFK6`GsblxfP-A)cWCW zU{sfK=S2*$$6FveUV;N}#2qnecu2CQ?fspUdjYd+RI(M5(VH=8j2w=un4n zdM^Zet%_`QzPN!nto-s++IRT{@2nNz4fN>h1MceeEW&H<+!v4dFtEmrrP z!s~jz9PGm~%~ne3C0?~aSr|>k^L#!)D?H~n((uj!0z2QaH)y5P7~aTB?(U|R+j0v) z>pA_>TE~b3W7{q;(5!*4^XxLQpud%4o>#5;2{lF)0UyTI@ru?;8Qpiu@R_N@bbv`8 zq|(1kJN|}*x)GFfpXK1?Ct!?{5xgKE92fr;)`J|Ck*!RpS+9mkxbS02rY&4eUOeseMSTmGbI#{-u+n zbGN_YR`0rn#CH^dFeFw5N<}w6D7cD9A(wwB8ua>`KGEpWb%K*-QP!uDk7RHXzRoXG zgGT>B!M_4F^nS`tjA0|Sn8;QHD)Jx}%ov{7t8j3$=Z}$uIQee>6Ik?J_lsJW+X>P901%FN@K% zm_5#2^b8z)xUpaD1Zt$f%mtUa@;0Bt1~z{hb23j-XkBcJEi;^AI~#{?KGQqL&4|Gn zhUt%9^v@{0LRxDjm9t;fVK>5US zV#72ME53dN1I<1mw=&P^2T0*Ror0&+zi3FEjoRF0qHycf?lzs`9?V~vJ~c2mFHqzY zO19yTa7xiIYLbnTd70pnK6(C{zT;f75_3?v4j|u&6jEucBAgtTR-#om-6*SbmQl7k zplCd)8()sRVstJ8tp-ak?*z#1C{%x)j8dli1$9k$({!m?e(>!h7>+nTfmh4wVECxK zPz|{fmrqtymX`x*+JR;_^Z5KX8}06qe0N@c z0@I&~*vA#h85N~x9%}-smxAh$vaUZvPEaRK+XW8)%9ZJFRk}kIMMAPc3wsb2na{U4 zrN;@UP~@o18>)Z~c>{2>Y~m0vjHV(v&%Jv9=LYdJkqu_igJkyMK1dCM{AOnu;g~n5 z!7bH(t&l(F$JF=3#=8+-C|?y13_!cEv3<4);sW)_Yb21l+|hsj{S^Pcg!AK-e%{da zds6X+*K-a!oTrqN$ZclvJ$|}LcS37l`(VPNoTsLTF%NCX9V3mSB4zgoNUbIHi1Rg& z2R6nwJRDRYn;KM5=VIF^8&mCv5JQ7VTvK`@V5X|jbw-7IYyXq#i8AYilIC4?)}9VWxXA}uSpVmSBgCY7~9dl|~nv|#DAX;BHe>2LVr-`Zc| zJ}9w30Z7`;-A>(SW%lZIqp(1g+yD{})pYlKUGWA3XJ^FcyaH217%m3IFzsf?>=X63 z;4C<$Se^S>Xjs|hpw2J(VxO5`ke7q~*R4j`8xOiUBOhJW1<=y?MLQ`v%s_$==8zQN({^3DW;Z8J~XB`vl)z! z1EKS-na`#h+=*~;TL|_xTV*)u;c_YJ6<(RYY!o-w`Vexse z9pJq?TQ>&zP!Y^{#2-4D*Hb+Syh>(0z`OZyGr6fU+mqE>4RG_IK^lwN+8C^VK3}*j z(?ZH((BMO>j9Y0;S{@ey_aTBFX1U);6$FOx$@B2`moJikQ>CKu-T!nEH1JB`ZIG$m-0>w17RLbKxKnz1Tjs* zb}fp@nFUd`PfEYsT^w=_xEq2unK20;IknCdCRFC^T-dM!8G#kng2>UJcG5%1H8MY5 z7Y64QM#@D3ahc{09iWxl3Ta2UJ+%hKG-dbXLAr?=BvSM~OkfWIFCC`!V|Yo?q`X+o zZmZ~?NtLk)^CdDavMBdV91+8vjdhTKYW5$BnI6l;+g{P2+tc0{!mJpf*#`;-_@Z? z#ms(%eTn?ywvimg>qYi1u=mOU(h4)ErimLgWAAthAyK+@YnBKHOF}t~iyRX8Vf%o&O64eQ@9)kKr-tMUwiM6i8O8fWWEUIey1*DUvXJ(z@I5L_6V3G zv~k?y$Se-YtU&~#f7hP1%xj#{y@q15jMM#Zf{=YeF8FOujS`ACMMh4DJ+qzraF+d8 z48v>oiGhYcsqH)mDPKf9iOJo+v@%5uAntMV15}+N@xHS>J_Nimhfz#+8i3y|O348k zQ2tEM3(?ymwps@@xLS(;=9DLCC3HSOt++w~TAcf4WV2A?hE$hEycFC0^a_>j#b=!f zL2)>4wf-~zy)jX(X@SXd5%PU*UFKmCInILp*c2dj#M2Yr;;_2(R*mzxLYl!dX@>dh zIOw1ekuP8OfidCuXU#=Dk&m?F7UrLR@N^59lF7(BmC{S`H^W7a5HQo{+D&}*RCk=b zOnj3Wzocxm8}VRMhwPgfxz7c(%f0%$zj}jE8Onhui0yAPKB!rJ?%SPPtv}D$r)N{N z=>PpvkYW*kq9%S8nHk#TzkfW z2r6VAYi6Nym#WQQYFlGZGwJ(~_k5cyIEPk)6~j^X3Hi>N)bvE6SJ27_JgI`>15+T0 zB1B*UZXtJDw7wt%2(dqp{Ap#448|-?*RRL)?~XRdtBQnTGIBJPh3v&ayGOLb)9)7Z zD(H}yBQyl2>MIIl1YE8GiE4m=^)^C#c)KnCq>GI%4aF=&&~vWW4>G4k@v3E-eP>R| z4vcY~PR!v|wrZLI&M|o+8wUFDAeW#HCUHa&zH4EN%1aaDLabdJo_wt7n-me1Np_d^+=+DY)QuW6UVge{ z&=O1RnWAY*Z!I#wulJR151rc+1$vOe|L@#o35J`0`$ro^!24fVRayUAtE#4qttu1J z<0Bc8C%SbUk`8^ZKOUqYa(JMBVjx+`uJEQmSnVFBVYVK@#uf?!<1}fS`@X&^ZY% zq0uePA^YS;P(XguuxZrN_E$*QZvPEkchxZRLCR=3p=?c661cQI~v+YwFHF5vi$yR%5kd zyk=Bx`aW3WFw;$i^$s>}f-`juuO~iP^WcJ;Ao<2+S1E+QH2{nB8o6u|t~FHijLwa$ zr^#*_UoPIKJ9&1IK^V{27KBU;yjL!2w+W916*@?(AgDFM+1jO?4++kW$a*cFC zz%DrOqS~No%FZTv2|p|am2>#0T^Oa_E?x=FB0)dh(gQ3}K1yuV>I+fvhM_u#K9fiXd%Xktje67HMNZkx+l*ylH(r znQ))0xy-C?hPxk-B^8wsDe|lU%c6u=`_lG}kp@Hjz=ZN|7wKLCm7Wnc6m=vwo7-Q? zitt>faU_g-PA9&mjQt@r-4~nB$ir}oiYosw>~P>&C^VSvUD-fXlbddBMgNj(UImV= zQ#tXnBvV}7$>X3UcW^=aZz7>0hMu~d@p7wP&?wJ`RGOS2}@ zq(Y5<2p4~1{^ehVs!OJJ;RV6qo!{ImVySqi4F^($5TD$N6En!@*BU|h-Ls$t%YI+` zMInFN%8ZB9A0$S97n7y6GV5ZVpN^kT8VF#_>O(X5#s7oojc~hI2QSfUn4Xr$>4FAr zeVn{&;wdrwNyrce$;KH_?1`zQt|6|}EJdX8vi}U1wsp-kSmS3@Vf>DyEQEhp2#z)5 z1Q2*QH|M#tQ@ZWF9SSm9FXKq#g45EHVRl8uoWysO`6|--L)+^`qMN12Ca+x4s)GQJ z7|I<+T3>9ft}S>Rlx?maTv=Elwd6Y{iCYoUJflLM1|S#p4PjMx3(UPGo#}9j;aYLP z`_FdQbFgxe<9i2M>JSjf{x9BlfH&c|{J$sw#edKL2*63bG7X|R201Qmsc!R|B z{r>PvKL+%FusRs?d;c%2)|#!jUgWR`70IV0?pWdjG7s4=lQ!MAa8xpSo}_rZ5?y(%Xd5Q_ z6AHA8P?VB*X2gp9aDn{J>+SC;iKU5IwWKOL&r}z~8x?`ATYD#) zuk&Mj6AzoOkgt!bxZ|KM1(mA}E6<$i&)1Lz)rsyW`-jV{o`(hdpWD)ePF{Dbxi`W3 zt~+)Jyp64`jI|veMz)6TJ{m5+-2q{=3W0v3IJv;Pp`{S?W3v>D6!p;5#SvX%dfIH@ zZw^rhP@F|5=yH+uQeR8kra$$D-H^3#rJ2&wABB~OXW1O73ZElyKPTXhk;fgOvKvTr z8o|)loa=+0c}i z`6gq=#!+tA9(?KS#Z)w`hI4@EKTx?fci71T6N(X=e5}7eVB-8^8l)NF5A>m9m}ozazw#T`KI_BXvhB`!n9H znyFVYmw>8I0j13IjN>|!LS|828 zf_iFQK)DBM~C$KH?ip4*QRJc0iOz`CNj8O8{4!9@}Pw zYEe7P#f+8KEIeev$!pf*XU^bADS_|@RKY6ihTofwq8R<&cEB&S?`A;c5YI7ZBt8XS zBWzoF&EW9f5y>mwE-{hGN;Z;QjeWE$wS17TQ6RYHayKZSeOIu6QqMgJ%ZJf$o=bH6TL63t=+3oK@W8 zM~3ipcdW5%$hQXO2jebz!*{i{^I|U&H*I1xxGHIJhEZ4Akd~QWzUC@U& zi!Yc^YzQhzp%NOguj7`TNNMHikQVoW5@SFMq}5fm*?BHb-HJ3 zjqVf6+x#Qle^^5y5N&W>*vV>Wr)}qnaOa7DUu*254{)qjWODhpw}>)-Ltb z?QZP*TQJ^fHr^W`ec}|<$sYoe3`y*;L=qO>_;iLN#5($KN-?tyF(|I`d~j}0JU)9i zzN%~?td6K5h$WKaLKVlL+3bn)1-GTue@f$93R+QeT&8Zh_6>uEt;-he?Ae7UK6*q( z;1>zY>&w{xF&Zqiby&d2)UUVamjp6#sQyo9=K;%OFwbIJFY%9lD;8BID#_GZomv3=^4>b5Z>0*LtXSZ@HZ8M89#=C0P5u zn+)OlHyqEskYmAdgxN6ldDm192awo$YT%rZ%UjqBoRO2f2X$Vo>oE?(9o^?~6jJI& zaL6AZ@i+NQAk2L|ff;5!&TDHg%EHcW?TU1-lIG0oRelPYR5r;I5r;Q3_(>-b6|qo;${p%b2SrUj=56b@$^|>PjhV>2Rsx40;W|J{D{4%N-E? zqv_@p3eOdLk7Ad1bNLhH&dJ5^A*GU1XVWCJ95Ez>YCSf^)tkqPl+SslZmEUy%Lwca z4!Q7}b5e95LOXj#l60=6i679^Q02aQuGT)csB1$9jvP|`<7=uAi%CFv35&oGp3 z#TW}+SNm3!Q9B%!VI&84L?-t#!2$#w!Zz|6_-ID%65{k#Pe^wgu=r>jn-A^3=Ra@K z-OJAha|pXjP|gWkUMYB`JO?kYR{Xjvb8zk2;b^Lj?Q2#qO>nYwoKheEdU^#oB_EQx zR!HLuekC=R-Ci#Qe^9*fltbD?`^;Q$6Cpb-zF-{0(wL=AG5g+j$~r;DehMR=+#?9g zr{ysDAZ&vjZbI#2-W~N0%s6z<7raT8keoI7lg9VciS6 zsJjq;y(}~xFW0E)QV{etgd(d#fu7Hd@QEJ+Yvyv0U+hpoB=mYX-I7A5eOwh~$;~DPY&AbEy)Ewj6Or33EY}pFGgmzX zvOOY&B8;d!`F5}Hy^FylQ}YHk5r<|Z)10FkzQRXLDh8QBq+Uf3pS#9ZcI{jV8%(7o zPM7UjrGN=yzeQB8EXi%OCj6X8@zzQu<)h=CLZ7SNfKPVYwj=XdDp{oT`70?S%qZAp z*-U2LgyW0Gh%6x4BuFWu-L*sS7D@QhkjxFZX5qI&hKDsnwG3q*Y)b=Qc7oyw3TH}{ zm}tvN$;y;kXiKd-c9j$^JTzV4Bj?YfnA@H%J3n+#KL;o zS^F01N3Sqk6y}s7lZsLy)xG05_P%ouy0>=T=Nr8clU}_yCu&_TGG#8i`Zd!#XOO|l z$GppvEwGbUNpRaAQ)?j}C2XsWuc+QH|9*H*bmdD=LsjX=`xb zehj9y&Mu%3?+aqB&+l@>d65EaBGoz6>th5V0dNMb(%V&dd z^pgx@a6GaNpUN8rj)^7@Ov@5)SG=Oc-qi2)nz7nyWjyTLXYojE5&&`LN~kmf$0vJ+ z4+?y&Fmh+4)L-+$rPK!wHJfpRjw%EveEe4MNa*0`)>){8qQzq*QR3c>0}$mVuAGui zcB<`7&(XsaM7kHPP@q9+IHo&Y^t1rI0*Qk92KC0-I&w}Y;HYMbwVa2})RR$r)AjCn zsp3(gcX48-G53AEN>0F4bNUQOCK~t@c?abZgLmB*?WMN>PY?Tj`h z=dtwIjP||C<|6M~%puDSmJ}RA1H9_9j2VwIFDdXk>0@9K3)8w^$yQO%V+WUsi3sa=9AHe>0W9C$QaCQsjFFJ3sFWD0iVgh%nhSDP_s3-37h;cn=J zzC0JOm{E@qUsUF(cS>d%KYqbsRiTMwBUNdI@pL7tK`s2HFJHD?xO7=k02`b@bv{;@ zS=D=UsD@dp^2_6VhVCTFE?UA&krK%K+0H+Ln`c&RSV zq-&NdCg;$;Typ|AfY3I0o`$5MEvHX^2fmr1fUHTb(<YHM_2)P)#BC)(3t#=d@luYhPM+jIZEZWqoqJ<;4j|Att_pRGoADI9gDd z6)xD<5i9lL8z}ME_-*e2iQN8s$Le8R*{zt`6LQpnKK1U)p_D+hTZB%;3;9#LX&)m5 z9PCLGtbw@DPsR4wKQWd)Nh6Jd1BZ&lS+9cd=1g`O)J)L{&X$q7wbV!+EzhAtB-+TrBf9%*b6kr7aGxaE~2l(q0xrbkUxF zXVZ4{MyN#TR=ILUxHS7r=0yK-73Eq^Tg1k8*nQVeWf3#<*?cM-nu)Il%B3{)T4Mwg zixcyrKHH@(@_D-V8oH!0TX$h=2dHr?E!!jQv$ZV^kd)t+Z-%~RuV|}bbR7NcP+M4c z5ZW_gaB_e1EB?4wVH4aD)z}yk21We98Ju<4AP^Ub9^`D{t{W!~Ce5UD0C@qC@H@6OFw&jdLSg_XX?uO1QAvEYDSX zy!2p8CEi@mWg9Ck$Nyk>#wBF-5&e^te&Pn-0?X&4X+?c2e(&*ttxAj`^KV!ULs?}#$}A zH4!~Q2ywZPQk$=@jd{j?Sf;syd6N>0TiaJMW@T1^sL6?JAu6GH{z@>0(e&llAG?>P z>uz}27M@v`jZn>vX~u2ac__Vz=0`rq`F+m*IwTevQ0g8?b&>?2`A2PhY3-GsMms zZ60oMlnpyaV1DhmfRlJv;z;W8>){ne^{Ap5HsY2icJh*u`u$llkE%o!;%3V zI4q0gk0-M)ui{1rxP@1}aS8LFuCCy5^xAbwe`6VAwP=`xPfKy``OE=*H3>V|b6j|n zvAWCp>U0lXU9N5I@a`o8NtF3(W=RKOMRzGo;VQj}OA17}#8kDKx_f+umnDrd<#s^;k3Fqro*#!$Lyo=hb;; zQ!79$85Vnojq7?gG+9rGNVT7gDHpmbrRgMp8_Q^uR(Idy^kbmx^@`jLFn(zqgi%M( zn(wZgoL9Le>S@S14nI$3xP4A7jMb26?w%(E&ij3cp5j^L4itN!_%`;lbvjg2%Bg;?jR#EG0i?f8`9IWR zi)eX*KnR86`sQ!}TN_2Uwnsp}RxGJ{i(eG`i2=dK3A%~IwUG03J$8Pb^-lVD+c=#v znAVwoFl>l>Xyjd$iyr*}yvEi1fyrLS0*gGP;u7+`j0h0hw z2n1?$NLBjr@Yf9+MDy1ZDdAh^Ipl zFA5j}CC5LDhY0=|Kmp_p??-7VD)B%tfY;0mz{Wf%CJz5I;5VoJQ<45ZkFsJBZyBI7 z1JW7(R_J??uH*bsArlivbakqBT)Sih1A(+6eq{*box*THmmw}&ZdV29Vf>pO^LD2& z?9pX3R~qYzlYl@AOuy=}V}A<64qe8y%0;9+F$mN~|0^Td`xJ&Px(q$6zJW`Ctqg#K zTILKu3H`x|O-yXiWrToQo{t0a6M(h^l~HvX;|{tE9#)mA&FF)Yz#I1y>;{^xog6myCm7U(jDIIi1O z06P3$2iqT=!hoU6a4D^GJ0b#s+~6@KJq^^yjxFU0K3Yg z$3R^G^fQACgz@LL9as*89s_kvz|V{aSf?<66$0onP-paiW-wx(!T{#?(PN;_+5gPQ zI*kE@_UJKC$7z0MkWrkX2QX@b9s_lF;$IA)N& Date: Mon, 27 Apr 2015 15:58:21 +0200 Subject: [PATCH 150/236] Docs: Document `http.max_header_size` Closes #10752 --- docs/reference/modules/http.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 72f4993d139..9fb441a646c 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -42,6 +42,9 @@ to `100mb` |`http.max_initial_line_length` |The max length of an HTTP URL. Defaults to `4kb` +|`http.max_header_size` | The max size of allowed headers. Defaults to `8kB` + + |`http.compression` |Support for compression when possible (with Accept-Encoding). Defaults to `false`. From d164526d2735a04960f224a8e40912e1fdf91570 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 27 Apr 2015 17:16:23 +0200 Subject: [PATCH 151/236] Remove `_shutdown` API Thsi commit removes the `_shutdown` API entirely without any replacement. Nodes should be managed from the operating system not via REST APIs --- docs/reference/cluster.asciidoc | 2 - .../reference/cluster/nodes-shutdown.asciidoc | 57 ---- docs/reference/migration/migrate_2_0.asciidoc | 5 + docs/reference/setup/upgrade.asciidoc | 10 - rest-api-spec/api/nodes.shutdown.json | 27 -- .../elasticsearch/action/ActionModule.java | 3 - .../node/shutdown/NodesShutdownAction.java | 45 --- .../node/shutdown/NodesShutdownRequest.java | 110 ------- .../shutdown/NodesShutdownRequestBuilder.java | 72 ----- .../node/shutdown/NodesShutdownResponse.java | 73 ----- .../TransportNodesShutdownAction.java | 306 ------------------ .../client/ClusterAdminClient.java | 26 -- .../org/elasticsearch/client/Requests.java | 19 -- .../support/AbstractClusterAdminClient.java | 19 -- .../rest/action/RestActionModule.java | 2 - .../shutdown/RestNodesShutdownAction.java | 73 ----- .../client/AbstractClientHeadersTests.java | 5 +- ...anyNodesManyIndicesRecoveryStressTest.java | 4 +- .../org/elasticsearch/test/ExternalNode.java | 8 - 19 files changed, 9 insertions(+), 857 deletions(-) delete mode 100644 docs/reference/cluster/nodes-shutdown.asciidoc delete mode 100644 rest-api-spec/api/nodes.shutdown.json delete mode 100644 src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java delete mode 100644 src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequest.java delete mode 100644 src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java delete mode 100644 src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java delete mode 100644 src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java delete mode 100644 src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index 088de8d2ccf..4c823119daf 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -46,5 +46,3 @@ include::cluster/nodes-stats.asciidoc[] include::cluster/nodes-info.asciidoc[] include::cluster/nodes-hot-threads.asciidoc[] - -include::cluster/nodes-shutdown.asciidoc[] diff --git a/docs/reference/cluster/nodes-shutdown.asciidoc b/docs/reference/cluster/nodes-shutdown.asciidoc deleted file mode 100644 index 65030a384c8..00000000000 --- a/docs/reference/cluster/nodes-shutdown.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -[[cluster-nodes-shutdown]] -== Nodes Shutdown - -The nodes shutdown API allows to shutdown one or more (or all) nodes in -the cluster. Here is an example of shutting the `_local` node the -request is directed to: - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown' --------------------------------------------------- - -Specific node(s) can be shutdown as well using their respective node ids -(or other selective options as explained -<> .): - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/nodeId1,nodeId2/_shutdown' --------------------------------------------------- - -The master (of the cluster) can also be shutdown using: - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_master/_shutdown' --------------------------------------------------- - -Finally, all nodes can be shutdown using one of the options below: - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_shutdown' - -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_shutdown' - -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_all/_shutdown' --------------------------------------------------- - -[float] -[[delay]] -=== Delay - -By default, the shutdown will be executed after a 1 second delay (`1s`). -The delay can be customized by setting the `delay` parameter in a time -value format. For example: - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown?delay=10s' --------------------------------------------------- - -[float] -=== Disable Shutdown - -The shutdown API can be disabled by setting `action.disable_shutdown` in -the node configuration. diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 4d9ca005706..4988d05006c 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -405,3 +405,8 @@ The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close` are supported by the snapshot and restore operations. These parameters have been replaced by a single `expand_wildcards` parameter. See <> for more. +=== `_shutdown` API + +The `_shutdown` API has been removed without a replacement. Nodes should be managed via operating +systems and the provided start/stop scripts. + diff --git a/docs/reference/setup/upgrade.asciidoc b/docs/reference/setup/upgrade.asciidoc index d2a33d9e477..3a87a049563 100644 --- a/docs/reference/setup/upgrade.asciidoc +++ b/docs/reference/setup/upgrade.asciidoc @@ -94,11 +94,6 @@ This syntax applies to Elasticsearch 1.0 and later: * Shut down a single node within the cluster. -[source,sh] --------------------------------------------- -curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown' --------------------------------------------- - * Confirm that all shards are correctly reallocated to the remaining running nodes. * Upgrade the stopped node. To upgrade using a zip or compressed tarball from elasticsearch.org: @@ -149,11 +144,6 @@ This syntax is from versions prior to 1.0: -------------------------------------------------- * Stop all Elasticsearch services on all nodes in the cluster. -[source,sh] ------------------------------------------------------- - curl -XPOST 'http://localhost:9200/_shutdown' ------------------------------------------------------- - * On the first node to be upgraded, extract the archive or install the new package as described above in the Rolling Upgrades section. Repeat for all nodes. * After upgrading Elasticsearch on all nodes is complete, the cluster can be started by starting each node individually. diff --git a/rest-api-spec/api/nodes.shutdown.json b/rest-api-spec/api/nodes.shutdown.json deleted file mode 100644 index 6c8bc42f0f4..00000000000 --- a/rest-api-spec/api/nodes.shutdown.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "nodes.shutdown": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-shutdown.html", - "methods": ["POST"], - "url": { - "path": "/_shutdown", - "paths": ["/_shutdown", "/_cluster/nodes/_shutdown", "/_cluster/nodes/{node_id}/_shutdown"], - "parts": { - "node_id": { - "type" : "list", - "description" : "A comma-separated list of node IDs or names to perform the operation on; use `_local` to perform the operation on the node you're connected to, leave empty to perform the operation on all nodes" - } - }, - "params": { - "delay": { - "type" : "time", - "description" : "Set the delay for the operation (default: 1s)" - }, - "exit": { - "type" : "boolean", - "description" : "Exit the JVM as well (default: true)" - } - } - }, - "body": null - } -} diff --git a/src/main/java/org/elasticsearch/action/ActionModule.java b/src/main/java/org/elasticsearch/action/ActionModule.java index 49d841567b4..7e596dd5a93 100644 --- a/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/src/main/java/org/elasticsearch/action/ActionModule.java @@ -28,8 +28,6 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotT import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.TransportNodesShutdownAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; @@ -222,7 +220,6 @@ public class ActionModule extends AbstractModule { bind(ActionFilters.class).asEagerSingleton(); registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); - registerAction(NodesShutdownAction.INSTANCE, TransportNodesShutdownAction.class); registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java deleted file mode 100644 index 8906d658c47..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.shutdown; - -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; - -/** - */ -public class NodesShutdownAction extends ClusterAction { - - public static final NodesShutdownAction INSTANCE = new NodesShutdownAction(); - public static final String NAME = "cluster:admin/nodes/shutdown"; - - private NodesShutdownAction() { - super(NAME); - } - - @Override - public NodesShutdownResponse newResponse() { - return new NodesShutdownResponse(); - } - - @Override - public NodesShutdownRequestBuilder newRequestBuilder(ClusterAdminClient client) { - return new NodesShutdownRequestBuilder(client); - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequest.java deleted file mode 100644 index aeaf5754f99..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.shutdown; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.master.MasterNodeOperationRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; - -import java.io.IOException; - -import static org.elasticsearch.common.unit.TimeValue.readTimeValue; - -/** - * - */ -public class NodesShutdownRequest extends MasterNodeOperationRequest { - - String[] nodesIds = Strings.EMPTY_ARRAY; - - TimeValue delay = TimeValue.timeValueSeconds(1); - - boolean exit = true; - - NodesShutdownRequest() { - } - - public NodesShutdownRequest(String... nodesIds) { - this.nodesIds = nodesIds; - } - - public NodesShutdownRequest nodesIds(String... nodesIds) { - this.nodesIds = nodesIds; - return this; - } - - /** - * The delay for the shutdown to occur. Defaults to 1s. - */ - public NodesShutdownRequest delay(TimeValue delay) { - this.delay = delay; - return this; - } - - public TimeValue delay() { - return this.delay; - } - - /** - * The delay for the shutdown to occur. Defaults to 1s. - */ - public NodesShutdownRequest delay(String delay) { - return delay(TimeValue.parseTimeValue(delay, null)); - } - - /** - * Should the JVM be exited as well or not. Defaults to true. - */ - public NodesShutdownRequest exit(boolean exit) { - this.exit = exit; - return this; - } - - /** - * Should the JVM be exited as well or not. Defaults to true. - */ - public boolean exit() { - return exit; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - delay = readTimeValue(in); - nodesIds = in.readStringArray(); - exit = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - delay.writeTo(out); - out.writeStringArrayNullable(nodesIds); - out.writeBoolean(exit); - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java deleted file mode 100644 index fc0f767f02c..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.shutdown; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.common.unit.TimeValue; - -/** - * - */ -public class NodesShutdownRequestBuilder extends MasterNodeOperationRequestBuilder { - - public NodesShutdownRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new NodesShutdownRequest()); - } - - /** - * The nodes ids to restart. - */ - public NodesShutdownRequestBuilder setNodesIds(String... nodesIds) { - request.nodesIds(nodesIds); - return this; - } - - /** - * The delay for the restart to occur. Defaults to 1s. - */ - public NodesShutdownRequestBuilder setDelay(TimeValue delay) { - request.delay(delay); - return this; - } - - /** - * The delay for the restart to occur. Defaults to 1s. - */ - public NodesShutdownRequestBuilder setDelay(String delay) { - request.delay(delay); - return this; - } - - /** - * Should the JVM be exited as well or not. Defaults to true. - */ - public NodesShutdownRequestBuilder setExit(boolean exit) { - request.exit(exit); - return this; - } - - @Override - protected void doExecute(ActionListener listener) { - client.nodesShutdown(request, listener); - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java deleted file mode 100644 index 7375038ddb6..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.shutdown; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * - */ -public class NodesShutdownResponse extends ActionResponse { - - private ClusterName clusterName; - private DiscoveryNode[] nodes; - - NodesShutdownResponse() { - } - - public NodesShutdownResponse(ClusterName clusterName, DiscoveryNode[] nodes) { - this.clusterName = clusterName; - this.nodes = nodes; - } - - public ClusterName getClusterName() { - return this.clusterName; - } - - public DiscoveryNode[] getNodes() { - return this.nodes; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - clusterName = ClusterName.readClusterName(in); - nodes = new DiscoveryNode[in.readVInt()]; - for (int i = 0; i < nodes.length; i++) { - nodes[i] = DiscoveryNode.readNode(in); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - clusterName.writeTo(out); - out.writeVInt(nodes.length); - for (DiscoveryNode node : nodes) { - node.writeTo(out); - } - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java deleted file mode 100644 index 422deb29b41..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.shutdown; - -import com.carrotsearch.hppc.ObjectOpenHashSet; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.Node; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; - -import java.io.IOException; -import java.util.concurrent.CountDownLatch; - -/** - * - */ -public class TransportNodesShutdownAction extends TransportMasterNodeOperationAction { - - public static final String SHUTDOWN_NODE_ACTION_NAME = NodesShutdownAction.NAME + "[n]"; - - private final Node node; - private final ClusterName clusterName; - private final boolean disabled; - private final TimeValue delay; - - @Inject - public TransportNodesShutdownAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - Node node, ClusterName clusterName, ActionFilters actionFilters) { - super(settings, NodesShutdownAction.NAME, transportService, clusterService, threadPool, actionFilters, NodesShutdownRequest.class); - this.node = node; - this.clusterName = clusterName; - this.disabled = settings.getAsBoolean("action.disable_shutdown", this.settings.getAsBoolean("action.admin.cluster.node.shutdown.disabled", false)); - this.delay = this.settings.getAsTime("action.admin.cluster.node.shutdown.delay", TimeValue.timeValueMillis(200)); - - this.transportService.registerRequestHandler(SHUTDOWN_NODE_ACTION_NAME, NodeShutdownRequest.class, ThreadPool.Names.SAME, new NodeShutdownRequestHandler()); - } - - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - - @Override - protected ClusterBlockException checkBlock(NodesShutdownRequest request, ClusterState state) { - // Stopping a node impacts the cluster state, so we check for the METADATA_WRITE block here - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); - } - - @Override - protected NodesShutdownResponse newResponse() { - return new NodesShutdownResponse(); - } - - @Override - protected void processBeforeDelegationToMaster(NodesShutdownRequest request, ClusterState state) { - String[] nodesIds = request.nodesIds; - if (nodesIds != null) { - for (int i = 0; i < nodesIds.length; i++) { - // replace the _local one, since it looses its meaning when going over to the master... - if ("_local".equals(nodesIds[i])) { - nodesIds[i] = state.nodes().localNodeId(); - } - } - } - } - - @Override - protected void masterOperation(final NodesShutdownRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { - if (disabled) { - throw new ElasticsearchIllegalStateException("Shutdown is disabled"); - } - final ObjectOpenHashSet nodes = new ObjectOpenHashSet<>(); - if (state.nodes().isAllNodes(request.nodesIds)) { - logger.info("[cluster_shutdown]: requested, shutting down in [{}]", request.delay); - nodes.addAll(state.nodes().dataNodes().values()); - nodes.addAll(state.nodes().masterNodes().values()); - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(request.delay.millis()); - } catch (InterruptedException e) { - // ignore - } - // first, stop the cluster service - logger.trace("[cluster_shutdown]: stopping the cluster service so no re-routing will occur"); - clusterService.stop(); - - final CountDownLatch latch = new CountDownLatch(nodes.size()); - for (ObjectCursor cursor : nodes) { - final DiscoveryNode node = cursor.value; - if (node.id().equals(state.nodes().masterNodeId())) { - // don't shutdown the master yet... - latch.countDown(); - } else { - logger.trace("[cluster_shutdown]: sending shutdown request to [{}]", node); - transportService.sendRequest(node, SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty response) { - logger.trace("[cluster_shutdown]: received shutdown response from [{}]", node); - latch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - logger.warn("[cluster_shutdown]: received failed shutdown response from [{}]", exp, node); - latch.countDown(); - } - }); - } - } - try { - latch.await(); - } catch (InterruptedException e) { - // ignore - } - logger.info("[cluster_shutdown]: done shutting down all nodes except master, proceeding to master"); - - // now, kill the master - logger.trace("[cluster_shutdown]: shutting down the master [{}]", state.nodes().masterNode()); - transportService.sendRequest(state.nodes().masterNode(), SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty response) { - logger.trace("[cluster_shutdown]: received shutdown response from master"); - } - - @Override - public void handleException(TransportException exp) { - logger.warn("[cluster_shutdown]: received failed shutdown response master", exp); - } - }); - } - }); - t.start(); - } else { - final String[] nodesIds = state.nodes().resolveNodesIds(request.nodesIds); - logger.info("[partial_cluster_shutdown]: requested, shutting down [{}] in [{}]", nodesIds, request.delay); - - for (String nodeId : nodesIds) { - final DiscoveryNode node = state.nodes().get(nodeId); - if (node != null) { - nodes.add(node); - } - } - - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(request.delay.millis()); - } catch (InterruptedException e) { - // ignore - } - - final CountDownLatch latch = new CountDownLatch(nodesIds.length); - for (String nodeId : nodesIds) { - final DiscoveryNode node = state.nodes().get(nodeId); - if (node == null) { - logger.warn("[partial_cluster_shutdown]: no node to shutdown for node_id [{}]", nodeId); - latch.countDown(); - continue; - } - - logger.trace("[partial_cluster_shutdown]: sending shutdown request to [{}]", node); - transportService.sendRequest(node, SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty response) { - logger.trace("[partial_cluster_shutdown]: received shutdown response from [{}]", node); - latch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - logger.warn("[partial_cluster_shutdown]: received failed shutdown response from [{}]", exp, node); - latch.countDown(); - } - }); - } - - try { - latch.await(); - } catch (InterruptedException e) { - // ignore - } - - logger.info("[partial_cluster_shutdown]: done shutting down [{}]", ((Object) nodesIds)); - } - }); - t.start(); - } - listener.onResponse(new NodesShutdownResponse(clusterName, nodes.toArray(DiscoveryNode.class))); - } - - private class NodeShutdownRequestHandler implements TransportRequestHandler { - - @Override - public void messageReceived(final NodeShutdownRequest request, TransportChannel channel) throws Exception { - if (disabled) { - throw new ElasticsearchIllegalStateException("Shutdown is disabled"); - } - logger.info("shutting down in [{}]", delay); - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(delay.millis()); - } catch (InterruptedException e) { - // ignore - } - if (!request.exit) { - logger.info("initiating requested shutdown (no exit)..."); - try { - node.close(); - } catch (Exception e) { - logger.warn("Failed to shutdown", e); - } - return; - } - boolean shutdownWithWrapper = false; - if (System.getProperty("elasticsearch-service") != null) { - try { - Class wrapperManager = settings.getClassLoader().loadClass("org.tanukisoftware.wrapper.WrapperManager"); - logger.info("initiating requested shutdown (using service)"); - wrapperManager.getMethod("stopAndReturn", int.class).invoke(null, 0); - shutdownWithWrapper = true; - } catch (Throwable e) { - logger.error("failed to initial shutdown on service wrapper", e); - } - } - if (!shutdownWithWrapper) { - logger.info("initiating requested shutdown..."); - try { - node.close(); - } catch (Exception e) { - logger.warn("Failed to shutdown", e); - } finally { - // make sure we initiate the shutdown hooks, so the Bootstrap#main thread will exit - System.exit(0); - } - } - } - }); - t.start(); - - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - static class NodeShutdownRequest extends TransportRequest { - - boolean exit; - - NodeShutdownRequest() { - } - - NodeShutdownRequest(NodesShutdownRequest request) { - super(request); - this.exit = request.exit(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - exit = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(exit); - } - } -} diff --git a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index 87ade248cd4..0169151fc93 100644 --- a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -29,9 +29,6 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRes import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -248,29 +245,6 @@ public interface ClusterAdminClient extends ElasticsearchClient nodesShutdown(NodesShutdownRequest request); - - /** - * Shutdown nodes in the cluster. - * - * @param request The nodes shutdown request - * @param listener A listener to be notified with a result - * @see org.elasticsearch.client.Requests#nodesShutdownRequest(String...) - */ - void nodesShutdown(NodesShutdownRequest request, ActionListener listener); - - /** - * Shutdown nodes in the cluster. - */ - NodesShutdownRequestBuilder prepareNodesShutdown(String... nodesIds); - /** * Returns list of shards the given search would be executed on. */ diff --git a/src/main/java/org/elasticsearch/client/Requests.java b/src/main/java/org/elasticsearch/client/Requests.java index d8717ffb095..6fad2682a2a 100644 --- a/src/main/java/org/elasticsearch/client/Requests.java +++ b/src/main/java/org/elasticsearch/client/Requests.java @@ -21,7 +21,6 @@ package org.elasticsearch.client; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ -423,24 +422,6 @@ public class Requests { return new ClusterStatsRequest(); } - /** - * Shuts down all nodes in the cluster. - */ - public static NodesShutdownRequest nodesShutdownRequest() { - return new NodesShutdownRequest(); - } - - /** - * Shuts down the specified nodes in the cluster. - * - * @param nodesIds The nodes ids to get the status for - * @return The nodes info request - * @see org.elasticsearch.client.ClusterAdminClient#nodesShutdown(org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest) - */ - public static NodesShutdownRequest nodesShutdownRequest(String... nodesIds) { - return new NodesShutdownRequest(nodesIds); - } - /** * Registers snapshot repository * diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java index 1ff8b42d7d4..f4a6e58e49a 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java @@ -33,10 +33,6 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; @@ -233,21 +229,6 @@ public abstract class AbstractClusterAdminClient implements ClusterAdminClient { return new NodesHotThreadsRequestBuilder(this).setNodesIds(nodesIds); } - @Override - public ActionFuture nodesShutdown(final NodesShutdownRequest request) { - return execute(NodesShutdownAction.INSTANCE, request); - } - - @Override - public void nodesShutdown(final NodesShutdownRequest request, final ActionListener listener) { - execute(NodesShutdownAction.INSTANCE, request, listener); - } - - @Override - public NodesShutdownRequestBuilder prepareNodesShutdown(String... nodesIds) { - return new NodesShutdownRequestBuilder(this).setNodesIds(nodesIds); - } - @Override public ActionFuture searchShards(final ClusterSearchShardsRequest request) { return execute(ClusterSearchShardsAction.INSTANCE, request); diff --git a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java index b8724b8a75b..874a91f3af2 100644 --- a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java @@ -28,7 +28,6 @@ import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerif import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; -import org.elasticsearch.rest.action.admin.cluster.node.shutdown.RestNodesShutdownAction; import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; @@ -132,7 +131,6 @@ public class RestActionModule extends AbstractModule { bind(RestNodesInfoAction.class).asEagerSingleton(); bind(RestNodesStatsAction.class).asEagerSingleton(); bind(RestNodesHotThreadsAction.class).asEagerSingleton(); - bind(RestNodesShutdownAction.class).asEagerSingleton(); bind(RestClusterStatsAction.class).asEagerSingleton(); bind(RestClusterStateAction.class).asEagerSingleton(); bind(RestClusterHealthAction.class).asEagerSingleton(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java deleted file mode 100644 index 950ce9083af..00000000000 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action.admin.cluster.node.shutdown; - -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.*; -import org.elasticsearch.rest.action.support.RestBuilderListener; - -/** - * - */ -public class RestNodesShutdownAction extends BaseRestHandler { - - @Inject - public RestNodesShutdownAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); - - controller.registerHandler(RestRequest.Method.POST, "/_shutdown", this); - controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/_shutdown", this); - controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/{nodeId}/_shutdown", this); - } - - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - NodesShutdownRequest nodesShutdownRequest = new NodesShutdownRequest(nodesIds); - nodesShutdownRequest.listenerThreaded(false); - nodesShutdownRequest.delay(request.paramAsTime("delay", nodesShutdownRequest.delay())); - nodesShutdownRequest.exit(request.paramAsBoolean("exit", nodesShutdownRequest.exit())); - client.admin().cluster().nodesShutdown(nodesShutdownRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(NodesShutdownResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - builder.field("cluster_name", response.getClusterName().value()); - - builder.startObject("nodes"); - for (DiscoveryNode node : response.getNodes()) { - builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("name", node.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.endObject(); - } - builder.endObject(); - - builder.endObject(); - return new BytesRestResponse(RestStatus.OK, builder); - } - }); - } -} diff --git a/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java b/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java index 022eb815f2c..05561a9dec0 100644 --- a/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java +++ b/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java @@ -23,8 +23,6 @@ import com.google.common.base.Throwables; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.GenericAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; @@ -80,7 +78,7 @@ public abstract class AbstractClientHeadersTests extends ElasticsearchTestCase { IndexAction.INSTANCE, // cluster admin actions - ClusterStatsAction.INSTANCE, CreateSnapshotAction.INSTANCE, NodesShutdownAction.INSTANCE, ClusterRerouteAction.INSTANCE, + ClusterStatsAction.INSTANCE, CreateSnapshotAction.INSTANCE, ClusterRerouteAction.INSTANCE, // indices admin actions CreateIndexAction.INSTANCE, IndicesStatsAction.INSTANCE, ClearIndicesCacheAction.INSTANCE, FlushAction.INSTANCE @@ -119,7 +117,6 @@ public abstract class AbstractClientHeadersTests extends ElasticsearchTestCase { // choosing arbitrary cluster admin actions to test client.admin().cluster().prepareClusterStats().execute().addListener(new AssertingActionListener(ClusterStatsAction.NAME)); client.admin().cluster().prepareCreateSnapshot("repo", "bck").execute().addListener(new AssertingActionListener(CreateSnapshotAction.NAME)); - client.admin().cluster().prepareNodesShutdown("n1", "n2").execute().addListener(new AssertingActionListener(NodesShutdownAction.NAME)); client.admin().cluster().prepareReroute().execute().addListener(new AssertingActionListener(ClusterRerouteAction.NAME)); // choosing arbitrary indices admin actions to test diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java index 45cbd022f1c..57bcd69b91c 100644 --- a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java +++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java @@ -72,7 +72,9 @@ public class ManyNodesManyIndicesRecoveryStressTest { } System.out.println("--> Initiating shutdown"); - client.admin().cluster().prepareNodesShutdown().setExit(false).execute().actionGet(); + for (Node node : nodes) { + node.close(); + } System.out.println("--> Waiting for all nodes to be closed..."); while (true) { diff --git a/src/test/java/org/elasticsearch/test/ExternalNode.java b/src/test/java/org/elasticsearch/test/ExternalNode.java index 705f07d3e2a..f304b71cd4b 100644 --- a/src/test/java/org/elasticsearch/test/ExternalNode.java +++ b/src/test/java/org/elasticsearch/test/ExternalNode.java @@ -212,16 +212,8 @@ final class ExternalNode implements Closeable { } synchronized void stop() { - stop(false); - } - - synchronized void stop(boolean forceKill) { if (running()) { try { - if (forceKill == false && nodeInfo != null && random.nextBoolean()) { - // sometimes shut down gracefully - getClient().admin().cluster().prepareNodesShutdown(this.nodeInfo.getNode().id()).setExit(random.nextBoolean()).setDelay("0s").get(); - } if (this.client != null) { client.close(); } From 31f26ec1152ce652151c70801ac7af298cd91b30 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Mon, 27 Apr 2015 17:10:03 +0100 Subject: [PATCH 152/236] review comment fixes --- .../search/aggregations/AggregatorFactories.java | 12 ------------ .../aggregations/InternalMultiBucketAggregation.java | 9 --------- .../aggregations/bucket/BucketsAggregator.java | 9 ++++----- .../bucket/histogram/InternalHistogram.java | 4 +--- .../aggregations/bucket/range/InternalRange.java | 10 ++++------ .../search/aggregations/reducers/BucketHelpers.java | 4 ++-- .../reducers/bucketmetrics/MaxBucketParser.java | 2 +- .../reducers/derivative/DerivativeParser.java | 2 +- .../aggregations/reducers/movavg/MovAvgParser.java | 2 +- .../aggregations/reducers/DateDerivativeTests.java | 1 - .../reducers/moving/avg/MovAvgTests.java | 8 ++++---- 11 files changed, 18 insertions(+), 45 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 84318096080..4bbc8ba662c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -160,14 +160,6 @@ public class AggregatorFactories { return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]), orderedReducers); } - /* - * L ← Empty list that will contain the sorted nodes while there are - * unmarked nodes do select an unmarked node n visit(n) function - * visit(node n) if n has a temporary mark then stop (not a DAG) if n is - * not marked (i.e. has not been visited yet) then mark n temporarily - * for each node m with an edge from n to m do visit(m) mark n - * permanently unmark n temporarily add n to head of L - */ private List resolveReducerOrder(List reducerFactories, List aggFactories) { Map reducerFactoriesMap = new HashMap<>(); for (ReducerFactory factory : reducerFactories) { @@ -184,10 +176,6 @@ public class AggregatorFactories { ReducerFactory factory = unmarkedFactories.get(0); resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, temporarilyMarked, factory); } - List orderedReducerNames = new ArrayList<>(); - for (ReducerFactory reducerFactory : orderedReducers) { - orderedReducerNames.add(reducerFactory.getName()); - } return orderedReducers; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index 856b96979f2..db2ac49bf38 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -98,13 +98,4 @@ public abstract class InternalMultiBucketAggregation { - - public abstract String type(); - - public abstract A create(List buckets, A prototype); - - public abstract B createBucket(InternalAggregations aggregations, B prototype); - } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index 93fa360b113..041c15a5dc1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -43,8 +43,7 @@ public abstract class BucketsAggregator extends AggregatorBase { private final BigArrays bigArrays; private IntArray docCounts; - public BucketsAggregator(String name, AggregatorFactories factories, - AggregationContext context, Aggregator parent, + public BucketsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, context, parent, reducers, metaData); bigArrays = context.bigArrays(); @@ -113,11 +112,11 @@ public abstract class BucketsAggregator extends AggregatorBase { */ protected final InternalAggregations bucketAggregations(long bucket) throws IOException { final InternalAggregation[] aggregations = new InternalAggregation[subAggregators.length]; - for (int i = 0; i < subAggregators.length; i++) { + for (int i = 0; i < subAggregators.length; i++) { aggregations[i] = subAggregators[i].buildAggregation(bucket); - } + } return new InternalAggregations(Arrays.asList(aggregations)); - } + } /** * Utility method to build empty aggregations of the sub aggregators. diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 1fb919558d5..5c10e0d3ad4 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -234,7 +234,7 @@ public class InternalHistogram extends Inter } - public static class Factory extends InternalMultiBucketAggregation.Factory, B> { + public static class Factory { protected Factory() { } @@ -249,13 +249,11 @@ public class InternalHistogram extends Inter return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, reducers, metaData); } - @Override public InternalHistogram create(List buckets, InternalHistogram prototype) { return new InternalHistogram<>(prototype.name, buckets, prototype.order, prototype.minDocCount, prototype.emptyBucketInfo, prototype.formatter, prototype.keyed, this, prototype.reducers(), prototype.metaData); } - @Override public B createBucket(InternalAggregations aggregations, B prototype) { return (B) new Bucket(prototype.key, prototype.docCount, prototype.getKeyed(), prototype.formatter, this, aggregations); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index 1bf62b9abb6..db0ccee33e5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -225,7 +225,7 @@ public class InternalRange> extends InternalMultiBucketAggregation.Factory { + public static class Factory> { public String type() { return TYPE.name(); @@ -236,18 +236,16 @@ public class InternalRange(name, ranges, formatter, keyed, reducers, metaData); } - - public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { + public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, + @Nullable ValueFormatter formatter) { return (B) new Bucket(key, from, to, docCount, aggregations, keyed, formatter); } - @Override public R create(List ranges, R prototype) { return (R) new InternalRange<>(prototype.name, ranges, prototype.formatter, prototype.keyed, prototype.reducers(), prototype.metaData); - } + } - @Override public B createBucket(InternalAggregations aggregations, B prototype) { return (B) new Bucket(prototype.getKey(), prototype.from, prototype.to, prototype.getDocCount(), aggregations, prototype.keyed, prototype.formatter); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java index f6cdd8ca1f9..4ac1bff7cfb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java @@ -53,7 +53,7 @@ public class BucketHelpers { * "ignore": empty buckets will simply be ignored */ public static enum GapPolicy { - INSERT_ZEROS((byte) 0, "insert_zeros"), IGNORE((byte) 1, "ignore"); + INSERT_ZEROS((byte) 0, "insert_zeros"), SKIP((byte) 1, "skip"); /** * Parse a string GapPolicy into the byte enum @@ -172,7 +172,7 @@ public class BucketHelpers { switch (gapPolicy) { case INSERT_ZEROS: return 0.0; - case IGNORE: + case SKIP: default: return Double.NaN; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java index 7d773747a8d..87afd890e34 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java @@ -47,7 +47,7 @@ public class MaxBucketParser implements Reducer.Parser { String currentFieldName = null; String[] bucketsPaths = null; String format = null; - GapPolicy gapPolicy = GapPolicy.IGNORE; + GapPolicy gapPolicy = GapPolicy.SKIP; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java index cfca5c60978..3536377644b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -45,7 +45,7 @@ public class DerivativeParser implements Reducer.Parser { String currentFieldName = null; String[] bucketsPaths = null; String format = null; - GapPolicy gapPolicy = GapPolicy.IGNORE; + GapPolicy gapPolicy = GapPolicy.SKIP; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java index 5d79b1d1e7a..0850587de35 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java @@ -64,7 +64,7 @@ public class MovAvgParser implements Reducer.Parser { String[] bucketsPaths = null; String format = null; - GapPolicy gapPolicy = GapPolicy.IGNORE; + GapPolicy gapPolicy = GapPolicy.SKIP; int window = 5; Map settings = null; String model = "simple"; diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java index ede94abd973..b1ac6756f1e 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java @@ -51,7 +51,6 @@ import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @ElasticsearchIntegrationTest.SuiteScopeTest -//@AwaitsFix(bugUrl = "Fix factory selection for serialisation of Internal derivative") public class DateDerivativeTests extends ElasticsearchIntegrationTest { private DateTime date(int month, int day) { diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index cd6ac6cf490..ae0f89ae868 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -121,7 +121,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { alpha = randomDouble(); beta = randomDouble(); - gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.IGNORE : BucketHelpers.GapPolicy.INSERT_ZEROS; + gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.SKIP : BucketHelpers.GapPolicy.INSERT_ZEROS; metric = randomMetric("the_metric", VALUE_FIELD); mockHisto = ReducerHelperTests.generateHistogram(interval, numBuckets, randomDouble(), randomDouble()); @@ -172,7 +172,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { // Gaps only apply to metric values, not doc _counts if (mockBucket.count == 0 && target.equals(MetricTarget.VALUE)) { // If there was a gap in doc counts and we are ignoring, just skip this bucket - if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) { values.add(null); continue; } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { @@ -726,7 +726,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(current, notNullValue()); currentValue = current.value(); - if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) { // if we are ignoring, movavg could go up (double_exp) or stay the same (simple, linear, single_exp) assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { @@ -785,7 +785,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat(current, notNullValue()); currentValue = current.value(); - if (gapPolicy.equals(BucketHelpers.GapPolicy.IGNORE)) { + if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) { // if we are ignoring, movavg could go up (double_exp) or stay the same (simple, linear, single_exp) assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { From 935144a064484830c42fe4ab0548357a397489f0 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 27 Apr 2015 14:32:20 -0400 Subject: [PATCH 153/236] review comment fixes --- .../aggregations/reducers/movavg/models/MovAvgModel.java | 4 ---- .../reducers/movavg/models/MovAvgModelBuilder.java | 1 - 2 files changed, 5 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java index d798887c836..b244587c9b2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java @@ -63,10 +63,6 @@ public abstract class MovAvgModel { return predictions; } - // nocommit - // I don't like that it creates a new queue here - // The alternative to this is to just use `values` directly, but that would "consume" values - // and potentially change state elsewhere. Maybe ok? Collection predictionBuffer = EvictingQueue.create(values.size()); predictionBuffer.addAll(values); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java index 96bc9427de3..a8f40d474ac 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java @@ -29,5 +29,4 @@ import java.io.IOException; * average models are used by the MovAvg reducer */ public interface MovAvgModelBuilder extends ToXContent { - public abstract XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException; } From bf9739d0f0bd8fa5c4f1a78208a65b46b5268bda Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 27 Apr 2015 14:40:04 -0400 Subject: [PATCH 154/236] [DOCS] review comment fixes --- docs/reference/search/aggregations.asciidoc | 2 +- docs/reference/search/aggregations/reducer.asciidoc | 1 + .../reducer/derivative-aggregation.asciidoc | 8 +++++--- .../{reducers => reducer}/images/double_0.2beta.png | Bin .../{reducers => reducer}/images/double_0.7beta.png | Bin .../images/double_prediction_global.png | Bin .../images/double_prediction_local.png | Bin .../images/linear_100window.png | Bin .../images/linear_10window.png | Bin .../images/movavg_100window.png | Bin .../images/movavg_10window.png | Bin .../images/simple_prediction.png | Bin .../images/single_0.2alpha.png | Bin .../images/single_0.7alpha.png | Bin .../reducer/max-bucket-aggregation.asciidoc | 2 +- .../{reducers => reducer}/movavg-reducer.asciidoc | 3 ++- 16 files changed, 10 insertions(+), 6 deletions(-) rename docs/reference/search/aggregations/{reducers => reducer}/images/double_0.2beta.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/double_0.7beta.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/double_prediction_global.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/double_prediction_local.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/linear_100window.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/linear_10window.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/movavg_100window.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/movavg_10window.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/simple_prediction.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/single_0.2alpha.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/images/single_0.7alpha.png (100%) rename docs/reference/search/aggregations/{reducers => reducer}/movavg-reducer.asciidoc (99%) diff --git a/docs/reference/search/aggregations.asciidoc b/docs/reference/search/aggregations.asciidoc index 74784c110a9..7f081616375 100644 --- a/docs/reference/search/aggregations.asciidoc +++ b/docs/reference/search/aggregations.asciidoc @@ -125,7 +125,7 @@ experimental[] Reducer aggregations work on the outputs produced from other aggregations rather than from document sets, adding information to the output tree. There are many different types of reducer, each computing different information from -other aggregations, but these type can broken down into two families: +other aggregations, but these types can broken down into two families: _Parent_:: A family of reducer aggregations that is provided with the output of its parent aggregation and is able diff --git a/docs/reference/search/aggregations/reducer.asciidoc b/docs/reference/search/aggregations/reducer.asciidoc index 75ac8b9a49a..d460fd5e450 100644 --- a/docs/reference/search/aggregations/reducer.asciidoc +++ b/docs/reference/search/aggregations/reducer.asciidoc @@ -1,4 +1,5 @@ [[search-aggregations-reducer]] include::reducer/derivative.asciidoc[] +include::reducer/max-bucket-aggregation.asciidoc[] include::reducer/movavg-reducer.asciidoc[] diff --git a/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc index f1fa8b44043..8369d0c1ba0 100644 --- a/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc +++ b/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc @@ -13,7 +13,8 @@ The following snippet calculates the derivative of the total monthly `sales`: "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "interval" : "month", + "min_doc_count" : 0 }, "aggs": { "sales": { @@ -64,7 +65,7 @@ And the following may be the response: { "key_as_string": "2015/03/01 00:00:00", "key": 1425168000000, - "doc_count": 2, + "doc_count": 2, <3> "sales": { "value": 375 }, @@ -81,6 +82,7 @@ And the following may be the response: <1> No derivative for the first bucket since we need at least 2 data points to calculate the derivative <2> Derivative value units are implicitly defined by the `sales` aggregation and the parent histogram so in this case the units would be $/month assuming the `price` field has units of $. +<3> The number of documents in the bucket are represented by the `doc_count` value ==== Second Order Derivative @@ -179,7 +181,7 @@ There are a couple of reasons why the data output by the enclosing histogram may on the enclosing histogram or with a query matching only a small number of documents) Where there is no data available in a bucket for a given metric it presents a problem for calculating the derivative value for both -the current bucket and the next bucket. In the derivative reducer aggregation has a `gap policy` parameter to define what the behavior +the current bucket and the next bucket. In the derivative reducer aggregation has a `gap_policy` parameter to define what the behavior should be when a gap in the data is found. There are currently two options for controlling the gap policy: _ignore_:: diff --git a/docs/reference/search/aggregations/reducers/images/double_0.2beta.png b/docs/reference/search/aggregations/reducer/images/double_0.2beta.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/double_0.2beta.png rename to docs/reference/search/aggregations/reducer/images/double_0.2beta.png diff --git a/docs/reference/search/aggregations/reducers/images/double_0.7beta.png b/docs/reference/search/aggregations/reducer/images/double_0.7beta.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/double_0.7beta.png rename to docs/reference/search/aggregations/reducer/images/double_0.7beta.png diff --git a/docs/reference/search/aggregations/reducers/images/double_prediction_global.png b/docs/reference/search/aggregations/reducer/images/double_prediction_global.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/double_prediction_global.png rename to docs/reference/search/aggregations/reducer/images/double_prediction_global.png diff --git a/docs/reference/search/aggregations/reducers/images/double_prediction_local.png b/docs/reference/search/aggregations/reducer/images/double_prediction_local.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/double_prediction_local.png rename to docs/reference/search/aggregations/reducer/images/double_prediction_local.png diff --git a/docs/reference/search/aggregations/reducers/images/linear_100window.png b/docs/reference/search/aggregations/reducer/images/linear_100window.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/linear_100window.png rename to docs/reference/search/aggregations/reducer/images/linear_100window.png diff --git a/docs/reference/search/aggregations/reducers/images/linear_10window.png b/docs/reference/search/aggregations/reducer/images/linear_10window.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/linear_10window.png rename to docs/reference/search/aggregations/reducer/images/linear_10window.png diff --git a/docs/reference/search/aggregations/reducers/images/movavg_100window.png b/docs/reference/search/aggregations/reducer/images/movavg_100window.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/movavg_100window.png rename to docs/reference/search/aggregations/reducer/images/movavg_100window.png diff --git a/docs/reference/search/aggregations/reducers/images/movavg_10window.png b/docs/reference/search/aggregations/reducer/images/movavg_10window.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/movavg_10window.png rename to docs/reference/search/aggregations/reducer/images/movavg_10window.png diff --git a/docs/reference/search/aggregations/reducers/images/simple_prediction.png b/docs/reference/search/aggregations/reducer/images/simple_prediction.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/simple_prediction.png rename to docs/reference/search/aggregations/reducer/images/simple_prediction.png diff --git a/docs/reference/search/aggregations/reducers/images/single_0.2alpha.png b/docs/reference/search/aggregations/reducer/images/single_0.2alpha.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/single_0.2alpha.png rename to docs/reference/search/aggregations/reducer/images/single_0.2alpha.png diff --git a/docs/reference/search/aggregations/reducers/images/single_0.7alpha.png b/docs/reference/search/aggregations/reducer/images/single_0.7alpha.png similarity index 100% rename from docs/reference/search/aggregations/reducers/images/single_0.7alpha.png rename to docs/reference/search/aggregations/reducer/images/single_0.7alpha.png diff --git a/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc index ca6f274d189..a93c7ed8036 100644 --- a/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc +++ b/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc @@ -34,7 +34,7 @@ The following snippet calculates the maximum of the total monthly `sales`: -------------------------------------------------- <1> `bucket_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the -"sales_per_month` date histogram. +`sales_per_month` date histogram. And the following may be the response: diff --git a/docs/reference/search/aggregations/reducers/movavg-reducer.asciidoc b/docs/reference/search/aggregations/reducer/movavg-reducer.asciidoc similarity index 99% rename from docs/reference/search/aggregations/reducers/movavg-reducer.asciidoc rename to docs/reference/search/aggregations/reducer/movavg-reducer.asciidoc index d9759629b75..a01141f0fec 100644 --- a/docs/reference/search/aggregations/reducers/movavg-reducer.asciidoc +++ b/docs/reference/search/aggregations/reducer/movavg-reducer.asciidoc @@ -71,7 +71,7 @@ embedded like any other metric aggregation: <1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals <2> We must specify "min_doc_count: 0" in our date histogram that all buckets are returned, even if they are empty. <3> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc) -<4> Finally, we specify a `moving_avg` aggregation which uses "the_sum" metric as it's input. +<4> Finally, we specify a `moving_avg` aggregation which uses "the_sum" metric as its input. Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally add normal metrics, such as a `sum`, inside of that histogram. Finally, the `moving_avg` is embedded inside the histogram. @@ -121,6 +121,7 @@ the values from a `simple` moving average tend to "lag" behind the real data. "buckets_path": "the_sum", "model" : "simple" } + } } -------------------------------------------------- From 240bcc3f0857658e5d96de4aef0c7a6de3d8565d Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Fri, 24 Apr 2015 16:06:33 -0600 Subject: [PATCH 155/236] Don't create a new BigArrays instance for every call of `withCircuitBreaking` Since the circuit breaking service doesn't actually change for BigArrays, we can eagerly create a new instance only once and use that for all further invocations of `withCircuitBreaking`. --- .../java/org/elasticsearch/common/util/BigArrays.java | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/main/java/org/elasticsearch/common/util/BigArrays.java b/src/main/java/org/elasticsearch/common/util/BigArrays.java index 693b552d8ed..fa202f2d719 100644 --- a/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -364,6 +364,7 @@ public class BigArrays { final PageCacheRecycler recycler; final CircuitBreakerService breakerService; final boolean checkBreaker; + private final BigArrays circuitBreakingInstance; @Inject public BigArrays(PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService) { @@ -375,6 +376,11 @@ public class BigArrays { this.checkBreaker = checkBreaker; this.recycler = recycler; this.breakerService = breakerService; + if (checkBreaker) { + this.circuitBreakingInstance = this; + } else { + this.circuitBreakingInstance = new BigArrays(recycler, breakerService, true); + } } /** @@ -410,11 +416,11 @@ public class BigArrays { } /** - * Return a new instance of this BigArrays class with circuit breaking + * Return an instance of this BigArrays class with circuit breaking * explicitly enabled, instead of only accounting enabled */ public BigArrays withCircuitBreaking() { - return new BigArrays(this.recycler, this.breakerService, true); + return this.circuitBreakingInstance; } private T resizeInPlace(T array, long newSize) { From f599c237bdee7624bd5a67cf26828ccac32dbcc7 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 27 Apr 2015 20:29:57 -0400 Subject: [PATCH 156/236] Security manager cleanups 1. initialize SM after things like mlockall. Their tests currently don't run with securitymanager enabled, and its simpler to just run mlockall etc first. 2. remove redundant test permissions (junit4.childvm.cwd/temp). This is alreay added as java.io.tmpdir. 3. improve tests to load the generated policy with some various settings and assert things about the permissions on configured directories. 4. refactor logic to make it easier to fine-grain the permissions later. for example we currently allow write access to conf/. In the future I think we can improve testing so we are able to make improvements here. --- .../elasticsearch/bootstrap/Bootstrap.java | 3 +- .../org/elasticsearch/bootstrap/Security.java | 81 +++++++----------- .../elasticsearch/bootstrap/security.policy | 6 +- .../bootstrap/SecurityTests.java | 83 +++++++++++++++---- 4 files changed, 101 insertions(+), 72 deletions(-) diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 19bc81b9972..d351a9d4ea2 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -59,7 +59,6 @@ public class Bootstrap { private static Bootstrap bootstrap; private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception { - setupSecurity(settings, environment); if (settings.getAsBoolean("bootstrap.mlockall", false)) { Natives.tryMlockall(); } @@ -90,6 +89,8 @@ public class Bootstrap { } }); } + // install SM after natives, JNA can require strange permissions + setupSecurity(settings, environment); } /** diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index afa8362771e..67ac531f0e7 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -29,8 +29,6 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.util.HashSet; -import java.util.Set; /** * Initializes securitymanager with necessary permissions. @@ -68,55 +66,42 @@ class Security { try (InputStream in = new BufferedInputStream(template)) { ByteStreams.copy(in, output); } - - // add permissions for all configured paths. - Set paths = new HashSet<>(); - paths.add(environment.homeFile()); - paths.add(environment.configFile()); - paths.add(environment.logsFile()); - paths.add(environment.pluginsFile()); - for (Path path : environment.dataFiles()) { - paths.add(path); + + // all policy files are UTF-8: + // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html + try (Writer writer = new OutputStreamWriter(output, StandardCharsets.UTF_8)) { + writer.write(System.lineSeparator()); + writer.write("grant {"); + writer.write(System.lineSeparator()); + + // add permissions for all configured paths. + // TODO: improve test infra so we can reduce permissions where read/write + // is not really needed... + addPath(writer, environment.homeFile(), "read,readlink,write,delete"); + addPath(writer, environment.configFile(), "read,readlink,write,delete"); + addPath(writer, environment.logsFile(), "read,readlink,write,delete"); + addPath(writer, environment.pluginsFile(), "read,readlink,write,delete"); + for (Path path : environment.dataFiles()) { + addPath(writer, path, "read,readlink,write,delete"); + } + for (Path path : environment.dataWithClusterFiles()) { + addPath(writer, path, "read,readlink,write,delete"); + } + + writer.write("};"); + writer.write(System.lineSeparator()); } - for (Path path : environment.dataWithClusterFiles()) { - paths.add(path); - } - output.write(createPermissions(paths)); } return processed; } - // package private for testing - static byte[] createPermissions(Set paths) throws IOException { - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - - // all policy files are UTF-8: - // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html - try (Writer writer = new OutputStreamWriter(stream, StandardCharsets.UTF_8)) { - writer.write(System.lineSeparator()); - writer.write("grant {"); - writer.write(System.lineSeparator()); - for (Path path : paths) { - // data paths actually may not exist yet. - Files.createDirectories(path); - // add each path twice: once for itself, again for files underneath it - addPath(writer, encode(path), "read,readlink,write,delete"); - addRecursivePath(writer, encode(path), "read,readlink,write,delete"); - } - writer.write("};"); - writer.write(System.lineSeparator()); - } - - return stream.toByteArray(); - } - - static void addPath(Writer writer, String path, String permissions) throws IOException { - writer.write("permission java.io.FilePermission \"" + path + "\", \"" + permissions + "\";"); + static void addPath(Writer writer, Path path, String permissions) throws IOException { + // paths may not exist yet + Files.createDirectories(path); + // add each path twice: once for itself, again for files underneath it + writer.write("permission java.io.FilePermission \"" + encode(path) + "\", \"" + permissions + "\";"); writer.write(System.lineSeparator()); - } - - static void addRecursivePath(Writer writer, String path, String permissions) throws IOException { - writer.write("permission java.io.FilePermission \"" + path + "${/}-\", \"" + permissions + "\";"); + writer.write("permission java.io.FilePermission \"" + encode(path) + "${/}-\", \"" + permissions + "\";"); writer.write(System.lineSeparator()); } @@ -124,10 +109,6 @@ class Security { // See "Note Regarding File Path Specifications on Windows Systems". // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html static String encode(Path path) { - return encode(path.toString()); - } - - static String encode(String path) { - return path.replace("\\", "\\\\"); + return path.toString().replace("\\", "\\\\"); } } diff --git a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy index 44b89c47c58..92056a3ab81 100644 --- a/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -37,9 +37,6 @@ grant { permission java.io.FilePermission "${project.basedir}${/}lib/sigar{/}-", "read"; // mvn custom ./m2/repository for dependency jars permission java.io.FilePermission "${m2.repository}${/}-", "read"; - // per-jvm directory - permission java.io.FilePermission "${junit4.childvm.cwd}${/}temp", "read,write"; - permission java.io.FilePermission "${junit4.childvm.cwd}${/}temp${/}-", "read,write,delete"; permission java.nio.file.LinkPermission "symbolic"; permission groovy.security.GroovyCodeSourcePermission "/groovy/script"; @@ -86,7 +83,10 @@ grant { // needed for natives calls permission java.lang.RuntimePermission "loadLibrary.*"; + + // needed for testing access rules etc permission java.lang.RuntimePermission "createSecurityManager"; + permission java.security.SecurityPermission "createPolicy.JavaPolicy"; // reflection hacks: // needed for Striped64 (what is this doing), also enables unmap hack diff --git a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java index d3a27f56b1f..4c2ddcd47eb 100644 --- a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java +++ b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java @@ -19,30 +19,77 @@ package org.elasticsearch.bootstrap; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; import org.elasticsearch.test.ElasticsearchTestCase; -import java.nio.charset.StandardCharsets; +import java.io.ByteArrayInputStream; +import java.io.FilePermission; import java.nio.file.Path; -import java.util.Collections; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.security.URIParameter; public class SecurityTests extends ElasticsearchTestCase { - /** backslash escaping (e.g. windows paths) */ - public void testEncode() { - assertEquals("c:\\\\foobar", Security.encode("c:\\foobar")); - } - - /** test template processing */ - public void testTemplateProcessing() throws Exception { + /** test generated permissions */ + public void testGeneratedPermissions() throws Exception { Path path = createTempDir(); - - byte results[] = Security.createPermissions(Collections.singleton(path)); - String unicode = new String(results, StandardCharsets.UTF_8); - // try not to make this test too fragile or useless - assertTrue(unicode.contains("grant {")); - assertTrue(unicode.contains(Security.encode(path))); - assertTrue(unicode.contains("read")); - assertTrue(unicode.contains("write")); + // make a fake ES home and ensure we only grant permissions to that. + Path esHome = path.resolve("esHome"); + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder(); + settingsBuilder.put("path.home", esHome.toString()); + Settings settings = settingsBuilder.build(); + + Environment environment = new Environment(settings); + Path policyFile = Security.processTemplate(new ByteArrayInputStream(new byte[0]), environment); + + ProtectionDomain domain = getClass().getProtectionDomain(); + Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toUri())); + // the fake es home + assertTrue(policy.implies(domain, new FilePermission(esHome.toString(), "read"))); + // its parent + assertFalse(policy.implies(domain, new FilePermission(path.toString(), "read"))); + // some other sibling + assertFalse(policy.implies(domain, new FilePermission(path.resolve("other").toString(), "read"))); + } + + /** test generated permissions for all configured paths */ + public void testEnvironmentPaths() throws Exception { + Path path = createTempDir(); + + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder(); + settingsBuilder.put("path.home", path.resolve("home").toString()); + settingsBuilder.put("path.conf", path.resolve("conf").toString()); + settingsBuilder.put("path.plugins", path.resolve("plugins").toString()); + settingsBuilder.putArray("path.data", path.resolve("data1").toString(), path.resolve("data2").toString()); + settingsBuilder.put("path.logs", path.resolve("logs").toString()); + Settings settings = settingsBuilder.build(); + + Environment environment = new Environment(settings); + Path policyFile = Security.processTemplate(new ByteArrayInputStream(new byte[0]), environment); + + ProtectionDomain domain = getClass().getProtectionDomain(); + Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toUri())); + + // check that all directories got permissions: + // homefile: this is needed unless we break out rules for "lib" dir. + // TODO: make read-only + assertTrue(policy.implies(domain, new FilePermission(environment.homeFile().toString(), "read,readlink,write,delete"))); + // config file + // TODO: make read-only + assertTrue(policy.implies(domain, new FilePermission(environment.configFile().toString(), "read,readlink,write,delete"))); + // plugins: r/w, TODO: can this be minimized? + assertTrue(policy.implies(domain, new FilePermission(environment.pluginsFile().toString(), "read,readlink,write,delete"))); + // data paths: r/w + for (Path dataPath : environment.dataFiles()) { + assertTrue(policy.implies(domain, new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); + } + for (Path dataPath : environment.dataWithClusterFiles()) { + assertTrue(policy.implies(domain, new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); + } + // logs: r/w + assertTrue(policy.implies(domain, new FilePermission(environment.logsFile().toString(), "read,readlink,write,delete"))); } - } From b25259532ef678de8dc6cbd9d0b193860e611b86 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Mon, 27 Apr 2015 16:41:32 +0200 Subject: [PATCH 157/236] Release: Fix build repositories script Minor issue with specifying the correct version when starting the package release script. Another issue fixed to make sure that the S3 bucket parameters act the same. --- dev-tools/build_release.py | 2 +- dev-tools/build_repositories.sh | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index d5038d90364..ca3223c0a92 100644 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -513,7 +513,7 @@ def publish_repositories(version, dry_run=True): else: print('Triggering repository update - calling dev-tools/build_repositories.sh %s' % version) # src_branch is a version like 1.5/1.6/2.0/etc.. so we can use this - run('dev-tools/build_repositories.sh %s', src_branch) + run('dev-tools/build_repositories.sh %s' % src_branch) def print_sonatype_notice(): settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml') diff --git a/dev-tools/build_repositories.sh b/dev-tools/build_repositories.sh index a7c7dae9311..9bad8ff2c14 100755 --- a/dev-tools/build_repositories.sh +++ b/dev-tools/build_repositories.sh @@ -32,12 +32,12 @@ set -e ## GPG_KEY_ID: Key id of your GPG key ## AWS_ACCESS_KEY_ID: AWS access key id ## AWS_SECRET_ACCESS_KEY: AWS secret access key -## S3_BUCKET_SYNC_TO Bucket to write packages to, defaults to packages.elasticsearch.org/elasticsearch +## S3_BUCKET_SYNC_TO Bucket to write packages to, should be set packages.elasticsearch.org for a regular release ## ## ## optional ## -## S3_BUCKET_SYNC_FROM Bucket to read packages from, defaults to packages.elasticsearch.org/elasticsearch +## S3_BUCKET_SYNC_FROM Bucket to read packages from, defaults to packages.elasticsearch.org ## KEEP_DIRECTORIES Allows to keep all the generated directory structures for debugging ## GPG_KEYRING Configure GPG keyring home, defaults to ~/.gnupg/ ## @@ -51,7 +51,7 @@ set -e # No trailing slashes! if [ -z $S3_BUCKET_SYNC_FROM ] ; then - S3_BUCKET_SYNC_FROM="packages.elasticsearch.org/elasticsearch" + S3_BUCKET_SYNC_FROM="packages.elasticsearch.org" fi if [ ! -z $GPG_KEYRING ] ; then GPG_HOMEDIR="--homedir ${GPG_KEYRING}" @@ -156,7 +156,7 @@ centosdir=$tempdir/repository/elasticsearch/$version/centos mkdir -p $centosdir echo "RPM: Syncing repository for version $version into $centosdir" -$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/$version/centos/ $centosdir +$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/centos/ $centosdir rpm=target/rpm/elasticsearch/RPMS/noarch/elasticsearch*.rpm echo "RPM: Copying $rpm into $centosdor" @@ -191,7 +191,7 @@ mkdir -p $debbasedir echo "DEB: Syncing debian repository of version $version to $debbasedir" # sync all former versions into directory -$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/$version/debian/ $debbasedir +$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/debian/ $debbasedir # create directories in case of a new release so that syncing did not create this structure mkdir -p $debbasedir/dists/stable/main/binary-all From 5812753dbcbac645c2fc8a5ed460178b3e5b60a9 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 27 Apr 2015 14:43:47 -0700 Subject: [PATCH 158/236] Backcompat: Add test for missing filter The _field_names field was fixed in 1.5.1 (#10268) to correctly be disabled for indexes before 1.3.0. However, only the exists filter was updated to check this enabled flag on 1.x/1.5. The missing filter on those branches still checks the field type to see if it is indexed, which causes the filter to always try and use the _field_names field for those old indexes. This change adds a test to the old index tests for missing filter. closes #10842 --- .../bwcompat/OldIndexBackwardsCompatibilityTests.java | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java index 4178534405f..206bc4469df 100644 --- a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java @@ -204,6 +204,7 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio } return FileVisitResult.CONTINUE; } + @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) { @@ -285,7 +286,6 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio version.luceneVersion.minor == Version.CURRENT.luceneVersion.minor; } - void assertIndexSanity(String indexName) { GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices(indexName).get(); assertEquals(1, getIndexResponse.indices().length); @@ -311,7 +311,14 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), FilterBuilders.existsFilter("string"))); searchRsp = searchReq.get(); ElasticsearchAssertions.assertNoFailures(searchRsp); - assertThat(numDocs, equalTo(searchRsp.getHits().getTotalHits())); + assertEquals(numDocs, searchRsp.getHits().getTotalHits()); + + logger.info("--> testing missing filter"); + // the field for the missing filter here needs to be different than the exists filter above, to avoid being found in the cache + searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), FilterBuilders.missingFilter("long_sort"))); + searchRsp = searchReq.get(); + ElasticsearchAssertions.assertNoFailures(searchRsp); + assertEquals(0, searchRsp.getHits().getTotalHits()); } void assertBasicAggregationWorks(String indexName) { From 180403fc32cd7f525374e82cc6786d15cc7642cc Mon Sep 17 00:00:00 2001 From: Isabel Drost-Fromm Date: Mon, 27 Apr 2015 10:53:36 +0200 Subject: [PATCH 159/236] Fix JSON encoding for Mustache templates. This pull request replaces the current self-made implementation of JSON encoding special chars with re-using the Jackson JsonStringEncoder. Turns out the previous implementation also missed a few special chars so had to adjust the tests accordingly (looked at RFC 4627 for reference). Note: There's another JSON String encoder on our classpath (org.apache.commons.lang3.StringEscapeUtils) that essentially does the same thing but adds quoting to more characters than the Jackson Encoder above. Relates to #5473 --- .../mustache/JsonEscapingMustacheFactory.java | 33 +------ .../mustache/MustacheScriptEngineTest.java | 86 +++++++++++++++---- 2 files changed, 75 insertions(+), 44 deletions(-) diff --git a/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java b/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java index ebec396131e..7734d0334bf 100644 --- a/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java +++ b/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.script.mustache; +import com.fasterxml.jackson.core.io.JsonStringEncoder; import com.github.mustachejava.DefaultMustacheFactory; import com.github.mustachejava.MustacheException; @@ -28,40 +29,14 @@ import java.io.Writer; * A MustacheFactory that does simple JSON escaping. */ public final class JsonEscapingMustacheFactory extends DefaultMustacheFactory { - + @Override public void encode(String value, Writer writer) { try { - escape(value, writer); + JsonStringEncoder utils = new JsonStringEncoder(); + writer.write(utils.quoteAsString(value));; } catch (IOException e) { throw new MustacheException("Failed to encode value: " + value); } } - - public static Writer escape(String value, Writer writer) throws IOException { - for (int i = 0; i < value.length(); i++) { - final char character = value.charAt(i); - if (isEscapeChar(character)) { - writer.write('\\'); - } - writer.write(character); - } - return writer; - } - - public static boolean isEscapeChar(char c) { - switch(c) { - case '\b': - case '\f': - case '\n': - case '\r': - case '"': - case '\\': - case '\u000B': // vertical tab - case '\t': - return true; - } - return false; - } - } diff --git a/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java b/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java index ab01e465588..3b6f0f479f1 100644 --- a/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java +++ b/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.script.mustache; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.test.ElasticsearchTestCase; @@ -38,10 +37,12 @@ import static org.hamcrest.Matchers.equalTo; */ public class MustacheScriptEngineTest extends ElasticsearchTestCase { private MustacheScriptEngineService qe; + private JsonEscapingMustacheFactory escaper; @Before public void setup() { qe = new MustacheScriptEngineService(ImmutableSettings.Builder.EMPTY_SETTINGS); + escaper = new JsonEscapingMustacheFactory(); } @Test @@ -73,43 +74,98 @@ public class MustacheScriptEngineTest extends ElasticsearchTestCase { public void testEscapeJson() throws IOException { { StringWriter writer = new StringWriter(); - JsonEscapingMustacheFactory.escape("hello \n world", writer); - assertThat(writer.toString(), equalTo("hello \\\n world")); + escaper.encode("hello \n world", writer); + assertThat(writer.toString(), equalTo("hello \\n world")); } { StringWriter writer = new StringWriter(); - JsonEscapingMustacheFactory.escape("\n", writer); - assertThat(writer.toString(), equalTo("\\\n")); + escaper.encode("\n", writer); + assertThat(writer.toString(), equalTo("\\n")); } - Character[] specialChars = new Character[]{'\f', '\n', '\r', '"', '\\', (char) 11, '\t', '\b' }; + Character[] specialChars = new Character[]{ + '\"', + '\\', + '\u0000', + '\u0001', + '\u0002', + '\u0003', + '\u0004', + '\u0005', + '\u0006', + '\u0007', + '\u0008', + '\u0009', + '\u000B', + '\u000C', + '\u000E', + '\u000F', + '\u001F'}; + String[] escapedChars = new String[]{ + "\\\"", + "\\\\", + "\\u0000", + "\\u0001", + "\\u0002", + "\\u0003", + "\\u0004", + "\\u0005", + "\\u0006", + "\\u0007", + "\\u0008", + "\\u0009", + "\\u000B", + "\\u000C", + "\\u000E", + "\\u000F", + "\\u001F"}; int iters = scaledRandomIntBetween(100, 1000); for (int i = 0; i < iters; i++) { int rounds = scaledRandomIntBetween(1, 20); - StringWriter escaped = new StringWriter(); + StringWriter expect = new StringWriter(); StringWriter writer = new StringWriter(); for (int j = 0; j < rounds; j++) { String s = getChars(); writer.write(s); - escaped.write(s); - char c = RandomPicks.randomFrom(getRandom(), specialChars); - writer.append(c); - escaped.append('\\'); - escaped.append(c); + expect.write(s); + + int charIndex = randomInt(7); + writer.append(specialChars[charIndex]); + expect.append(escapedChars[charIndex]); } StringWriter target = new StringWriter(); - assertThat(escaped.toString(), equalTo(JsonEscapingMustacheFactory.escape(writer.toString(), target).toString())); + escaper.encode(writer.toString(), target); + assertThat(expect.toString(), equalTo(target.toString())); } } private String getChars() { String string = randomRealisticUnicodeOfCodepointLengthBetween(0, 10); for (int i = 0; i < string.length(); i++) { - if (JsonEscapingMustacheFactory.isEscapeChar(string.charAt(i))) { + if (isEscapeChar(string.charAt(i))) { return string.substring(0, i); } } return string; } - + + /** + * From https://www.ietf.org/rfc/rfc4627.txt: + * + * All Unicode characters may be placed within the + * quotation marks except for the characters that must be escaped: + * quotation mark, reverse solidus, and the control characters (U+0000 + * through U+001F). + * */ + private static boolean isEscapeChar(char c) { + switch (c) { + case '"': + case '\\': + return true; + } + + if (c < '\u002F') + return true; + return false; + } } From 2ce0ea15b016aba09304b465973ef3fbc98e376d Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 24 Apr 2015 14:27:52 +0200 Subject: [PATCH 160/236] Internal: Remove deprecated METADATA cluster block level This commit removes the deprecated ClusterBlockLevel.METADATA, replaced in #9203 with METADATA_READ and METADATA_WRITE. --- .../cluster/block/ClusterBlock.java | 4 +- .../cluster/block/ClusterBlockLevel.java | 43 +++---------------- .../cluster/block/ClusterBlockTests.java | 17 +------- 3 files changed, 10 insertions(+), 54 deletions(-) diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index a5bbcd1060c..aee6c7f8fc4 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -131,7 +131,7 @@ public class ClusterBlock implements Serializable, Streamable, ToXContent { final int len = in.readVInt(); ArrayList levels = new ArrayList<>(); for (int i = 0; i < len; i++) { - levels.addAll(ClusterBlockLevel.fromId(in.readVInt())); + levels.add(ClusterBlockLevel.fromId(in.readVInt())); } this.levels = EnumSet.copyOf(levels); retryable = in.readBoolean(); @@ -145,7 +145,7 @@ public class ClusterBlock implements Serializable, Streamable, ToXContent { out.writeString(description); out.writeVInt(levels.size()); for (ClusterBlockLevel level : levels) { - out.writeVInt(level.toId(out.getVersion())); + out.writeVInt(level.id()); } out.writeBoolean(retryable); out.writeBoolean(disableStatePersistence); diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java index 3ed8d999ad5..dace9e8b4f5 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.block; import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.Version; import java.util.EnumSet; @@ -30,16 +29,8 @@ import java.util.EnumSet; public enum ClusterBlockLevel { READ(0), WRITE(1), - - /** - * Since 1.6.0, METADATA has been split into two distincts cluster block levels - * @deprecated Use METADATA_READ or METADATA_WRITE instead. - */ - @Deprecated - METADATA(2), - - METADATA_READ(3), - METADATA_WRITE(4); + METADATA_READ(2), + METADATA_WRITE(3); public static final EnumSet ALL = EnumSet.of(READ, WRITE, METADATA_READ, METADATA_WRITE); public static final EnumSet READ_WRITE = EnumSet.of(READ, WRITE); @@ -54,35 +45,15 @@ public enum ClusterBlockLevel { return this.id; } - /** - * Returns the ClusterBlockLevel's id according to a given version, this to ensure backward compatibility. - * - * @param version the version - * @return the ClusterBlockLevel's id - */ - public int toId(Version version) { - assert version != null : "Version shouldn't be null"; - // Since 1.6.0, METADATA has been split into two distincts cluster block levels - if (version.before(Version.V_1_6_0)) { - if (this == ClusterBlockLevel.METADATA_READ || this == ClusterBlockLevel.METADATA_WRITE) { - return ClusterBlockLevel.METADATA.id(); - } - } - return id(); - } - - static EnumSet fromId(int id) { + static ClusterBlockLevel fromId(int id) { if (id == 0) { - return EnumSet.of(READ); + return READ; } else if (id == 1) { - return EnumSet.of(WRITE); + return WRITE; } else if (id == 2) { - // Since 1.6.0, METADATA has been split into two distincts cluster block levels - return EnumSet.of(METADATA_READ, METADATA_WRITE); + return METADATA_READ; } else if (id == 3) { - return EnumSet.of(METADATA_READ); - } else if (id == 4) { - return EnumSet.of(METADATA_WRITE); + return METADATA_WRITE; } throw new ElasticsearchIllegalArgumentException("No cluster block level matching [" + id + "]"); } diff --git a/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java index e704fb3ecbc..efccb3f778c 100644 --- a/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java +++ b/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -28,7 +28,6 @@ import org.junit.Test; import java.util.EnumSet; -import static org.elasticsearch.cluster.block.ClusterBlockLevel.*; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; @@ -64,21 +63,7 @@ public class ClusterBlockTests extends ElasticsearchTestCase { assertThat(result.description(), equalTo(clusterBlock.description())); assertThat(result.retryable(), equalTo(clusterBlock.retryable())); assertThat(result.disableStatePersistence(), equalTo(clusterBlock.disableStatePersistence())); - - // This enum set is used to count the expected serialized/deserialized number of blocks - EnumSet expected = EnumSet.noneOf(ClusterBlockLevel.class); - - for (ClusterBlockLevel level : clusterBlock.levels()) { - if (level == METADATA) { - assertTrue(result.levels().contains(METADATA_READ)); - assertTrue(result.levels().contains(METADATA_WRITE)); - } else { - assertTrue(result.levels().contains(level)); - } - - expected.addAll(ClusterBlockLevel.fromId(level.toId(version))); - } - assertThat(result.levels().size(), equalTo(expected.size())); + assertArrayEquals(result.levels().toArray(), clusterBlock.levels().toArray()); } } } From 933edf7bcc410b261485ee0a8861f990f8936fe3 Mon Sep 17 00:00:00 2001 From: Jun Ohtani Date: Fri, 24 Apr 2015 18:00:32 +0900 Subject: [PATCH 161/236] Analysis: Fix wrong position number by analyze API Add breaking chages comment to migrate docs Fix the stopword included text using stopword filter --- docs/reference/migration/migrate_2_0.asciidoc | 4 ++++ .../indices/analyze/TransportAnalyzeAction.java | 2 +- .../indices/analyze/AnalyzeActionTests.java | 12 ++++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 4988d05006c..af77ed20ad6 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -410,3 +410,7 @@ a single `expand_wildcards` parameter. See <> The `_shutdown` API has been removed without a replacement. Nodes should be managed via operating systems and the provided start/stop scripts. +=== Analyze API + +The Analyze API return 0 as first Token's position instead of 1. + diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 143670809a7..7cdb026e9fb 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -220,7 +220,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class); TypeAttribute type = stream.addAttribute(TypeAttribute.class); - int position = 0; + int position = -1; while (stream.incrementToken()) { int increment = posIncr.getPositionIncrement(); if (increment > 0) { diff --git a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java index 67548a83e3b..7541de9132f 100644 --- a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java +++ b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java @@ -53,18 +53,22 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { assertThat(token.getTerm(), equalTo("this")); assertThat(token.getStartOffset(), equalTo(0)); assertThat(token.getEndOffset(), equalTo(4)); + assertThat(token.getPosition(), equalTo(0)); token = analyzeResponse.getTokens().get(1); assertThat(token.getTerm(), equalTo("is")); assertThat(token.getStartOffset(), equalTo(5)); assertThat(token.getEndOffset(), equalTo(7)); + assertThat(token.getPosition(), equalTo(1)); token = analyzeResponse.getTokens().get(2); assertThat(token.getTerm(), equalTo("a")); assertThat(token.getStartOffset(), equalTo(8)); assertThat(token.getEndOffset(), equalTo(9)); + assertThat(token.getPosition(), equalTo(2)); token = analyzeResponse.getTokens().get(3); assertThat(token.getTerm(), equalTo("test")); assertThat(token.getStartOffset(), equalTo(10)); assertThat(token.getEndOffset(), equalTo(14)); + assertThat(token.getPosition(), equalTo(3)); } } @@ -107,6 +111,14 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { assertThat(token.getTerm(), equalTo("a")); token = analyzeResponse.getTokens().get(3); assertThat(token.getTerm(), equalTo("tset")); + + analyzeResponse = client().admin().indices().prepareAnalyze("of course").setTokenizer("standard").setTokenFilters("stop").get(); + assertThat(analyzeResponse.getTokens().size(), equalTo(1)); + assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("course")); + assertThat(analyzeResponse.getTokens().get(0).getPosition(), equalTo(1)); + assertThat(analyzeResponse.getTokens().get(0).getStartOffset(), equalTo(3)); + assertThat(analyzeResponse.getTokens().get(0).getEndOffset(), equalTo(9)); + } @Test From 8b0674c18d65479eec531bd09da22874f2d115d7 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 23 Apr 2015 16:52:43 +0200 Subject: [PATCH 162/236] Tests: Add backward compatibility test for cluster state with blocks --- .../ClusterStateBackwardsCompatTests.java | 70 ++++++++++++++++--- 1 file changed, 62 insertions(+), 8 deletions(-) diff --git a/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java b/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java index 3eb69750894..c55681d3fd2 100644 --- a/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java @@ -19,16 +19,25 @@ package org.elasticsearch.bwcompat; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest; import org.junit.Test; -import static org.hamcrest.Matchers.*; + +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.hamcrest.Matchers.equalTo; public class ClusterStateBackwardsCompatTests extends ElasticsearchBackwardsCompatIntegrationTest { @@ -36,13 +45,9 @@ public class ClusterStateBackwardsCompatTests extends ElasticsearchBackwardsComp public void testClusterState() throws Exception { createIndex("test"); - NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().execute().actionGet(); - Settings settings = ImmutableSettings.settingsBuilder().put("client.transport.ignore_cluster_name", true) - .put("node.name", "transport_client_" + getTestName()).build(); - // connect to each node with a custom TransportClient, issue a ClusterStateRequest to test serialization - for (NodeInfo n : nodesInfo.getNodes()) { - try (TransportClient tc = new TransportClient(settings)) { + for (NodeInfo n : clusterNodes()) { + try (TransportClient tc = newTransportClient()) { tc.addTransportAddress(n.getNode().address()); ClusterStateResponse response = tc.admin().cluster().prepareState().execute().actionGet(); @@ -52,4 +57,53 @@ public class ClusterStateBackwardsCompatTests extends ElasticsearchBackwardsComp } } } + + @Test + public void testClusterStateWithBlocks() { + createIndex("test-blocks"); + + Map blocks = new HashMap<>(); + blocks.put(SETTING_BLOCKS_READ, IndexMetaData.INDEX_READ_BLOCK); + blocks.put(SETTING_BLOCKS_WRITE, IndexMetaData.INDEX_WRITE_BLOCK); + blocks.put(SETTING_BLOCKS_METADATA, IndexMetaData.INDEX_METADATA_BLOCK); + + for (Map.Entry block : blocks.entrySet()) { + try { + enableIndexBlock("test-blocks", block.getKey()); + + for (NodeInfo n : clusterNodes()) { + try (TransportClient tc = newTransportClient()) { + tc.addTransportAddress(n.getNode().address()); + + ClusterStateResponse response = tc.admin().cluster().prepareState().setIndices("test-blocks") + .setBlocks(true).setNodes(false).execute().actionGet(); + + ClusterBlocks clusterBlocks = response.getState().blocks(); + assertNotNull(clusterBlocks); + assertTrue(clusterBlocks.hasIndexBlock("test-blocks", block.getValue())); + + for (ClusterBlockLevel level : block.getValue().levels()) { + assertTrue(clusterBlocks.indexBlocked(level, "test-blocks")); + } + + IndexMetaData indexMetaData = response.getState().getMetaData().getIndices().get("test-blocks"); + assertNotNull(indexMetaData); + assertTrue(indexMetaData.settings().getAsBoolean(block.getKey(), null)); + } + } + } finally { + disableIndexBlock("test-blocks", block.getKey()); + } + } + } + + private NodesInfoResponse clusterNodes() { + return client().admin().cluster().prepareNodesInfo().execute().actionGet(); + } + + private TransportClient newTransportClient() { + Settings settings = ImmutableSettings.settingsBuilder().put("client.transport.ignore_cluster_name", true) + .put("node.name", "transport_client_" + getTestName()).build(); + return new TransportClient(settings); + } } From 78608536e2962a09fe6a1718f7ec71bd2615eab3 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 2 Mar 2015 12:38:51 +0100 Subject: [PATCH 163/236] Java api: SearchRequestBuilder#toString to print out the query without wiping the request source Best effort to print out the search source depending on how it was set to the SearchRequestBuilder, don't call `internalBuilder() as that causes the content of the request to be wiped. Closes #5576 --- .../action/search/SearchRequestBuilder.java | 17 ++- .../search/builder/SearchSourceBuilder.java | 3 +- .../search/SearchRequestBuilderTests.java | 128 ++++++++++++++++++ 3 files changed, 143 insertions(+), 5 deletions(-) create mode 100644 src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 59d6db804b0..e51bbb6234f 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; @@ -28,6 +29,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.script.ScriptService; @@ -366,9 +368,6 @@ public class SearchRequestBuilder extends ActionRequestBuilder Date: Mon, 2 Mar 2015 12:40:05 +0100 Subject: [PATCH 164/236] Java api: implement toString in CountRequestBuilder Similarly to what SearchRequestBuilder does, we print out a string representation of the query that the count request builder holds. Closes #5555 --- .../action/count/CountRequestBuilder.java | 17 +++ .../action/support/QuerySourceBuilder.java | 12 ++ .../count/CountRequestBuilderTests.java | 127 ++++++++++++++++++ 3 files changed, 156 insertions(+) create mode 100644 src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java diff --git a/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java b/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java index 6d1566797c1..b7b6377e619 100644 --- a/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java @@ -19,12 +19,14 @@ package org.elasticsearch.action.count; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.QueryBuilder; /** @@ -144,4 +146,19 @@ public class CountRequestBuilder extends BroadcastOperationRequestBuilder Date: Tue, 28 Apr 2015 05:25:42 -0400 Subject: [PATCH 165/236] Remove reflection permission for sun.management. This is no longer needed after #10553 --- src/main/resources/org/elasticsearch/bootstrap/security.policy | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy index 92056a3ab81..ffc0032d4a0 100644 --- a/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -95,8 +95,6 @@ grant { permission java.lang.RuntimePermission "accessClassInPackage.sun.nio.ch"; // needed by groovy engine permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect"; - // needed to get file descriptor statistics - permission java.lang.RuntimePermission "accessClassInPackage.sun.management"; permission java.lang.RuntimePermission "accessDeclaredMembers"; permission java.lang.RuntimePermission "getStackTrace"; From c914134355a47747d2b455f71e3a5772d54fd189 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 8 Apr 2015 13:34:49 +0200 Subject: [PATCH 166/236] Scripting: remove groovy sandbox Groovy sandboxing was disabled by default from 1.4.3 on though since we found out that it could be worked around, so it makes little sense to keep it and maintain it. Closes #10156 Closes #10480 --- docs/reference/migration/migrate_2_0.asciidoc | 5 + docs/reference/modules/scripting.asciidoc | 52 +----- .../ClusterDynamicSettingsModule.java | 2 - .../elasticsearch/script/ScriptService.java | 41 +---- .../GroovySandboxExpressionChecker.java | 170 ------------------ .../groovy/GroovyScriptEngineService.java | 50 +----- .../script/GroovySandboxScriptTests.java | 159 ---------------- .../script/NativeScriptTests.java | 3 +- .../script/SandboxDisabledTests.java | 57 ------ .../script/ScriptServiceTests.java | 3 +- .../scriptfilter/ScriptFilterSearchTests.java | 2 - .../search/timeout/SearchTimeoutTests.java | 3 +- 12 files changed, 15 insertions(+), 532 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/script/groovy/GroovySandboxExpressionChecker.java delete mode 100644 src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java delete mode 100644 src/test/java/org/elasticsearch/script/SandboxDisabledTests.java diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index af77ed20ad6..f4dc3e506ec 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -345,6 +345,11 @@ Deprecated script parameters `id`, `file`, and `scriptField` have been removed from all scriptable APIs. `script_id`, `script_file` and `script` should be used in their place. +=== Groovy scripts sandbox + +The groovy sandbox and related settings have been removed. Groovy is now a non +sandboxed scripting language, without any option to turn the sandbox on. + === Plugins making use of scripts Plugins that make use of scripts must register their own script context through diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index 2725e05a6e0..5d198520e87 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -11,26 +11,11 @@ The scripting module uses by default http://groovy.codehaus.org/[groovy] scripting language with some extensions. Groovy is used since it is extremely fast and very simple to use. -.Groovy dynamic scripting disabled by default from v1.4.3 +.Groovy dynamic scripting off by default from v1.4.3 [IMPORTANT] =================================================== -Elasticsearch versions 1.3.0-1.3.7 and 1.4.0-1.4.2 have a vulnerability in the -Groovy scripting engine. The vulnerability allows an attacker to construct -Groovy scripts that escape the sandbox and execute shell commands as the user -running the Elasticsearch Java VM. - -If you are running a vulnerable version of Elasticsearch, you should either -upgrade to at least v1.3.8 or v1.4.3, or disable dynamic Groovy scripts by -adding this setting to the `config/elasticsearch.yml` file in all nodes in the -cluster: - -[source,yaml] ------------------------------------ -script.groovy.sandbox.enabled: false ------------------------------------ - -This will turn off the Groovy sandbox, thus preventing dynamic Groovy scripts +Groovy dynamic scripting is off by default, preventing dynamic Groovy scripts from being accepted as part of a request or retrieved from the special `.scripts` index. You will still be able to use Groovy scripts stored in files in the `config/scripts/` directory on every node. @@ -351,39 +336,6 @@ The default scripting language (assuming no `lang` parameter is provided) is `groovy`. In order to change it, set the `script.default_lang` to the appropriate language. -[float] -=== Groovy Sandboxing - -Elasticsearch sandboxes Groovy scripts that are compiled and executed in order -to ensure they don't perform unwanted actions. There are a number of options -that can be used for configuring this sandbox: - -`script.groovy.sandbox.receiver_whitelist`:: - - Comma-separated list of string classes for objects that may have methods - invoked. - -`script.groovy.sandbox.package_whitelist`:: - - Comma-separated list of packages under which new objects may be constructed. - -`script.groovy.sandbox.class_whitelist`:: - - Comma-separated list of classes that are allowed to be constructed. - -`script.groovy.sandbox.method_blacklist`:: - - Comma-separated list of methods that are never allowed to be invoked, - regardless of target object. - -`script.groovy.sandbox.enabled`:: - - Flag to enable the sandbox (defaults to `false` meaning the sandbox is - disabled). - -When specifying whitelist or blacklist settings for the groovy sandbox, all -options replace the current whitelist, they are not additive. - [float] === Automatic Script Reloading diff --git a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java index 0d485a6932a..49cb3c56e81 100644 --- a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java +++ b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java @@ -33,7 +33,6 @@ import org.elasticsearch.indices.cache.filter.IndicesFilterCache; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.ttl.IndicesTTLService; -import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.threadpool.ThreadPool; /** @@ -101,7 +100,6 @@ public class ClusterDynamicSettingsModule extends AbstractModule { clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE); clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE); - clusterDynamicSettings.addDynamicSetting(GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH); } public void addDynamicSettings(String... settings) { diff --git a/src/main/java/org/elasticsearch/script/ScriptService.java b/src/main/java/org/elasticsearch/script/ScriptService.java index 1eb6ff166bb..cbe38f12958 100644 --- a/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/src/main/java/org/elasticsearch/script/ScriptService.java @@ -58,7 +58,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.TemplateQueryParser; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.watcher.FileChangesListener; @@ -100,7 +99,6 @@ public class ScriptService extends AbstractComponent implements Closeable { private final Cache cache; private final Path scriptsDirectory; - private final FileWatcher fileWatcher; private final ScriptModes scriptModes; private final ScriptContextRegistry scriptContextRegistry; @@ -114,7 +112,7 @@ public class ScriptService extends AbstractComponent implements Closeable { @Inject public ScriptService(Settings settings, Environment env, Set scriptEngines, - ResourceWatcherService resourceWatcherService, NodeSettingsService nodeSettingsService, ScriptContextRegistry scriptContextRegistry) throws IOException { + ResourceWatcherService resourceWatcherService, ScriptContextRegistry scriptContextRegistry) throws IOException { super(settings); if (Strings.hasLength(settings.get(DISABLE_DYNAMIC_SCRIPTING_SETTING))) { @@ -159,7 +157,7 @@ public class ScriptService extends AbstractComponent implements Closeable { if (logger.isTraceEnabled()) { logger.trace("Using scripts directory [{}] ", scriptsDirectory); } - this.fileWatcher = new FileWatcher(scriptsDirectory); + FileWatcher fileWatcher = new FileWatcher(scriptsDirectory); fileWatcher.addListener(new ScriptChangesListener()); if (settings.getAsBoolean(SCRIPT_AUTO_RELOAD_ENABLED_SETTING, true)) { @@ -169,7 +167,6 @@ public class ScriptService extends AbstractComponent implements Closeable { // automatic reload is disable just load scripts once fileWatcher.init(); } - nodeSettingsService.addListener(new ApplySettings()); } //This isn't set in the ctor because doing so creates a guice circular @@ -183,21 +180,6 @@ public class ScriptService extends AbstractComponent implements Closeable { IOUtils.close(scriptEngines); } - /** - * Clear both the in memory and on disk compiled script caches. Files on - * disk will be treated as if they are new and recompiled. - * */ - public void clearCache() { - logger.debug("clearing script cache"); - // Clear the in-memory script caches - this.cache.invalidateAll(); - this.cache.cleanUp(); - // Clear the cache of on-disk scripts - this.staticCache.clear(); - // Clear the file watcher's state so it re-compiles on-disk scripts - this.fileWatcher.clearState(); - } - private ScriptEngineService getScriptEngineServiceForLang(String lang) { ScriptEngineService scriptEngineService = scriptEnginesByLang.get(lang); if (scriptEngineService == null) { @@ -642,23 +624,4 @@ public class ScriptService extends AbstractComponent implements Closeable { } } } - - private class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - GroovyScriptEngineService engine = (GroovyScriptEngineService) ScriptService.this.scriptEnginesByLang.get(GroovyScriptEngineService.NAME); - if (engine != null) { - String[] patches = settings.getAsArray(GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, Strings.EMPTY_ARRAY); - boolean blacklistChanged = engine.addToBlacklist(patches); - if (blacklistChanged) { - logger.info("adding {} to [{}], new blacklisted methods: {}", patches, - GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, engine.blacklistAdditions()); - engine.reloadConfig(); - // Because the GroovyScriptEngineService knows nothing about the - // cache, we need to clear it here if the setting changes - ScriptService.this.clearCache(); - } - } - } - } } diff --git a/src/main/java/org/elasticsearch/script/groovy/GroovySandboxExpressionChecker.java b/src/main/java/org/elasticsearch/script/groovy/GroovySandboxExpressionChecker.java deleted file mode 100644 index aa7ca8db65c..00000000000 --- a/src/main/java/org/elasticsearch/script/groovy/GroovySandboxExpressionChecker.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.script.groovy; - -import com.google.common.collect.ImmutableSet; -import org.codehaus.groovy.ast.ClassNode; -import org.codehaus.groovy.ast.expr.*; -import org.codehaus.groovy.control.customizers.SecureASTCustomizer; -import org.elasticsearch.common.settings.Settings; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -import static com.google.common.collect.Lists.newArrayList; - -/** - * Class used to determine whether a Groovy expression should be allowed. - * During compilation, every expression is passed to the - * isAuthorized method, which returns true to allow that method - * and false to block it. Includes all of the sandbox-related whitelist and - * blacklist options. - */ -public class GroovySandboxExpressionChecker implements SecureASTCustomizer.ExpressionChecker { - - public static String GROOVY_SANDBOX_METHOD_BLACKLIST = "script.groovy.sandbox.method_blacklist"; - public static String GROOVY_SANDBOX_PACKAGE_WHITELIST = "script.groovy.sandbox.package_whitelist"; - public static String GROOVY_SANDBOX_CLASS_WHITELIST = "script.groovy.sandbox.class_whitelist"; - public static String GROOVY_SCRIPT_SANDBOX_RECEIVER_WHITELIST = "script.groovy.sandbox.receiver_whitelist"; - - private final Set methodBlacklist; - private final Set additionalMethodBlacklist; - private final Set packageWhitelist; - private final Set classWhitelist; - - public GroovySandboxExpressionChecker(Settings settings, Set blacklistAdditions) { - this.methodBlacklist = ImmutableSet.copyOf(settings.getAsArray(GROOVY_SANDBOX_METHOD_BLACKLIST, defaultMethodBlacklist, true)); - this.additionalMethodBlacklist = ImmutableSet.copyOf(blacklistAdditions); - this.packageWhitelist = ImmutableSet.copyOf(settings.getAsArray(GROOVY_SANDBOX_PACKAGE_WHITELIST, defaultPackageWhitelist, true)); - this.classWhitelist = ImmutableSet.copyOf(settings.getAsArray(GROOVY_SANDBOX_CLASS_WHITELIST, defaultClassConstructionWhitelist, true)); - } - - // Never allow calling these methods, regardless of the object type - public static String[] defaultMethodBlacklist = new String[]{ - "getClass", - "class", - "forName", - "wait", - "notify", - "notifyAll", - "invokeMethod", - "finalize" - }; - - // Only instances of these classes in these packages can be instantiated - public static String[] defaultPackageWhitelist = new String[] {"java.util", "java.lang", "org.joda.time"}; - - // Classes that are allowed to be constructed - public static String[] defaultClassConstructionWhitelist = new String[]{ - java.util.Date.class.getName(), - java.util.Map.class.getName(), - java.util.List.class.getName(), - java.util.Set.class.getName(), - java.util.ArrayList.class.getName(), - java.util.Arrays.class.getName(), - java.util.HashMap.class.getName(), - java.util.HashSet.class.getName(), - java.util.UUID.class.getName(), - java.math.BigDecimal.class.getName(), - org.joda.time.DateTime.class.getName(), - org.joda.time.DateTimeZone.class.getName() - }; - - // Default whitelisted receiver classes for the Groovy sandbox - private final static String[] defaultReceiverWhitelist = new String [] { - groovy.util.GroovyCollections.class.getName(), - java.lang.Math.class.getName(), - java.lang.Integer.class.getName(), "[I", "[[I", "[[[I", - java.lang.Float.class.getName(), "[F", "[[F", "[[[F", - java.lang.Double.class.getName(), "[D", "[[D", "[[[D", - java.lang.Long.class.getName(), "[J", "[[J", "[[[J", - java.lang.Short.class.getName(), "[S", "[[S", "[[[S", - java.lang.Character.class.getName(), "[C", "[[C", "[[[C", - java.lang.Byte.class.getName(), "[B", "[[B", "[[[B", - java.lang.Boolean.class.getName(), "[Z", "[[Z", "[[[Z", - java.math.BigDecimal.class.getName(), - java.util.Arrays.class.getName(), - java.util.Date.class.getName(), - java.util.List.class.getName(), - java.util.Map.class.getName(), - java.util.Set.class.getName(), - java.lang.Object.class.getName(), - org.joda.time.DateTime.class.getName(), - org.joda.time.DateTimeUtils.class.getName(), - org.joda.time.DateTimeZone.class.getName(), - org.joda.time.Instant.class.getName() - }; - - /** - * Checks whether the expression to be compiled is allowed - */ - @Override - public boolean isAuthorized(Expression expression) { - if (expression instanceof MethodPointerExpression) { - return false; - } else if (expression instanceof MethodCallExpression) { - MethodCallExpression mce = (MethodCallExpression) expression; - String methodName = mce.getMethodAsString(); - if (methodBlacklist.contains(methodName)) { - return false; - } else if (additionalMethodBlacklist.contains(methodName)) { - return false; - } else if (methodName == null && mce.getMethod() instanceof GStringExpression) { - // We do not allow GStrings for method invocation, they are a security risk - return false; - } - } else if (expression instanceof ConstructorCallExpression) { - ConstructorCallExpression cce = (ConstructorCallExpression) expression; - ClassNode type = cce.getType(); - if (!packageWhitelist.contains(type.getPackageName())) { - return false; - } - if (!classWhitelist.contains(type.getName())) { - return false; - } - } - return true; - } - - /** - * Returns a customized ASTCustomizer that includes the whitelists and - * expression checker. - */ - public static SecureASTCustomizer getSecureASTCustomizer(Settings settings, Set blacklistAdditions) { - SecureASTCustomizer scz = new SecureASTCustomizer(); - // Closures are allowed - scz.setClosuresAllowed(true); - // But defining methods is not - scz.setMethodDefinitionAllowed(false); - // Only allow the imports that we explicitly call out - List importWhitelist = new ArrayList<>(); - importWhitelist.addAll(ImmutableSet.copyOf(GroovySandboxExpressionChecker.defaultClassConstructionWhitelist)); - scz.setImportsWhitelist(importWhitelist); - // Package definitions are not allowed - scz.setPackageAllowed(false); - // White-listed receivers of method calls - String[] receiverWhitelist = settings.getAsArray(GROOVY_SCRIPT_SANDBOX_RECEIVER_WHITELIST, defaultReceiverWhitelist, true); - scz.setReceiversWhiteList(newArrayList(receiverWhitelist)); - // Add the customized expression checker for finer-grained checking - scz.addExpressionCheckers(new GroovySandboxExpressionChecker(settings, blacklistAdditions)); - return scz; - } -} diff --git a/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 6e0e6aef518..96d63f13cc8 100644 --- a/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -22,9 +22,6 @@ package org.elasticsearch.script.groovy; import groovy.lang.Binding; import groovy.lang.GroovyClassLoader; import groovy.lang.Script; - -import com.google.common.collect.ImmutableSet; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorer; import org.codehaus.groovy.ast.ClassCodeExpressionTransformer; @@ -40,28 +37,18 @@ import org.codehaus.groovy.control.customizers.CompilationCustomizer; import org.codehaus.groovy.control.customizers.ImportCustomizer; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.CompiledScript; -import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.LeafSearchScript; -import org.elasticsearch.script.ScoreAccessor; -import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptException; -import org.elasticsearch.script.SearchScript; +import org.elasticsearch.script.*; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.math.BigDecimal; -import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Map; -import java.util.Set; import java.util.concurrent.atomic.AtomicLong; /** @@ -70,47 +57,17 @@ import java.util.concurrent.atomic.AtomicLong; public class GroovyScriptEngineService extends AbstractComponent implements ScriptEngineService { public static final String NAME = "groovy"; - public static String GROOVY_SCRIPT_SANDBOX_ENABLED = "script.groovy.sandbox.enabled"; - public static String GROOVY_SCRIPT_BLACKLIST_PATCH = "script.groovy.sandbox.method_blacklist_patch"; - private final AtomicLong counter = new AtomicLong(); - private final boolean sandboxed; - private volatile GroovyClassLoader loader; - private volatile Set blacklistAdditions; + private final GroovyClassLoader loader; @Inject public GroovyScriptEngineService(Settings settings) { super(settings); - this.sandboxed = settings.getAsBoolean(GROOVY_SCRIPT_SANDBOX_ENABLED, false); - this.blacklistAdditions = ImmutableSet.copyOf(settings.getAsArray(GROOVY_SCRIPT_BLACKLIST_PATCH, Strings.EMPTY_ARRAY)); - reloadConfig(); - } - - public Set blacklistAdditions() { - return this.blacklistAdditions; - } - - /** - * Appends the additional blacklisted methods to the current blacklist, - * returns true if the black list has changed - */ - public boolean addToBlacklist(String... additions) { - Set newBlackList = new HashSet<>(blacklistAdditions); - Collections.addAll(newBlackList, additions); - boolean changed = this.blacklistAdditions.equals(newBlackList) == false; - this.blacklistAdditions = ImmutableSet.copyOf(newBlackList); - return changed; - } - - public void reloadConfig() { ImportCustomizer imports = new ImportCustomizer(); imports.addStarImports("org.joda.time"); imports.addStaticStars("java.lang.Math"); CompilerConfiguration config = new CompilerConfiguration(); config.addCompilationCustomizers(imports); - if (this.sandboxed) { - config.addCompilationCustomizers(GroovySandboxExpressionChecker.getSecureASTCustomizer(settings, this.blacklistAdditions)); - } // Add BigDecimal -> Double transformer config.addCompilationCustomizers(new GroovyBigDecimalTransformer(CompilePhase.CONVERSION)); this.loader = new GroovyClassLoader(settings.getClassLoader(), config); @@ -148,7 +105,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @Override public boolean sandboxed() { - return this.sandboxed; + return false; } @Override @@ -360,5 +317,4 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri return super.transform(newExpr); } } - } diff --git a/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java b/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java deleted file mode 100644 index c575d9152a1..00000000000 --- a/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.script; - -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.groovy.GroovyScriptEngineService; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.CoreMatchers.equalTo; - -/** - * Tests for the Groovy scripting sandbox - */ -@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0) -public class GroovySandboxScriptTests extends ElasticsearchIntegrationTest { - - @Test - public void testSandboxedGroovyScript() throws Exception { - int nodes = randomIntBetween(1, 3); - Settings nodeSettings = ImmutableSettings.builder() - .put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, true) - .build(); - internalCluster().startNodesAsync(nodes, nodeSettings).get(); - client().admin().cluster().prepareHealth().setWaitForNodes(nodes + "").get(); - - client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get(); - - // Plain test - testSuccess(""); - // List - testSuccess("def list = [doc['foo'].value, 3, 4]; def v = list.get(1); list.add(10)"); - // Ranges - testSuccess("def range = 1..doc['foo'].value; def v = range.get(0)"); - // Maps - testSuccess("def v = doc['foo'].value; def m = [:]; m.put(\\\"value\\\", v)"); - // Times - testSuccess("def t = Instant.now().getMillis()"); - // GroovyCollections - testSuccess("def n = [1,2,3]; GroovyCollections.max(n)"); - - // Fail cases - testFailure("pr = Runtime.getRuntime().exec(\\\"touch /tmp/gotcha\\\"); pr.waitFor()", - "Method calls not allowed on [java.lang.Runtime]"); - - testFailure("d = new DateTime(); d.getClass().getDeclaredMethod(\\\"plus\\\").setAccessible(true)", - "Expression [MethodCallExpression] is not allowed: d.getClass()"); - - testFailure("d = new DateTime(); d.\\\"${'get' + 'Class'}\\\"()." + - "\\\"${'getDeclared' + 'Method'}\\\"(\\\"now\\\").\\\"${'set' + 'Accessible'}\\\"(false)", - "Expression [MethodCallExpression] is not allowed: d.$(get + Class)().$(getDeclared + Method)(now).$(set + Accessible)(false)"); - - testFailure("Class.forName(\\\"DateTime\\\").getDeclaredMethod(\\\"plus\\\").setAccessible(true)", - "Expression [MethodCallExpression] is not allowed: java.lang.Class.forName(DateTime)"); - - testFailure("Eval.me('2 + 2')", "Method calls not allowed on [groovy.util.Eval]"); - - testFailure("Eval.x(5, 'x + 2')", "Method calls not allowed on [groovy.util.Eval]"); - - testFailure("t = new java.util.concurrent.ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS, " + - "new java.util.concurrent.LinkedBlockingQueue()); t.execute({ println 5 })", - "Expression [ConstructorCallExpression] is not allowed: new java.util.concurrent.ThreadPoolExecutor"); - - testFailure("d = new Date(); java.lang.reflect.Field f = Date.class.getDeclaredField(\\\"fastTime\\\");" + - " f.setAccessible(true); f.get(\\\"fastTime\\\")", - "Method calls not allowed on [java.lang.reflect.Field]"); - - testFailure("t = new Thread({ println 3 }); t.start(); t.join()", - "Expression [ConstructorCallExpression] is not allowed: new java.lang.Thread"); - - testFailure("Thread.start({ println 4 })", "Method calls not allowed on [java.lang.Thread]"); - - testFailure("import java.util.concurrent.ThreadPoolExecutor;", - "Importing [java.util.concurrent.ThreadPoolExecutor] is not allowed"); - - testFailure("s = new java.net.URL();", "Expression [ConstructorCallExpression] is not allowed: new java.net.URL()"); - - testFailure("def methodName = 'ex'; Runtime.\\\"${'get' + 'Runtime'}\\\"().\\\"${methodName}ec\\\"(\\\"touch /tmp/gotcha2\\\")", - "Expression [MethodCallExpression] is not allowed: java.lang.Runtime.$(get + Runtime)().$methodNameec(touch /tmp/gotcha2)"); - - testFailure("def c = [doc['foo'].value, 3, 4].&size; c()", - "Expression [MethodPointerExpression] is not allowed"); - - testFailure("[doc['foo'].value, 3, 4].invokeMethod([1,2],\\\"size\\\", new Object[0])", - "Expression [MethodCallExpression] is not allowed: [doc[foo].value, 3, 4].invokeMethod([1, 2], size, [])"); - } - - @Test - public void testDynamicBlacklist() throws Exception { - int nodes = randomIntBetween(1, 3); - Settings nodeSettings = ImmutableSettings.builder() - .put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, true) - .build(); - internalCluster().startNodesAsync(nodes, nodeSettings).get(); - client().admin().cluster().prepareHealth().setWaitForNodes(nodes + "").get(); - - client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get(); - - testSuccess("[doc['foo'].value, 3, 4].isEmpty()"); - testSuccess("[doc['foo'].value, 3, 4].size()"); - - // Now we blacklist two methods, .isEmpty() and .size() - Settings blacklistSettings = ImmutableSettings.builder() - .put(GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, "isEmpty,size") - .build(); - - client().admin().cluster().prepareUpdateSettings().setTransientSettings(blacklistSettings).get(); - - testFailure("[doc['foo'].value, 3, 4].isEmpty()", - "Expression [MethodCallExpression] is not allowed: [doc[foo].value, 3, 4].isEmpty()"); - testFailure("[doc['foo'].value, 3, 4].size()", - "Expression [MethodCallExpression] is not allowed: [doc[foo].value, 3, 4].size()"); - } - - public void testSuccess(String script) { - logger.info("--> script: " + script); - SearchResponse resp = client().prepareSearch("test") - .setSource("{\"query\": {\"match_all\": {}}," + - "\"sort\":{\"_script\": {\"script\": \""+ script + - "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); - assertNoFailures(resp); - assertThat(resp.getHits().getAt(0).getSortValues(), equalTo(new Object[]{7.0})); - } - - public void testFailure(String script, String failMessage) { - logger.info("--> script: " + script); - try { - client().prepareSearch("test") - .setSource("{\"query\": {\"match_all\": {}}," + - "\"sort\":{\"_script\": {\"script\": \""+ script + - "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); - fail("script: " + script + " failed to be caught be the sandbox!"); - } catch (SearchPhaseExecutionException e) { - assertThat("script failed, but with incorrect message: " + e.toString(), e.toString().contains(failMessage), equalTo(true)); - } - } -} diff --git a/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/src/test/java/org/elasticsearch/script/NativeScriptTests.java index f08eb02e822..951f605801e 100644 --- a/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.env.Environment; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -81,7 +80,7 @@ public class NativeScriptTests extends ElasticsearchTestCase { nativeScriptFactoryMap.put("my", new MyNativeScriptFactory()); Set scriptEngineServices = ImmutableSet.of(new NativeScriptEngineService(settings, nativeScriptFactoryMap)); ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Lists.newArrayList()); - ScriptService scriptService = new ScriptService(settings, environment, scriptEngineServices, resourceWatcherService, new NodeSettingsService(settings), scriptContextRegistry); + ScriptService scriptService = new ScriptService(settings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry); for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) { assertThat(scriptService.compile(new Script(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, null), scriptContext), notNullValue()); diff --git a/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java b/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java deleted file mode 100644 index 0799fa0ddbd..00000000000 --- a/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.script; - -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.groovy.GroovyScriptEngineService; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; - -import static org.hamcrest.Matchers.containsString; - -/** - * Test that a system where the sandbox is disabled while dynamic scripting is - * also disabled does not allow a script to be sent - */ -@ElasticsearchIntegrationTest.ClusterScope(scope=ElasticsearchIntegrationTest.Scope.SUITE) -public class SandboxDisabledTests extends ElasticsearchIntegrationTest { - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - .put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, false) - .put("script.inline", false).build(); - } - - @Test - public void testScriptingDisabledWhileSandboxDisabled() { - client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get(); - try { - client().prepareSearch("test") - .setSource("{\"query\": {\"match_all\": {}}," + - "\"sort\":{\"_script\": {\"script\": \"doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); - fail("shards should fail because the sandbox and dynamic scripting are disabled"); - } catch (Exception e) { - assertThat(e.toString(), containsString("scripts of type [inline], operation [search] and lang [groovy] are disabled")); - } - } -} diff --git a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index deff1b891c4..0493203e7a5 100644 --- a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.expression.ExpressionScriptEngineService; import org.elasticsearch.script.groovy.GroovyScriptEngineService; @@ -103,7 +102,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { private void buildScriptService(Settings additionalSettings) throws IOException { Settings finalSettings = ImmutableSettings.builder().put(baseSettings).put(additionalSettings).build(); Environment environment = new Environment(finalSettings); - scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, new NodeSettingsService(finalSettings), scriptContextRegistry) { + scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry) { @Override String getScriptFromIndex(String scriptLang, String id) { //mock the script that gets retrieved from an index diff --git a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java index 6b199d9970a..2e83e4ef3ba 100644 --- a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java +++ b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; -import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -50,7 +49,6 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest { @Override protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - .put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, false) // aggressive filter caching so that we can assert on the number of iterations of the script filters .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) diff --git a/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java b/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java index c9abce165fc..2ef5d279c10 100644 --- a/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java +++ b/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.timeout; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -38,7 +37,7 @@ public class SearchTimeoutTests extends ElasticsearchIntegrationTest { @Override protected Settings nodeSettings(int nodeOrdinal) { - return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, false).build(); + return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).build(); } @Test From a1289b4ad5202aa64b12c5cd0a952bfc90ab3cd8 Mon Sep 17 00:00:00 2001 From: minde-eagleeye Date: Mon, 27 Apr 2015 16:53:39 +0100 Subject: [PATCH 167/236] Docs: Update cluster.asciidoc added a missing comma in one of examples Closes #10834 --- docs/reference/modules/cluster.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/modules/cluster.asciidoc b/docs/reference/modules/cluster.asciidoc index 12d4c7443fd..25a88b2eeeb 100644 --- a/docs/reference/modules/cluster.asciidoc +++ b/docs/reference/modules/cluster.asciidoc @@ -227,7 +227,7 @@ several attributes, for example: [source,js] -------------------------------------------------- curl -XPUT localhost:9200/test/_settings -d '{ - "index.routing.allocation.include.group1" : "xxx" + "index.routing.allocation.include.group1" : "xxx", "index.routing.allocation.include.group2" : "yyy", "index.routing.allocation.exclude.group3" : "zzz", "index.routing.allocation.require.group4" : "aaa" From 54f2a91aff76be56352d12b1e530f7bab85e00d2 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 28 Apr 2015 13:09:00 +0300 Subject: [PATCH 168/236] Test: testMinimumMasterNodes should use at least min_master_nodes unicast hosts --- .../elasticsearch/discovery/ZenUnicastDiscoveryTests.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java index 430690ae146..f265869ec75 100644 --- a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java @@ -81,8 +81,9 @@ public class ZenUnicastDiscoveryTests extends ElasticsearchIntegrationTest { // can't be satisfied. public void testMinimumMasterNodes() throws Exception { int currentNumNodes = randomIntBetween(3, 5); - int currentNumOfUnicastHosts = randomIntBetween(1, currentNumNodes); - final Settings settings = ImmutableSettings.settingsBuilder().put("discovery.zen.minimum_master_nodes", currentNumNodes / 2 + 1).build(); + final int min_master_nodes = currentNumNodes / 2 + 1; + int currentNumOfUnicastHosts = randomIntBetween(min_master_nodes, currentNumNodes); + final Settings settings = ImmutableSettings.settingsBuilder().put("discovery.zen.minimum_master_nodes", min_master_nodes).build(); discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, currentNumOfUnicastHosts, settings); List nodes = internalCluster().startNodesAsync(currentNumNodes).get(); From b8caa52e7c329e679935e26f554e7180df7b16d9 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 28 Apr 2015 14:24:51 +0200 Subject: [PATCH 169/236] [TEST] make CountRequestBuilder#toString and SearchRequestBuilder#toString tests non platform dependent --- .../elasticsearch/action/count/CountRequestBuilderTests.java | 3 ++- .../elasticsearch/action/search/SearchRequestBuilderTests.java | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java b/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java index 7847f2abefa..22387d277e8 100644 --- a/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java +++ b/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java @@ -35,6 +35,7 @@ import org.junit.Test; import java.io.IOException; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; public class CountRequestBuilderTests extends ElasticsearchTestCase { @@ -72,7 +73,7 @@ public class CountRequestBuilderTests extends ElasticsearchTestCase { CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); String query = "{ \"match_all\" : {} }"; countRequestBuilder.setQuery(new BytesArray(query)); - assertThat(countRequestBuilder.toString(), equalTo("{\n \"query\":{ \"match_all\" : {} }\n}")); + assertThat(countRequestBuilder.toString(), containsString("\"query\":{ \"match_all\" : {} }")); } @Test diff --git a/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java b/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java index 415afd1ffad..57a48bbbcc0 100644 --- a/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java +++ b/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java @@ -34,6 +34,7 @@ import org.junit.Test; import java.io.IOException; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; public class SearchRequestBuilderTests extends ElasticsearchTestCase { @@ -83,7 +84,7 @@ public class SearchRequestBuilderTests extends ElasticsearchTestCase { SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); String query = "{ \"match_all\" : {} }"; searchRequestBuilder.setQuery(query); - assertThat(searchRequestBuilder.toString(), equalTo("{\n \"query\":{ \"match_all\" : {} }\n}")); + assertThat(searchRequestBuilder.toString(), containsString("\"query\":{ \"match_all\" : {} }")); } @Test From 4f0492a780bc6154703f4da30efcce40adbfbc64 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 27 Apr 2015 18:58:01 +0200 Subject: [PATCH 170/236] [TEST] Run tests with 1 or 2 nodes by default This commit adds support for running with only one node and sets the maximum number of nodes to 3 by default. if run with test.nighly=true at most 6 nodes are used. This gave a 20% speed improvement compared to the previoulys minimum number of nodes of 3. --- .../elasticsearch/action/IndicesRequestTests.java | 2 +- .../OldIndexBackwardsCompatibilityTests.java | 1 + .../cluster/ack/AckClusterUpdateSettingsTests.java | 2 +- .../java/org/elasticsearch/cluster/ack/AckTests.java | 4 +--- .../cluster/allocation/AwarenessAllocationTests.java | 2 +- .../cluster/shards/ClusterSearchShardsTests.java | 2 +- .../index/suggest/stats/SuggestStatsTests.java | 2 ++ .../settings/UpdateNumberOfReplicasTests.java | 1 + .../indices/state/SimpleIndexStateTests.java | 1 + .../indices/template/SimpleIndexTemplateTests.java | 9 +++++---- .../aggregations/bucket/SignificantTermsTests.java | 4 ++-- .../aggregations/metrics/CardinalityTests.java | 4 ++-- .../search/basic/SearchWhileRelocatingTests.java | 2 +- .../basic/SearchWithRandomExceptionsTests.java | 2 +- .../search/preference/SearchPreferenceTests.java | 1 + .../elasticsearch/search/stats/SearchStatsTests.java | 1 + .../elasticsearch/snapshots/RepositoriesTests.java | 2 ++ .../test/ElasticsearchIntegrationTest.java | 12 ++++++------ .../org/elasticsearch/test/InternalTestCluster.java | 5 ++--- .../test/rest/ElasticsearchRestTestCase.java | 5 ++++- 20 files changed, 37 insertions(+), 27 deletions(-) diff --git a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java index 90118aab371..a62b20cbc99 100644 --- a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java +++ b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java @@ -117,7 +117,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.*; -@ClusterScope(scope = Scope.SUITE, numClientNodes = 1) +@ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2) @Slow public class IndicesRequestTests extends ElasticsearchIntegrationTest { diff --git a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java index 206bc4469df..8ed550f1a98 100644 --- a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java @@ -72,6 +72,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +// needs at least 2 nodes since it bumps replicas to 1 @ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0) @LuceneTestCase.SuppressFileSystems("ExtrasFS") @LuceneTestCase.Slow diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java index 3305469c780..717aa891c6c 100644 --- a/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java +++ b/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java @@ -42,7 +42,7 @@ import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -@ClusterScope(scope = TEST) +@ClusterScope(scope = TEST, minNumDataNodes = 2) public class AckClusterUpdateSettingsTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java index e1da078e3eb..00724aada21 100644 --- a/src/test/java/org/elasticsearch/cluster/ack/AckTests.java +++ b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; @@ -52,11 +51,10 @@ import org.junit.Test; import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.*; -@ClusterScope(scope = SUITE) +@ClusterScope(minNumDataNodes = 2) public class AckTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java index 2ffd88b978e..b01e16f1ca8 100644 --- a/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java +++ b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java @@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ -@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes =0) +@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes =0, minNumDataNodes = 2) public class AwarenessAllocationTests extends ElasticsearchIntegrationTest { private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class); diff --git a/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java index 013c4a728e1..f337bf5b67c 100644 --- a/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java +++ b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java @@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ -@ClusterScope(scope= Scope.SUITE, numDataNodes =2) +@ClusterScope(scope= Scope.SUITE, numDataNodes = 2) public class ClusterSearchShardsTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java b/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java index 1273b255fc1..18a9e382d9f 100644 --- a/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java +++ b/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java @@ -39,12 +39,14 @@ import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.hamcrest.Matchers.*; /** */ +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SuggestStatsTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java index f98a3c3a5d3..c7e047265c0 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java @@ -38,6 +38,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java b/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java index 962a7eb028d..bb88f994bae 100644 --- a/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java +++ b/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java @@ -44,6 +44,7 @@ import static org.hamcrest.Matchers.nullValue; /** * */ +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SimpleIndexStateTests extends ElasticsearchIntegrationTest { private final ESLogger logger = Loggers.getLogger(SimpleIndexStateTests.class); diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index 1c3f8f8c9ca..3d19218495b 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -70,6 +70,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { client().admin().indices().preparePutTemplate("template_1") .setTemplate("te*") + .setSettings(indexSettings()) .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", "yes").endObject() @@ -79,6 +80,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { client().admin().indices().preparePutTemplate("template_2") .setTemplate("test*") + .setSettings(indexSettings()) .setOrder(1) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field2").field("type", "string").field("store", "no").endObject() @@ -88,6 +90,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { // test create param assertThrows(client().admin().indices().preparePutTemplate("template_2") .setTemplate("test*") + .setSettings(indexSettings()) .setCreate(true) .setOrder(1) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") @@ -103,8 +106,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { // index something into test_index, will match on both templates client().prepareIndex("test_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet(); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - + ensureGreen(); SearchResponse searchResponse = client().prepareSearch("test_index") .setQuery(termQuery("field1", "value1")) .addField("field1").addField("field2") @@ -116,8 +118,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { client().prepareIndex("text_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet(); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - + ensureGreen(); // now only match on one template (template_1) searchResponse = client().prepareSearch("text_index") .setQuery(termQuery("field1", "value1")) diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java index 742fd2cd1fa..db2d86c742a 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java @@ -65,8 +65,8 @@ public class SignificantTermsTests extends ElasticsearchIntegrationTest { @Override public Settings indexSettings() { return ImmutableSettings.builder() - .put("index.number_of_shards", between(1, 5)) - .put("index.number_of_replicas", between(0, 1)) + .put("index.number_of_shards", numberOfShards()) + .put("index.number_of_replicas", numberOfReplicas()) .build(); } diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java index 623143a167b..21c3e702e9d 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java @@ -46,8 +46,8 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Override public Settings indexSettings() { return ImmutableSettings.builder() - .put("index.number_of_shards", between(1, 5)) - .put("index.number_of_replicas", between(0, 1)) + .put("index.number_of_shards", numberOfShards()) + .put("index.number_of_replicas", numberOfReplicas()) .build(); } diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java index a5024920877..f11f72292e7 100644 --- a/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java +++ b/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.basic; -import com.carrotsearch.randomizedtesting.annotations.Nightly; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -43,6 +42,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTi import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SearchWhileRelocatingTests extends ElasticsearchIntegrationTest { // @LuceneTestCase.AwaitsFix(bugUrl = "problem with search searching on 1 shard (no replica), " + diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java index a759fb7f1c3..b4c26527639 100644 --- a/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java +++ b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java @@ -94,7 +94,7 @@ public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTes if (createIndexWithoutErrors) { Builder settings = settingsBuilder() - .put("index.number_of_replicas", randomIntBetween(0, 1)); + .put("index.number_of_replicas", numberOfReplicas()); logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); client().admin().indices().prepareCreate("test") .setSettings(settings) diff --git a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java index 98478e67b24..e3215ddd2b4 100644 --- a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java +++ b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java @@ -36,6 +36,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.*; +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SearchPreferenceTests extends ElasticsearchIntegrationTest { @Test // see #2896 diff --git a/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java b/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java index bfc9684cb18..55e30467863 100644 --- a/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java +++ b/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java @@ -46,6 +46,7 @@ import static org.hamcrest.Matchers.*; /** */ +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SearchStatsTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java b/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java index 8f86ec572a0..171ce1c3481 100644 --- a/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java +++ b/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.snapshots.mockstore.MockRepositoryModule; +import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; import java.nio.file.Path; @@ -48,6 +49,7 @@ import static org.hamcrest.Matchers.notNullValue; /** */ +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class RepositoriesTests extends AbstractSnapshotTests { @Test diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 1c0c11bb5cd..64ab72bd1e6 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -1503,16 +1503,16 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase int numDataNodes() default -1; /** - * Returns the minimum number of nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_MIN_NUM_DATA_NODES}. + * Returns the minimum number of nodes in the cluster. Default is -1. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ - int minNumDataNodes() default InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES; + int minNumDataNodes() default -1; /** - * Returns the maximum number of nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_MAX_NUM_DATA_NODES}. + * Returns the maximum number of nodes in the cluster. Default is -1. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ - int maxNumDataNodes() default InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES; + int maxNumDataNodes() default -1; /** * Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a @@ -1613,12 +1613,12 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase private int getMinNumDataNodes() { ClusterScope annotation = getAnnotation(this.getClass()); - return annotation == null ? InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes(); + return annotation == null || annotation.minNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes(); } private int getMaxNumDataNodes() { ClusterScope annotation = getAnnotation(this.getClass()); - return annotation == null ? InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes(); + return annotation == null || annotation.maxNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes(); } private int getNumClientNodes() { diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 660d228a43e..70bc6b99c11 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -181,8 +181,8 @@ public final class InternalTestCluster extends TestCluster { private static final boolean ENABLE_MOCK_MODULES = RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true); - static final int DEFAULT_MIN_NUM_DATA_NODES = 2; - static final int DEFAULT_MAX_NUM_DATA_NODES = 6; + static final int DEFAULT_MIN_NUM_DATA_NODES = 1; + static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3; static final int DEFAULT_NUM_CLIENT_NODES = -1; static final int DEFAULT_MIN_NUM_CLIENT_NODES = 0; @@ -237,7 +237,6 @@ public final class InternalTestCluster extends TestCluster { super(clusterSeed); this.baseDir = baseDir; this.clusterName = clusterName; - if (minNumDataNodes < 0 || maxNumDataNodes < 0) { throw new IllegalArgumentException("minimum and maximum number of data nodes must be >= 0"); } diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java index 78a29610d33..dd6ae14612d 100644 --- a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java +++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java @@ -247,7 +247,10 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration @Override protected int maximumNumberOfReplicas() { - return 1; // never go crazy in the REST tests + // hardcoded 1 since this is what clients also do and our tests must expect that we have only node + // with replicas set to 1 ie. the cluster won't be green + return 1; + } /** From d7b7614f6fc960a947437007398464bf6a673123 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 28 Apr 2015 08:38:57 -0400 Subject: [PATCH 171/236] Use http for lucene snapshot downloads --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index cdb74c02c0d..fa3d5813d58 100644 --- a/pom.xml +++ b/pom.xml @@ -73,7 +73,7 @@ lucene-snapshots Lucene Snapshots - https://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision} + http://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision} From cb89a14010a7a9240459083ce2986b63b9ed3baf Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 27 Apr 2015 20:47:52 -0400 Subject: [PATCH 172/236] Add default to field_value_factor field_value_factor now takes a default that is used if the document doesn't have a value for that field. It looks like: "field_value_factor": { "field": "popularity", "missing": 1 } Closes #10841 --- .../queries/function-score-query.asciidoc | 5 +++- .../function/FieldValueFactorFunction.java | 30 +++++++++++++------ .../FieldValueFactorFunctionBuilder.java | 13 ++++++++ .../FieldValueFactorFunctionParser.java | 8 +++-- .../FunctionScoreFieldValueTests.java | 8 +++++ 5 files changed, 52 insertions(+), 12 deletions(-) diff --git a/docs/reference/query-dsl/queries/function-score-query.asciidoc b/docs/reference/query-dsl/queries/function-score-query.asciidoc index 32b7c0c386c..8b742bb088d 100644 --- a/docs/reference/query-dsl/queries/function-score-query.asciidoc +++ b/docs/reference/query-dsl/queries/function-score-query.asciidoc @@ -175,7 +175,8 @@ doing so would look like: "field_value_factor": { "field": "popularity", "factor": 1.2, - "modifier": "sqrt" + "modifier": "sqrt", + "missing": 1 } -------------------------------------------------- @@ -193,6 +194,8 @@ There are a number of options for the `field_value_factor` function: |`modifier` |Modifier to apply to the field value, can be one of: `none`, `log`, `log1p`, `log2p`, `ln`, `ln1p`, `ln2p`, `square`, `sqrt`, or `reciprocal`. Defaults to `none`. +|`missing` |Value used if the document doesn't have that field. The modifier +and factor are still applied to it as though it were read from the document. |======================================================================= Keep in mind that taking the log() of 0, or the square root of a negative number diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index 437e5a63b28..135cb53f65f 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -36,14 +36,20 @@ public class FieldValueFactorFunction extends ScoreFunction { private final String field; private final float boostFactor; private final Modifier modifier; + /** + * Value used if the document is missing the field. + */ + private final Double missing; private final IndexNumericFieldData indexFieldData; - public FieldValueFactorFunction(String field, float boostFactor, Modifier modifierType, IndexNumericFieldData indexFieldData) { + public FieldValueFactorFunction(String field, float boostFactor, Modifier modifierType, Double missing, + IndexNumericFieldData indexFieldData) { super(CombineFunction.MULT); this.field = field; this.boostFactor = boostFactor; this.modifier = modifierType; this.indexFieldData = indexFieldData; + this.missing = missing; } @Override @@ -55,26 +61,32 @@ public class FieldValueFactorFunction extends ScoreFunction { public double score(int docId, float subQueryScore) { values.setDocument(docId); final int numValues = values.count(); + double value; if (numValues > 0) { - double val = values.valueAt(0) * boostFactor; - double result = modifier.apply(val); - if (Double.isNaN(result) || Double.isInfinite(result)) { - throw new ElasticsearchException("Result of field modification [" + modifier.toString() + - "(" + val + ")] must be a number"); - } - return result; + value = values.valueAt(0); + } else if (missing != null) { + value = missing; } else { throw new ElasticsearchException("Missing value for field [" + field + "]"); } + double val = value * boostFactor; + double result = modifier.apply(val); + if (Double.isNaN(result) || Double.isInfinite(result)) { + throw new ElasticsearchException("Result of field modification [" + modifier.toString() + "(" + val + + ")] must be a number"); + } + return result; } @Override public Explanation explainScore(int docId, Explanation subQueryScore) { String modifierStr = modifier != null ? modifier.toString() : ""; + String defaultStr = missing != null ? "?:" + missing : ""; double score = score(docId, subQueryScore.getValue()); return Explanation.match( CombineFunction.toFloat(score), - "field value function: " + modifierStr + "(" + "doc['" + field + "'].value * factor=" + boostFactor + ")"); + String.format(Locale.ROOT, + "field value function: %s(doc['%s'].value%s * factor=%s)", modifierStr, field, defaultStr, boostFactor)); } }; } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionBuilder.java index 34a2f8bbc67..5d38c5a5eb5 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionBuilder.java @@ -33,6 +33,7 @@ import java.util.Locale; public class FieldValueFactorFunctionBuilder extends ScoreFunctionBuilder { private String field = null; private Float factor = null; + private Double missing = null; private FieldValueFactorFunction.Modifier modifier = null; public FieldValueFactorFunctionBuilder(String fieldName) { @@ -49,6 +50,14 @@ public class FieldValueFactorFunctionBuilder extends ScoreFunctionBuilder { return this; } + /** + * Value used instead of the field value for documents that don't have that field defined. + */ + public FieldValueFactorFunctionBuilder missing(double missing) { + this.missing = missing; + return this; + } + public FieldValueFactorFunctionBuilder modifier(FieldValueFactorFunction.Modifier modifier) { this.modifier = modifier; return this; @@ -65,6 +74,10 @@ public class FieldValueFactorFunctionBuilder extends ScoreFunctionBuilder { builder.field("factor", factor); } + if (missing != null) { + builder.field("missing", missing); + } + if (modifier != null) { builder.field("modifier", modifier.toString().toLowerCase(Locale.ROOT)); } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java index 3426dcbef3c..c5f454ef40a 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java @@ -41,7 +41,8 @@ import java.util.Locale; * "field_value_factor": { * "field": "myfield", * "factor": 1.5, - * "modifier": "square" + * "modifier": "square", + * "missing": 1 * } * } * @@ -56,6 +57,7 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { String field = null; float boostFactor = 1; FieldValueFactorFunction.Modifier modifier = FieldValueFactorFunction.Modifier.NONE; + Double missing = null; XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -67,6 +69,8 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { boostFactor = parser.floatValue(); } else if ("modifier".equals(currentFieldName)) { modifier = FieldValueFactorFunction.Modifier.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } else if ("missing".equals(currentFieldName)) { + missing = parser.doubleValue(); } else { throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]"); } @@ -84,7 +88,7 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { if (mapper == null) { throw new ElasticsearchException("Unable to find a field mapper for field [" + field + "]"); } - return new FieldValueFactorFunction(field, boostFactor, modifier, + return new FieldValueFactorFunction(field, boostFactor, modifier, missing, (IndexNumericFieldData)searchContext.fieldData().getForField(mapper)); } diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java index 0d4d91b1f92..eef4ed27959 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java @@ -88,6 +88,14 @@ public class FunctionScoreFieldValueTests extends ElasticsearchIntegrationTest { // We are expecting an exception, because 3 has no field } + // doc 3 doesn't have a "test" field but we're defaulting it to 100 so it should be last + response = client().prepareSearch("test") + .setExplain(randomBoolean()) + .setQuery(functionScoreQuery(matchAllQuery(), + fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100))) + .get(); + assertOrderedSearchHits(response, "1", "2", "3"); + // n divided by 0 is infinity, which should provoke an exception. try { response = client().prepareSearch("test") From bc3136a3453a45abec66da690304d6ec694bfe8c Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 28 Apr 2015 16:05:22 +0200 Subject: [PATCH 173/236] [TEST] Cleanup Direcotry and Searcher mock code We deployed our own code to check if directories are closed etc an d if serachers are still open. Yet, since we don't have a global cluster anymore we can just use lucene's internal mechanism to do that. This commit removes all special handling and usese LuceneTestCase.closeAfterSuite to fail if certain resources are not closed Closes #10853 --- .../OldIndexBackwardsCompatibilityTests.java | 1 - .../gateway/MetaDataStateFormatTest.java | 27 --- .../gateway/RecoveryFromGatewayTests.java | 3 +- .../indices/leaks/IndicesLeaksTests.java | 3 - .../indices/recovery/IndexRecoveryTests.java | 5 +- .../SearchWithRandomExceptionsTests.java | 13 +- .../test/ElasticsearchTestCase.java | 22 -- .../org/elasticsearch/test/TestCluster.java | 2 - .../test/engine/AssertingSearcher.java | 53 +++-- .../test/engine/MockEngineSupport.java | 43 ++-- .../test/engine/MockInternalEngine.java | 10 +- .../test/engine/MockShadowEngine.java | 23 +- .../hamcrest/ElasticsearchAssertions.java | 66 ------ .../test/store/MockDirectoryHelper.java | 199 ------------------ .../test/store/MockFSDirectoryService.java | 168 +++++++++++++-- 15 files changed, 222 insertions(+), 416 deletions(-) delete mode 100644 src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java diff --git a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java index 8ed550f1a98..2ff82d9f464 100644 --- a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java @@ -227,7 +227,6 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio void unloadIndex(String indexName) throws Exception { ElasticsearchAssertions.assertAcked(client().admin().indices().prepareDelete(indexName).get()); - ElasticsearchAssertions.assertAllFilesClosed(); } public void testAllVersionsTested() throws Exception { diff --git a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java index 5581290d0e5..12b699a989b 100644 --- a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java +++ b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java @@ -538,33 +538,6 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { } } - // copied from lucene - it's package private - final class CloseableDirectory implements Closeable { - private final BaseDirectoryWrapper dir; - private final TestRuleMarkFailure failureMarker; - - public CloseableDirectory(BaseDirectoryWrapper dir, - TestRuleMarkFailure failureMarker) { - this.dir = dir; - this.failureMarker = failureMarker; - } - - @Override - public void close() throws IOException { - // We only attempt to check open/closed state if there were no other test - // failures. - try { - if (failureMarker.wasSuccessful() && dir.isOpen()) { - Assert.fail("Directory not closed: " + dir); - } - } finally { - if (dir.isOpen()) { - dir.close(); - } - } - } - } - public Path[] content(String glob, Path dir) throws IOException { try (DirectoryStream stream = Files.newDirectoryStream(dir, glob)) { return Iterators.toArray(stream.iterator(), Path.class); diff --git a/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java b/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java index 42b8822a980..44321fad582 100644 --- a/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java +++ b/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.InternalTestCluster.RestartCallback; -import org.elasticsearch.test.store.MockDirectoryHelper; import org.elasticsearch.test.store.MockFSDirectoryService; import org.junit.Test; @@ -353,7 +352,7 @@ public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest { .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) .put("gateway.recover_after_nodes", 4) .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, 4) - .put(MockDirectoryHelper.CRASH_INDEX, false).build(); + .put(MockFSDirectoryService.CRASH_INDEX, false).build(); internalCluster().startNodesAsync(4, settings).get(); // prevent any rebalance actions during the peer recovery diff --git a/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java b/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java index 28bcd9f63d9..ca0b1a52029 100644 --- a/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java +++ b/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.indices.leaks; -import org.apache.lucene.util.LuceneTestCase.BadApple; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.index.mapper.DocumentMapper; @@ -28,7 +27,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import org.elasticsearch.test.store.MockDirectoryHelper; import org.junit.Test; import java.lang.ref.WeakReference; @@ -92,7 +90,6 @@ public class IndicesLeaksTests extends ElasticsearchIntegrationTest { shardInjector = null; cluster().wipeIndices("test"); - MockDirectoryHelper.wrappers.clear(); // we need to clear this to allow the objects to recycle for (int i = 0; i < 100; i++) { System.gc(); diff --git a/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java b/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java index 113ac6d80b7..842a6c533dc 100644 --- a/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java +++ b/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.recovery; -import com.carrotsearch.randomizedtesting.LifecycleScope; import com.google.common.util.concurrent.ListenableFuture; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -52,7 +51,7 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.store.MockDirectoryHelper; +import org.elasticsearch.test.store.MockFSDirectoryService; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.*; import org.junit.Test; @@ -522,7 +521,7 @@ public class IndexRecoveryTests extends ElasticsearchIntegrationTest { .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, "1s") .put("cluster.routing.schedule", "100ms") // aggressive reroute post shard failures .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName()) - .put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again + .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again .build(); // start a master node internalCluster().startNode(nodeSettings); diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java index b4c26527639..b4fef4f623d 100644 --- a/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java +++ b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.engine.MockEngineSupport; import org.elasticsearch.test.engine.ThrowingLeafReaderWrapper; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.store.MockDirectoryHelper; import org.elasticsearch.test.store.MockFSDirectoryService; import org.junit.Test; @@ -108,15 +107,15 @@ public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTes client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute().get(); client().admin().indices().prepareClose("test").execute().get(); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder() - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, exceptionRate) - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)); + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)); client().admin().indices().prepareOpen("test").execute().get(); } else { Builder settings = settingsBuilder() .put("index.number_of_replicas", randomIntBetween(0, 1)) .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, exceptionRate) - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate); // we cannot expect that the index will be valid + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate); // we cannot expect that the index will be valid logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); client().admin().indices().prepareCreate("test") .setSettings(settings) @@ -190,8 +189,8 @@ public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTes // check the index still contains the records that we indexed without errors client().admin().indices().prepareClose("test").execute().get(); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder() - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, 0) - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0)); + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, 0) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0)); client().admin().indices().prepareOpen("test").execute().get(); ensureGreen(); SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", "init")).get(); diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 55b4b15af01..1276089b182 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -71,8 +71,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllFilesClosed; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSearchersClosed; /** * Base testcase for randomized unit testing with Elasticsearch @@ -205,26 +203,6 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { System.clearProperty(EsExecutors.DEFAULT_SYSPROP); } - // check some things (like MockDirectoryWrappers) are closed where we currently - // manage them. TODO: can we add these to LuceneTestCase.closeAfterSuite directly? - // or something else simpler instead of the fake closeables? - - @BeforeClass - public static void setAfterSuiteAssertions() throws Exception { - closeAfterSuite(new Closeable() { - @Override - public void close() throws IOException { - assertAllFilesClosed(); - } - }); - closeAfterSuite(new Closeable() { - @Override - public void close() throws IOException { - assertAllSearchersClosed(); - } - }); - } - @After public final void ensureCleanedUp() throws Exception { MockPageCacheRecycler.ensureAllPagesAreReleased(); diff --git a/src/test/java/org/elasticsearch/test/TestCluster.java b/src/test/java/org/elasticsearch/test/TestCluster.java index adfa32ac1c6..d96dd084b3d 100644 --- a/src/test/java/org/elasticsearch/test/TestCluster.java +++ b/src/test/java/org/elasticsearch/test/TestCluster.java @@ -81,8 +81,6 @@ public abstract class TestCluster implements Iterable, Closeable { * This method checks all the things that need to be checked after each test */ public void assertAfterTest() throws IOException { - assertAllSearchersClosed(); - assertAllFilesClosed(); ensureEstimatedStats(); } diff --git a/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java b/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java index b3d893c4362..e88881faae9 100644 --- a/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java +++ b/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java @@ -21,17 +21,21 @@ package org.elasticsearch.test.engine; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; +import java.io.Closeable; +import java.io.IOException; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; /** * A searcher that asserts the IndexReader's refcount on close */ -public class AssertingSearcher extends Engine.Searcher { +class AssertingSearcher extends Engine.Searcher { private final Engine.Searcher wrappedSearcher; private final ShardId shardId; private final IndexSearcher indexSearcher; @@ -39,10 +43,10 @@ public class AssertingSearcher extends Engine.Searcher { private final Object lock = new Object(); private final int initialRefCount; private final ESLogger logger; - private final Map inFlightSearchers; + private final AtomicBoolean closed = new AtomicBoolean(false); - public AssertingSearcher(IndexSearcher indexSearcher, Engine.Searcher wrappedSearcher, - ShardId shardId, Map inFlightSearchers, + AssertingSearcher(IndexSearcher indexSearcher, final Engine.Searcher wrappedSearcher, + ShardId shardId, ESLogger logger) { super(wrappedSearcher.source(), indexSearcher); // we only use the given index searcher here instead of the IS of the wrapped searcher. the IS might be a wrapped searcher @@ -53,8 +57,15 @@ public class AssertingSearcher extends Engine.Searcher { initialRefCount = wrappedSearcher.reader().getRefCount(); this.indexSearcher = indexSearcher; assert initialRefCount > 0 : "IndexReader#getRefCount() was [" + initialRefCount + "] expected a value > [0] - reader is already closed"; - this.inFlightSearchers = inFlightSearchers; - this.inFlightSearchers.put(this, new RuntimeException("Unreleased Searcher, source [" + wrappedSearcher.source() + "]")); + final RuntimeException ex = new RuntimeException("Unreleased Searcher, source [" + wrappedSearcher.source() + "]"); + LuceneTestCase.closeAfterSuite(new Closeable() { + @Override + public void close() throws IOException { + if (closed.get() == false) { + throw ex; + } + } + }); } @Override @@ -64,29 +75,25 @@ public class AssertingSearcher extends Engine.Searcher { @Override public void close() throws ElasticsearchException { - RuntimeException remove = inFlightSearchers.remove(this); synchronized (lock) { - // make sure we only get this once and store the stack of the first caller! - if (remove == null) { - assert firstReleaseStack != null; + if (closed.compareAndSet(false, true)) { + firstReleaseStack = new RuntimeException(); + final int refCount = wrappedSearcher.reader().getRefCount(); + // this assert seems to be paranoid but given LUCENE-5362 we better add some assertions here to make sure we catch any potential + // problems. + assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. Initial refCount was: [" + initialRefCount + "]"; + try { + wrappedSearcher.close(); + } catch (RuntimeException ex) { + logger.debug("Failed to release searcher", ex); + throw ex; + } + } else { AssertionError error = new AssertionError("Released Searcher more than once, source [" + wrappedSearcher.source() + "]"); error.initCause(firstReleaseStack); throw error; - } else { - assert firstReleaseStack == null; - firstReleaseStack = new RuntimeException("Searcher Released first here, source [" + wrappedSearcher.source() + "]"); } } - final int refCount = wrappedSearcher.reader().getRefCount(); - // this assert seems to be paranoid but given LUCENE-5362 we better add some assertions here to make sure we catch any potential - // problems. - assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. Initial refCount was: [" + initialRefCount + "]"; - try { - wrappedSearcher.close(); - } catch (RuntimeException ex) { - logger.debug("Failed to release searcher", ex); - throw ex; - } } @Override diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java index 1c3581d2ae5..47c9472d2d6 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -33,8 +33,10 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ElasticsearchIntegrationTest; +import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Constructor; import java.util.Map; @@ -54,6 +56,7 @@ public final class MockEngineSupport { public static final String FLUSH_ON_CLOSE_RATIO = "index.engine.mock.flush_on_close.ratio"; private final AtomicBoolean closing = new AtomicBoolean(false); private final ESLogger logger = Loggers.getLogger(Engine.class); + private final ShardId shardId; public static class MockContext { public final Random random; @@ -71,12 +74,12 @@ public final class MockEngineSupport { } } - public static final ConcurrentMap INFLIGHT_ENGINE_SEARCHERS = new ConcurrentHashMap<>(); private final MockContext mockContext; public MockEngineSupport(EngineConfig config) { Settings indexSettings = config.getIndexSettings(); + shardId = config.getShardId(); final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l); Random random = new Random(seed); final double ratio = indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow @@ -99,33 +102,23 @@ public final class MockEngineSupport { * the first call and treats subsequent calls as if the engine passed is already closed. */ public CloseAction flushOrClose(Engine engine, CloseAction originalAction) throws IOException { - try { - if (closing.compareAndSet(false, true)) { // only do the random thing if we are the first call to this since super.flushOnClose() calls #close() again and then we might end up with a stackoverflow. - if (mockContext.flushOnClose > mockContext.random.nextDouble()) { - return CloseAction.FLUSH_AND_CLOSE; - } else { - return CloseAction.CLOSE; - } + if (closing.compareAndSet(false, true)) { // only do the random thing if we are the first call to this since super.flushOnClose() calls #close() again and then we might end up with a stackoverflow. + if (mockContext.flushOnClose > mockContext.random.nextDouble()) { + return CloseAction.FLUSH_AND_CLOSE; } else { - return originalAction; - } - } finally { - if (logger.isTraceEnabled()) { - // log debug if we have pending searchers - for (Map.Entry entry : INFLIGHT_ENGINE_SEARCHERS.entrySet()) { - logger.trace("Unreleased Searchers instance for shard [{}]", - entry.getValue(), entry.getKey().shardId()); - } + return CloseAction.CLOSE; } + } else { + return originalAction; } } - public AssertingIndexSearcher newSearcher(Engine engine, String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { + public AssertingIndexSearcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { IndexReader reader = searcher.getIndexReader(); IndexReader wrappedReader = reader; assert reader != null; if (reader instanceof DirectoryReader && mockContext.wrapReader) { - wrappedReader = wrapReader((DirectoryReader) reader, engine); + wrappedReader = wrapReader((DirectoryReader) reader); } // this executes basic query checks and asserts that weights are normalized only once etc. final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); @@ -133,7 +126,7 @@ public final class MockEngineSupport { return assertingIndexSearcher; } - private DirectoryReader wrapReader(DirectoryReader reader, Engine engine) { + private DirectoryReader wrapReader(DirectoryReader reader) { try { Constructor[] constructors = mockContext.wrapper.getConstructors(); Constructor nonRandom = null; @@ -177,4 +170,14 @@ public final class MockEngineSupport { } + public Engine.Searcher wrapSearcher(String source, Engine.Searcher engineSearcher, IndexSearcher searcher, SearcherManager manager) { + final AssertingIndexSearcher assertingIndexSearcher = newSearcher(source, searcher, manager); + assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); + // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will + // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager + // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here + return new AssertingSearcher(assertingIndexSearcher, engineSearcher, shardId, logger); + } + + } diff --git a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java index ed08a95e86f..e81009c4979 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java +++ b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java @@ -21,6 +21,7 @@ package org.elasticsearch.test.engine; import org.apache.lucene.search.AssertingIndexSearcher; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SearcherManager; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; @@ -71,12 +72,7 @@ final class MockInternalEngine extends InternalEngine { @Override protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { - final AssertingIndexSearcher assertingIndexSearcher = support().newSearcher(this, source, searcher, manager); - assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); - // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will - // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager - // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here - return new AssertingSearcher(assertingIndexSearcher, - super.newSearcher(source, searcher, manager), shardId, MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS, logger); + final Searcher engineSearcher = super.newSearcher(source, searcher, manager); + return support().wrapSearcher(source, engineSearcher, searcher, manager); } } diff --git a/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java b/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java index 198d4ba6639..1ed920b20fc 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java +++ b/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java @@ -37,29 +37,10 @@ final class MockShadowEngine extends ShadowEngine { this.support = new MockEngineSupport(config); } - @Override - public void close() throws IOException { - try { - super.close(); - } finally { - if (logger.isTraceEnabled()) { - // log debug if we have pending searchers - for (Map.Entry entry : MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.entrySet()) { - logger.trace("Unreleased Searchers instance for shard [{}]", entry.getValue(), entry.getKey().shardId()); - } - } - } - } - @Override protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { - final AssertingIndexSearcher assertingIndexSearcher = support.newSearcher(this, source, searcher, manager); - assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); - // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will - // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager - // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here - return new AssertingSearcher(assertingIndexSearcher, - super.newSearcher(source, searcher, manager), shardId, MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS, logger); + final Searcher engineSearcher = super.newSearcher(source, searcher, manager); + return support.wrapSearcher(source, engineSearcher, searcher, manager); } } diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 0a52c36ce32..7a822e163cb 100644 --- a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -26,7 +26,6 @@ import com.google.common.collect.Iterables; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; @@ -67,9 +66,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.engine.AssertingSearcher; -import org.elasticsearch.test.engine.MockEngineSupport; -import org.elasticsearch.test.store.MockDirectoryHelper; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.junit.Assert; @@ -80,7 +76,6 @@ import java.lang.reflect.InvocationTargetException; import java.nio.file.Files; import java.nio.file.Path; import java.util.*; -import java.util.concurrent.TimeUnit; import static com.google.common.base.Predicates.isNull; import static org.elasticsearch.test.ElasticsearchTestCase.*; @@ -683,67 +678,6 @@ public class ElasticsearchAssertions { return response; } - public static void assertAllSearchersClosed() { - /* in some cases we finish a test faster than the freeContext calls make it to the - * shards. Let's wait for some time if there are still searchers. If the are really - * pending we will fail anyway.*/ - try { - if (awaitBusy(new Predicate() { - @Override - public boolean apply(Object o) { - return MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.isEmpty(); - } - }, 5, TimeUnit.SECONDS)) { - return; - } - } catch (InterruptedException ex) { - if (MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.isEmpty()) { - return; - } - } - try { - RuntimeException ex = null; - StringBuilder builder = new StringBuilder("Unclosed Searchers instance for shards: ["); - for (Map.Entry entry : MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.entrySet()) { - ex = entry.getValue(); - builder.append(entry.getKey().shardId()).append(","); - } - builder.append("]"); - throw new RuntimeException(builder.toString(), ex); - } finally { - MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.clear(); - } - } - - public static void assertAllFilesClosed() { - try { - for (final MockDirectoryHelper.ElasticsearchMockDirectoryWrapper w : MockDirectoryHelper.wrappers) { - try { - w.awaitClosed(5000); - } catch (InterruptedException e) { - Thread.interrupted(); - } - if (!w.successfullyClosed()) { - if (w.closeException() == null) { - try { - w.close(); - } catch (IOException e) { - throw new ElasticsearchIllegalStateException("directory close threw IOException", e); - } - if (w.closeException() != null) { - throw w.closeException(); - } - } else { - throw w.closeException(); - } - } - assertThat(w.isOpen(), is(false)); - } - } finally { - MockDirectoryHelper.wrappers.clear(); - } - } - public static void assertNodeContainsPlugins(NodesInfoResponse response, String nodeId, List expectedJvmPluginNames, List expectedJvmPluginDescriptions, diff --git a/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java b/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java deleted file mode 100644 index 90e06d6a5df..00000000000 --- a/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.test.store; - -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FilterDirectory; -import org.apache.lucene.store.MockDirectoryWrapper.Throttling; -import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.store.NRTCachingDirectory; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.FsDirectoryService; -import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.store.IndexStoreModule; -import com.carrotsearch.randomizedtesting.SeedUtils; - -import java.io.IOException; -import java.lang.reflect.Field; -import java.util.Collection; -import java.util.Random; -import java.util.Set; - -public class MockDirectoryHelper { - public static final String RANDOM_IO_EXCEPTION_RATE = "index.store.mock.random.io_exception_rate"; - public static final String RANDOM_IO_EXCEPTION_RATE_ON_OPEN = "index.store.mock.random.io_exception_rate_on_open"; - public static final String RANDOM_PREVENT_DOUBLE_WRITE = "index.store.mock.random.prevent_double_write"; - public static final String RANDOM_NO_DELETE_OPEN_FILE = "index.store.mock.random.no_delete_open_file"; - public static final String CRASH_INDEX = "index.store.mock.random.crash_index"; - - public static final Set wrappers = ConcurrentCollections.newConcurrentSet(); - - private final Random random; - private final double randomIOExceptionRate; - private final double randomIOExceptionRateOnOpen; - private final Throttling throttle; - private final Settings indexSettings; - private final ShardId shardId; - private final boolean preventDoubleWrite; - private final boolean noDeleteOpenFile; - private final ESLogger logger; - private final boolean crashIndex; - - public MockDirectoryHelper(ShardId shardId, Settings indexSettings, ESLogger logger, Random random, long seed) { - this.random = random; - randomIOExceptionRate = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE, 0.0d); - randomIOExceptionRateOnOpen = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.0d); - preventDoubleWrite = indexSettings.getAsBoolean(RANDOM_PREVENT_DOUBLE_WRITE, true); // true is default in MDW - noDeleteOpenFile = indexSettings.getAsBoolean(RANDOM_NO_DELETE_OPEN_FILE, random.nextBoolean()); // true is default in MDW - random.nextInt(shardId.getId() + 1); // some randomness per shard - throttle = Throttling.NEVER; - crashIndex = indexSettings.getAsBoolean(CRASH_INDEX, true); - - if (logger.isDebugEnabled()) { - logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed), - throttle, crashIndex); - } - this.indexSettings = indexSettings; - this.shardId = shardId; - this.logger = logger; - } - - public Directory wrap(Directory dir) { - final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, logger, this.crashIndex); - w.setRandomIOExceptionRate(randomIOExceptionRate); - w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen); - w.setThrottling(throttle); - w.setCheckIndexOnClose(false); // we do this on the index level - w.setPreventDoubleWrite(preventDoubleWrite); - // TODO: make this test robust to virus scanner - w.setEnableVirusScanner(false); - w.setNoDeleteOpenFile(noDeleteOpenFile); - w.setUseSlowOpenClosers(false); - wrappers.add(w); - return w; - } - - public FsDirectoryService randomDirectorService(IndexStore indexStore, ShardPath path) { - ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder(); - builder.put(indexSettings); - builder.put(IndexStoreModule.STORE_TYPE, RandomPicks.randomFrom(random, IndexStoreModule.Type.values())); - return new FsDirectoryService(builder.build(), indexStore, path); - } - - public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper { - - private final ESLogger logger; - private final boolean crash; - private volatile RuntimeException closeException; - private final Object lock = new Object(); - private final Set superUnSyncedFiles; - private final Random superRandomState; - - public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, ESLogger logger, boolean crash) { - super(random, delegate); - this.crash = crash; - this.logger = logger; - - // TODO: remove all this and cutover to MockFS (DisableFsyncFS) instead - try { - Field field = MockDirectoryWrapper.class.getDeclaredField("unSyncedFiles"); - field.setAccessible(true); - superUnSyncedFiles = (Set) field.get(this); - - field = MockDirectoryWrapper.class.getDeclaredField("randomState"); - field.setAccessible(true); - superRandomState = (Random) field.get(this); - } catch (ReflectiveOperationException roe) { - throw new RuntimeException(roe); - } - } - - @Override - public synchronized void close() throws IOException { - try { - super.close(); - } catch (RuntimeException ex) { - logger.info("MockDirectoryWrapper#close() threw exception", ex); - closeException = ex; - throw ex; - } finally { - synchronized (lock) { - lock.notifyAll(); - } - } - } - - /** - * Returns true if {@link #in} must sync its files. - * Currently, only {@link NRTCachingDirectory} requires sync'ing its files - * because otherwise they are cached in an internal {@link org.apache.lucene.store.RAMDirectory}. If - * other directories require that too, they should be added to this method. - */ - private boolean mustSync() { - Directory delegate = in; - while (delegate instanceof FilterDirectory) { - if (delegate instanceof NRTCachingDirectory) { - return true; - } - delegate = ((FilterDirectory) delegate).getDelegate(); - } - return delegate instanceof NRTCachingDirectory; - } - - @Override - public synchronized void sync(Collection names) throws IOException { - // don't wear out our hardware so much in tests. - if (superRandomState.nextInt(100) == 0 || mustSync()) { - super.sync(names); - } else { - superUnSyncedFiles.removeAll(names); - } - } - - public void awaitClosed(long timeout) throws InterruptedException { - synchronized (lock) { - if(isOpen()) { - lock.wait(timeout); - } - } - } - - public synchronized boolean successfullyClosed() { - return closeException == null && !isOpen(); - } - - public synchronized RuntimeException closeException() { - return closeException; - } - - @Override - public synchronized void crash() throws IOException { - if (crash) { - super.crash(); - } - } - } -} diff --git a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java index c10c2863db1..d07b3b7c4d5 100644 --- a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -19,57 +19,86 @@ package org.elasticsearch.test.store; +import com.carrotsearch.randomizedtesting.SeedUtils; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.google.common.base.Charsets; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.StoreRateLimiting; +import org.apache.lucene.store.*; +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestRuleMarkFailure; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.*; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Assert; +import java.io.Closeable; import java.io.IOException; import java.io.PrintStream; +import java.lang.reflect.Field; import java.nio.file.Path; -import java.util.Arrays; -import java.util.EnumSet; -import java.util.Random; +import java.util.*; public class MockFSDirectoryService extends FsDirectoryService { + public static final String CHECK_INDEX_ON_CLOSE = "index.store.mock.check_index_on_close"; + public static final String RANDOM_IO_EXCEPTION_RATE_ON_OPEN = "index.store.mock.random.io_exception_rate_on_open"; + public static final String RANDOM_PREVENT_DOUBLE_WRITE = "index.store.mock.random.prevent_double_write"; + public static final String RANDOM_NO_DELETE_OPEN_FILE = "index.store.mock.random.no_delete_open_file"; + public static final String CRASH_INDEX = "index.store.mock.random.crash_index"; + private static final EnumSet validCheckIndexStates = EnumSet.of( IndexShardState.STARTED, IndexShardState.RELOCATED , IndexShardState.POST_RECOVERY ); - private final MockDirectoryHelper helper; - private FsDirectoryService delegateService; - public static final String CHECK_INDEX_ON_CLOSE = "index.store.mock.check_index_on_close"; + private final FsDirectoryService delegateService; private final boolean checkIndexOnClose; + private final Random random; + private final double randomIOExceptionRate; + private final double randomIOExceptionRateOnOpen; + private final MockDirectoryWrapper.Throttling throttle; + private final Settings indexSettings; + private final boolean preventDoubleWrite; + private final boolean noDeleteOpenFile; + private final boolean crashIndex; @Inject public MockFSDirectoryService(@IndexSettings Settings indexSettings, IndexStore indexStore, final IndicesService service, final ShardPath path) { super(indexSettings, indexStore, path); final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l); - Random random = new Random(seed); - helper = new MockDirectoryHelper(shardId, indexSettings, logger, random, seed); + this.random = new Random(seed); checkIndexOnClose = indexSettings.getAsBoolean(CHECK_INDEX_ON_CLOSE, true); + randomIOExceptionRate = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE, 0.0d); + randomIOExceptionRateOnOpen = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.0d); + preventDoubleWrite = indexSettings.getAsBoolean(RANDOM_PREVENT_DOUBLE_WRITE, true); // true is default in MDW + noDeleteOpenFile = indexSettings.getAsBoolean(RANDOM_NO_DELETE_OPEN_FILE, random.nextBoolean()); // true is default in MDW + random.nextInt(shardId.getId() + 1); // some randomness per shard + throttle = MockDirectoryWrapper.Throttling.NEVER; + crashIndex = indexSettings.getAsBoolean(CRASH_INDEX, true); - delegateService = helper.randomDirectorService(indexStore, path); + if (logger.isDebugEnabled()) { + logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed), + throttle, crashIndex); + } + this.indexSettings = indexSettings; + delegateService = randomDirectorService(indexStore, path); if (checkIndexOnClose) { final IndicesLifecycle.Listener listener = new IndicesLifecycle.Listener() { @@ -112,7 +141,7 @@ public class MockFSDirectoryService extends FsDirectoryService { @Override public Directory newDirectory() throws IOException { - return helper.wrap(delegateService.newDirectory()); + return wrap(delegateService.newDirectory()); } @Override @@ -173,4 +202,117 @@ public class MockFSDirectoryService extends FsDirectoryService { public long throttleTimeInNanos() { return delegateService.throttleTimeInNanos(); } + + public static final String RANDOM_IO_EXCEPTION_RATE = "index.store.mock.random.io_exception_rate"; + + private Directory wrap(Directory dir) { + final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, this.crashIndex); + w.setRandomIOExceptionRate(randomIOExceptionRate); + w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen); + w.setThrottling(throttle); + w.setCheckIndexOnClose(false); // we do this on the index level + w.setPreventDoubleWrite(preventDoubleWrite); + // TODO: make this test robust to virus scanner + w.setEnableVirusScanner(false); + w.setNoDeleteOpenFile(noDeleteOpenFile); + w.setUseSlowOpenClosers(false); + LuceneTestCase.closeAfterSuite(new CloseableDirectory(w)); + return w; + } + + private FsDirectoryService randomDirectorService(IndexStore indexStore, ShardPath path) { + ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder(); + builder.put(indexSettings); + builder.put(IndexStoreModule.STORE_TYPE, RandomPicks.randomFrom(random, IndexStoreModule.Type.values())); + return new FsDirectoryService(builder.build(), indexStore, path); + } + + public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper { + + private final boolean crash; + private final Set superUnSyncedFiles; + private final Random superRandomState; + + public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, boolean crash) { + super(random, delegate); + this.crash = crash; + + // TODO: remove all this and cutover to MockFS (DisableFsyncFS) instead + try { + Field field = MockDirectoryWrapper.class.getDeclaredField("unSyncedFiles"); + field.setAccessible(true); + superUnSyncedFiles = (Set) field.get(this); + + field = MockDirectoryWrapper.class.getDeclaredField("randomState"); + field.setAccessible(true); + superRandomState = (Random) field.get(this); + } catch (ReflectiveOperationException roe) { + throw new RuntimeException(roe); + } + } + + /** + * Returns true if {@link #in} must sync its files. + * Currently, only {@link org.apache.lucene.store.NRTCachingDirectory} requires sync'ing its files + * because otherwise they are cached in an internal {@link org.apache.lucene.store.RAMDirectory}. If + * other directories require that too, they should be added to this method. + */ + private boolean mustSync() { + Directory delegate = in; + while (delegate instanceof FilterDirectory) { + if (delegate instanceof NRTCachingDirectory) { + return true; + } + delegate = ((FilterDirectory) delegate).getDelegate(); + } + return delegate instanceof NRTCachingDirectory; + } + + @Override + public synchronized void sync(Collection names) throws IOException { + // don't wear out our hardware so much in tests. + if (superRandomState.nextInt(100) == 0 || mustSync()) { + super.sync(names); + } else { + superUnSyncedFiles.removeAll(names); + } + } + + @Override + public synchronized void crash() throws IOException { + if (crash) { + super.crash(); + } + } + } + + final class CloseableDirectory implements Closeable { + private final BaseDirectoryWrapper dir; + private final TestRuleMarkFailure failureMarker; + + public CloseableDirectory(BaseDirectoryWrapper dir) { + this.dir = dir; + try { + final Field suiteFailureMarker = LuceneTestCase.class.getDeclaredField("suiteFailureMarker"); + suiteFailureMarker.setAccessible(true); + this.failureMarker = (TestRuleMarkFailure) suiteFailureMarker.get(LuceneTestCase.class); + } catch (Throwable e) { + throw new ElasticsearchException("foo", e); + } + } + + @Override + public void close() { + // We only attempt to check open/closed state if there were no other test + // failures. + try { + if (failureMarker.wasSuccessful() && dir.isOpen()) { + Assert.fail("Directory not closed: " + dir); + } + } finally { + // TODO: perform real close of the delegate: LUCENE-4058 + // dir.close(); + } + } + } } From 87cf1452d58afdb5ce020cc57628cf4547d0cba5 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 28 Apr 2015 13:59:20 +0200 Subject: [PATCH 174/236] [REST] Render non-elasticsearch exception as root cause if we don't have an ElasticsearchException as the wrapper of the actual cause we don't render a root cause today. This commit adds support for 3rd party exceptions as root causes. Closes #10836 --- .../elasticsearch/ElasticsearchException.java | 13 +++++++++++-- .../ElasticsearchExceptionTests.java | 9 +++++++++ .../rest/BytesRestResponseTests.java | 17 ++++++++++++----- 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/src/main/java/org/elasticsearch/ElasticsearchException.java b/src/main/java/org/elasticsearch/ElasticsearchException.java index eed4040793c..a622b0f7e81 100644 --- a/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -194,7 +194,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte if (this instanceof ElasticsearchWrapperException) { toXContent(builder, params, this); } else { - builder.field("type", getExceptionName(this)); + builder.field("type", getExceptionName()); builder.field("reason", getMessage()); innerToXContent(builder, params); } @@ -261,7 +261,16 @@ public class ElasticsearchException extends RuntimeException implements ToXConte if (ex instanceof ElasticsearchException) { return ((ElasticsearchException) ex).guessRootCauses(); } - return new ElasticsearchException[0]; + return new ElasticsearchException[] {new ElasticsearchException(t.getMessage(), t) { + @Override + protected String getExceptionName() { + return getExceptionName(getCause()); + } + }}; + } + + protected String getExceptionName() { + return getExceptionName(this); } /** diff --git a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index 74ea5f6e460..1c631eb00f5 100644 --- a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -104,6 +104,15 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { } + { + final ElasticsearchException[] foobars = ElasticsearchException.guessRootCauses(new IllegalArgumentException("foobar")); + assertEquals(foobars.length, 1); + assertTrue(foobars[0] instanceof ElasticsearchException); + assertEquals(foobars[0].getMessage(), "foobar"); + assertEquals(foobars[0].getCause().getClass(), IllegalArgumentException.class); + assertEquals(foobars[0].getExceptionName(), "illegal_argument_exception"); + } + } public void testDeduplicate() throws IOException { diff --git a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index 8c0203e58de..e110e6ca70d 100644 --- a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -112,11 +112,18 @@ public class BytesRestResponseTests extends ElasticsearchTestCase { public void testGuessRootCause() throws IOException { RestRequest request = new FakeRestRequest(); RestChannel channel = new DetailedExceptionRestChannel(request); - - Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); - BytesRestResponse response = new BytesRestResponse(channel, t); - String text = response.content().toUtf8(); - assertThat(text, containsString("{\"root_cause\":[{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}]")); + { + Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); + BytesRestResponse response = new BytesRestResponse(channel, t); + String text = response.content().toUtf8(); + assertThat(text, containsString("{\"root_cause\":[{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}]")); + } + { + Throwable t = new FileNotFoundException("/foo/bar"); + BytesRestResponse response = new BytesRestResponse(channel, t); + String text = response.content().toUtf8(); + assertThat(text, containsString("{\"root_cause\":[{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}]")); + } } @Test From 9d890c472bfd328fb69ff07d98cfcdfe215e21b1 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 28 Apr 2015 18:01:47 +0200 Subject: [PATCH 175/236] Internal: Remove the query parser cache. The original goal of this cache was to avoid parsing the same query several times in case several shards are held on the same node. While this might sound like a good idea, this would only help when parsing the query takes non-negligible time compared to actually running the query, which should not be the case. --- .../TransportClearIndicesCacheAction.java | 2 - .../elasticsearch/index/cache/IndexCache.java | 39 +------- .../index/cache/IndexCacheModule.java | 2 - .../cache/query/parser/QueryParserCache.java | 39 -------- .../query/parser/QueryParserCacheModule.java | 44 --------- .../parser/none/NoneQueryParserCache.java | 58 ------------ .../resident/ResidentQueryParserCache.java | 90 ------------------- .../index/query/QueryParseContext.java | 16 ++-- .../index/query/QueryStringQueryParser.java | 10 +-- .../ResidentQueryParserCacheTest.java | 54 ----------- 10 files changed, 12 insertions(+), 342 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java delete mode 100644 src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java delete mode 100644 src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java delete mode 100644 src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java delete mode 100644 src/test/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCacheTest.java diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index ef162e205ca..ca03241bf25 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -101,8 +101,6 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio IndexService service = indicesService.indexService(request.shardId().getIndex()); if (service != null) { IndexShard shard = service.shard(request.shardId().id()); - // we always clear the query cache - service.cache().queryParserCache().clear(); boolean clearedAtLeastOne = false; if (request.filterCache()) { clearedAtLeastOne = true; diff --git a/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/src/main/java/org/elasticsearch/index/cache/IndexCache.java index 76316b39872..338b49f0490 100644 --- a/src/main/java/org/elasticsearch/index/cache/IndexCache.java +++ b/src/main/java/org/elasticsearch/index/cache/IndexCache.java @@ -20,18 +20,12 @@ package org.elasticsearch.index.cache; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.cache.filter.FilterCache; -import org.elasticsearch.index.cache.query.parser.QueryParserCache; import org.elasticsearch.index.settings.IndexSettings; import java.io.Closeable; @@ -40,30 +34,18 @@ import java.io.IOException; /** * */ -public class IndexCache extends AbstractIndexComponent implements Closeable, ClusterStateListener { +public class IndexCache extends AbstractIndexComponent implements Closeable { private final FilterCache filterCache; - private final QueryParserCache queryParserCache; private final BitsetFilterCache bitsetFilterCache; - private ClusterService clusterService; - @Inject - public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryParserCache queryParserCache, BitsetFilterCache bitsetFilterCache) { + public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, BitsetFilterCache bitsetFilterCache) { super(index, indexSettings); this.filterCache = filterCache; - this.queryParserCache = queryParserCache; this.bitsetFilterCache = bitsetFilterCache; } - @Inject(optional = true) - public void setClusterService(@Nullable ClusterService clusterService) { - this.clusterService = clusterService; - if (clusterService != null) { - clusterService.add(this); - } - } - public FilterCache filter() { return filterCache; } @@ -75,29 +57,14 @@ public class IndexCache extends AbstractIndexComponent implements Closeable, Clu return bitsetFilterCache; } - public QueryParserCache queryParserCache() { - return this.queryParserCache; - } - @Override public void close() throws IOException { - IOUtils.close(filterCache, queryParserCache, bitsetFilterCache); - if (clusterService != null) { - clusterService.remove(this); - } + IOUtils.close(filterCache, bitsetFilterCache); } public void clear(String reason) { filterCache.clear(reason); - queryParserCache.clear(); bitsetFilterCache.clear(reason); } - @Override - public void clusterChanged(ClusterChangedEvent event) { - // clear the query parser cache if the metadata (mappings) changed... - if (event.metaDataChanged()) { - queryParserCache.clear(); - } - } } diff --git a/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java b/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java index 796ad7388b4..3a0c9fc584e 100644 --- a/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java +++ b/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.bitset.BitsetFilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule; -import org.elasticsearch.index.cache.query.parser.QueryParserCacheModule; /** * @@ -39,7 +38,6 @@ public class IndexCacheModule extends AbstractModule { @Override protected void configure() { new FilterCacheModule(settings).configure(binder()); - new QueryParserCacheModule(settings).configure(binder()); new BitsetFilterCacheModule(settings).configure(binder()); bind(IndexCache.class).asEagerSingleton(); diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java b/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java deleted file mode 100644 index fee962c733b..00000000000 --- a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.cache.query.parser; - -import org.apache.lucene.queryparser.classic.QueryParserSettings; -import org.apache.lucene.search.Query; -import org.elasticsearch.index.IndexComponent; - -import java.io.Closeable; - -/** - * The main benefit of the query parser cache is to not parse the same query string on different shards. - * Less about long running query strings. - */ -public interface QueryParserCache extends IndexComponent, Closeable { - - Query get(QueryParserSettings queryString); - - void put(QueryParserSettings queryString, Query query); - - void clear(); -} diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java b/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java deleted file mode 100644 index b244d5ff9b6..00000000000 --- a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.cache.query.parser; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Scopes; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.query.parser.resident.ResidentQueryParserCache; - -/** - * - */ -public class QueryParserCacheModule extends AbstractModule { - - private final Settings settings; - - public QueryParserCacheModule(Settings settings) { - this.settings = settings; - } - - @Override - protected void configure() { - bind(QueryParserCache.class) - .to(settings.getAsClass("index.cache.query.parser.type", ResidentQueryParserCache.class, "org.elasticsearch.index.cache.query.parser.", "QueryParserCache")) - .in(Scopes.SINGLETON); - } -} diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java b/src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java deleted file mode 100644 index 2a9dcd8235d..00000000000 --- a/src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.cache.query.parser.none; - -import org.apache.lucene.queryparser.classic.QueryParserSettings; -import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.AbstractIndexComponent; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.cache.query.parser.QueryParserCache; -import org.elasticsearch.index.settings.IndexSettings; - -/** - * - */ -public class NoneQueryParserCache extends AbstractIndexComponent implements QueryParserCache { - - @Inject - public NoneQueryParserCache(Index index, @IndexSettings Settings indexSettings) { - super(index, indexSettings); - } - - @Override - public Query get(QueryParserSettings queryString) { - return null; - } - - @Override - public void put(QueryParserSettings queryString, Query query) { - } - - @Override - public void clear() { - } - - @Override - public void close() throws ElasticsearchException { - } -} diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java b/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java deleted file mode 100644 index 7e090b62659..00000000000 --- a/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.cache.query.parser.resident; - -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import org.apache.lucene.queryparser.classic.QueryParserSettings; -import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.AbstractIndexComponent; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.cache.query.parser.QueryParserCache; -import org.elasticsearch.index.settings.IndexSettings; - -import java.util.concurrent.TimeUnit; - -/** - * A small (by default) query parser cache mainly to not parse the same query string several times - * if several shards exists on the same node. - */ -public class ResidentQueryParserCache extends AbstractIndexComponent implements QueryParserCache { - - private final Cache cache; - - private volatile int maxSize; - private volatile TimeValue expire; - - @Inject - public ResidentQueryParserCache(Index index, @IndexSettings Settings indexSettings) { - super(index, indexSettings); - - this.maxSize = indexSettings.getAsInt("index.cache.query.parser.resident.max_size", 100); - this.expire = indexSettings.getAsTime("index.cache.query.parser.resident.expire", null); - logger.debug("using [resident] query cache with max_size [{}], expire [{}]", maxSize, expire); - - CacheBuilder cacheBuilder = CacheBuilder.newBuilder().maximumSize(maxSize); - if (expire != null) { - cacheBuilder.expireAfterAccess(expire.nanos(), TimeUnit.NANOSECONDS); - } - - this.cache = cacheBuilder.build(); - } - - @Override - public Query get(QueryParserSettings queryString) { - Query value = cache.getIfPresent(queryString); - if (value != null) { - return value.clone(); - } else { - return null; - } - } - - @Override - public void put(QueryParserSettings queryString, Query query) { - if (queryString.isCacheable()) { - cache.put(queryString, query); - } - } - - @Override - public void clear() { - cache.invalidateAll(); - } - - @Override - public void close() throws ElasticsearchException { - cache.invalidateAll(); - } -} diff --git a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index fb55b7b818f..2f43985444c 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -46,14 +46,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.analysis.AnalysisService; -import org.elasticsearch.index.cache.query.parser.QueryParserCache; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.FieldMappers; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperBuilders; -import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; @@ -64,7 +63,12 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** * @@ -182,10 +186,6 @@ public class QueryParseContext { return indexQueryParser.similarityService != null ? indexQueryParser.similarityService.similarity() : null; } - public QueryParserCache queryParserCache() { - return indexQueryParser.indexCache.queryParserCache(); - } - public String defaultField() { return indexQueryParser.defaultField(); } diff --git a/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java b/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java index ded43bc50e8..d0b07941888 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java @@ -219,18 +219,11 @@ public class QueryStringQueryParser implements QueryParser { } qpSettings.queryTypes(parseContext.queryTypes()); - Query query = parseContext.queryParserCache().get(qpSettings); - if (query != null) { - if (queryName != null) { - parseContext.addNamedQuery(queryName, query); - } - return query; - } MapperQueryParser queryParser = parseContext.queryParser(qpSettings); try { - query = queryParser.parse(qpSettings.queryString()); + Query query = queryParser.parse(qpSettings.queryString()); if (query == null) { return null; } @@ -241,7 +234,6 @@ public class QueryStringQueryParser implements QueryParser { if (query instanceof BooleanQuery) { Queries.applyMinimumShouldMatch((BooleanQuery) query, qpSettings.minimumShouldMatch()); } - parseContext.queryParserCache().put(qpSettings, query); if (queryName != null) { parseContext.addNamedQuery(queryName, query); } diff --git a/src/test/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCacheTest.java b/src/test/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCacheTest.java deleted file mode 100644 index 437ed34366b..00000000000 --- a/src/test/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCacheTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.cache.query.parser.resident; - -import org.apache.lucene.index.Term; -import org.apache.lucene.queryparser.classic.QueryParserSettings; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.index.Index; -import org.elasticsearch.test.ElasticsearchTestCase; -import org.junit.Test; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.sameInstance; - -/** - */ -public class ResidentQueryParserCacheTest extends ElasticsearchTestCase { - - @Test - public void testCaching() throws Exception { - ResidentQueryParserCache cache = new ResidentQueryParserCache(new Index("test"), ImmutableSettings.EMPTY); - QueryParserSettings key = new QueryParserSettings(); - key.queryString("abc"); - key.defaultField("a"); - key.boost(2.0f); - - Query query = new TermQuery(new Term("a", "abc")); - cache.put(key, query); - - assertThat(cache.get(key), not(sameInstance(query))); - assertThat(cache.get(key), equalTo(query)); - } - -} From 3ad279b8fd02d695d04f47862e372760712c26a0 Mon Sep 17 00:00:00 2001 From: Brian Murphy Date: Tue, 21 Apr 2015 13:58:21 -0400 Subject: [PATCH 176/236] Add `fairness` option to KeyedLock. This change adds the ability of a `KeyedLock` to be `fair` this means that threads will aquire the lock in the order they ask for it. --- .../common/util/concurrent/KeyedLock.java | 31 +++++++++++++++++-- .../transport/netty/KeyedLockTests.java | 2 +- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index 862bc6d9645..523f7b92f74 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -39,6 +39,19 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; * */ public class KeyedLock { + private final boolean fair; + + /** + * @param fair Use fair locking, ie threads get the lock in the order they requested it + */ + public KeyedLock(boolean fair) { + this.fair = fair; + } + + public KeyedLock() { + this(false); + } + private final ConcurrentMap map = ConcurrentCollections.newConcurrentMap(); protected final ThreadLocal threadLocal = new ThreadLocal<>(); @@ -52,7 +65,7 @@ public class KeyedLock { } KeyLock perNodeLock = map.get(key); if (perNodeLock == null) { - KeyLock newLock = new KeyLock(); + KeyLock newLock = new KeyLock(fair); perNodeLock = map.putIfAbsent(key, newLock); if (perNodeLock == null) { newLock.lock(); @@ -92,6 +105,10 @@ public class KeyedLock { @SuppressWarnings("serial") private final static class KeyLock extends ReentrantLock { + KeyLock(boolean fair) { + super(fair); + } + private final AtomicInteger count = new AtomicInteger(1); } @@ -105,7 +122,17 @@ public class KeyedLock { */ public final static class GlobalLockable extends KeyedLock { - private final ReadWriteLock lock = new ReentrantReadWriteLock(); + + private final ReadWriteLock lock; + + public GlobalLockable(boolean fair){ + super(fair); + lock = new ReentrantReadWriteLock(fair); + } + + public GlobalLockable() { + this(false); + } @Override public void acquire(T key) { diff --git a/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java index 07d5751ba96..d8be9b704c6 100644 --- a/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java @@ -40,7 +40,7 @@ public class KeyedLockTests extends ElasticsearchTestCase { public void checkIfMapEmptyAfterLotsOfAcquireAndReleases() throws InterruptedException { ConcurrentHashMap counter = new ConcurrentHashMap<>(); ConcurrentHashMap safeCounter = new ConcurrentHashMap<>(); - KeyedLock connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable() : new KeyedLock(); + KeyedLock connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable(randomBoolean()) : new KeyedLock(randomBoolean()); String[] names = new String[randomIntBetween(1, 40)]; for (int i = 0; i < names.length; i++) { names[i] = randomRealisticUnicodeOfLengthBetween(10, 20); From c0d32051464840cb65b9e656b2d1d88a0d32830d Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 28 Apr 2015 20:39:34 +0200 Subject: [PATCH 177/236] Fix more download URLs --- src/packaging/deb/lintian/elasticsearch | 2 +- src/test/java/org/elasticsearch/plugins/PluginManagerTests.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/packaging/deb/lintian/elasticsearch b/src/packaging/deb/lintian/elasticsearch index 0e66c778a3e..1ca52eaed28 100644 --- a/src/packaging/deb/lintian/elasticsearch +++ b/src/packaging/deb/lintian/elasticsearch @@ -4,5 +4,5 @@ elasticsearch binary: arch-independent-package-contains-binary-or-object elasticsearch binary: unstripped-binary-or-object # Ignore arch dependent warnings, we chose the right libs on start elasticsearch binary: arch-dependent-file-in-usr-share -# Please check our changelog at http://www.elasticsearch.org/downloads/ +# Please check our changelog at http://www.elastic.co/downloads/elasticsearch elasticsearch binary: changelog-file-missing-in-native-package diff --git a/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java b/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java index 2489fec6b3a..eaaa8c64cda 100644 --- a/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java +++ b/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java @@ -422,7 +422,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest { @Test @Network public void testInstallPluginWithElasticsearchDownloadService() throws IOException { - assumeTrue("download.elasticsearch.org is accessible", isDownloadServiceWorking("download.elasticsearch.org", 80, "/elasticsearch/ci-test.txt")); + assumeTrue("download.elastic.co is accessible", isDownloadServiceWorking("download.elastic.co", 80, "/elasticsearch/ci-test.txt")); singlePluginInstallAndRemove("elasticsearch/elasticsearch-transport-thrift/2.4.0", null); } From 7d8f39fc27f0787b6a8e1270a7edff9e765d68e5 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 28 Apr 2015 13:03:02 +0300 Subject: [PATCH 178/236] Discovery: Unicast Ping should close temporary connections after returning ping results During pinging we open light , temporary connections to the the unicast hosts. After the pinging is done we close those. At the moment we do so before returning the results of the pings to the caller. On the other hand, in our transport logic we acquire a lock specific to the node id while opening a connection. When disconnecting from node, we have to acquire the same lock in order to guarantee the the connection opening has finished. This can cause big delays in environments where opening a connection is very slow, as the connection closing has to wait *after* the pinging was done.. This can be problematic as it causes master election to use stale data. Closes #10849 --- .../discovery/zen/ping/unicast/UnicastZenPing.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index de7a5e309f0..3dd2ad6a51d 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -220,11 +220,11 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen protected void doRun() throws Exception { sendPings(timeout, TimeValue.timeValueMillis(timeout.millis() / 2), sendPingsHandler); sendPingsHandler.close(); + listener.onPing(sendPingsHandler.pingCollection().toArray()); for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) { logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node); transportService.disconnectFromNode(node); } - listener.onPing(sendPingsHandler.pingCollection().toArray()); } @Override From cf2fb4ed0fb06e688aaad5a8b6a54272ceabab66 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Tue, 28 Apr 2015 16:09:04 -0400 Subject: [PATCH 179/236] Remove core delete-by-query implementation, to be replaced with a plugin The current implementation is dangerous: it unexpectedly refreshes, which can quickly cause an unhealthy index (segment explosion). It can also delete different documents on primary vs replicas, causing inconsistent replicas. For 2.0 we will replace this with an optional plugin that does a scan/scroll search and then issues bulk delete requests. Closes #10859 --- rest-api-spec/api/delete_by_query.json | 75 ------ .../test/delete_by_query/10_basic.yaml | 42 ---- .../elasticsearch/action/ActionModule.java | 6 - .../deletebyquery/DeleteByQueryAction.java | 45 ---- .../deletebyquery/DeleteByQueryRequest.java | 218 ------------------ .../DeleteByQueryRequestBuilder.java | 149 ------------ .../deletebyquery/DeleteByQueryResponse.java | 93 -------- .../IndexDeleteByQueryRequest.java | 70 ------ .../IndexDeleteByQueryResponse.java | 61 ----- .../ShardDeleteByQueryRequest.java | 177 -------------- .../ShardDeleteByQueryResponse.java | 29 --- .../TransportDeleteByQueryAction.java | 98 -------- .../TransportIndexDeleteByQueryAction.java | 73 ------ .../TransportShardDeleteByQueryAction.java | 139 ----------- .../action/deletebyquery/package-info.java | 23 -- .../java/org/elasticsearch/client/Client.java | 26 --- .../org/elasticsearch/client/Requests.java | 13 -- .../client/support/AbstractClient.java | 19 -- .../client/transport/TransportClient.java | 12 - .../cluster/routing/OperationRouting.java | 19 -- .../DeleteByQueryFailedEngineException.java | 5 +- .../elasticsearch/index/engine/Engine.java | 2 + .../index/engine/InternalEngine.java | 2 + .../index/engine/ShadowEngine.java | 2 + .../indexing/IndexingOperationListener.java | 8 - .../index/indexing/ShardIndexingService.java | 13 -- .../percolator/PercolatorQueriesRegistry.java | 10 - .../index/query/HasChildFilterParser.java | 3 - .../index/query/HasChildQueryParser.java | 3 - .../index/query/HasParentFilterParser.java | 4 +- .../index/query/HasParentQueryParser.java | 5 +- .../index/query/QueryParserUtils.java | 48 ---- .../index/query/TopChildrenQueryParser.java | 3 - .../elasticsearch/index/shard/IndexShard.java | 11 - .../index/translog/Translog.java | 2 + .../rest/action/RestActionModule.java | 2 - .../RestDeleteByQueryAction.java | 103 --------- .../action/IndicesRequestTests.java | 14 -- .../aliases/IndexAliasesTests.java | 20 -- .../BasicBackwardsCompatibilityTest.java | 32 --- ...leteByQueryBackwardsCompatibilityTest.java | 114 --------- .../deleteByQuery/DeleteByQueryTests.java | 204 ---------------- .../document/DocumentActionsTests.java | 18 -- .../document/ShardInfoTests.java | 12 - .../IndicesOptionsIntegrationTests.java | 20 -- .../nested/SimpleNestedTests.java | 118 +--------- ...DestructiveOperationsIntegrationTests.java | 34 --- .../routing/AliasRoutingTests.java | 18 -- .../routing/SimpleRoutingTests.java | 16 -- .../child/SimpleChildQuerySearchTests.java | 137 ----------- .../stresstest/search1/Search1StressTest.java | 43 ---- 51 files changed, 12 insertions(+), 2401 deletions(-) delete mode 100644 rest-api-spec/api/delete_by_query.json delete mode 100644 rest-api-spec/test/delete_by_query/10_basic.yaml delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java delete mode 100644 src/main/java/org/elasticsearch/action/deletebyquery/package-info.java delete mode 100644 src/main/java/org/elasticsearch/index/query/QueryParserUtils.java delete mode 100644 src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java delete mode 100644 src/test/java/org/elasticsearch/bwcompat/ParentChildDeleteByQueryBackwardsCompatibilityTest.java delete mode 100644 src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java diff --git a/rest-api-spec/api/delete_by_query.json b/rest-api-spec/api/delete_by_query.json deleted file mode 100644 index a91b7be2c95..00000000000 --- a/rest-api-spec/api/delete_by_query.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "delete_by_query": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html", - "methods": ["DELETE"], - "url": { - "path": "/{index}/_query", - "paths": ["/{index}/_query", "/{index}/{type}/_query"], - "parts": { - "index": { - "type" : "list", - "required": true, - "description" : "A comma-separated list of indices to restrict the operation; use `_all` to perform the operation on all indices" - }, - "type": { - "type" : "list", - "description" : "A comma-separated list of types to restrict the operation" - } - }, - "params": { - "analyzer": { - "type" : "string", - "description" : "The analyzer to use for the query string" - }, - "consistency": { - "type" : "enum", - "options" : ["one", "quorum", "all"], - "description" : "Specific write consistency setting for the operation" - }, - "default_operator": { - "type" : "enum", - "options" : ["AND","OR"], - "default" : "OR", - "description" : "The default operator for query string query (AND or OR)" - }, - "df": { - "type" : "string", - "description" : "The field to use as default where no field prefix is given in the query string" - }, - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "q": { - "type" : "string", - "description" : "Query in the Lucene query string syntax" - }, - "routing": { - "type" : "string", - "description" : "Specific routing value" - }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" - }, - "timeout": { - "type" : "time", - "description" : "Explicit operation timeout" - } - } - }, - "body": { - "description" : "A query to restrict the operation specified with the Query DSL" - } - } -} diff --git a/rest-api-spec/test/delete_by_query/10_basic.yaml b/rest-api-spec/test/delete_by_query/10_basic.yaml deleted file mode 100644 index c253ad8d276..00000000000 --- a/rest-api-spec/test/delete_by_query/10_basic.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -"Basic delete_by_query": - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - index: - index: test_1 - type: test - id: 2 - body: { foo: baz } - - - do: - index: - index: test_1 - type: test - id: 3 - body: { foo: foo } - - - do: - indices.refresh: {} - - - do: - delete_by_query: - index: test_1 - body: - query: - match: - foo: bar - - - do: - indices.refresh: {} - - - do: - count: - index: test_1 - - - match: { count: 2 } diff --git a/src/main/java/org/elasticsearch/action/ActionModule.java b/src/main/java/org/elasticsearch/action/ActionModule.java index 7e596dd5a93..3046d15418f 100644 --- a/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/src/main/java/org/elasticsearch/action/ActionModule.java @@ -124,10 +124,6 @@ import org.elasticsearch.action.count.CountAction; import org.elasticsearch.action.count.TransportCountAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.TransportDeleteAction; -import org.elasticsearch.action.deletebyquery.DeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.TransportIndexDeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.TransportShardDeleteByQueryAction; import org.elasticsearch.action.exists.ExistsAction; import org.elasticsearch.action.exists.TransportExistsAction; import org.elasticsearch.action.explain.ExplainAction; @@ -284,8 +280,6 @@ public class ActionModule extends AbstractModule { TransportShardMultiGetAction.class); registerAction(BulkAction.INSTANCE, TransportBulkAction.class, TransportShardBulkAction.class); - registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class, - TransportIndexDeleteByQueryAction.class, TransportShardDeleteByQueryAction.class); registerAction(SearchAction.INSTANCE, TransportSearchAction.class, TransportSearchDfsQueryThenFetchAction.class, TransportSearchQueryThenFetchAction.class, diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java deleted file mode 100644 index 8a35aef6818..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; - -/** - */ -public class DeleteByQueryAction extends ClientAction { - - public static final DeleteByQueryAction INSTANCE = new DeleteByQueryAction(); - public static final String NAME = "indices:data/write/delete/by_query"; - - private DeleteByQueryAction() { - super(NAME); - } - - @Override - public DeleteByQueryResponse newResponse() { - return new DeleteByQueryResponse(); - } - - @Override - public DeleteByQueryRequestBuilder newRequestBuilder(Client client) { - return new DeleteByQueryRequestBuilder(client); - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java deleted file mode 100644 index 7386c9e9449..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import com.google.common.base.Charsets; -import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.QuerySourceBuilder; -import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequest; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -/** - * A request to delete all documents that matching a specific query. Best created with - * {@link org.elasticsearch.client.Requests#deleteByQueryRequest(String...)}. - *

    - *

    The request requires the source to be set either using {@link #source(QuerySourceBuilder)}, - * or {@link #source(byte[])}. - * - * @see DeleteByQueryResponse - * @see org.elasticsearch.client.Requests#deleteByQueryRequest(String...) - * @see org.elasticsearch.client.Client#deleteByQuery(DeleteByQueryRequest) - */ -public class DeleteByQueryRequest extends IndicesReplicationOperationRequest { - - private BytesReference source; - - private String[] types = Strings.EMPTY_ARRAY; - @Nullable - private String routing; - - /** - * Constructs a new delete by query request to run against the provided indices. No indices means - * it will run against all indices. - */ - public DeleteByQueryRequest(String... indices) { - this.indices = indices; - } - - public DeleteByQueryRequest() { - } - - /** - * Copy constructor that creates a new delete by query request that is a copy of the one provided as an argument. - * The new request will inherit though headers and context from the original request that caused it. - */ - public DeleteByQueryRequest(ActionRequest originalRequest) { - super(originalRequest); - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = super.validate(); - if (source == null) { - validationException = addValidationError("source is missing", validationException); - } - return validationException; - } - - /** - * The source to execute. - */ - public BytesReference source() { - return source; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequest source(QuerySourceBuilder sourceBuilder) { - this.source = sourceBuilder.buildAsBytes(Requests.CONTENT_TYPE); - return this; - } - - /** - * The source to execute. It is preferable to use either {@link #source(byte[])} - * or {@link #source(QuerySourceBuilder)}. - */ - public DeleteByQueryRequest source(String query) { - this.source = new BytesArray(query.getBytes(Charsets.UTF_8)); - return this; - } - - /** - * The source to execute in the form of a map. - */ - @SuppressWarnings("unchecked") - public DeleteByQueryRequest source(Map source) { - try { - XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE); - builder.map(source); - return source(builder); - } catch (IOException e) { - throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); - } - } - - public DeleteByQueryRequest source(XContentBuilder builder) { - this.source = builder.bytes(); - return this; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequest source(byte[] source) { - return source(source, 0, source.length); - } - - /** - * The source to execute. - */ - public DeleteByQueryRequest source(byte[] source, int offset, int length) { - return source(new BytesArray(source, offset, length)); - } - - public DeleteByQueryRequest source(BytesReference source) { - this.source = source; - return this; - } - - /** - * The types of documents the query will run against. Defaults to all types. - */ - public String[] types() { - return this.types; - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public String routing() { - return this.routing; - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public DeleteByQueryRequest routing(String routing) { - this.routing = routing; - return this; - } - - /** - * The routing values to control the shards that the search will be executed on. - */ - public DeleteByQueryRequest routing(String... routings) { - this.routing = Strings.arrayToCommaDelimitedString(routings); - return this; - } - - /** - * The types of documents the query will run against. Defaults to all types. - */ - public DeleteByQueryRequest types(String... types) { - this.types = types; - return this; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - source = in.readBytesReference(); - routing = in.readOptionalString(); - types = in.readStringArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBytesReference(source); - out.writeOptionalString(routing); - out.writeStringArray(types); - } - - @Override - public String toString() { - String sSource = "_na_"; - try { - sSource = XContentHelper.convertToJson(source, false); - } catch (Exception e) { - // ignore - } - return "[" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "], source[" + sSource + "]"; - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java deleted file mode 100644 index ea29d3df92a..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.WriteConsistencyLevel; -import org.elasticsearch.action.support.QuerySourceBuilder; -import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequestBuilder; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.query.QueryBuilder; - -import java.util.Map; - -/** - * - */ -public class DeleteByQueryRequestBuilder extends IndicesReplicationOperationRequestBuilder { - - private QuerySourceBuilder sourceBuilder; - - public DeleteByQueryRequestBuilder(Client client) { - super(client, new DeleteByQueryRequest()); - } - - /** - * The types of documents the query will run against. Defaults to all types. - */ - public DeleteByQueryRequestBuilder setTypes(String... types) { - request.types(types); - return this; - } - - /** - * A comma separated list of routing values to control the shards the action will be executed on. - */ - public DeleteByQueryRequestBuilder setRouting(String routing) { - request.routing(routing); - return this; - } - - /** - * The routing values to control the shards that the action will be executed on. - */ - public DeleteByQueryRequestBuilder setRouting(String... routing) { - request.routing(routing); - return this; - } - - - /** - * The query to delete documents for. - * - * @see org.elasticsearch.index.query.QueryBuilders - */ - public DeleteByQueryRequestBuilder setQuery(QueryBuilder queryBuilder) { - sourceBuilder().setQuery(queryBuilder); - return this; - } - - /** - * The source to execute. It is preferable to use either {@link #setSource(byte[])} - * or {@link #setQuery(QueryBuilder)}. - */ - public DeleteByQueryRequestBuilder setSource(String source) { - request().source(source); - return this; - } - - /** - * The source to execute in the form of a map. - */ - public DeleteByQueryRequestBuilder setSource(Map source) { - request().source(source); - return this; - } - - /** - * The source to execute in the form of a builder. - */ - public DeleteByQueryRequestBuilder setSource(XContentBuilder builder) { - request().source(builder); - return this; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequestBuilder setSource(byte[] source) { - request().source(source); - return this; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequestBuilder setSource(BytesReference source) { - request().source(source); - return this; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequestBuilder setSource(byte[] source, int offset, int length) { - request().source(source, offset, length); - return this; - } - - @Override - public DeleteByQueryRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) { - request.consistencyLevel(consistencyLevel); - return this; - } - - @Override - protected void doExecute(ActionListener listener) { - if (sourceBuilder != null) { - request.source(sourceBuilder); - } - - client.deleteByQuery(request, listener); - } - - private QuerySourceBuilder sourceBuilder() { - if (sourceBuilder == null) { - sourceBuilder = new QuerySourceBuilder(); - } - return sourceBuilder; - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java deleted file mode 100644 index b65744f40a2..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; -import java.util.Iterator; -import java.util.Map; - -import static com.google.common.collect.Maps.newHashMap; - -/** - * The response of delete by query action. Holds the {@link IndexDeleteByQueryResponse}s from all the - * different indices. - */ -public class DeleteByQueryResponse extends ActionResponse implements Iterable { - - private Map indices = newHashMap(); - - DeleteByQueryResponse() { - - } - - @Override - public Iterator iterator() { - return indices.values().iterator(); - } - - /** - * The responses from all the different indices. - */ - public Map getIndices() { - return indices; - } - - /** - * The response of a specific index. - */ - public IndexDeleteByQueryResponse getIndex(String index) { - return indices.get(index); - } - - public RestStatus status() { - RestStatus status = RestStatus.OK; - for (IndexDeleteByQueryResponse indexResponse : indices.values()) { - if (indexResponse.getShardInfo().status().getStatus() > status.getStatus()) { - status = indexResponse.getShardInfo().status(); - } - } - return status; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - IndexDeleteByQueryResponse response = new IndexDeleteByQueryResponse(); - response.readFrom(in); - indices.put(response.getIndex(), response); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(indices.size()); - for (IndexDeleteByQueryResponse indexResponse : indices.values()) { - indexResponse.writeTo(out); - } - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java deleted file mode 100644 index 94c58c8b087..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesReference; - -import java.util.Set; - -/** - * Delete by query request to execute on a specific index. - */ -class IndexDeleteByQueryRequest extends IndexReplicationOperationRequest { - - private final BytesReference source; - private final String[] types; - @Nullable - private final Set routing; - @Nullable - private final String[] filteringAliases; - private final long nowInMillis; - - IndexDeleteByQueryRequest(DeleteByQueryRequest request, String index, @Nullable Set routing, @Nullable String[] filteringAliases, - long nowInMillis) { - super(index, request.timeout(), request.consistencyLevel(), request.indices(), request.indicesOptions(), request); - this.source = request.source(); - this.types = request.types(); - this.routing = routing; - this.filteringAliases = filteringAliases; - this.nowInMillis = nowInMillis; - } - - BytesReference source() { - return source; - } - - Set routing() { - return this.routing; - } - - String[] types() { - return this.types; - } - - String[] filteringAliases() { - return filteringAliases; - } - - long nowInMillis() { - return nowInMillis; - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java b/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java deleted file mode 100644 index 2c8d4001bae..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionWriteResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Delete by query response executed on a specific index. - */ -public class IndexDeleteByQueryResponse extends ActionWriteResponse { - - private String index; - - IndexDeleteByQueryResponse(String index, ShardInfo failures) { - this.index = index; - setShardInfo(failures); - } - - IndexDeleteByQueryResponse() { - } - - /** - * The index the delete by query operation was executed against. - */ - public String getIndex() { - return this.index; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(index); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java deleted file mode 100644 index c16bf57e4b0..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -/** - * Delete by query request to execute on a specific shard. - */ -public class ShardDeleteByQueryRequest extends ShardReplicationOperationRequest { - - private int shardId; - private BytesReference source; - private String[] types = Strings.EMPTY_ARRAY; - @Nullable - private Set routing; - @Nullable - private String[] filteringAliases; - private long nowInMillis; - - private OriginalIndices originalIndices; - - ShardDeleteByQueryRequest(IndexDeleteByQueryRequest request, int shardId) { - super(request); - this.index = request.index(); - this.source = request.source(); - this.types = request.types(); - this.shardId = shardId; - consistencyLevel(request.consistencyLevel()); - timeout = request.timeout(); - this.routing = request.routing(); - filteringAliases = request.filteringAliases(); - nowInMillis = request.nowInMillis(); - this.originalIndices = new OriginalIndices(request); - } - - ShardDeleteByQueryRequest() { - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = super.validate(); - if (source == null) { - addValidationError("source is missing", validationException); - } - return validationException; - } - - public int shardId() { - return this.shardId; - } - - BytesReference source() { - return source; - } - - public String[] types() { - return this.types; - } - - public Set routing() { - return this.routing; - } - - public String[] filteringAliases() { - return filteringAliases; - } - - long nowInMillis() { - return nowInMillis; - } - - @Override - public String[] indices() { - return originalIndices.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - return originalIndices.indicesOptions(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - source = in.readBytesReference(); - shardId = in.readVInt(); - types = in.readStringArray(); - int routingSize = in.readVInt(); - if (routingSize > 0) { - routing = new HashSet<>(routingSize); - for (int i = 0; i < routingSize; i++) { - routing.add(in.readString()); - } - } - int aliasesSize = in.readVInt(); - if (aliasesSize > 0) { - filteringAliases = new String[aliasesSize]; - for (int i = 0; i < aliasesSize; i++) { - filteringAliases[i] = in.readString(); - } - } - - nowInMillis = in.readVLong(); - originalIndices = OriginalIndices.readOriginalIndices(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBytesReference(source); - out.writeVInt(shardId); - out.writeStringArray(types); - if (routing != null) { - out.writeVInt(routing.size()); - for (String r : routing) { - out.writeString(r); - } - } else { - out.writeVInt(0); - } - if (filteringAliases != null) { - out.writeVInt(filteringAliases.length); - for (String alias : filteringAliases) { - out.writeString(alias); - } - } else { - out.writeVInt(0); - } - out.writeVLong(nowInMillis); - OriginalIndices.writeOriginalIndices(originalIndices, out); - } - - @Override - public String toString() { - String sSource = "_na_"; - try { - sSource = XContentHelper.convertToJson(source, false); - } catch (Exception e) { - // ignore - } - return "delete_by_query {[" + index + "]" + Arrays.toString(types) + ", query [" + sSource + "]}"; - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java b/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java deleted file mode 100644 index 63640732d42..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionWriteResponse; - -/** - * Delete by query response executed on a specific shard. - */ -public class ShardDeleteByQueryResponse extends ActionWriteResponse { - -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java deleted file mode 100644 index 8566e32ea26..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.action.support.replication.TransportIndicesReplicationOperationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReferenceArray; - -/** - */ -public class TransportDeleteByQueryAction extends TransportIndicesReplicationOperationAction { - - private final DestructiveOperations destructiveOperations; - - @Inject - public TransportDeleteByQueryAction(Settings settings, ClusterService clusterService, TransportService transportService, - ThreadPool threadPool, TransportIndexDeleteByQueryAction indexDeleteByQueryAction, - NodeSettingsService nodeSettingsService, ActionFilters actionFilters) { - super(settings, DeleteByQueryAction.NAME, transportService, clusterService, threadPool, indexDeleteByQueryAction, actionFilters, DeleteByQueryRequest.class); - this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService); - } - - @Override - protected void doExecute(DeleteByQueryRequest request, ActionListener listener) { - destructiveOperations.failDestructive(request.indices()); - super.doExecute(request, listener); - } - - @Override - protected Map> resolveRouting(ClusterState clusterState, DeleteByQueryRequest request) throws ElasticsearchException { - return clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); - } - - @Override - protected DeleteByQueryResponse newResponseInstance(DeleteByQueryRequest request, AtomicReferenceArray indexResponses) { - DeleteByQueryResponse response = new DeleteByQueryResponse(); - for (int i = 0; i < indexResponses.length(); i++) { - IndexDeleteByQueryResponse indexResponse = (IndexDeleteByQueryResponse) indexResponses.get(i); - if (indexResponse != null) { - response.getIndices().put(indexResponse.getIndex(), indexResponse); - } - } - return response; - } - - @Override - protected boolean accumulateExceptions() { - return false; - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, DeleteByQueryRequest replicationPingRequest) { - return state.blocks().globalBlockedException(ClusterBlockLevel.READ); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, DeleteByQueryRequest request, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.WRITE, concreteIndices); - } - - @Override - protected IndexDeleteByQueryRequest newIndexRequestInstance(DeleteByQueryRequest request, String index, Set routing, long startTimeInMillis) { - String[] filteringAliases = clusterService.state().metaData().filteringAliases(index, request.indices()); - return new IndexDeleteByQueryRequest(request, index, routing, filteringAliases, startTimeInMillis); - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java deleted file mode 100644 index 607459e7798..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionWriteResponse; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.List; - -/** - * Internal transport action that broadcasts a delete by query request to all of the shards that belong to an index. - */ -public class TransportIndexDeleteByQueryAction extends TransportIndexReplicationOperationAction { - - private static final String ACTION_NAME = DeleteByQueryAction.NAME + "[index]"; - - @Inject - public TransportIndexDeleteByQueryAction(Settings settings, ClusterService clusterService, - ThreadPool threadPool, TransportShardDeleteByQueryAction shardDeleteByQueryAction, ActionFilters actionFilters) { - super(settings, ACTION_NAME, clusterService, threadPool, shardDeleteByQueryAction, actionFilters); - } - - @Override - protected IndexDeleteByQueryResponse newResponseInstance(IndexDeleteByQueryRequest request, List shardDeleteByQueryResponses, ActionWriteResponse.ShardInfo shardInfo) { - return new IndexDeleteByQueryResponse(request.index(), shardInfo); - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, IndexDeleteByQueryRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, IndexDeleteByQueryRequest request) { - return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index()); - } - - @Override - protected GroupShardsIterator shards(IndexDeleteByQueryRequest request) { - return clusterService.operationRouting().deleteByQueryShards(clusterService.state(), request.index(), request.routing()); - } - - @Override - protected ShardDeleteByQueryRequest newShardRequestInstance(IndexDeleteByQueryRequest request, int shardId) { - return new ShardDeleteByQueryRequest(request, shardId); - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java deleted file mode 100644 index 99add9e6504..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; -import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.internal.DefaultSearchContext; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchLocalRequest; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -/** - * - */ -public class TransportShardDeleteByQueryAction extends TransportShardReplicationOperationAction { - - public final static String DELETE_BY_QUERY_API = "delete_by_query"; - - private static final String ACTION_NAME = DeleteByQueryAction.NAME + "[s]"; - - private final ScriptService scriptService; - private final PageCacheRecycler pageCacheRecycler; - private final BigArrays bigArrays; - - @Inject - public TransportShardDeleteByQueryAction(Settings settings, TransportService transportService, - ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, - ShardStateAction shardStateAction, ScriptService scriptService, - PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters) { - super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - ShardDeleteByQueryRequest.class, ShardDeleteByQueryRequest.class, ThreadPool.Names.INDEX); - this.scriptService = scriptService; - this.pageCacheRecycler = pageCacheRecycler; - this.bigArrays = bigArrays; - } - - @Override - protected boolean checkWriteConsistency() { - return true; - } - - @Override - protected ShardDeleteByQueryResponse newResponseInstance() { - return new ShardDeleteByQueryResponse(); - } - - @Override - protected boolean resolveIndex() { - return false; - } - - @Override - protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) { - ShardDeleteByQueryRequest request = shardRequest.request; - IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); - IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); - - SearchContext.setCurrent(new DefaultSearchContext(0, new ShardSearchLocalRequest(request.types(), request.nowInMillis()), null, - indexShard.acquireSearcher(DELETE_BY_QUERY_API), indexService, indexShard, scriptService, - pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter())); - try { - Engine.DeleteByQuery deleteByQuery = indexShard.prepareDeleteByQuery(request.source(), request.filteringAliases(), Engine.Operation.Origin.PRIMARY, request.types()); - SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query())); - indexShard.deleteByQuery(deleteByQuery); - } finally { - try (SearchContext searchContext = SearchContext.current()) { - SearchContext.removeCurrent(); - } - } - return new Tuple<>(new ShardDeleteByQueryResponse(), shardRequest.request); - } - - - @Override - protected void shardOperationOnReplica(ShardId shardId, ShardDeleteByQueryRequest request) { - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.shardSafe(shardId.id()); - - SearchContext.setCurrent(new DefaultSearchContext(0, new ShardSearchLocalRequest(request.types(), request.nowInMillis()), null, - indexShard.acquireSearcher(DELETE_BY_QUERY_API, true), indexService, indexShard, scriptService, - pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter())); - try { - Engine.DeleteByQuery deleteByQuery = indexShard.prepareDeleteByQuery(request.source(), request.filteringAliases(), Engine.Operation.Origin.REPLICA, request.types()); - SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query())); - indexShard.deleteByQuery(deleteByQuery); - } finally { - try (SearchContext searchContext = SearchContext.current()) { - SearchContext.removeCurrent(); - } - } - } - - @Override - protected ShardIterator shards(ClusterState clusterState, InternalRequest request) { - GroupShardsIterator group = clusterService.operationRouting().deleteByQueryShards(clusterService.state(), request.concreteIndex(), request.request().routing()); - for (ShardIterator shardIt : group) { - if (shardIt.shardId().id() == request.request().shardId()) { - return shardIt; - } - } - throw new ElasticsearchIllegalStateException("No shards iterator found for shard [" + request.request().shardId() + "]"); - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/package-info.java b/src/main/java/org/elasticsearch/action/deletebyquery/package-info.java deleted file mode 100644 index a4bb68271da..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Delete by query action. - */ -package org.elasticsearch.action.deletebyquery; \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/client/Client.java b/src/main/java/org/elasticsearch/client/Client.java index 7f763e50d35..973ebf511c3 100644 --- a/src/main/java/org/elasticsearch/client/Client.java +++ b/src/main/java/org/elasticsearch/client/Client.java @@ -29,9 +29,6 @@ import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.exists.ExistsRequest; import org.elasticsearch.action.exists.ExistsRequestBuilder; import org.elasticsearch.action.exists.ExistsResponse; @@ -223,29 +220,6 @@ public interface Client extends ElasticsearchClient, Releasable { */ BulkRequestBuilder prepareBulk(); - /** - * Deletes all documents from one or more indices based on a query. - * - * @param request The delete by query request - * @return The result future - * @see Requests#deleteByQueryRequest(String...) - */ - ActionFuture deleteByQuery(DeleteByQueryRequest request); - - /** - * Deletes all documents from one or more indices based on a query. - * - * @param request The delete by query request - * @param listener A listener to be notified with a result - * @see Requests#deleteByQueryRequest(String...) - */ - void deleteByQuery(DeleteByQueryRequest request, ActionListener listener); - - /** - * Deletes all documents from one or more indices based on a query. - */ - DeleteByQueryRequestBuilder prepareDeleteByQuery(String... indices); - /** * Gets the document that was indexed from an index with a type and id. * diff --git a/src/main/java/org/elasticsearch/client/Requests.java b/src/main/java/org/elasticsearch/client/Requests.java index 6fad2682a2a..13fce3326fe 100644 --- a/src/main/java/org/elasticsearch/client/Requests.java +++ b/src/main/java/org/elasticsearch/client/Requests.java @@ -52,7 +52,6 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; import org.elasticsearch.action.exists.ExistsRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; @@ -112,18 +111,6 @@ public class Requests { return new BulkRequest(); } - /** - * Creates a delete by query request. Note, the query itself must be set either by setting the JSON source - * of the query, or by using a {@link org.elasticsearch.index.query.QueryBuilder} (using {@link org.elasticsearch.index.query.QueryBuilders}). - * - * @param indices The indices the delete by query against. Use null or _all to execute against all indices - * @return The delete by query request - * @see org.elasticsearch.client.Client#deleteByQuery(org.elasticsearch.action.deletebyquery.DeleteByQueryRequest) - */ - public static DeleteByQueryRequest deleteByQueryRequest(String... indices) { - return new DeleteByQueryRequest(indices); - } - /** * Creates a get request to get the JSON source from an index based on a type and id. Note, the * {@link GetRequest#type(String)} and {@link GetRequest#id(String)} must be set. diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClient.java index ccfab027fd4..7bfdfa14198 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -32,10 +32,6 @@ import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.exists.ExistsAction; import org.elasticsearch.action.exists.ExistsRequest; import org.elasticsearch.action.exists.ExistsRequestBuilder; @@ -172,21 +168,6 @@ public abstract class AbstractClient implements Client { return new BulkRequestBuilder(this); } - @Override - public ActionFuture deleteByQuery(final DeleteByQueryRequest request) { - return execute(DeleteByQueryAction.INSTANCE, request); - } - - @Override - public void deleteByQuery(final DeleteByQueryRequest request, final ActionListener listener) { - execute(DeleteByQueryAction.INSTANCE, request, listener); - } - - @Override - public DeleteByQueryRequestBuilder prepareDeleteByQuery(String... indices) { - return new DeleteByQueryRequestBuilder(this).setIndices(indices); - } - @Override public ActionFuture get(final GetRequest request) { return execute(GetAction.INSTANCE, request); diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 2e4b995bf46..ad42cb764eb 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -29,8 +29,6 @@ import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.get.GetRequest; @@ -352,16 +350,6 @@ public class TransportClient extends AbstractClient { internalClient.bulk(request, listener); } - @Override - public ActionFuture deleteByQuery(DeleteByQueryRequest request) { - return internalClient.deleteByQuery(request); - } - - @Override - public void deleteByQuery(DeleteByQueryRequest request, ActionListener listener) { - internalClient.deleteByQuery(request, listener); - } - @Override public ActionFuture get(GetRequest request) { return internalClient.get(request); diff --git a/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 4c4cba24507..ee9f4270c8c 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -78,25 +78,6 @@ public class OperationRouting extends AbstractComponent { return indexRoutingTable(clusterState, index).groupByShardsIt(); } - public GroupShardsIterator deleteByQueryShards(ClusterState clusterState, String index, @Nullable Set routing) throws IndexMissingException { - if (routing == null || routing.isEmpty()) { - return indexRoutingTable(clusterState, index).groupByShardsIt(); - } - - // we use set here and not identity set since we might get duplicates - HashSet set = new HashSet<>(); - IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index); - for (String r : routing) { - int shardId = shardId(clusterState, index, null, null, r); - IndexShardRoutingTable indexShard = indexRouting.shard(shardId); - if (indexShard == null) { - throw new IndexShardMissingException(new ShardId(index, shardId)); - } - set.add(indexShard.shardsRandomIt()); - } - return new GroupShardsIterator(Lists.newArrayList(set)); - } - public int searchShardsCount(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) throws IndexMissingException { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); return shards.size(); diff --git a/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java index 9c12348d90d..28b57701349 100644 --- a/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java +++ b/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java @@ -21,12 +21,9 @@ package org.elasticsearch.index.engine; import org.elasticsearch.index.shard.ShardId; -/** - * - */ public class DeleteByQueryFailedEngineException extends EngineException { public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) { super(shardId, "Delete by query failed for [" + deleteByQuery.query() + "]", cause); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java index 516313ddd28..01398aa74ee 100644 --- a/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -205,6 +205,8 @@ public abstract class Engine implements Closeable { public abstract void delete(Delete delete) throws EngineException; + /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */ + @Deprecated public abstract void delete(DeleteByQuery delete) throws EngineException; final protected GetResult getFromSearcher(Get get) throws EngineException { diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 267a9042ea3..06be412060e 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -537,6 +537,8 @@ public class InternalEngine extends Engine { } } + /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */ + @Deprecated @Override public void delete(DeleteByQuery delete) throws EngineException { try (ReleasableLock lock = readLock.acquire()) { diff --git a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 511b9ae9955..30db42f502e 100644 --- a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -117,6 +117,8 @@ public class ShadowEngine extends Engine { throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine"); } + /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */ + @Deprecated @Override public void delete(DeleteByQuery delete) throws EngineException { throw new UnsupportedOperationException(shardId + " delete-by-query operation not allowed on shadow engine"); diff --git a/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java b/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java index 29bfe5866b5..8b95e0f132b 100644 --- a/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java +++ b/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java @@ -96,12 +96,4 @@ public abstract class IndexingOperationListener { public void postDelete(Engine.Delete delete) { } - - public Engine.DeleteByQuery preDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - return deleteByQuery; - } - - public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - - } } diff --git a/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java b/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java index a84010ea366..1207101d6ca 100644 --- a/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java +++ b/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java @@ -216,19 +216,6 @@ public class ShardIndexingService extends AbstractIndexShardComponent { typeStats(delete.type()).deleteCurrent.dec(); } - public Engine.DeleteByQuery preDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - for (IndexingOperationListener listener : listeners) { - deleteByQuery = listener.preDeleteByQuery(deleteByQuery); - } - return deleteByQuery; - } - - public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - for (IndexingOperationListener listener : listeners) { - listener.postDeleteByQuery(deleteByQuery); - } - } - public void noopUpdate(String type) { totalStats.noopUpdates.inc(); typeStats(type).noopUpdates.inc(); diff --git a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java index f77c335577d..486101f741f 100644 --- a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java @@ -345,15 +345,5 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple removePercolateQuery(delete.id()); } } - - // Updating the live percolate queries for a delete by query is tricky with the current way delete by queries - // are handled. It is only possible if we put a big lock around the post delete by query hook... - - // If we implement delete by query, that just runs a query and generates delete operations in a bulk, then - // updating the live percolator is automatically supported for delete by query. -// @Override -// public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { -// } } - } diff --git a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java index dc8869921eb..c04e48b8e1e 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java @@ -41,8 +41,6 @@ import org.elasticsearch.search.internal.SubSearchContext; import java.io.IOException; -import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery; - /** * */ @@ -64,7 +62,6 @@ public class HasChildFilterParser implements FilterParser { @Override public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { - ensureNotDeleteByQuery(NAME, parseContext); XContentParser parser = parseContext.parser(); boolean queryFound = false; diff --git a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java index 48601f84416..058f47d5eb7 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java @@ -42,8 +42,6 @@ import org.elasticsearch.search.internal.SubSearchContext; import java.io.IOException; -import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery; - /** * */ @@ -65,7 +63,6 @@ public class HasChildQueryParser implements QueryParser { @Override public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { - ensureNotDeleteByQuery(NAME, parseContext); XContentParser parser = parseContext.parser(); boolean queryFound = false; diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java index 2f549c2f674..fd3335202f3 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java @@ -32,7 +32,6 @@ import org.elasticsearch.search.internal.SubSearchContext; import java.io.IOException; import static org.elasticsearch.index.query.HasParentQueryParser.createParentQuery; -import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery; /** * @@ -55,7 +54,6 @@ public class HasParentFilterParser implements FilterParser { @Override public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { - ensureNotDeleteByQuery(NAME, parseContext); XContentParser parser = parseContext.parser(); boolean queryFound = false; @@ -129,4 +127,4 @@ public class HasParentFilterParser implements FilterParser { return new CustomQueryWrappingFilter(parentQuery); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java index 2325d2840e8..9525064647b 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java @@ -44,8 +44,6 @@ import java.io.IOException; import java.util.HashSet; import java.util.Set; -import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery; - public class HasParentQueryParser implements QueryParser { public static final String NAME = "has_parent"; @@ -64,7 +62,6 @@ public class HasParentQueryParser implements QueryParser { @Override public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { - ensureNotDeleteByQuery(NAME, parseContext); XContentParser parser = parseContext.parser(); boolean queryFound = false; @@ -206,4 +203,4 @@ public class HasParentQueryParser implements QueryParser { } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/QueryParserUtils.java b/src/main/java/org/elasticsearch/index/query/QueryParserUtils.java deleted file mode 100644 index 558722f1daf..00000000000 --- a/src/main/java/org/elasticsearch/index/query/QueryParserUtils.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.action.deletebyquery.TransportShardDeleteByQueryAction; -import org.elasticsearch.search.internal.SearchContext; - -/** - */ -public final class QueryParserUtils { - - private QueryParserUtils() { - } - - /** - * Ensures that the query parsing wasn't invoked via the delete by query api. - */ - public static void ensureNotDeleteByQuery(String name, QueryParseContext parseContext) { - SearchContext context = SearchContext.current(); - if (context == null) { - // We can't do the api check, because there is no search context. - // Because the delete by query shard transport action sets the search context this isn't an issue. - return; - } - - if (TransportShardDeleteByQueryAction.DELETE_BY_QUERY_API.equals(context.source())) { - throw new QueryParsingException(parseContext.index(), "[" + name + "] query and filter unsupported in delete_by_query api"); - } - } - -} diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java index 7c45bb7fc14..a44239e863e 100644 --- a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java @@ -35,8 +35,6 @@ import org.elasticsearch.index.search.child.TopChildrenQuery; import java.io.IOException; -import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery; - /** * */ @@ -55,7 +53,6 @@ public class TopChildrenQueryParser implements QueryParser { @Override public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { - ensureNotDeleteByQuery(NAME, parseContext); XContentParser parser = parseContext.parser(); boolean queryFound = false; diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 546d66cbdaa..e7b2bbf9ec6 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -565,17 +565,6 @@ public class IndexShard extends AbstractIndexShardComponent { return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, origin, startTime, types); } - public void deleteByQuery(Engine.DeleteByQuery deleteByQuery) throws ElasticsearchException { - writeAllowed(deleteByQuery.origin()); - if (logger.isTraceEnabled()) { - logger.trace("delete_by_query [{}]", deleteByQuery.query()); - } - deleteByQuery = indexingService.preDeleteByQuery(deleteByQuery); - engine().delete(deleteByQuery); - deleteByQuery.endTime(System.nanoTime()); - indexingService.postDeleteByQuery(deleteByQuery); - } - public Engine.GetResult get(Engine.Get get) throws ElasticsearchException { readAllowed(); return engine().get(get); diff --git a/src/main/java/org/elasticsearch/index/translog/Translog.java b/src/main/java/org/elasticsearch/index/translog/Translog.java index 5e132304779..7d6a4b0a02a 100644 --- a/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -662,6 +662,8 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { } } + /** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */ + @Deprecated static class DeleteByQuery implements Operation { public static final int SERIALIZATION_FORMAT = 2; diff --git a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java index 874a91f3af2..424d6d0d954 100644 --- a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java @@ -81,7 +81,6 @@ import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; import org.elasticsearch.rest.action.bulk.RestBulkAction; import org.elasticsearch.rest.action.cat.*; import org.elasticsearch.rest.action.delete.RestDeleteAction; -import org.elasticsearch.rest.action.deletebyquery.RestDeleteByQueryAction; import org.elasticsearch.rest.action.explain.RestExplainAction; import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; import org.elasticsearch.rest.action.get.RestGetAction; @@ -194,7 +193,6 @@ public class RestActionModule extends AbstractModule { bind(RestHeadAction.class).asEagerSingleton(); bind(RestMultiGetAction.class).asEagerSingleton(); bind(RestDeleteAction.class).asEagerSingleton(); - bind(RestDeleteByQueryAction.class).asEagerSingleton(); bind(org.elasticsearch.rest.action.count.RestCountAction.class).asEagerSingleton(); bind(RestSuggestAction.class).asEagerSingleton(); bind(RestTermVectorsAction.class).asEagerSingleton(); diff --git a/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java b/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java deleted file mode 100644 index da00073c733..00000000000 --- a/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action.deletebyquery; - -import org.elasticsearch.action.WriteConsistencyLevel; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; -import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; -import org.elasticsearch.action.deletebyquery.ShardDeleteByQueryRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.QuerySourceBuilder; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.rest.*; -import org.elasticsearch.rest.action.support.RestActions; -import org.elasticsearch.rest.action.support.RestBuilderListener; - -import static org.elasticsearch.rest.RestRequest.Method.DELETE; - -/** - * - */ -public class RestDeleteByQueryAction extends BaseRestHandler { - - @Inject - public RestDeleteByQueryAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); - controller.registerHandler(DELETE, "/{index}/_query", this); - controller.registerHandler(DELETE, "/{index}/{type}/_query", this); - } - - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); - deleteByQueryRequest.listenerThreaded(false); - if (RestActions.hasBodyContent(request)) { - deleteByQueryRequest.source(RestActions.getRestContent(request)); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - deleteByQueryRequest.source(querySourceBuilder); - } - } - deleteByQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); - deleteByQueryRequest.timeout(request.paramAsTime("timeout", ShardDeleteByQueryRequest.DEFAULT_TIMEOUT)); - - deleteByQueryRequest.routing(request.param("routing")); - String consistencyLevel = request.param("consistency"); - if (consistencyLevel != null) { - deleteByQueryRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel)); - } - deleteByQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteByQueryRequest.indicesOptions())); - client.deleteByQuery(deleteByQueryRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(DeleteByQueryResponse result, XContentBuilder builder) throws Exception { - RestStatus restStatus = result.status(); - builder.startObject(); - builder.startObject(Fields._INDICES); - for (IndexDeleteByQueryResponse indexDeleteByQueryResponse : result.getIndices().values()) { - builder.startObject(indexDeleteByQueryResponse.getIndex(), XContentBuilder.FieldCaseConversion.NONE); - indexDeleteByQueryResponse.getShardInfo().toXContent(builder, request); - builder.endObject(); - builder.endObject(); - } - builder.endObject(); - return new BytesRestResponse(restStatus, builder); - } - }); - } - - static final class Fields { - static final XContentBuilderString _INDICES = new XContentBuilderString("_indices"); - static final XContentBuilderString _SHARDS = new XContentBuilderString("_shards"); - static final XContentBuilderString TOTAL = new XContentBuilderString("total"); - static final XContentBuilderString SUCCESSFUL = new XContentBuilderString("successful"); - static final XContentBuilderString FAILED = new XContentBuilderString("failed"); - static final XContentBuilderString FAILURES = new XContentBuilderString("failures"); - static final XContentBuilderString INDEX = new XContentBuilderString("index"); - static final XContentBuilderString SHARD = new XContentBuilderString("shard"); - static final XContentBuilderString REASON = new XContentBuilderString("reason"); - } -} diff --git a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java index a62b20cbc99..2fa6da48ce6 100644 --- a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java +++ b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java @@ -61,8 +61,6 @@ import org.elasticsearch.action.count.CountAction; import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; import org.elasticsearch.action.exists.ExistsAction; import org.elasticsearch.action.exists.ExistsRequest; import org.elasticsearch.action.explain.ExplainAction; @@ -259,18 +257,6 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest { assertSameIndices(updateRequest, updateShardActions); } - @Test - public void testDeleteByQuery() { - String[] deleteByQueryShardActions = new String[]{DeleteByQueryAction.NAME + "[s]", DeleteByQueryAction.NAME + "[s][r]"}; - interceptTransportActions(deleteByQueryShardActions); - - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(randomIndicesOrAliases()).source(new QuerySourceBuilder().setQuery(QueryBuilders.matchAllQuery())); - internalCluster().clientNodeClient().deleteByQuery(deleteByQueryRequest).actionGet(); - - clearInterceptedActions(); - assertSameIndices(deleteByQueryRequest, deleteByQueryShardActions); - } - @Test public void testBulk() { String[] bulkShardActions = new String[]{BulkAction.NAME + "[s]", BulkAction.NAME + "[s][r]"}; diff --git a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java index 4cdafec11a4..5c6f51c7ea1 100644 --- a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java +++ b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java @@ -405,26 +405,6 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { logger.info("--> checking counts before delete"); assertThat(client().prepareCount("bars").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(1L)); - - logger.info("--> delete by query from a single alias"); - client().prepareDeleteByQuery("bars").setQuery(QueryBuilders.termQuery("name", "test")).get(); - - logger.info("--> verify that only one record was deleted"); - assertThat(client().prepareCount("test1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(3L)); - - logger.info("--> delete by query from an aliases pointing to two indices"); - client().prepareDeleteByQuery("foos").setQuery(QueryBuilders.matchAllQuery()).get(); - - logger.info("--> verify that proper records were deleted"); - SearchResponse searchResponse = client().prepareSearch("aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "3", "4", "6", "7", "8"); - - logger.info("--> delete by query from an aliases and an index"); - client().prepareDeleteByQuery("tests", "test2").setQuery(QueryBuilders.matchAllQuery()).get(); - - logger.info("--> verify that proper records were deleted"); - searchResponse = client().prepareSearch("aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "4"); } diff --git a/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java b/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java index 3a4aaa89feb..beeebdac339 100644 --- a/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java +++ b/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java @@ -31,8 +31,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; -import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.get.*; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -478,36 +476,6 @@ public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompa return client().admin().cluster().prepareState().get().getState().nodes().masterNode().getVersion(); } - @Test - public void testDeleteByQuery() throws ExecutionException, InterruptedException { - createIndex("test"); - ensureYellow("test"); - - int numDocs = iterations(10, 50); - IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs + 1]; - for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = client().prepareIndex("test", "test", Integer.toString(i)).setSource("field", "value"); - } - indexRequestBuilders[numDocs] = client().prepareIndex("test", "test", Integer.toString(numDocs)).setSource("field", "other_value"); - indexRandom(true, indexRequestBuilders); - - SearchResponse searchResponse = client().prepareSearch("test").get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs + 1)); - - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test").setQuery(QueryBuilders.termQuery("field", "value")).get(); - assertThat(deleteByQueryResponse.getIndices().size(), equalTo(1)); - for (IndexDeleteByQueryResponse indexDeleteByQueryResponse : deleteByQueryResponse) { - assertThat(indexDeleteByQueryResponse.getIndex(), equalTo("test")); - assertThat(indexDeleteByQueryResponse.getShardInfo().getFailures().length, equalTo(0)); - } - - refresh(); - searchResponse = client().prepareSearch("test").get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); - } - @Test public void testDeleteRoutingRequired() throws ExecutionException, InterruptedException, IOException { createIndexWithAlias(); diff --git a/src/test/java/org/elasticsearch/bwcompat/ParentChildDeleteByQueryBackwardsCompatibilityTest.java b/src/test/java/org/elasticsearch/bwcompat/ParentChildDeleteByQueryBackwardsCompatibilityTest.java deleted file mode 100644 index a45c080eb68..00000000000 --- a/src/test/java/org/elasticsearch/bwcompat/ParentChildDeleteByQueryBackwardsCompatibilityTest.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.bwcompat; - -import org.elasticsearch.Version; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; -import static org.hamcrest.core.Is.is; - -/** - */ -public class ParentChildDeleteByQueryBackwardsCompatibilityTest extends ElasticsearchBackwardsCompatIntegrationTest { - - @BeforeClass - public static void checkVersion() { - assumeTrue("parent child queries in delete by query is forbidden from 1.1.2 and up", globalCompatibilityVersion().onOrBefore(Version.V_1_1_1)); - } - - @Override - public void assertAllShardsOnNodes(String index, String pattern) { - super.assertAllShardsOnNodes(index, pattern); - } - - @Override - protected Settings externalNodeSettings(int nodeOrdinal) { - return ImmutableSettings.builder() - .put(super.externalNodeSettings(nodeOrdinal)) - .put("index.translog.disable_flush", true) - .build(); - } - - @Test - public void testHasChild() throws Exception { - assertAcked(prepareCreate("idx") - .setSettings(ImmutableSettings.builder() - .put(indexSettings()) - .put("index.refresh_interval", "-1") - .put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()) - ) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - - List requests = new ArrayList<>(); - requests.add(client().prepareIndex("idx", "parent", "1").setSource("{}")); - requests.add(client().prepareIndex("idx", "child", "1").setParent("1").setSource("{}")); - indexRandom(true, requests); - - SearchResponse response = client().prepareSearch("idx") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); - - client().prepareDeleteByQuery("idx") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - refresh(); - - response = client().prepareSearch("idx") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - assertNoFailures(response); - assertHitCount(response, 0); - - client().prepareIndex("idx", "type", "1").setSource("{}").get(); - assertThat(client().prepareGet("idx", "type", "1").get().isExists(), is(true)); - - backwardsCluster().upgradeAllNodes(); - backwardsCluster().allowOnAllNodes("idx"); - ensureGreen("idx"); - - response = client().prepareSearch("idx") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); // The delete by query has failed on recovery so that parent doc is still there - - // But the rest of the recovery did execute, we just skipped over the delete by query with the p/c query. - assertThat(client().prepareGet("idx", "type", "1").get().isExists(), is(true)); - response = client().prepareSearch("idx").setTypes("type").get(); - assertNoFailures(response); - assertHitCount(response, 1); - } - -} diff --git a/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java b/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java deleted file mode 100644 index 877714681c6..00000000000 --- a/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.deleteByQuery; - -import org.apache.lucene.util.LuceneTestCase.Slow; -import org.elasticsearch.action.ActionWriteResponse; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; -import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.IndexMissingException; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; - -import java.util.concurrent.ExecutionException; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; -import static org.hamcrest.Matchers.*; - -@Slow -public class DeleteByQueryTests extends ElasticsearchIntegrationTest { - - @Test - public void testDeleteAllNoIndices() { - client().admin().indices().prepareRefresh().execute().actionGet(); - DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery(); - deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); - deleteByQueryRequestBuilder.setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)); - DeleteByQueryResponse actionGet = deleteByQueryRequestBuilder.execute().actionGet(); - assertThat(actionGet.getIndices().size(), equalTo(0)); - } - - @Test - public void testDeleteAllOneIndex() { - String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elastic Search\"" + "}"; - final long iters = randomIntBetween(1, 50); - for (int i = 0; i < iters; i++) { - client().prepareIndex("twitter", "tweet", "" + i).setSource(json).execute().actionGet(); - } - refresh(); - SearchResponse search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(search.getHits().totalHits(), equalTo(iters)); - DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery(); - deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); - - DeleteByQueryResponse response = deleteByQueryRequestBuilder.execute().actionGet(); - assertThat(response.status(), equalTo(RestStatus.OK)); - assertSyncShardInfo(response.getIndex("twitter").getShardInfo(), getNumShards("twitter")); - - client().admin().indices().prepareRefresh().execute().actionGet(); - search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(search.getHits().totalHits(), equalTo(0l)); - } - - @Test - public void testMissing() { - - String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elastic Search\"" + "}"; - - client().prepareIndex("twitter", "tweet").setSource(json).setRefresh(true).execute().actionGet(); - - SearchResponse search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(search.getHits().totalHits(), equalTo(1l)); - DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery(); - deleteByQueryRequestBuilder.setIndices("twitter", "missing"); - deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); - - try { - deleteByQueryRequestBuilder.execute().actionGet(); - fail("Exception should have been thrown."); - } catch (IndexMissingException e) { - //everything well - } - - deleteByQueryRequestBuilder.setIndicesOptions(IndicesOptions.lenientExpandOpen()); - DeleteByQueryResponse response = deleteByQueryRequestBuilder.execute().actionGet(); - assertThat(response.status(), equalTo(RestStatus.OK)); - assertSyncShardInfo(response.getIndex("twitter").getShardInfo(), getNumShards("twitter")); - - client().admin().indices().prepareRefresh().execute().actionGet(); - search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(search.getHits().totalHits(), equalTo(0l)); - } - - @Test - public void testFailure() throws Exception { - assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); - - DeleteByQueryResponse response = client().prepareDeleteByQuery(indexOrAlias()) - .setQuery(QueryBuilders.hasChildQuery("type", QueryBuilders.matchAllQuery())) - .execute().actionGet(); - - NumShards twitter = getNumShards("test"); - - assertThat(response.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(response.getIndex("test").getShardInfo().getSuccessful(), equalTo(0)); - assertThat(response.getIndex("test").getShardInfo().getFailures().length, equalTo(twitter.numPrimaries)); - assertThat(response.getIndices().size(), equalTo(1)); - assertThat(response.getIndices().get("test").getShardInfo().getFailures().length, equalTo(twitter.numPrimaries)); - for (ActionWriteResponse.ShardInfo.Failure failure : response.getIndices().get("test").getShardInfo().getFailures()) { - assertThat(failure.reason(), containsString("[has_child] query and filter unsupported in delete_by_query api")); - assertThat(failure.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(failure.shardId(), greaterThan(-1)); - } - } - - @Test - public void testDeleteByFieldQuery() throws Exception { - assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); - int numDocs = scaledRandomIntBetween(10, 100); - for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test", "test", Integer.toString(i)) - .setRouting(randomAsciiOfLengthBetween(1, 5)) - .setSource("foo", "bar").get(); - } - refresh(); - assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchQuery("_id", Integer.toString(between(0, numDocs - 1)))).get(), 1); - assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs); - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery(indexOrAlias()) - .setQuery(QueryBuilders.matchQuery("_id", Integer.toString(between(0, numDocs - 1)))).get(); - assertThat(deleteByQueryResponse.getIndices().size(), equalTo(1)); - assertThat(deleteByQueryResponse.getIndex("test"), notNullValue()); - - refresh(); - assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs - 1); - } - - @Test - public void testDateMath() throws Exception { - index("test", "type", "1", "d", "2013-01-01"); - ensureGreen(); - refresh(); - assertHitCount(client().prepareCount("test").get(), 1); - client().prepareDeleteByQuery("test").setQuery(QueryBuilders.rangeQuery("d").to("now-1h")).get(); - refresh(); - assertHitCount(client().prepareCount("test").get(), 0); - } - - @Test - public void testDeleteByTermQuery() throws ExecutionException, InterruptedException { - createIndex("test"); - ensureGreen(); - - int numDocs = iterations(10, 50); - IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs + 1]; - for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = client().prepareIndex("test", "test", Integer.toString(i)).setSource("field", "value"); - } - indexRequestBuilders[numDocs] = client().prepareIndex("test", "test", Integer.toString(numDocs)).setSource("field", "other_value"); - indexRandom(true, indexRequestBuilders); - - SearchResponse searchResponse = client().prepareSearch("test").get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo((long)numDocs + 1)); - - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test").setQuery(QueryBuilders.termQuery("field", "value")).get(); - assertThat(deleteByQueryResponse.getIndices().size(), equalTo(1)); - for (IndexDeleteByQueryResponse indexDeleteByQueryResponse : deleteByQueryResponse) { - assertThat(indexDeleteByQueryResponse.getIndex(), equalTo("test")); - assertThat(indexDeleteByQueryResponse.getShardInfo().getFailures().length, equalTo(0)); - } - - refresh(); - searchResponse = client().prepareSearch("test").get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); - } - - private static String indexOrAlias() { - return randomBoolean() ? "test" : "alias"; - } - - private void assertSyncShardInfo(ActionWriteResponse.ShardInfo shardInfo, NumShards numShards) { - assertThat(shardInfo.getTotal(), greaterThanOrEqualTo(numShards.totalNumShards)); - // we do not ensure green so just make sure request succeeded at least on all primaries - assertThat(shardInfo.getSuccessful(), greaterThanOrEqualTo(numShards.numPrimaries)); - assertThat(shardInfo.getFailed(), equalTo(0)); - for (ActionWriteResponse.ShardInfo.Failure failure : shardInfo.getFailures()) { - assertThat(failure.status(), equalTo(RestStatus.OK)); - } - } -} diff --git a/src/test/java/org/elasticsearch/document/DocumentActionsTests.java b/src/test/java/org/elasticsearch/document/DocumentActionsTests.java index 06b37172d2f..4de1e72b87b 100644 --- a/src/test/java/org/elasticsearch/document/DocumentActionsTests.java +++ b/src/test/java/org/elasticsearch/document/DocumentActionsTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -177,23 +176,6 @@ public class DocumentActionsTests extends ElasticsearchIntegrationTest { assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(countResponse.getFailedShards(), equalTo(0)); } - - logger.info("Delete by query"); - DeleteByQueryResponse queryResponse = client().prepareDeleteByQuery().setIndices("test").setQuery(termQuery("name", "test2")).execute().actionGet(); - assertThat(queryResponse.getIndex(getConcreteIndexName()).getShardInfo().getTotal(), greaterThanOrEqualTo(numShards.totalNumShards)); - assertThat(queryResponse.getIndex(getConcreteIndexName()).getShardInfo().getSuccessful(), greaterThanOrEqualTo(numShards.totalNumShards)); - assertThat(queryResponse.getIndex(getConcreteIndexName()).getShardInfo().getFailures().length, equalTo(0)); - client().admin().indices().refresh(refreshRequest("test")).actionGet(); - - logger.info("Get [type1/1] and [type1/2], should be empty"); - for (int i = 0; i < 5; i++) { - getResult = client().get(getRequest("test").type("type1").id("1")).actionGet(); - assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); - assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string())); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); - assertThat("cycle #" + i, getResult.isExists(), equalTo(false)); - assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); - } } @Test diff --git a/src/test/java/org/elasticsearch/document/ShardInfoTests.java b/src/test/java/org/elasticsearch/document/ShardInfoTests.java index 22533eaef69..7bd950ca3c0 100644 --- a/src/test/java/org/elasticsearch/document/ShardInfoTests.java +++ b/src/test/java/org/elasticsearch/document/ShardInfoTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.ClusterState; @@ -100,17 +99,6 @@ public class ShardInfoTests extends ElasticsearchIntegrationTest { } } - @Test - public void testDeleteByQuery() throws Exception { - int numPrimaryShards = randomIntBetween(1, 2); - prepareIndex(numPrimaryShards); - IndexDeleteByQueryResponse indexDeleteByQueryResponse = client().prepareDeleteByQuery("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .get().getIndex("idx"); - assertShardInfo(indexDeleteByQueryResponse, numCopies * numPrimaryShards, numNodes * numPrimaryShards); - } - - private void prepareIndex(int numberOfPrimaryShards) throws Exception { prepareIndex(numberOfPrimaryShards, false); } diff --git a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java index 9f10f503002..265155f74be 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder; import org.elasticsearch.action.count.CountRequestBuilder; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder; import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateSourceBuilder; @@ -85,7 +84,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1", "test2"), true); verify(aliasExists("test1", "test2"), true); verify(typesExists("test1", "test2"), true); - verify(deleteByQuery("test1", "test2"), true); verify(percolate("test1", "test2"), true); verify(mpercolate(null, "test1", "test2"), false); verify(suggest("test1", "test2"), true); @@ -108,7 +106,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1", "test2").setIndicesOptions(options), true); verify(aliasExists("test1", "test2").setIndicesOptions(options), true); verify(typesExists("test1", "test2").setIndicesOptions(options), true); - verify(deleteByQuery("test1", "test2").setIndicesOptions(options), true); verify(percolate("test1", "test2").setIndicesOptions(options), true); verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false); verify(suggest("test1", "test2").setIndicesOptions(options), true); @@ -131,7 +128,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1", "test2").setIndicesOptions(options), false); verify(aliasExists("test1", "test2").setIndicesOptions(options), false); verify(typesExists("test1", "test2").setIndicesOptions(options), false); - verify(deleteByQuery("test1", "test2").setIndicesOptions(options), false); verify(percolate("test1", "test2").setIndicesOptions(options), false); verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false); verify(suggest("test1", "test2").setIndicesOptions(options), false); @@ -156,7 +152,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1", "test2").setIndicesOptions(options), false); verify(aliasExists("test1", "test2").setIndicesOptions(options), false); verify(typesExists("test1", "test2").setIndicesOptions(options), false); - verify(deleteByQuery("test1", "test2").setIndicesOptions(options), false); verify(percolate("test1", "test2").setIndicesOptions(options), false); verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false); verify(suggest("test1", "test2").setIndicesOptions(options), false); @@ -190,7 +185,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), true); verify(aliasExists("test1").setIndicesOptions(options), true); verify(typesExists("test1").setIndicesOptions(options), true); - verify(deleteByQuery("test1").setIndicesOptions(options), true); verify(percolate("test1").setIndicesOptions(options), true); verify(mpercolate(options, "test1").setIndicesOptions(options), true); verify(suggest("test1").setIndicesOptions(options), true); @@ -213,7 +207,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); verify(typesExists("test1").setIndicesOptions(options), false); - verify(deleteByQuery("test1").setIndicesOptions(options), false); verify(percolate("test1").setIndicesOptions(options), false); verify(mpercolate(options, "test1").setIndicesOptions(options), false); verify(suggest("test1").setIndicesOptions(options), false); @@ -239,7 +232,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); verify(typesExists("test1").setIndicesOptions(options), false); - verify(deleteByQuery("test1").setIndicesOptions(options), false); verify(percolate("test1").setIndicesOptions(options), false); verify(mpercolate(options, "test1").setIndicesOptions(options), false); verify(suggest("test1").setIndicesOptions(options), false); @@ -265,7 +257,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), true); verify(aliasExists("test1").setIndicesOptions(options), true); verify(typesExists("test1").setIndicesOptions(options), true); - verify(deleteByQuery("test1").setIndicesOptions(options), true); verify(percolate("test1").setIndicesOptions(options), true); verify(suggest("test1").setIndicesOptions(options), true); verify(getAliases("test1").setIndicesOptions(options), true); @@ -287,7 +278,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); verify(typesExists("test1").setIndicesOptions(options), false); - verify(deleteByQuery("test1").setIndicesOptions(options), false); verify(percolate("test1").setIndicesOptions(options), false); verify(suggest("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); @@ -312,7 +302,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); verify(typesExists("test1").setIndicesOptions(options), false); - verify(deleteByQuery("test1").setIndicesOptions(options), false); verify(percolate("test1").setIndicesOptions(options), false); verify(suggest("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); @@ -369,7 +358,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices), true); verify(aliasExists(indices), false); verify(typesExists(indices), false); - verify(deleteByQuery(indices), true); verify(percolate(indices), false); verify(mpercolate(null, indices), false); verify(suggest(indices), false); @@ -393,7 +381,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices).setIndicesOptions(options), false); verify(aliasExists(indices).setIndicesOptions(options), false); verify(typesExists(indices).setIndicesOptions(options), false); - verify(deleteByQuery(indices).setIndicesOptions(options), false); verify(percolate(indices).setIndicesOptions(options), false); verify(mpercolate(options, indices), false); verify(suggest(indices).setIndicesOptions(options), false); @@ -420,7 +407,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices), false); verify(aliasExists(indices), false); verify(typesExists(indices), false); - verify(deleteByQuery(indices), false); verify(percolate(indices), false); verify(mpercolate(null, indices), false); verify(suggest(indices), false); @@ -444,7 +430,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices), true); verify(aliasExists(indices), false); verify(typesExists(indices), false); - verify(deleteByQuery(indices), true); verify(percolate(indices), false); verify(mpercolate(null, indices), false); verify(suggest(indices), false); @@ -468,7 +453,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices).setIndicesOptions(options), false); verify(aliasExists(indices).setIndicesOptions(options), false); verify(typesExists(indices).setIndicesOptions(options), false); - verify(deleteByQuery(indices).setIndicesOptions(options), false); verify(percolate(indices).setIndicesOptions(options), false); verify(mpercolate(options, indices), false); verify(suggest(indices).setIndicesOptions(options), false); @@ -848,10 +832,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest return client().admin().indices().prepareTypesExists(indices).setTypes("dummy"); } - private static DeleteByQueryRequestBuilder deleteByQuery(String... indices) { - return client().prepareDeleteByQuery(indices).setQuery(boolQuery().mustNot(matchAllQuery())); - } - private static PercolateRequestBuilder percolate(String... indices) { return client().preparePercolate().setIndices(indices) .setSource(new PercolateSourceBuilder().setDoc(docBuilder().setDoc("k", "v"))) diff --git a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java index fc95131d250..a788964bbfb 100644 --- a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java +++ b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java @@ -256,113 +256,6 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { } } - @Test - public void simpleNestedDeletedByQuery1() throws Exception { - simpleNestedDeleteByQuery(3, 0); - } - - @Test - public void simpleNestedDeletedByQuery2() throws Exception { - simpleNestedDeleteByQuery(3, 1); - } - - @Test - public void simpleNestedDeletedByQuery3() throws Exception { - simpleNestedDeleteByQuery(3, 2); - } - - private void simpleNestedDeleteByQuery(int total, int docToDelete) throws Exception { - - assertAcked(prepareCreate("test") - .setSettings(settingsBuilder().put(indexSettings()).put("index.referesh_interval", -1).build()) - .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("nested1") - .field("type", "nested") - .endObject() - .endObject().endObject().endObject())); - - ensureGreen(); - - for (int i = 0; i < total; i++) { - client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject() - .field("field1", "value1") - .startArray("nested1") - .startObject() - .field("n_field1", "n_value1_1") - .field("n_field2", "n_value2_1") - .endObject() - .startObject() - .field("n_field1", "n_value1_2") - .field("n_field2", "n_value2_2") - .endObject() - .endArray() - .endObject()).execute().actionGet(); - } - - - flush(); - assertDocumentCount("test", total * 3); - - client().prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet(); - flush(); - refresh(); - assertDocumentCount("test", (total * 3l) - 3); - - for (int i = 0; i < total; i++) { - assertThat(client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().isExists(), equalTo(i != docToDelete)); - } - } - - @Test - public void noChildrenNestedDeletedByQuery1() throws Exception { - noChildrenNestedDeleteByQuery(3, 0); - } - - @Test - public void noChildrenNestedDeletedByQuery2() throws Exception { - noChildrenNestedDeleteByQuery(3, 1); - } - - @Test - public void noChildrenNestedDeletedByQuery3() throws Exception { - noChildrenNestedDeleteByQuery(3, 2); - } - - private void noChildrenNestedDeleteByQuery(long total, int docToDelete) throws Exception { - - assertAcked(prepareCreate("test") - .setSettings(settingsBuilder().put(indexSettings()).put("index.referesh_interval", -1).build()) - .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("nested1") - .field("type", "nested") - .endObject() - .endObject().endObject().endObject())); - - ensureGreen(); - - - for (int i = 0; i < total; i++) { - client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject() - .field("field1", "value1") - .endObject()).execute().actionGet(); - } - - - flush(); - refresh(); - - assertDocumentCount("test", total); - - client().prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet(); - flush(); - refresh(); - assertDocumentCount("test", total - 1); - - for (int i = 0; i < total; i++) { - assertThat(client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().isExists(), equalTo(i != docToDelete)); - } - } - @Test public void multiNested() throws Exception { assertAcked(prepareCreate("test") @@ -487,15 +380,6 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { flush(); refresh(); assertDocumentCount("test", 6); - - client().prepareDeleteByQuery("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - flush(); - refresh(); - - // This must be 3, otherwise child docs aren't deleted. - // If this is 5 then only the parent has been removed - assertDocumentCount("test", 3); - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); } @Test @@ -1371,4 +1255,4 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java index 9815887f1d1..eda46721e36 100644 --- a/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java @@ -110,40 +110,6 @@ public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrat // end close index: client().admin().indices().prepareDelete("_all").get(); - // delete_by_query: - settings = ImmutableSettings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - assertAcked(client().admin().indices().prepareCreate("index1").get()); - assertAcked(client().admin().indices().prepareCreate("1index").get()); - - // Should succeed, since no wildcards - client().prepareDeleteByQuery("1index").setQuery(QueryBuilders.matchAllQuery()).get(); - - try { - client().prepareDeleteByQuery("_all").setQuery(QueryBuilders.matchAllQuery()).get(); - fail(); - } catch (ElasticsearchIllegalArgumentException e) {} - - try { - client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get(); - fail(); - } catch (ElasticsearchIllegalArgumentException e) {} - - settings = ImmutableSettings.builder() - .put(DestructiveOperations.REQUIRES_NAME, false) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get(); - client().prepareDeleteByQuery("_all").setQuery(QueryBuilders.matchAllQuery()).get(); - - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get(); - // end delete_by_query: - client().admin().indices().prepareDelete("_all").get(); } } diff --git a/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java index 8b4fef56852..8fd37a7804d 100644 --- a/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java +++ b/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java @@ -102,24 +102,6 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest { assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true)); } - - logger.info("--> deleting_by_query with 1 as routing, should not delete anything"); - client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("1").execute().actionGet(); - refresh(); - for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); - assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - } - - logger.info("--> deleting_by_query with alias0, should delete"); - client().prepareDeleteByQuery("alias0").setQuery(matchAllQuery()).execute().actionGet(); - refresh(); - for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - } } @Test diff --git a/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java b/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java index 55b796acb4a..cca1d2125a2 100644 --- a/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java +++ b/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java @@ -87,22 +87,6 @@ public class SimpleRoutingTests extends ElasticsearchIntegrationTest { for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); } - - logger.info("--> deleting_by_query with 1 as routing, should not delete anything"); - client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("1").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); - for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true)); - } - - logger.info("--> deleting_by_query with , should delete"); - client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("0").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); - for (int i = 0; i < 5; i++) { - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false)); - } } public void testSimpleSearchRouting() { diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index a536f64260d..a3139e507a6 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.count.CountResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -1379,99 +1378,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { assertThat(searchResponse.getHits().totalHits(), equalTo(2l)); } - @Test - public void testDeleteByQuery_has_child() throws Exception { - assertAcked(prepareCreate("test") - .setSettings( - settingsBuilder().put(indexSettings()) - .put("index.refresh_interval", "-1") - ) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - ensureGreen(); - - // index simple data - client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get(); - client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get(); - client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get(); - client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get(); - client().admin().indices().prepareFlush("test").get(); - client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get(); - client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get(); - client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get(); - client().admin().indices().prepareFlush("test").get(); - client().prepareIndex("test", "child", "c5").setSource("c_field", "blue").setParent("p3").get(); - client().prepareIndex("test", "child", "c6").setSource("c_field", "red").setParent("p3").get(); - client().admin().indices().prepareRefresh().get(); - // p4 will not be found via search api, but will be deleted via delete_by_query api! - client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get(); - client().prepareIndex("test", "child", "c7").setSource("c_field", "blue").setParent("p4").get(); - client().prepareIndex("test", "child", "c8").setSource("c_field", "red").setParent("p4").get(); - - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(randomHasChild("child", "c_field", "blue")) - .get(); - assertHitCount(searchResponse, 2l); - - // Delete by query doesn't support p/c queries. If the delete by query has a different execution mode - // that doesn't rely on IW#deleteByQuery() then this test can be changed. - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test").setQuery(randomHasChild("child", "c_field", "blue")).get(); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getSuccessful(), equalTo(0)); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures().length, equalTo(getNumShards("test").numPrimaries)); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures()[0].reason(), containsString("[has_child] query and filter unsupported in delete_by_query api")); - client().admin().indices().prepareRefresh("test").get(); - - searchResponse = client().prepareSearch("test") - .setQuery(randomHasChild("child", "c_field", "blue")) - .get(); - assertHitCount(searchResponse, 3l); - } - - @Test - public void testDeleteByQuery_has_child_SingleRefresh() throws Exception { - assertAcked(prepareCreate("test") - .setSettings( - settingsBuilder() - .put(indexSettings()) - .put("index.refresh_interval", "-1") - ) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - ensureGreen(); - - // index simple data - client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get(); - client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get(); - client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get(); - client().admin().indices().prepareFlush().get(); - client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get(); - client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get(); - client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get(); - client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get(); - client().prepareIndex("test", "child", "c5").setSource("c_field", "blue").setParent("p3").get(); - client().prepareIndex("test", "child", "c6").setSource("c_field", "red").setParent("p3").get(); - client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get(); - client().prepareIndex("test", "child", "c7").setSource("c_field", "blue").setParent("p4").get(); - client().prepareIndex("test", "child", "c8").setSource("c_field", "red").setParent("p4").get(); - client().admin().indices().prepareRefresh().get(); - - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(randomHasChild("child", "c_field", "blue")) - .get(); - assertHitCount(searchResponse, 3l); - - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test").setQuery(randomHasChild("child", "c_field", "blue")).get(); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getSuccessful(), equalTo(0)); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures().length, equalTo(getNumShards("test").numPrimaries)); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures()[0].reason(), containsString("[has_child] query and filter unsupported in delete_by_query api")); - client().admin().indices().prepareRefresh("test").get(); - - searchResponse = client().prepareSearch("test") - .setQuery(randomHasChild("child", "c_field", "blue")) - .get(); - assertHitCount(searchResponse, 3l); - } - private QueryBuilder randomHasChild(String type, String field, String value) { if (randomBoolean()) { if (randomBoolean()) { @@ -1484,49 +1390,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { } } - @Test - public void testDeleteByQuery_has_parent() throws Exception { - assertAcked(prepareCreate("test") - .setSettings( - settingsBuilder() - .put(indexSettings()) - .put("index.refresh_interval", "-1") - ) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - ensureGreen(); - - // index simple data - client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get(); - client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get(); - client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get(); - client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get(); - client().admin().indices().prepareFlush("test").get(); - client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get(); - client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get(); - client().admin().indices().prepareRefresh().get(); - - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(randomHasParent("parent", "p_field", "p_value2")) - .get(); - assertHitCount(searchResponse, 2l); - - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test") - .setQuery(randomHasParent("parent", "p_field", "p_value2")) - .get(); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getSuccessful(), equalTo(0)); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures().length, equalTo(getNumShards("test").numPrimaries)); - assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures()[0].reason(), containsString("[has_parent] query and filter unsupported in delete_by_query api")); - client().admin().indices().prepareRefresh("test").get(); - client().admin().indices().prepareRefresh("test").get(); - client().admin().indices().prepareRefresh("test").get(); - - searchResponse = client().prepareSearch("test") - .setQuery(randomHasParent("parent", "p_field", "p_value2")) - .get(); - assertHitCount(searchResponse, 2l); - } - private QueryBuilder randomHasParent(String type, String field, String value) { if (randomBoolean()) { if (randomBoolean()) { diff --git a/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java b/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java index a677acdcc63..ad03d23e295 100644 --- a/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java +++ b/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java @@ -63,7 +63,6 @@ public class Search1StressTest { private int numberOfValues = 20; private int numberOfHits = 300; private TimeValue flusherThrottle = TimeValue.timeValueMillis(1000); - private TimeValue deleteByQueryThrottle = TimeValue.timeValueMillis(5000); private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS; @@ -130,11 +129,6 @@ public class Search1StressTest { return this; } - public Search1StressTest setDeleteByQueryThrottle(TimeValue deleteByQueryThrottle) { - this.deleteByQueryThrottle = deleteByQueryThrottle; - return this; - } - public Search1StressTest setSettings(Settings settings) { this.settings = settings; return this; @@ -264,28 +258,6 @@ public class Search1StressTest { } } - private class DeleteByQuery extends Thread { - volatile boolean close = false; - - volatile boolean closed = false; - - @Override - public void run() { - while (true) { - if (close) { - closed = true; - return; - } - try { - client.client().prepareDeleteByQuery().setQuery(termQuery("num", nextNumValue())).execute().actionGet(); - Thread.sleep(deleteByQueryThrottle.millis()); - } catch (Exception e) { - logger.warn("failed to delete_by_query", e); - } - } - } - } - private void indexDoc() throws Exception { XContentBuilder json = XContentFactory.jsonBuilder().startObject() .field("num", nextNumValue()) @@ -340,13 +312,6 @@ public class Search1StressTest { flusher.start(); } - DeleteByQuery deleteByQuery = null; - if (deleteByQueryThrottle.millis() > 0) { - deleteByQuery = new DeleteByQuery(); - deleteByQuery.start(); - } - - long testStart = System.currentTimeMillis(); while (true) { @@ -362,10 +327,6 @@ public class Search1StressTest { flusher.close = true; } - if (deleteByQuery != null) { - deleteByQuery.close = true; - } - for (Searcher searcherThread : searcherThreads) { searcherThread.close = true; } @@ -379,9 +340,6 @@ public class Search1StressTest { if (flusher != null && !flusher.closed) { logger.warn("flusher not closed!"); } - if (deleteByQuery != null && !deleteByQuery.closed) { - logger.warn("deleteByQuery not closed!"); - } for (Searcher searcherThread : searcherThreads) { if (!searcherThread.closed) { logger.warn("search thread not closed!"); @@ -410,7 +368,6 @@ public class Search1StressTest { .setIndexerThrottle(TimeValue.timeValueMillis(100)) .setSearchers(10) .setSearcherThrottle(TimeValue.timeValueMillis(10)) - .setDeleteByQueryThrottle(TimeValue.timeValueMillis(-1)) .setFlusherThrottle(TimeValue.timeValueMillis(1000)) .setNumberOfIndices(10) .setNumberOfTypes(5) From 0a61d03ea2b7a1f3f8507534e268dfc51144f07d Mon Sep 17 00:00:00 2001 From: Shay Banon Date: Tue, 28 Apr 2015 22:49:49 +0200 Subject: [PATCH 180/236] Remove index/indices replication infra code now that delete by query is out, we don't need this infrastructure code. The delete by query will be implenented as a plugin, with scan scroll + bulk delete, so it will not need this infra anyhow --- .../IndexReplicationOperationRequest.java | 87 -------- .../IndicesReplicationOperationRequest.java | 140 ------------- ...cesReplicationOperationRequestBuilder.java | 80 -------- ...nsportIndexReplicationOperationAction.java | 194 ------------------ ...portIndicesReplicationOperationAction.java | 126 ------------ 5 files changed, 627 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java delete mode 100644 src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java delete mode 100644 src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java delete mode 100644 src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java delete mode 100644 src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java deleted file mode 100644 index 008008b1e6c..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.action.*; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; - -import java.io.IOException; - -/** - * Request used within {@link org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction}. - * Since the corresponding action is internal that gets always executed locally, this request never gets sent over the transport. - * The specified index is expected to be a concrete index. Relies on input validation done by the caller actions. - */ -public abstract class IndexReplicationOperationRequest extends ActionRequest implements IndicesRequest { - - private final TimeValue timeout; - private final String index; - private final WriteConsistencyLevel consistencyLevel; - private final OriginalIndices originalIndices; - - protected IndexReplicationOperationRequest(String index, TimeValue timeout, WriteConsistencyLevel consistencyLevel, - String[] originalIndices, IndicesOptions originalIndicesOptions, ActionRequest request) { - super(request); - this.index = index; - this.timeout = timeout; - this.consistencyLevel = consistencyLevel; - this.originalIndices = new OriginalIndices(originalIndices, originalIndicesOptions); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - public TimeValue timeout() { - return timeout; - } - - public String index() { - return this.index; - } - - @Override - public String[] indices() { - return originalIndices.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - return originalIndices.indicesOptions(); - } - - public WriteConsistencyLevel consistencyLevel() { - return this.consistencyLevel; - } - - @Override - public final void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("IndexReplicationOperationRequest is not supposed to be sent over the transport"); - } - - @Override - public final void writeTo(StreamOutput out) throws IOException { - throw new UnsupportedOperationException("IndexReplicationOperationRequest is not supposed to be sent over the transport"); - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java deleted file mode 100644 index 5113628fa6f..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.WriteConsistencyLevel; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; - -import java.io.IOException; - -/** - * - */ -public abstract class IndicesReplicationOperationRequest extends ActionRequest implements IndicesRequest.Replaceable { - - protected TimeValue timeout = ShardReplicationOperationRequest.DEFAULT_TIMEOUT; - protected String[] indices; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); - - protected WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; - - public TimeValue timeout() { - return timeout; - } - - protected IndicesReplicationOperationRequest() { - } - - protected IndicesReplicationOperationRequest(ActionRequest actionRequest) { - super(actionRequest); - } - - /** - * A timeout to wait if the delete by query operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public final T timeout(TimeValue timeout) { - this.timeout = timeout; - return (T) this; - } - - /** - * A timeout to wait if the delete by query operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public T timeout(String timeout) { - this.timeout = TimeValue.parseTimeValue(timeout, null); - return (T) this; - } - - @Override - public String[] indices() { - return this.indices; - } - - @Override - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - @SuppressWarnings("unchecked") - public T indicesOptions(IndicesOptions indicesOptions) { - if (indicesOptions == null) { - throw new IllegalArgumentException("IndicesOptions must not be null"); - } - this.indicesOptions = indicesOptions; - return (T) this; - } - - /** - * The indices the request will execute against. - */ - @SuppressWarnings("unchecked") - @Override - public final T indices(String[] indices) { - this.indices = indices; - return (T) this; - } - - public WriteConsistencyLevel consistencyLevel() { - return this.consistencyLevel; - } - - /** - * Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} - */ - @SuppressWarnings("unchecked") - public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) { - if (consistencyLevel == null) { - throw new IllegalArgumentException("WriteConsistencyLevel must not be null"); - } - this.consistencyLevel = consistencyLevel; - return (T) this; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - consistencyLevel = WriteConsistencyLevel.fromId(in.readByte()); - timeout = TimeValue.readTimeValue(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeByte(consistencyLevel.id()); - timeout.writeTo(out); - out.writeStringArrayNullable(indices); - indicesOptions.writeIndicesOptions(out); - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java deleted file mode 100644 index 75598a6d295..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.WriteConsistencyLevel; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.unit.TimeValue; - -/** - */ -public abstract class IndicesReplicationOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends IndicesReplicationOperationRequestBuilder> - extends ActionRequestBuilder { - - protected IndicesReplicationOperationRequestBuilder(Client client, Request request) { - super(client, request); - } - - /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public final RequestBuilder setTimeout(TimeValue timeout) { - request.timeout(timeout); - return (RequestBuilder) this; - } - - /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public final RequestBuilder setTimeout(String timeout) { - request.timeout(timeout); - return (RequestBuilder) this; - } - - @SuppressWarnings("unchecked") - public final RequestBuilder setIndices(String... indices) { - request.indices(indices); - return (RequestBuilder) this; - } - - /** - * Specifies what type of requested indices to ignore and how to deal with wildcard indices expressions. - * For example indices that don't exist. - */ - @SuppressWarnings("unchecked") - public RequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - request().indicesOptions(indicesOptions); - return (RequestBuilder) this; - } - - /** - * Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} - */ - @SuppressWarnings("unchecked") - public RequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) { - request.consistencyLevel(consistencyLevel); - return (RequestBuilder) this; - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java deleted file mode 100644 index 5d0cba209d0..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportIndexReplicationOperationAction.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionWriteResponse; -import org.elasticsearch.action.ActionWriteResponse.ShardInfo.Failure; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; - -/** - * Internal transport action that executes on multiple shards, doesn't register any transport handler as it is always executed locally. - * It relies on a shard sub-action that gets sent over the transport and executed on each of the shard. - * The index provided with the request is expected to be a concrete index, properly resolved by the callers (parent actions). - */ -public abstract class TransportIndexReplicationOperationAction - extends TransportAction { - - protected final ClusterService clusterService; - - protected final TransportShardReplicationOperationAction shardAction; - - protected TransportIndexReplicationOperationAction(Settings settings, String actionName, ClusterService clusterService, - ThreadPool threadPool, TransportShardReplicationOperationAction shardAction, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); - this.clusterService = clusterService; - this.shardAction = shardAction; - } - - @Override - protected void doExecute(final Request request, final ActionListener listener) { - ClusterState clusterState = clusterService.state(); - ClusterBlockException blockException = checkGlobalBlock(clusterState, request); - if (blockException != null) { - throw blockException; - } - blockException = checkRequestBlock(clusterState, request); - if (blockException != null) { - throw blockException; - } - - final GroupShardsIterator groups; - try { - groups = shards(request); - } catch (Throwable e) { - listener.onFailure(e); - return; - } - final AtomicInteger indexCounter = new AtomicInteger(); - final AtomicInteger failureCounter = new AtomicInteger(); - final AtomicInteger completionCounter = new AtomicInteger(groups.size()); - final AtomicReferenceArray shardsResponses = new AtomicReferenceArray<>(groups.size()); - - for (final ShardIterator shardIt : groups) { - final ShardRequest shardRequest = newShardRequestInstance(request, shardIt.shardId().id()); - shardRequest.operationThreaded(true); - // no need for threaded listener, we will fork when its done based on the index request - shardRequest.listenerThreaded(false); - shardAction.execute(shardRequest, new ActionListener() { - @Override - public void onResponse(ShardResponse result) { - shardsResponses.set(indexCounter.getAndIncrement(), new ShardActionResult(result)); - returnIfNeeded(); - } - - @Override - public void onFailure(Throwable e) { - failureCounter.getAndIncrement(); - int index = indexCounter.getAndIncrement(); - // this is a failure for an entire shard group, constructs shard info accordingly - final RestStatus status = ExceptionsHelper.status(e); - Failure failure = new Failure(request.index(), shardIt.shardId().id(), null, e, status, true); - shardsResponses.set(index, new ShardActionResult(new ActionWriteResponse.ShardInfo(shardIt.size(), 0, failure))); - returnIfNeeded(); - } - - private void returnIfNeeded() { - if (completionCounter.decrementAndGet() == 0) { - List responses = new ArrayList<>(); - List failureList = new ArrayList<>(); - - int total = 0; - int successful = 0; - for (int i = 0; i < shardsResponses.length(); i++) { - ShardActionResult shardActionResult = shardsResponses.get(i); - final ActionWriteResponse.ShardInfo sf; - if (shardActionResult.isFailure()) { - assert shardActionResult.shardInfoOnFailure != null; - sf = shardActionResult.shardInfoOnFailure; - } else { - responses.add(shardActionResult.shardResponse); - sf = shardActionResult.shardResponse.getShardInfo(); - } - total += sf.getTotal(); - successful += sf.getSuccessful(); - failureList.addAll(Arrays.asList(sf.getFailures())); - } - assert failureList.size() == 0 || numShardGroupFailures(failureList) == failureCounter.get(); - - final Failure[] failures; - if (failureList.isEmpty()) { - failures = ActionWriteResponse.EMPTY; - } else { - failures = failureList.toArray(new Failure[failureList.size()]); - } - listener.onResponse(newResponseInstance(request, responses, new ActionWriteResponse.ShardInfo(total, successful, failures))); - } - } - - private int numShardGroupFailures(List failures) { - int numShardGroupFailures = 0; - for (Failure failure : failures) { - if (failure.primary()) { - numShardGroupFailures++; - } - } - return numShardGroupFailures; - } - }); - - } - } - - protected abstract Response newResponseInstance(Request request, List shardResponses, ActionWriteResponse.ShardInfo shardInfo); - - protected abstract GroupShardsIterator shards(Request request) throws ElasticsearchException; - - protected abstract ShardRequest newShardRequestInstance(Request request, int shardId); - - protected ClusterBlockException checkGlobalBlock(ClusterState state, Request request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); - } - - protected ClusterBlockException checkRequestBlock(ClusterState state, Request request) { - return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index()); - } - - private class ShardActionResult { - - private final ShardResponse shardResponse; - private final ActionWriteResponse.ShardInfo shardInfoOnFailure; - - private ShardActionResult(ShardResponse shardResponse) { - assert shardResponse != null; - this.shardResponse = shardResponse; - this.shardInfoOnFailure = null; - } - - private ShardActionResult(ActionWriteResponse.ShardInfo shardInfoOnFailure) { - assert shardInfoOnFailure != null; - this.shardInfoOnFailure = shardInfoOnFailure; - this.shardResponse = null; - } - - boolean isFailure() { - return shardInfoOnFailure != null; - } - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java deleted file mode 100644 index 0c76ebf8f58..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionWriteResponse; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; - -/** - */ -public abstract class TransportIndicesReplicationOperationAction - extends HandledTransportAction { - - protected final ClusterService clusterService; - - protected final TransportIndexReplicationOperationAction indexAction; - - protected TransportIndicesReplicationOperationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - TransportIndexReplicationOperationAction indexAction, ActionFilters actionFilters, - Class request) { - super(settings, actionName, threadPool, transportService, actionFilters, request); - this.clusterService = clusterService; - this.indexAction = indexAction; - } - - - protected abstract Map> resolveRouting(ClusterState clusterState, Request request) throws ElasticsearchException; - - @Override - protected void doExecute(final Request request, final ActionListener listener) { - ClusterState clusterState = clusterService.state(); - ClusterBlockException blockException = checkGlobalBlock(clusterState, request); - if (blockException != null) { - throw blockException; - } - // get actual indices - String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices()); - blockException = checkRequestBlock(clusterState, request, concreteIndices); - if (blockException != null) { - throw blockException; - } - - final AtomicInteger indexCounter = new AtomicInteger(); - final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length); - final AtomicReferenceArray indexResponses = new AtomicReferenceArray<>(concreteIndices.length); - final long startTimeInMillis = System.currentTimeMillis(); - - Map> routingMap = resolveRouting(clusterState, request); - if (concreteIndices.length == 0) { - listener.onResponse(newResponseInstance(request, indexResponses)); - } else { - for (final String index : concreteIndices) { - Set routing = null; - if (routingMap != null) { - routing = routingMap.get(index); - } - IndexRequest indexRequest = newIndexRequestInstance(request, index, routing, startTimeInMillis); - // no threading needed, all is done on the index replication one - indexRequest.listenerThreaded(false); - indexAction.execute(indexRequest, new ActionListener() { - @Override - public void onResponse(IndexResponse result) { - indexResponses.set(indexCounter.getAndIncrement(), result); - if (completionCounter.decrementAndGet() == 0) { - listener.onResponse(newResponseInstance(request, indexResponses)); - } - } - - @Override - public void onFailure(Throwable e) { - int index = indexCounter.getAndIncrement(); - if (accumulateExceptions()) { - indexResponses.set(index, e); - } - if (completionCounter.decrementAndGet() == 0) { - listener.onResponse(newResponseInstance(request, indexResponses)); - } - } - }); - } - } - } - - protected abstract Response newResponseInstance(Request request, AtomicReferenceArray indexResponses); - - protected abstract IndexRequest newIndexRequestInstance(Request request, String index, Set routing, long startTimeInMillis); - - protected abstract boolean accumulateExceptions(); - - protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request); - - protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices); -} From f6d8b12796a1724cb3c323ccaf17e964af880ee5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 22 Apr 2015 16:38:07 -0700 Subject: [PATCH 181/236] Mappings: Explicitly disallow multi fields from using object or nested fields Multi fields currently parse any field type passed in. However, they were only intended to support copying simple values from the outter field. This change adds validation to ensure object and nested fields are not used within multi fields. closes #10745 --- .../mapper/core/AbstractFieldMapper.java | 33 ++++++++++--------- .../index/mapper/core/TypeParsers.java | 4 +++ .../mapper/multifield/MultiFieldTests.java | 27 +++++++++++++++ 3 files changed, 48 insertions(+), 16 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index c18e7c06563..654e203d14d 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -837,7 +837,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { public static class MultiFields { public static MultiFields empty() { - return new MultiFields(Defaults.PATH_TYPE, ImmutableOpenMap.of()); + return new MultiFields(Defaults.PATH_TYPE, ImmutableOpenMap.of()); } public static class Builder { @@ -860,7 +860,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { if (pathType == Defaults.PATH_TYPE && mapperBuilders.isEmpty()) { return empty(); } else if (mapperBuilders.isEmpty()) { - return new MultiFields(pathType, ImmutableOpenMap.of()); + return new MultiFields(pathType, ImmutableOpenMap.of()); } else { ContentPath.Type origPathType = context.path().pathType(); context.path().pathType(pathType); @@ -869,26 +869,27 @@ public abstract class AbstractFieldMapper implements FieldMapper { for (ObjectObjectCursor cursor : this.mapperBuilders) { String key = cursor.key; Mapper.Builder value = cursor.value; - mapperBuilders.put(key, value.build(context)); + Mapper mapper = value.build(context); + assert mapper instanceof FieldMapper; + mapperBuilders.put(key, mapper); } context.path().remove(); context.path().pathType(origPathType); - ImmutableOpenMap.Builder mappers = mapperBuilders.cast(); + ImmutableOpenMap.Builder mappers = mapperBuilders.cast(); return new MultiFields(pathType, mappers.build()); } } - } private final ContentPath.Type pathType; - private volatile ImmutableOpenMap mappers; + private volatile ImmutableOpenMap mappers; - public MultiFields(ContentPath.Type pathType, ImmutableOpenMap mappers) { + public MultiFields(ContentPath.Type pathType, ImmutableOpenMap mappers) { this.pathType = pathType; this.mappers = mappers; // we disable the all in multi-field mappers - for (ObjectCursor cursor : mappers.values()) { - Mapper mapper = cursor.value; + for (ObjectCursor cursor : mappers.values()) { + FieldMapper mapper = cursor.value; if (mapper instanceof AllFieldMapper.IncludeInAll) { ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); } @@ -906,7 +907,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { context.path().pathType(pathType); context.path().add(mainField.name()); - for (ObjectCursor cursor : mappers.values()) { + for (ObjectCursor cursor : mappers.values()) { cursor.value.parse(context); } context.path().remove(); @@ -918,10 +919,10 @@ public abstract class AbstractFieldMapper implements FieldMapper { AbstractFieldMapper mergeWithMultiField = (AbstractFieldMapper) mergeWith; List> newFieldMappers = null; - ImmutableOpenMap.Builder newMappersBuilder = null; + ImmutableOpenMap.Builder newMappersBuilder = null; - for (ObjectCursor cursor : mergeWithMultiField.multiFields.mappers.values()) { - Mapper mergeWithMapper = cursor.value; + for (ObjectCursor cursor : mergeWithMultiField.multiFields.mappers.values()) { + FieldMapper mergeWithMapper = cursor.value; Mapper mergeIntoMapper = mappers.get(mergeWithMapper.name()); if (mergeIntoMapper == null) { // no mapping, simply add it if not simulating @@ -938,7 +939,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { if (newFieldMappers == null) { newFieldMappers = new ArrayList<>(2); } - newFieldMappers.add((FieldMapper) mergeWithMapper); + newFieldMappers.add(mergeWithMapper); } } } else { @@ -957,13 +958,13 @@ public abstract class AbstractFieldMapper implements FieldMapper { } public void traverse(FieldMapperListener fieldMapperListener) { - for (ObjectCursor cursor : mappers.values()) { + for (ObjectCursor cursor : mappers.values()) { cursor.value.traverse(fieldMapperListener); } } public void close() { - for (ObjectCursor cursor : mappers.values()) { + for (ObjectCursor cursor : mappers.values()) { cursor.value.close(); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index cfed59be20f..20f9eda9b26 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper.Loading; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.object.ObjectMapper; import java.util.ArrayList; import java.util.Collections; @@ -332,6 +333,9 @@ public class TypeParsers { } else { throw new MapperParsingException("No type specified for property [" + multiFieldName + "]"); } + if (type.equals(ObjectMapper.CONTENT_TYPE) || type.equals(ObjectMapper.NESTED_CONTENT_TYPE)) { + throw new MapperParsingException("Type [" + type + "] cannot be used in multi field"); + } Mapper.TypeParser typeParser = parserContext.typeParser(type); if (typeParser == null) { diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index 8a54985f0ce..75c58a7bf8f 100644 --- a/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.core.*; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; @@ -481,4 +482,30 @@ public class MultiFieldTests extends ElasticsearchSingleNodeTest { DocumentMapper docMapper2 = parser.parse(docMapper.mappingSource().string()); assertThat(docMapper.mappingSource(), equalTo(docMapper2.mappingSource())); } + + public void testObjectFieldNotAllowed() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type").startObject("properties").startObject("my_field") + .field("type", "string").startObject("fields").startObject("multi").field("type", "object").endObject().endObject() + .endObject().endObject().endObject().endObject().string(); + final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); + try { + parser.parse(mapping); + fail("expected mapping parse failure"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("cannot be used in multi field")); + } + } + + public void testNestedFieldNotAllowed() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type").startObject("properties").startObject("my_field") + .field("type", "string").startObject("fields").startObject("multi").field("type", "nested").endObject().endObject() + .endObject().endObject().endObject().endObject().string(); + final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); + try { + parser.parse(mapping); + fail("expected mapping parse failure"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("cannot be used in multi field")); + } + } } From 99584deae885d1b79f085f200facfe4cb690bd7b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sat, 25 Apr 2015 00:15:48 -0700 Subject: [PATCH 182/236] Mappings: Consolidate document parsing logic The code to parse a document was spread across 3 different classes, and depended on traversing the ObjectMapper hiearchy. This change consolidates all the doc parsing code into a new DocumentParser. This should allow adding unit tests (future issue) for document parsing so the logic can be simplified. All code was copied directly for this change with only minor modifications to make it work within the new location. closes #10802 --- .../index/mapper/DocumentMapper.java | 149 +--- .../index/mapper/DocumentParser.java | 701 ++++++++++++++++++ .../index/mapper/FieldMapper.java | 7 + .../elasticsearch/index/mapper/Mapper.java | 7 - .../index/mapper/MapperUtils.java | 13 +- .../mapper/core/AbstractFieldMapper.java | 79 +- .../index/mapper/object/ObjectMapper.java | 452 +---------- .../index/mapper/object/RootObjectMapper.java | 5 - .../DynamicMappingIntegrationTests.java | 3 +- .../{dynamic => }/DynamicMappingTests.java | 13 +- .../mapper/externalvalues/ExternalMapper.java | 9 +- .../externalvalues/ExternalRootMapper.java | 5 - .../mapper/merge/TestMergeMapperTests.java | 2 +- 13 files changed, 740 insertions(+), 705 deletions(-) create mode 100644 src/main/java/org/elasticsearch/index/mapper/DocumentParser.java rename src/test/java/org/elasticsearch/index/mapper/{dynamic => }/DynamicMappingIntegrationTests.java (98%) rename src/test/java/org/elasticsearch/index/mapper/{dynamic => }/DynamicMappingTests.java (96%) diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 205633289aa..51b8119e59c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -21,15 +21,10 @@ package org.elasticsearch.index.mapper; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import com.google.common.collect.Sets; - import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BitDocIdSet; -import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; @@ -46,8 +41,6 @@ import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.mapper.Mapping.SourceTransform; @@ -75,12 +68,10 @@ import org.elasticsearch.script.ScriptService.ScriptType; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; /** @@ -172,27 +163,15 @@ public class DocumentMapper implements ToXContent { } } - - private CloseableThreadLocal cache = new CloseableThreadLocal() { - @Override - protected ParseContext.InternalParseContext initialValue() { - return new ParseContext.InternalParseContext(index, indexSettings, docMapperParser, DocumentMapper.this, new ContentPath(0)); - } - }; - - private final String index; - - private final Settings indexSettings; - private final String type; private final StringAndBytesText typeText; - private final DocumentMapperParser docMapperParser; - private volatile CompressedString mappingSource; private final Mapping mapping; + private final DocumentParser documentParser; + private volatile DocumentFieldMappers fieldMappers; private volatile ImmutableMap objectMappers = ImmutableMap.of(); @@ -211,16 +190,14 @@ public class DocumentMapper implements ToXContent { RootObjectMapper rootObjectMapper, ImmutableMap meta, Map, RootMapper> rootMappers, List sourceTransforms) { - this.index = index; - this.indexSettings = indexSettings; this.type = rootObjectMapper.name(); this.typeText = new StringAndBytesText(this.type); - this.docMapperParser = docMapperParser; this.mapping = new Mapping( rootObjectMapper, rootMappers.values().toArray(new RootMapper[rootMappers.values().size()]), sourceTransforms.toArray(new SourceTransform[sourceTransforms.size()]), meta); + this.documentParser = new DocumentParser(index, indexSettings, docMapperParser, this); this.typeFilter = typeMapper().termFilter(type, null); @@ -364,109 +341,13 @@ public class DocumentMapper implements ToXContent { } public ParsedDocument parse(SourceToParse source) throws MapperParsingException { - return parse(source, null); + return documentParser.parseDocument(source, null); } + // NOTE: do not use this method, it will be removed in the future once + // https://github.com/elastic/elasticsearch/issues/10736 is done (MLT api is the only user of this listener) public ParsedDocument parse(SourceToParse source, @Nullable ParseListener listener) throws MapperParsingException { - ParseContext.InternalParseContext context = cache.get(); - - if (source.type() != null && !source.type().equals(this.type)) { - throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + this.type + "]"); - } - source.type(this.type); - - XContentParser parser = source.parser(); - try { - if (parser == null) { - parser = XContentHelper.createParser(source.source()); - } - if (mapping.sourceTransforms.length > 0) { - parser = transform(parser); - } - context.reset(parser, new ParseContext.Document(), source, listener); - - // will result in START_OBJECT - int countDownTokens = 0; - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { - throw new MapperParsingException("Malformed content, must start with an object"); - } - boolean emptyDoc = false; - token = parser.nextToken(); - if (token == XContentParser.Token.END_OBJECT) { - // empty doc, we can handle it... - emptyDoc = true; - } else if (token != XContentParser.Token.FIELD_NAME) { - throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist"); - } - - for (RootMapper rootMapper : mapping.rootMappers) { - rootMapper.preParse(context); - } - - if (!emptyDoc) { - Mapper update = mapping.root.parse(context); - if (update != null) { - context.addDynamicMappingsUpdate((RootObjectMapper) update); - } - } - - for (int i = 0; i < countDownTokens; i++) { - parser.nextToken(); - } - - for (RootMapper rootMapper : mapping.rootMappers) { - rootMapper.postParse(context); - } - } catch (Throwable e) { - // if its already a mapper parsing exception, no need to wrap it... - if (e instanceof MapperParsingException) { - throw (MapperParsingException) e; - } - - // Throw a more meaningful message if the document is empty. - if (source.source() != null && source.source().length() == 0) { - throw new MapperParsingException("failed to parse, document is empty"); - } - - throw new MapperParsingException("failed to parse", e); - } finally { - // only close the parser when its not provided externally - if (source.parser() == null && parser != null) { - parser.close(); - } - } - // reverse the order of docs for nested docs support, parent should be last - if (context.docs().size() > 1) { - Collections.reverse(context.docs()); - } - // apply doc boost - if (context.docBoost() != 1.0f) { - Set encounteredFields = Sets.newHashSet(); - for (ParseContext.Document doc : context.docs()) { - encounteredFields.clear(); - for (IndexableField field : doc) { - if (field.fieldType().indexOptions() != IndexOptions.NONE && !field.fieldType().omitNorms()) { - if (!encounteredFields.contains(field.name())) { - ((Field) field).setBoost(context.docBoost() * field.boost()); - encounteredFields.add(field.name()); - } - } - } - } - } - - Mapper rootDynamicUpdate = context.dynamicMappingsUpdate(); - Mapping update = null; - if (rootDynamicUpdate != null) { - update = mapping.mappingUpdate(rootDynamicUpdate); - } - - ParsedDocument doc = new ParsedDocument(context.uid(), context.version(), context.id(), context.type(), source.routing(), source.timestamp(), source.ttl(), context.docs(), - context.source(), update).parent(source.parent()); - // reset the context to free up memory - context.reset(null, null, null, null); - return doc; + return documentParser.parseDocument(source, listener); } /** @@ -514,19 +395,7 @@ public class DocumentMapper implements ToXContent { * @return transformed version of transformMe. This may actually be the same object as sourceAsMap */ public Map transformSourceAsMap(Map sourceAsMap) { - if (mapping.sourceTransforms.length == 0) { - return sourceAsMap; - } - for (SourceTransform transform : mapping.sourceTransforms) { - sourceAsMap = transform.transformSourceAsMap(sourceAsMap); - } - return sourceAsMap; - } - - private XContentParser transform(XContentParser parser) throws IOException { - Map transformed = transformSourceAsMap(parser.mapOrderedAndClose()); - XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()).value(transformed); - return parser.contentType().xContent().createParser(builder.bytes()); + return DocumentParser.transformSourceAsMap(mapping, sourceAsMap); } public void addFieldMappers(Collection> fieldMappers) { @@ -638,7 +507,7 @@ public class DocumentMapper implements ToXContent { } public void close() { - cache.close(); + documentParser.close(); mapping.root.close(); for (RootMapper rootMapper : mapping.rootMappers) { rootMapper.close(); diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java new file mode 100644 index 00000000000..2cd958fce7b --- /dev/null +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -0,0 +1,701 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.CloseableThreadLocal; +import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.mapper.object.ArrayValueMapperParser; +import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.mapper.object.RootObjectMapper; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +/** A parser for documents, given mappings from a DocumentMapper */ +class DocumentParser implements Closeable { + + private CloseableThreadLocal cache = new CloseableThreadLocal() { + @Override + protected ParseContext.InternalParseContext initialValue() { + return new ParseContext.InternalParseContext(index, indexSettings, docMapperParser, docMapper, new ContentPath(0)); + } + }; + + private String index; + private Settings indexSettings; + private DocumentMapperParser docMapperParser; + private DocumentMapper docMapper; + + public DocumentParser(String index, Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper) { + this.index = index; + this.indexSettings = indexSettings; + this.docMapperParser = docMapperParser; + this.docMapper = docMapper; + } + + public ParsedDocument parseDocument(SourceToParse source, @Nullable DocumentMapper.ParseListener listener) throws MapperParsingException { + ParseContext.InternalParseContext context = cache.get(); + + final Mapping mapping = docMapper.mapping(); + if (source.type() != null && !source.type().equals(docMapper.type())) { + throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + docMapper.type() + "]"); + } + source.type(docMapper.type()); + + XContentParser parser = source.parser(); + try { + if (parser == null) { + parser = XContentHelper.createParser(source.source()); + } + if (mapping.sourceTransforms.length > 0) { + parser = transform(mapping, parser); + } + context.reset(parser, new ParseContext.Document(), source, listener); + + // will result in START_OBJECT + int countDownTokens = 0; + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new MapperParsingException("Malformed content, must start with an object"); + } + boolean emptyDoc = false; + token = parser.nextToken(); + if (token == XContentParser.Token.END_OBJECT) { + // empty doc, we can handle it... + emptyDoc = true; + } else if (token != XContentParser.Token.FIELD_NAME) { + throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist"); + } + + for (RootMapper rootMapper : mapping.rootMappers) { + rootMapper.preParse(context); + } + + if (!emptyDoc) { + Mapper update = parseObject(context, mapping.root); + if (update != null) { + context.addDynamicMappingsUpdate(update); + } + } + + for (int i = 0; i < countDownTokens; i++) { + parser.nextToken(); + } + + for (RootMapper rootMapper : mapping.rootMappers) { + rootMapper.postParse(context); + } + } catch (Throwable e) { + // if its already a mapper parsing exception, no need to wrap it... + if (e instanceof MapperParsingException) { + throw (MapperParsingException) e; + } + + // Throw a more meaningful message if the document is empty. + if (source.source() != null && source.source().length() == 0) { + throw new MapperParsingException("failed to parse, document is empty"); + } + + throw new MapperParsingException("failed to parse", e); + } finally { + // only close the parser when its not provided externally + if (source.parser() == null && parser != null) { + parser.close(); + } + } + // reverse the order of docs for nested docs support, parent should be last + if (context.docs().size() > 1) { + Collections.reverse(context.docs()); + } + // apply doc boost + if (context.docBoost() != 1.0f) { + Set encounteredFields = Sets.newHashSet(); + for (ParseContext.Document doc : context.docs()) { + encounteredFields.clear(); + for (IndexableField field : doc) { + if (field.fieldType().indexOptions() != IndexOptions.NONE && !field.fieldType().omitNorms()) { + if (!encounteredFields.contains(field.name())) { + ((Field) field).setBoost(context.docBoost() * field.boost()); + encounteredFields.add(field.name()); + } + } + } + } + } + + Mapper rootDynamicUpdate = context.dynamicMappingsUpdate(); + Mapping update = null; + if (rootDynamicUpdate != null) { + update = mapping.mappingUpdate(rootDynamicUpdate); + } + + ParsedDocument doc = new ParsedDocument(context.uid(), context.version(), context.id(), context.type(), source.routing(), source.timestamp(), source.ttl(), context.docs(), + context.source(), update).parent(source.parent()); + // reset the context to free up memory + context.reset(null, null, null, null); + return doc; + } + + static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper) throws IOException { + if (mapper.isEnabled() == false) { + context.parser().skipChildren(); + return null; + } + XContentParser parser = context.parser(); + + String currentFieldName = parser.currentName(); + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_NULL) { + // the object is null ("obj1" : null), simply bail + return null; + } + + if (token.isValue()) { + throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value"); + } + + ObjectMapper.Nested nested = mapper.nested(); + if (nested.isNested()) { + context = context.createNestedContext(mapper.fullPath()); + ParseContext.Document nestedDoc = context.doc(); + ParseContext.Document parentDoc = nestedDoc.getParent(); + // pre add the uid field if possible (id was already provided) + IndexableField uidField = parentDoc.getField(UidFieldMapper.NAME); + if (uidField != null) { + // we don't need to add it as a full uid field in nested docs, since we don't need versioning + // we also rely on this for UidField#loadVersion + + // this is a deeply nested field + nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + } + // the type of the nested doc starts with __, so we can identify that its a nested one in filters + // note, we don't prefix it with the type of the doc since it allows us to execute a nested query + // across types (for example, with similar nested objects) + nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE)); + } + + ContentPath.Type origPathType = context.path().pathType(); + context.path().pathType(mapper.pathType()); + + // if we are at the end of the previous object, advance + if (token == XContentParser.Token.END_OBJECT) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.START_OBJECT) { + // if we are just starting an OBJECT, advance, this is the object we are parsing, we need the name first + token = parser.nextToken(); + } + + ObjectMapper update = null; + while (token != XContentParser.Token.END_OBJECT) { + ObjectMapper newUpdate = null; + if (token == XContentParser.Token.START_OBJECT) { + newUpdate = parseObject(context, mapper, currentFieldName); + } else if (token == XContentParser.Token.START_ARRAY) { + newUpdate = parseArray(context, mapper, currentFieldName); + } else if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_NULL) { + parseNullValue(context, mapper, currentFieldName); + } else if (token == null) { + throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?"); + } else if (token.isValue()) { + newUpdate = parseValue(context, mapper, currentFieldName, token); + } + token = parser.nextToken(); + if (newUpdate != null) { + if (update == null) { + update = newUpdate; + } else { + MapperUtils.merge(update, newUpdate); + } + } + } + // restore the enable path flag + context.path().pathType(origPathType); + if (nested.isNested()) { + ParseContext.Document nestedDoc = context.doc(); + ParseContext.Document parentDoc = nestedDoc.getParent(); + if (nested.isIncludeInParent()) { + for (IndexableField field : nestedDoc.getFields()) { + if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) { + continue; + } else { + parentDoc.add(field); + } + } + } + if (nested.isIncludeInRoot()) { + ParseContext.Document rootDoc = context.rootDoc(); + // don't add it twice, if its included in parent, and we are handling the master doc... + if (!nested.isIncludeInParent() || parentDoc != rootDoc) { + for (IndexableField field : nestedDoc.getFields()) { + if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) { + continue; + } else { + rootDoc.add(field); + } + } + } + } + } + return update; + } + + private static Mapper parseObjectOrField(ParseContext context, Mapper mapper) throws IOException { + if (mapper instanceof ObjectMapper) { + return parseObject(context, (ObjectMapper) mapper); + } else { + FieldMapper fieldMapper = (FieldMapper)mapper; + Mapper update = fieldMapper.parse(context); + if (fieldMapper.copyTo() != null) { + parseCopyFields(context, fieldMapper, fieldMapper.copyTo().copyToFields()); + } + return update; + } + } + + private static ObjectMapper parseObject(final ParseContext context, ObjectMapper mapper, String currentFieldName) throws IOException { + if (currentFieldName == null) { + throw new MapperParsingException("object mapping [" + mapper.name() + "] trying to serialize an object with no field associated with it, current value [" + context.parser().textOrNull() + "]"); + } + context.path().add(currentFieldName); + + ObjectMapper update = null; + Mapper objectMapper = mapper.getMapper(currentFieldName); + if (objectMapper != null) { + final Mapper subUpdate = parseObjectOrField(context, objectMapper); + if (subUpdate != null) { + // propagate mapping update + update = mapper.mappingUpdate(subUpdate); + } + } else { + ObjectMapper.Dynamic dynamic = mapper.dynamic(); + if (dynamic == null) { + dynamic = dynamicOrDefault(context.root().dynamic()); + } + if (dynamic == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(mapper.fullPath(), currentFieldName); + } else if (dynamic == ObjectMapper.Dynamic.TRUE) { + // remove the current field name from path, since template search and the object builder add it as well... + context.path().remove(); + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); + if (builder == null) { + builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(mapper.pathType()); + // if this is a non root object, then explicitly set the dynamic behavior if set + if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) { + ((ObjectMapper.Builder) builder).dynamic(mapper.dynamic()); + } + } + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + objectMapper = builder.build(builderContext); + context.path().add(currentFieldName); + update = mapper.mappingUpdate(parseAndMergeUpdate(objectMapper, context)); + } else { + // not dynamic, read everything up to end object + context.parser().skipChildren(); + } + } + + context.path().remove(); + return update; + } + + private static ObjectMapper parseArray(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { + String arrayFieldName = lastFieldName; + Mapper mapper = parentMapper.getMapper(lastFieldName); + if (mapper != null) { + // There is a concrete mapper for this field already. Need to check if the mapper + // expects an array, if so we pass the context straight to the mapper and if not + // we serialize the array components + if (mapper instanceof ArrayValueMapperParser) { + final Mapper subUpdate = parseObjectOrField(context, mapper); + if (subUpdate != null) { + // propagate the mapping update + return parentMapper.mappingUpdate(subUpdate); + } else { + return null; + } + } else { + return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + } else { + + ObjectMapper.Dynamic dynamic = parentMapper.dynamic(); + if (dynamic == null) { + dynamic = dynamicOrDefault(context.root().dynamic()); + } + if (dynamic == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(parentMapper.fullPath(), arrayFieldName); + } else if (dynamic == ObjectMapper.Dynamic.TRUE) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object"); + if (builder == null) { + return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + mapper = builder.build(builderContext); + if (mapper != null && mapper instanceof ArrayValueMapperParser) { + context.path().add(arrayFieldName); + mapper = parseAndMergeUpdate(mapper, context); + return parentMapper.mappingUpdate(mapper); + } else { + return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + } else { + return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + } + } + + private static ObjectMapper parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException { + XContentParser parser = context.parser(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.START_OBJECT) { + return parseObject(context, mapper, lastFieldName); + } else if (token == XContentParser.Token.START_ARRAY) { + return parseArray(context, mapper, lastFieldName); + } else if (token == XContentParser.Token.FIELD_NAME) { + lastFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_NULL) { + parseNullValue(context, mapper, lastFieldName); + } else if (token == null) { + throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?"); + } else { + return parseValue(context, mapper, lastFieldName, token); + } + } + return null; + } + + private static ObjectMapper parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + if (currentFieldName == null) { + throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]"); + } + Mapper mapper = parentMapper.getMapper(currentFieldName); + if (mapper != null) { + Mapper subUpdate = parseObjectOrField(context, mapper); + if (subUpdate == null) { + return null; + } + return parentMapper.mappingUpdate(subUpdate); + } else { + return parseDynamicValue(context, parentMapper, currentFieldName, token); + } + } + + private static void parseNullValue(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { + // we can only handle null values if we have mappings for them + Mapper mapper = parentMapper.getMapper(lastFieldName); + if (mapper != null) { + if (mapper instanceof FieldMapper) { + if (!((FieldMapper) mapper).supportsNullValue()) { + throw new MapperParsingException("no object mapping found for null value in [" + lastFieldName + "]"); + } + } + parseObjectOrField(context, mapper); + } else if (parentMapper.dynamic() == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(parentMapper.fullPath(), lastFieldName); + } + } + + private static ObjectMapper parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + ObjectMapper.Dynamic dynamic = parentMapper.dynamic(); + if (dynamic == null) { + dynamic = dynamicOrDefault(context.root().dynamic()); + } + if (dynamic == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(parentMapper.fullPath(), currentFieldName); + } + if (dynamic == ObjectMapper.Dynamic.FALSE) { + return null; + } + Mapper mapper = null; + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + if (token == XContentParser.Token.VALUE_STRING) { + boolean resolved = false; + + // do a quick test to see if its fits a dynamic template, if so, use it. + // we need to do it here so we can handle things like attachment templates, where calling + // text (to see if its a date) causes the binary value to be cleared + if (!resolved) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string", null); + if (builder != null) { + mapper = builder.build(builderContext); + resolved = true; + } + } + + if (!resolved && context.root().dateDetection()) { + String text = context.parser().text(); + // a safe check since "1" gets parsed as well + if (Strings.countOccurrencesOf(text, ":") > 1 || Strings.countOccurrencesOf(text, "-") > 1 || Strings.countOccurrencesOf(text, "/") > 1) { + for (FormatDateTimeFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) { + try { + dateTimeFormatter.parser().parseMillis(text); + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date"); + if (builder == null) { + builder = MapperBuilders.dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter); + } + mapper = builder.build(builderContext); + resolved = true; + break; + } catch (Exception e) { + // failure to parse this, continue + } + } + } + } + if (!resolved && context.root().numericDetection()) { + String text = context.parser().text(); + try { + Long.parseLong(text); + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); + if (builder == null) { + builder = MapperBuilders.longField(currentFieldName); + } + mapper = builder.build(builderContext); + resolved = true; + } catch (Exception e) { + // not a long number + } + if (!resolved) { + try { + Double.parseDouble(text); + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); + if (builder == null) { + builder = MapperBuilders.doubleField(currentFieldName); + } + mapper = builder.build(builderContext); + resolved = true; + } catch (Exception e) { + // not a long number + } + } + } + if (!resolved) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string"); + if (builder == null) { + builder = MapperBuilders.stringField(currentFieldName); + } + mapper = builder.build(builderContext); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + XContentParser.NumberType numberType = context.parser().numberType(); + if (numberType == XContentParser.NumberType.INT) { + if (context.parser().estimatedNumberType()) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); + if (builder == null) { + builder = MapperBuilders.longField(currentFieldName); + } + mapper = builder.build(builderContext); + } else { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "integer"); + if (builder == null) { + builder = MapperBuilders.integerField(currentFieldName); + } + mapper = builder.build(builderContext); + } + } else if (numberType == XContentParser.NumberType.LONG) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); + if (builder == null) { + builder = MapperBuilders.longField(currentFieldName); + } + mapper = builder.build(builderContext); + } else if (numberType == XContentParser.NumberType.FLOAT) { + if (context.parser().estimatedNumberType()) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); + if (builder == null) { + builder = MapperBuilders.doubleField(currentFieldName); + } + mapper = builder.build(builderContext); + } else { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "float"); + if (builder == null) { + builder = MapperBuilders.floatField(currentFieldName); + } + mapper = builder.build(builderContext); + } + } else if (numberType == XContentParser.NumberType.DOUBLE) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); + if (builder == null) { + builder = MapperBuilders.doubleField(currentFieldName); + } + mapper = builder.build(builderContext); + } + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean"); + if (builder == null) { + builder = MapperBuilders.booleanField(currentFieldName); + } + mapper = builder.build(builderContext); + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary"); + if (builder == null) { + builder = MapperBuilders.binaryField(currentFieldName); + } + mapper = builder.build(builderContext); + } else { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, null); + if (builder != null) { + mapper = builder.build(builderContext); + } else { + // TODO how do we identify dynamically that its a binary value? + throw new ElasticsearchIllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); + } + } + + mapper = parseAndMergeUpdate(mapper, context); + + ObjectMapper update = null; + if (mapper != null) { + update = parentMapper.mappingUpdate(mapper); + } + return update; + } + + /** Creates instances of the fields that the current field should be copied to */ + private static void parseCopyFields(ParseContext context, FieldMapper fieldMapper, ImmutableList copyToFields) throws IOException { + if (!context.isWithinCopyTo() && copyToFields.isEmpty() == false) { + context = context.createCopyToContext(); + for (String field : copyToFields) { + // In case of a hierarchy of nested documents, we need to figure out + // which document the field should go to + ParseContext.Document targetDoc = null; + for (ParseContext.Document doc = context.doc(); doc != null; doc = doc.getParent()) { + if (field.startsWith(doc.getPrefix())) { + targetDoc = doc; + break; + } + } + assert targetDoc != null; + final ParseContext copyToContext; + if (targetDoc == context.doc()) { + copyToContext = context; + } else { + copyToContext = context.switchDoc(targetDoc); + } + parseCopy(field, copyToContext); + } + } + } + + /** Creates an copy of the current field with given field name and boost */ + private static void parseCopy(String field, ParseContext context) throws IOException { + // TODO: this should not be indexName... + FieldMappers mappers = context.docMapper().mappers().indexName(field); + if (mappers != null && !mappers.isEmpty()) { + mappers.mapper().parse(context); + } else { + // The path of the dest field might be completely different from the current one so we need to reset it + context = context.overridePath(new ContentPath(0)); + + ObjectMapper mapper = context.root(); + String objectPath = ""; + String fieldPath = field; + int posDot = field.lastIndexOf('.'); + if (posDot > 0) { + objectPath = field.substring(0, posDot); + context.path().add(objectPath); + mapper = context.docMapper().objectMappers().get(objectPath); + fieldPath = field.substring(posDot + 1); + } + if (mapper == null) { + //TODO: Create an object dynamically? + throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]"); + } + ObjectMapper update = parseDynamicValue(context, mapper, fieldPath, context.parser().currentToken()); + assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping + + // propagate the update to the root + while (objectPath.length() > 0) { + String parentPath = ""; + ObjectMapper parent = context.root(); + posDot = objectPath.lastIndexOf('.'); + if (posDot > 0) { + parentPath = objectPath.substring(0, posDot); + parent = context.docMapper().objectMappers().get(parentPath); + } + if (parent == null) { + throw new ElasticsearchIllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]"); + } + update = parent.mappingUpdate(update); + objectPath = parentPath; + } + context.addDynamicMappingsUpdate(update); + } + } + + /** + * Parse the given {@code context} with the given {@code mapper} and apply + * the potential mapping update in-place. This method is useful when + * composing mapping updates. + */ + private static M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { + final Mapper update = parseObjectOrField(context, mapper); + if (update != null) { + MapperUtils.merge(mapper, update); + } + return mapper; + } + + private static XContentParser transform(Mapping mapping, XContentParser parser) throws IOException { + Map transformed = transformSourceAsMap(mapping, parser.mapOrderedAndClose()); + XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()).value(transformed); + return parser.contentType().xContent().createParser(builder.bytes()); + } + + private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper.Dynamic dynamic) { + return dynamic == null ? ObjectMapper.Dynamic.TRUE : dynamic; + } + + static Map transformSourceAsMap(Mapping mapping, Map sourceAsMap) { + if (mapping.sourceTransforms.length == 0) { + return sourceAsMap; + } + for (Mapping.SourceTransform transform : mapping.sourceTransforms) { + sourceAsMap = transform.transformSourceAsMap(sourceAsMap); + } + return sourceAsMap; + } + + @Override + public void close() { + cache.close(); + } +} diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 09b6b58137b..2c19d0e326c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -297,6 +297,13 @@ public interface FieldMapper extends Mapper { * */ public boolean isGenerated(); + /** + * Parse using the provided {@link ParseContext} and return a mapping + * update if dynamic mappings modified the mappings, or {@code null} if + * mappings were not modified. + */ + Mapper parse(ParseContext context) throws IOException; + /** * @return a {@link FieldStats} instance that maps to the type of this field based on the provided {@link Terms} instance. */ diff --git a/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/src/main/java/org/elasticsearch/index/mapper/Mapper.java index e1aaffb4d39..1821dfd4436 100644 --- a/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -125,13 +125,6 @@ public interface Mapper extends ToXContent { String name(); - /** - * Parse using the provided {@link ParseContext} and return a mapping - * update if dynamic mappings modified the mappings, or {@code null} if - * mappings were not modified. - */ - Mapper parse(ParseContext context) throws IOException; - void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException; void traverse(FieldMapperListener fieldMapperListener); diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java b/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java index 09c061c60a9..d83cd76440a 100644 --- a/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java @@ -28,18 +28,7 @@ import java.util.Collection; public enum MapperUtils { ; - /** - * Parse the given {@code context} with the given {@code mapper} and apply - * the potential mapping update in-place. This method is useful when - * composing mapping updates. - */ - public static M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { - final Mapper update = mapper.parse(context); - if (update != null) { - merge(mapper, update); - } - return mapper; - } + private static MergeResult newStrictMergeContext() { return new MergeResult(false) { diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index 654e203d14d..40dc4a77313 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -429,9 +429,6 @@ public abstract class AbstractFieldMapper implements FieldMapper { throw new MapperParsingException("failed to parse [" + names.fullName() + "]", e); } multiFields.parse(this, context); - if (copyTo != null) { - copyTo.parse(context); - } return null; } @@ -897,6 +894,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { } public void parse(AbstractFieldMapper mainField, ParseContext context) throws IOException { + // TODO: multi fields are really just copy fields, we just need to expose "sub fields" or something that can be part of the mappings if (mappers.isEmpty()) { return; } @@ -1003,34 +1001,6 @@ public abstract class AbstractFieldMapper implements FieldMapper { this.copyToFields = copyToFields; } - /** - * Creates instances of the fields that the current field should be copied to - */ - public void parse(ParseContext context) throws IOException { - if (!context.isWithinCopyTo() && copyToFields.isEmpty() == false) { - context = context.createCopyToContext(); - for (String field : copyToFields) { - // In case of a hierarchy of nested documents, we need to figure out - // which document the field should go to - Document targetDoc = null; - for (Document doc = context.doc(); doc != null; doc = doc.getParent()) { - if (field.startsWith(doc.getPrefix())) { - targetDoc = doc; - break; - } - } - assert targetDoc != null; - final ParseContext copyToContext; - if (targetDoc == context.doc()) { - copyToContext = context; - } else { - copyToContext = context.switchDoc(targetDoc); - } - parse(field, copyToContext); - } - } - } - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (!copyToFields.isEmpty()) { builder.startArray("copy_to"); @@ -1058,53 +1028,6 @@ public abstract class AbstractFieldMapper implements FieldMapper { public ImmutableList copyToFields() { return copyToFields; } - - /** - * Creates an copy of the current field with given field name and boost - */ - public void parse(String field, ParseContext context) throws IOException { - FieldMappers mappers = context.docMapper().mappers().indexName(field); - if (mappers != null && !mappers.isEmpty()) { - mappers.mapper().parse(context); - } else { - // The path of the dest field might be completely different from the current one so we need to reset it - context = context.overridePath(new ContentPath(0)); - - ObjectMapper mapper = context.root(); - String objectPath = ""; - String fieldPath = field; - int posDot = field.lastIndexOf('.'); - if (posDot > 0) { - objectPath = field.substring(0, posDot); - context.path().add(objectPath); - mapper = context.docMapper().objectMappers().get(objectPath); - fieldPath = field.substring(posDot + 1); - } - if (mapper == null) { - //TODO: Create an object dynamically? - throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]"); - } - ObjectMapper update = mapper.parseDynamicValue(context, fieldPath, context.parser().currentToken()); - assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping - - // propagate the update to the root - while (objectPath.length() > 0) { - String parentPath = ""; - ObjectMapper parent = context.root(); - posDot = objectPath.lastIndexOf('.'); - if (posDot > 0) { - parentPath = objectPath.substring(0, posDot); - parent = context.docMapper().objectMappers().get(parentPath); - } - if (parent == null) { - throw new ElasticsearchIllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]"); - } - update = parent.mappingUpdate(update); - objectPath = parentPath; - } - context.addDynamicMappingsUpdate((RootObjectMapper) update); - } - } } /** diff --git a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index d0b9ab72819..67c3636be7d 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -20,44 +20,31 @@ package org.elasticsearch.index.mapper.object; import com.google.common.collect.Iterables; - -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.CopyOnWriteHashMap; -import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.FieldMapperListener; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MapperUtils; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ObjectMapperListener; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParseContext.Document; -import org.elasticsearch.index.mapper.StrictDynamicMappingException; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.settings.IndexSettings; import java.io.IOException; @@ -74,15 +61,7 @@ import java.util.TreeMap; import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.elasticsearch.index.mapper.MapperBuilders.binaryField; -import static org.elasticsearch.index.mapper.MapperBuilders.booleanField; -import static org.elasticsearch.index.mapper.MapperBuilders.dateField; -import static org.elasticsearch.index.mapper.MapperBuilders.doubleField; -import static org.elasticsearch.index.mapper.MapperBuilders.floatField; -import static org.elasticsearch.index.mapper.MapperBuilders.integerField; -import static org.elasticsearch.index.mapper.MapperBuilders.longField; import static org.elasticsearch.index.mapper.MapperBuilders.object; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType; /** @@ -418,6 +397,18 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea return this.name; } + public boolean isEnabled() { + return this.enabled; + } + + public ContentPath.Type pathType() { + return pathType; + } + + public Mapper getMapper(String field) { + return mappers.get(field); + } + @Override public void includeInAll(Boolean includeInAll) { if (includeInAll == null) { @@ -500,422 +491,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea } public final Dynamic dynamic() { - return this.dynamic == null ? Dynamic.TRUE : this.dynamic; - } - - public void setDynamic(Dynamic dynamic) { - this.dynamic = dynamic; - } - - protected boolean allowValue() { - return true; - } - - @Override - public ObjectMapper parse(ParseContext context) throws IOException { - if (!enabled) { - context.parser().skipChildren(); - return null; - } - XContentParser parser = context.parser(); - - String currentFieldName = parser.currentName(); - XContentParser.Token token = parser.currentToken(); - if (token == XContentParser.Token.VALUE_NULL) { - // the object is null ("obj1" : null), simply bail - return null; - } - - if (token.isValue() && !allowValue()) { - // if we are parsing an object but it is just a value, its only allowed on root level parsers with there - // is a field name with the same name as the type - throw new MapperParsingException("object mapping for [" + name + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value"); - } - - if (nested.isNested()) { - context = context.createNestedContext(fullPath); - Document nestedDoc = context.doc(); - Document parentDoc = nestedDoc.getParent(); - // pre add the uid field if possible (id was already provided) - IndexableField uidField = parentDoc.getField(UidFieldMapper.NAME); - if (uidField != null) { - // we don't need to add it as a full uid field in nested docs, since we don't need versioning - // we also rely on this for UidField#loadVersion - - // this is a deeply nested field - nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); - } - // the type of the nested doc starts with __, so we can identify that its a nested one in filters - // note, we don't prefix it with the type of the doc since it allows us to execute a nested query - // across types (for example, with similar nested objects) - nestedDoc.add(new Field(TypeFieldMapper.NAME, nestedTypePathAsString, TypeFieldMapper.Defaults.FIELD_TYPE)); - } - - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); - - // if we are at the end of the previous object, advance - if (token == XContentParser.Token.END_OBJECT) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.START_OBJECT) { - // if we are just starting an OBJECT, advance, this is the object we are parsing, we need the name first - token = parser.nextToken(); - } - - ObjectMapper update = null; - while (token != XContentParser.Token.END_OBJECT) { - ObjectMapper newUpdate = null; - if (token == XContentParser.Token.START_OBJECT) { - newUpdate = serializeObject(context, currentFieldName); - } else if (token == XContentParser.Token.START_ARRAY) { - newUpdate = serializeArray(context, currentFieldName); - } else if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_NULL) { - serializeNullValue(context, currentFieldName); - } else if (token == null) { - throw new MapperParsingException("object mapping for [" + name + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?"); - } else if (token.isValue()) { - newUpdate = serializeValue(context, currentFieldName, token); - } - token = parser.nextToken(); - if (newUpdate != null) { - if (update == null) { - update = newUpdate; - } else { - MapperUtils.merge(update, newUpdate); - } - } - } - // restore the enable path flag - context.path().pathType(origPathType); - if (nested.isNested()) { - Document nestedDoc = context.doc(); - Document parentDoc = nestedDoc.getParent(); - if (nested.isIncludeInParent()) { - for (IndexableField field : nestedDoc.getFields()) { - if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) { - continue; - } else { - parentDoc.add(field); - } - } - } - if (nested.isIncludeInRoot()) { - Document rootDoc = context.rootDoc(); - // don't add it twice, if its included in parent, and we are handling the master doc... - if (!nested.isIncludeInParent() || parentDoc != rootDoc) { - for (IndexableField field : nestedDoc.getFields()) { - if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) { - continue; - } else { - rootDoc.add(field); - } - } - } - } - } - return update; - } - - private void serializeNullValue(ParseContext context, String lastFieldName) throws IOException { - // we can only handle null values if we have mappings for them - Mapper mapper = mappers.get(lastFieldName); - if (mapper != null) { - if (mapper instanceof FieldMapper) { - if (!((FieldMapper) mapper).supportsNullValue()) { - throw new MapperParsingException("no object mapping found for null value in [" + lastFieldName + "]"); - } - } - mapper.parse(context); - } else if (dynamic == Dynamic.STRICT) { - throw new StrictDynamicMappingException(fullPath, lastFieldName); - } - } - - private ObjectMapper serializeObject(final ParseContext context, String currentFieldName) throws IOException { - if (currentFieldName == null) { - throw new MapperParsingException("object mapping [" + name + "] trying to serialize an object with no field associated with it, current value [" + context.parser().textOrNull() + "]"); - } - context.path().add(currentFieldName); - - ObjectMapper update = null; - Mapper objectMapper = mappers.get(currentFieldName); - if (objectMapper != null) { - final Mapper subUpdate = objectMapper.parse(context); - if (subUpdate != null) { - // propagate mapping update - update = mappingUpdate(subUpdate); - } - } else { - Dynamic dynamic = this.dynamic; - if (dynamic == null) { - dynamic = context.root().dynamic(); - } - if (dynamic == Dynamic.STRICT) { - throw new StrictDynamicMappingException(fullPath, currentFieldName); - } else if (dynamic == Dynamic.TRUE) { - // remove the current field name from path, since template search and the object builder add it as well... - context.path().remove(); - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); - if (builder == null) { - builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(pathType); - // if this is a non root object, then explicitly set the dynamic behavior if set - if (!(this instanceof RootObjectMapper) && this.dynamic != Defaults.DYNAMIC) { - ((Builder) builder).dynamic(this.dynamic); - } - } - BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path()); - objectMapper = builder.build(builderContext); - context.path().add(currentFieldName); - update = mappingUpdate(MapperUtils.parseAndMergeUpdate(objectMapper, context)); - } else { - // not dynamic, read everything up to end object - context.parser().skipChildren(); - } - } - - context.path().remove(); - return update; - } - - private ObjectMapper serializeArray(ParseContext context, String lastFieldName) throws IOException { - String arrayFieldName = lastFieldName; - Mapper mapper = mappers.get(lastFieldName); - if (mapper != null) { - // There is a concrete mapper for this field already. Need to check if the mapper - // expects an array, if so we pass the context straight to the mapper and if not - // we serialize the array components - if (mapper instanceof ArrayValueMapperParser) { - final Mapper subUpdate = mapper.parse(context); - if (subUpdate != null) { - // propagate the mapping update - return mappingUpdate(subUpdate); - } else { - return null; - } - } else { - return serializeNonDynamicArray(context, lastFieldName, arrayFieldName); - } - } else { - - Dynamic dynamic = this.dynamic; - if (dynamic == null) { - dynamic = context.root().dynamic(); - } - if (dynamic == Dynamic.STRICT) { - throw new StrictDynamicMappingException(fullPath, arrayFieldName); - } else if (dynamic == Dynamic.TRUE) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object"); - if (builder == null) { - return serializeNonDynamicArray(context, lastFieldName, arrayFieldName); - } - BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path()); - mapper = builder.build(builderContext); - if (mapper != null && mapper instanceof ArrayValueMapperParser) { - context.path().add(arrayFieldName); - mapper = MapperUtils.parseAndMergeUpdate(mapper, context); - return mappingUpdate(mapper); - } else { - return serializeNonDynamicArray(context, lastFieldName, arrayFieldName); - } - } else { - return serializeNonDynamicArray(context, lastFieldName, arrayFieldName); - } - } - } - - private ObjectMapper serializeNonDynamicArray(ParseContext context, String lastFieldName, String arrayFieldName) throws IOException { - XContentParser parser = context.parser(); - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.START_OBJECT) { - return serializeObject(context, lastFieldName); - } else if (token == XContentParser.Token.START_ARRAY) { - return serializeArray(context, lastFieldName); - } else if (token == XContentParser.Token.FIELD_NAME) { - lastFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_NULL) { - serializeNullValue(context, lastFieldName); - } else if (token == null) { - throw new MapperParsingException("object mapping for [" + name + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?"); - } else { - return serializeValue(context, lastFieldName, token); - } - } - return null; - } - - private ObjectMapper serializeValue(final ParseContext context, String currentFieldName, XContentParser.Token token) throws IOException { - if (currentFieldName == null) { - throw new MapperParsingException("object mapping [" + name + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]"); - } - Mapper mapper = mappers.get(currentFieldName); - if (mapper != null) { - Mapper subUpdate = mapper.parse(context); - if (subUpdate == null) { - return null; - } - return mappingUpdate(subUpdate); - } else { - return parseDynamicValue(context, currentFieldName, token); - } - } - - public ObjectMapper parseDynamicValue(final ParseContext context, String currentFieldName, XContentParser.Token token) throws IOException { - Dynamic dynamic = this.dynamic; - if (dynamic == null) { - dynamic = context.root().dynamic(); - } - if (dynamic == Dynamic.STRICT) { - throw new StrictDynamicMappingException(fullPath, currentFieldName); - } - if (dynamic == Dynamic.FALSE) { - return null; - } - Mapper mapper = null; - BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path()); - if (token == XContentParser.Token.VALUE_STRING) { - boolean resolved = false; - - // do a quick test to see if its fits a dynamic template, if so, use it. - // we need to do it here so we can handle things like attachment templates, where calling - // text (to see if its a date) causes the binary value to be cleared - if (!resolved) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string", null); - if (builder != null) { - mapper = builder.build(builderContext); - resolved = true; - } - } - - if (!resolved && context.root().dateDetection()) { - String text = context.parser().text(); - // a safe check since "1" gets parsed as well - if (Strings.countOccurrencesOf(text, ":") > 1 || Strings.countOccurrencesOf(text, "-") > 1 || Strings.countOccurrencesOf(text, "/") > 1) { - for (FormatDateTimeFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) { - try { - dateTimeFormatter.parser().parseMillis(text); - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date"); - if (builder == null) { - builder = dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter); - } - mapper = builder.build(builderContext); - resolved = true; - break; - } catch (Exception e) { - // failure to parse this, continue - } - } - } - } - if (!resolved && context.root().numericDetection()) { - String text = context.parser().text(); - try { - Long.parseLong(text); - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); - if (builder == null) { - builder = longField(currentFieldName); - } - mapper = builder.build(builderContext); - resolved = true; - } catch (Exception e) { - // not a long number - } - if (!resolved) { - try { - Double.parseDouble(text); - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); - if (builder == null) { - builder = doubleField(currentFieldName); - } - mapper = builder.build(builderContext); - resolved = true; - } catch (Exception e) { - // not a long number - } - } - } - if (!resolved) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string"); - if (builder == null) { - builder = stringField(currentFieldName); - } - mapper = builder.build(builderContext); - } - } else if (token == XContentParser.Token.VALUE_NUMBER) { - XContentParser.NumberType numberType = context.parser().numberType(); - if (numberType == XContentParser.NumberType.INT) { - if (context.parser().estimatedNumberType()) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); - if (builder == null) { - builder = longField(currentFieldName); - } - mapper = builder.build(builderContext); - } else { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "integer"); - if (builder == null) { - builder = integerField(currentFieldName); - } - mapper = builder.build(builderContext); - } - } else if (numberType == XContentParser.NumberType.LONG) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); - if (builder == null) { - builder = longField(currentFieldName); - } - mapper = builder.build(builderContext); - } else if (numberType == XContentParser.NumberType.FLOAT) { - if (context.parser().estimatedNumberType()) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); - if (builder == null) { - builder = doubleField(currentFieldName); - } - mapper = builder.build(builderContext); - } else { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "float"); - if (builder == null) { - builder = floatField(currentFieldName); - } - mapper = builder.build(builderContext); - } - } else if (numberType == XContentParser.NumberType.DOUBLE) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); - if (builder == null) { - builder = doubleField(currentFieldName); - } - mapper = builder.build(builderContext); - } - } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean"); - if (builder == null) { - builder = booleanField(currentFieldName); - } - mapper = builder.build(builderContext); - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary"); - if (builder == null) { - builder = binaryField(currentFieldName); - } - mapper = builder.build(builderContext); - } else { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, null); - if (builder != null) { - mapper = builder.build(builderContext); - } else { - // TODO how do we identify dynamically that its a binary value? - throw new ElasticsearchIllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); - } - } - - mapper = MapperUtils.parseAndMergeUpdate(mapper, context); - - ObjectMapper update = null; - if (mapper != null) { - update = mappingUpdate(mapper); - } - return update; + return dynamic; } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java index 740b38cc08b..e4f5a8d7a03 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java @@ -254,11 +254,6 @@ public class RootObjectMapper extends ObjectMapper { return null; } - @Override - protected boolean allowValue() { - return true; - } - @Override protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java similarity index 98% rename from src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingIntegrationTests.java rename to src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java index 36fd5dc0348..32da43da403 100644 --- a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingIntegrationTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java @@ -16,13 +16,12 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.index.mapper.dynamic; +package org.elasticsearch.index.mapper; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import java.io.IOException; diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java similarity index 96% rename from src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java rename to src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index f4a8a59e98b..19994257d0d 100644 --- a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.index.mapper.dynamic; +package org.elasticsearch.index.mapper; import com.google.common.collect.ImmutableMap; @@ -31,15 +31,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.ContentPath; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.FieldMappers; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.mapper.StrictDynamicMappingException; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import java.io.IOException; @@ -210,7 +201,7 @@ public class DynamicMappingTests extends ElasticsearchSingleNodeTest { ctx.reset(XContentHelper.createParser(source.source()), new ParseContext.Document(), source, null); assertEquals(XContentParser.Token.START_OBJECT, ctx.parser().nextToken()); ctx.parser().nextToken(); - return mapper.root().parse(ctx); + return DocumentParser.parseObject(ctx, mapper.root()); } public void testDynamicMappingsNotNeeded() throws Exception { diff --git a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index e51f4d4fc50..dc438fc4c06 100755 --- a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -111,7 +111,7 @@ public class ExternalMapper extends AbstractFieldMapper { BooleanFieldMapper boolMapper = boolBuilder.build(context); GeoPointFieldMapper pointMapper = pointBuilder.build(context); GeoShapeFieldMapper shapeMapper = shapeBuilder.build(context); - Mapper stringMapper = stringBuilder.build(context); + FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context); context.path().remove(); context.path().pathType(origPathType); @@ -157,12 +157,12 @@ public class ExternalMapper extends AbstractFieldMapper { private final BooleanFieldMapper boolMapper; private final GeoPointFieldMapper pointMapper; private final GeoShapeFieldMapper shapeMapper; - private final Mapper stringMapper; + private final FieldMapper stringMapper; public ExternalMapper(FieldMapper.Names names, String generatedValue, String mapperName, BinaryFieldMapper binMapper, BooleanFieldMapper boolMapper, GeoPointFieldMapper pointMapper, - GeoShapeFieldMapper shapeMapper, Mapper stringMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + GeoShapeFieldMapper shapeMapper, FieldMapper stringMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(names, 1.0f, Defaults.FIELD_TYPE, false, null, null, null, null, null, indexSettings, multiFields, copyTo); this.generatedValue = generatedValue; @@ -207,9 +207,6 @@ public class ExternalMapper extends AbstractFieldMapper { stringMapper.parse(context); multiFields.parse(this, context); - if (copyTo != null) { - copyTo.parse(context); - } return null; } diff --git a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java index dd2c78fd735..16b6fe54cb0 100644 --- a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java +++ b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java @@ -38,11 +38,6 @@ public class ExternalRootMapper implements RootMapper { return CONTENT_TYPE; } - @Override - public Mapper parse(ParseContext context) throws IOException { - return null; - } - @Override public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { if (!(mergeWith instanceof ExternalRootMapper)) { diff --git a/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java index eb43416891b..7a73da835fd 100644 --- a/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java @@ -70,7 +70,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); String objectMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").endObject().endObject().string(); DocumentMapper mapper = parser.parse(objectMapping); - assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.TRUE)); + assertNull(mapper.root().dynamic()); String withDynamicMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").field("dynamic", "false").endObject().endObject().string(); DocumentMapper withDynamicMapper = parser.parse(withDynamicMapping); From bf09e58cb3927c615faa16ebd57ee2489c2b83f3 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sun, 26 Apr 2015 19:21:55 -0700 Subject: [PATCH 183/236] Mappings: Remove includes and excludes from _source Regardless of the outcome of #8142, we should at least enforce that when _source is enabled, it is sufficient to reindex. This change removes the excludes and includes settings, since these modify the source, causing us to lose the ability to reindex some fields. closes #10814 --- .../mapping/fields/source-field.asciidoc | 19 --- docs/reference/migration/migrate_2_0.asciidoc | 8 +- .../mapper/internal/SourceFieldMapper.java | 5 +- .../org/elasticsearch/get/GetActionTests.java | 8 +- .../source/DefaultSourceMappingTests.java | 124 +++++++----------- ...ava => UpdateMappingIntegrationTests.java} | 24 ++-- .../search/geo/GeoShapeIntegrationTests.java | 41 ------ .../search/innerhits/InnerHitsTests.java | 6 +- 8 files changed, 72 insertions(+), 163 deletions(-) rename src/test/java/org/elasticsearch/indices/mapping/{UpdateMappingTests.java => UpdateMappingIntegrationTests.java} (96%) diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index f09c192f57a..8f57613dbe1 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -20,22 +20,3 @@ example: } } -------------------------------------------------- - -[float] -[[include-exclude]] -==== Includes / Excludes - -Allow to specify paths in the source that would be included / excluded -when it's stored, supporting `*` as wildcard annotation. For example: - -[source,js] --------------------------------------------------- -{ - "my_type" : { - "_source" : { - "includes" : ["path1.*", "path2.*"], - "excludes" : ["path3.*"] - } - } -} --------------------------------------------------- diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index f4dc3e506ec..40290e4cb1f 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -270,7 +270,7 @@ to provide special features. They now have limited configuration options. * `_field_names` configuration is limited to disabling the field. * `_size` configuration is limited to enabling the field. -=== Boolean fields +==== Boolean fields Boolean fields used to have a string fielddata with `F` meaning `false` and `T` meaning `true`. They have been refactored to use numeric fielddata, with `0` @@ -302,10 +302,14 @@ the user-friendly representation of boolean fields: `false`/`true`: ] --------------- -=== Murmur3 Fields +==== Murmur3 Fields Fields of type `murmur3` can no longer change `doc_values` or `index` setting. They are always stored with doc values, and not indexed. +==== Source field configuration +The `_source` field no longer supports `includes` and `excludes` paramters. When +`_source` is enabled, the entire original source will be stored. + === Codecs It is no longer possible to specify per-field postings and doc values formats diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index 2ee13f20982..e1315d3e0c4 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -26,6 +26,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -166,7 +167,7 @@ public class SourceFieldMapper extends AbstractFieldMapper implements In } else if ("format".equals(fieldName)) { builder.format(nodeStringValue(fieldNode, null)); iterator.remove(); - } else if (fieldName.equals("includes")) { + } else if (fieldName.equals("includes") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { List values = (List) fieldNode; String[] includes = new String[values.size()]; for (int i = 0; i < includes.length; i++) { @@ -174,7 +175,7 @@ public class SourceFieldMapper extends AbstractFieldMapper implements In } builder.includes(includes); iterator.remove(); - } else if (fieldName.equals("excludes")) { + } else if (fieldName.equals("excludes") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { List values = (List) fieldNode; String[] excludes = new String[values.size()]; for (int i = 0; i < excludes.length; i++) { diff --git a/src/test/java/org/elasticsearch/get/GetActionTests.java b/src/test/java/org/elasticsearch/get/GetActionTests.java index 071cccd4f46..822ab88194c 100644 --- a/src/test/java/org/elasticsearch/get/GetActionTests.java +++ b/src/test/java/org/elasticsearch/get/GetActionTests.java @@ -21,11 +21,13 @@ package org.elasticsearch.get; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.Version; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.*; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Base64; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -412,7 +414,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate(index) .addMapping(type, mapping) - .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))); + .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)); client().prepareIndex(index, type, "1") .setSource(jsonBuilder().startObject().field("field", "1", "2").field("excluded", "should not be seen").endObject()) @@ -446,7 +448,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate(index) .addMapping(type, mapping) - .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))); + .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)); client().prepareIndex(index, type, "1") .setSource(jsonBuilder().startObject().field("field", "1", "2").field("included", "should be seen").endObject()) @@ -482,7 +484,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate(index) .addMapping(type, mapping) - .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))); + .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)); client().prepareIndex(index, type, "1") .setSource(jsonBuilder().startObject() diff --git a/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java index b8b0a91af38..fb50de2205d 100644 --- a/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java @@ -20,10 +20,13 @@ package org.elasticsearch.index.mapper.source; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -38,12 +41,8 @@ import java.util.Map; import static org.hamcrest.Matchers.*; -/** - * - */ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { - @Test public void testNoFormat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").endObject() @@ -65,7 +64,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE)); } - @Test public void testJsonFormat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("format", "json").endObject() @@ -87,7 +85,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); } - @Test public void testJsonFormatCompressed() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("format", "json").field("compress", true).endObject() @@ -113,18 +110,25 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON)); } - @Test - public void testIncludeExclude() throws Exception { + public void testIncludesBackcompat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("includes", new String[]{"path1*"}).endObject() - .endObject().endObject().string(); + .startObject("_source").field("includes", new String[]{"path1*"}).endObject() + .endObject().endObject().string(); - DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + try { + createIndex("testbad").mapperService().documentMapperParser().parse(mapping); + fail("includes should not be allowed"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("unsupported parameters")); + } + + Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); + DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject() - .startObject("path1").field("field1", "value1").endObject() - .startObject("path2").field("field2", "value2").endObject() - .endObject().bytes()); + .startObject("path1").field("field1", "value1").endObject() + .startObject("path2").field("field2", "value2").endObject() + .endObject().bytes()); IndexableField sourceField = doc.rootDoc().getField("_source"); Map sourceAsMap = XContentFactory.xContent(XContentType.JSON).createParser(new BytesArray(sourceField.binaryValue())).mapAndClose(); @@ -132,7 +136,32 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(sourceAsMap.containsKey("path2"), equalTo(false)); } - @Test + public void testExcludesBackcompat() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("_source").field("excludes", new String[]{"path1*"}).endObject() + .endObject().endObject().string(); + + try { + createIndex("testbad").mapperService().documentMapperParser().parse(mapping); + fail("excludes should not be allowed"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("unsupported parameters")); + } + + Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); + DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); + + ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject() + .startObject("path1").field("field1", "value1").endObject() + .startObject("path2").field("field2", "value2").endObject() + .endObject().bytes()); + + IndexableField sourceField = doc.rootDoc().getField("_source"); + Map sourceAsMap = XContentFactory.xContent(XContentType.JSON).createParser(new BytesArray(sourceField.binaryValue())).mapAndClose(); + assertThat(sourceAsMap.containsKey("path1"), equalTo(false)); + assertThat(sourceAsMap.containsKey("path2"), equalTo(true)); + } + public void testDefaultMappingAndNoMapping() throws Exception { String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) .startObject("_source").field("enabled", false).endObject() @@ -161,7 +190,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { } } - @Test public void testDefaultMappingAndWithMappingOverride() throws Exception { String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) .startObject("_source").field("enabled", false).endObject() @@ -176,7 +204,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(mapper.sourceMapper().enabled(), equalTo(true)); } - @Test public void testDefaultMappingAndNoMappingWithMapperService() throws Exception { String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) .startObject("_source").field("enabled", false).endObject() @@ -190,7 +217,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(mapper.sourceMapper().enabled(), equalTo(false)); } - @Test public void testDefaultMappingAndWithMappingOverrideWithMapperService() throws Exception { String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) .startObject("_source").field("enabled", false).endObject() @@ -208,66 +234,4 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(mapper.type(), equalTo("my_type")); assertThat(mapper.sourceMapper().enabled(), equalTo(true)); } - - @Test - public void testParsingWithDefaultAppliedAndNotApplied() throws Exception { - String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) - .startObject("_source").array("includes", "default_field_path.").endObject() - .endObject().endObject().string(); - - MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true); - - String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type") - .startObject("_source").array("includes", "custom_field_path.").endObject() - .endObject().endObject().string(); - mapperService.merge("my_type", new CompressedString(mapping), true); - DocumentMapper mapper = mapperService.documentMapper("my_type"); - assertThat(mapper.type(), equalTo("my_type")); - assertThat(mapper.sourceMapper().includes().length, equalTo(2)); - assertThat(mapper.sourceMapper().includes(), hasItemInArray("default_field_path.")); - assertThat(mapper.sourceMapper().includes(), hasItemInArray("custom_field_path.")); - - mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type") - .startObject("properties").startObject("text").field("type", "string").endObject().endObject() - .endObject().endObject().string(); - mapperService.merge("my_type", new CompressedString(mapping), false); - mapper = mapperService.documentMapper("my_type"); - assertThat(mapper.type(), equalTo("my_type")); - assertThat(mapper.sourceMapper().includes(), hasItemInArray("default_field_path.")); - assertThat(mapper.sourceMapper().includes(), hasItemInArray("custom_field_path.")); - assertThat(mapper.sourceMapper().includes().length, equalTo(2)); - } - - public void testDefaultNotAppliedOnUpdate() throws Exception { - XContentBuilder defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) - .startObject("_source").array("includes", "default_field_path.").endObject() - .endObject().endObject(); - - IndexService indexService = createIndex("test", ImmutableSettings.EMPTY, MapperService.DEFAULT_MAPPING, defaultMapping); - - String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type") - .startObject("_source").array("includes", "custom_field_path.").endObject() - .endObject().endObject().string(); - client().admin().indices().preparePutMapping("test").setType("my_type").setSource(mapping).get(); - - DocumentMapper mapper = indexService.mapperService().documentMapper("my_type"); - assertThat(mapper.type(), equalTo("my_type")); - assertThat(mapper.sourceMapper().includes().length, equalTo(2)); - List includes = Arrays.asList(mapper.sourceMapper().includes()); - assertThat("default_field_path.", isIn(includes)); - assertThat("custom_field_path.", isIn(includes)); - - mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type") - .startObject("properties").startObject("text").field("type", "string").endObject().endObject() - .endObject().endObject().string(); - client().admin().indices().preparePutMapping("test").setType("my_type").setSource(mapping).get(); - - mapper = indexService.mapperService().documentMapper("my_type"); - assertThat(mapper.type(), equalTo("my_type")); - includes = Arrays.asList(mapper.sourceMapper().includes()); - assertThat("default_field_path.", isIn(includes)); - assertThat("custom_field_path.", isIn(includes)); - assertThat(mapper.sourceMapper().includes().length, equalTo(2)); - } } diff --git a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java similarity index 96% rename from src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java rename to src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java index 2a82d92efa0..11638c74660 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.mapping; import com.google.common.collect.Lists; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; @@ -28,6 +29,7 @@ import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -54,7 +56,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.*; @ClusterScope(randomDynamicTemplates = false) -public class UpdateMappingTests extends ElasticsearchIntegrationTest { +public class UpdateMappingIntegrationTests extends ElasticsearchIntegrationTest { @Test public void dynamicUpdates() throws Exception { @@ -213,13 +215,13 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { @SuppressWarnings("unchecked") @Test - public void updateIncludeExclude() throws Exception { - assertAcked(prepareCreate("test").addMapping("type", - jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("normal").field("type", "long").endObject() - .startObject("exclude").field("type", "long").endObject() - .startObject("include").field("type", "long").endObject() - .endObject().endObject().endObject())); + public void updateIncludeExcludeBackcompat() throws Exception { + assertAcked(prepareCreate("test").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) + .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("normal").field("type", "long").endObject() + .startObject("exclude").field("type", "long").endObject() + .startObject("include").field("type", "long").endObject() + .endObject().endObject().endObject())); ensureGreen(); // make sure that replicas are initialized so the refresh command will work them too logger.info("Index doc"); @@ -229,7 +231,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { ); refresh(); // commit it for later testing. - logger.info("Adding exclude settings"); PutMappingResponse putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource( JsonXContent.contentBuilder().startObject().startObject("type") @@ -259,7 +260,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { assertThat(getResponse.getSource(), not(hasKey("exclude"))); assertThat(getResponse.getSource(), hasKey("include")); - logger.info("Changing mapping to includes"); putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource( JsonXContent.contentBuilder().startObject().startObject("type") @@ -278,7 +278,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { assertThat((Map) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes")); assertThat((ArrayList) ((Map) typeMapping.getSourceAsMap().get("_source")).get("excludes"), emptyIterable()); - logger.info("Indexing doc yet again"); index("test", "type", "1", JsonXContent.contentBuilder().startObject() .field("normal", 3).field("exclude", 3).field("include", 3) @@ -290,7 +289,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { assertThat(getResponse.getSource(), not(hasKey("exclude"))); assertThat(getResponse.getSource(), hasKey("include")); - logger.info("Adding excludes, but keep includes"); putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource( JsonXContent.contentBuilder().startObject().startObject("type") @@ -308,8 +306,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { assertThat((Map) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes")); ArrayList excludes = (ArrayList) ((Map) typeMapping.getSourceAsMap().get("_source")).get("excludes"); assertThat(excludes, contains("*.excludes")); - - } @SuppressWarnings("unchecked") diff --git a/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java b/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java index d663b95e92b..4cec4c431f6 100644 --- a/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java +++ b/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java @@ -346,47 +346,6 @@ public class GeoShapeIntegrationTests extends ElasticsearchIntegrationTest { assertHitCount(result, 1); } - @Test // Issue 2944 - public void testThatShapeIsReturnedEvenWhenExclusionsAreSet() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") - .startObject("properties").startObject("location") - .field("type", "geo_shape") - .endObject().endObject() - .startObject("_source") - .startArray("excludes").value("nonExistingField").endArray() - .endObject() - .endObject().endObject() - .string(); - assertAcked(prepareCreate("test").addMapping("type1", mapping)); - ensureGreen(); - - indexRandom(true, - client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject() - .field("name", "Document 1") - .startObject("location") - .field("type", "envelope") - .startArray("coordinates").startArray().value(-45.0).value(45).endArray().startArray().value(45).value(-45).endArray().endArray() - .endObject() - .endObject())); - - SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(searchResponse.getHits().totalHits(), equalTo(1L)); - - Map indexedMap = searchResponse.getHits().getAt(0).sourceAsMap(); - assertThat(indexedMap.get("location"), instanceOf(Map.class)); - Map locationMap = (Map) indexedMap.get("location"); - assertThat(locationMap.get("coordinates"), instanceOf(List.class)); - List> coordinates = (List>) locationMap.get("coordinates"); - assertThat(coordinates.size(), equalTo(2)); - assertThat(coordinates.get(0).size(), equalTo(2)); - assertThat(coordinates.get(0).get(0).doubleValue(), equalTo(-45.0)); - assertThat(coordinates.get(0).get(1).doubleValue(), equalTo(45.0)); - assertThat(coordinates.get(1).size(), equalTo(2)); - assertThat(coordinates.get(1).get(0).doubleValue(), equalTo(45.0)); - assertThat(coordinates.get(1).get(1).doubleValue(), equalTo(-45.0)); - assertThat(locationMap.size(), equalTo(2)); - } - @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9904") @Test public void testShapeFilterWithRandomGeoCollection() throws Exception { diff --git a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java index 8a15549f0af..428d76880b3 100644 --- a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java +++ b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java @@ -19,9 +19,11 @@ package org.elasticsearch.search.innerhits; +import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.support.QueryInnerHitBuilder; @@ -772,7 +774,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { @Test public void testNestedInnerHitsWithExcludeSource() throws Exception { - assertAcked(prepareCreate("articles") + assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) .addMapping("article", jsonBuilder().startObject() .startObject("_source").field("excludes", new String[]{"comments"}).endObject() .startObject("properties") @@ -810,7 +812,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { @Test public void testNestedInnerHitsHiglightWithExcludeSource() throws Exception { - assertAcked(prepareCreate("articles") + assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) .addMapping("article", jsonBuilder().startObject() .startObject("_source").field("excludes", new String[]{"comments"}).endObject() .startObject("properties") From f18f62387818ea53dd446120d454fae370fa795c Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 29 Apr 2015 09:33:20 +0200 Subject: [PATCH 184/236] [TEST] Remove searchers from tracking map once they are closed --- .../test/engine/AssertingSearcher.java | 20 ++----- .../test/engine/MockEngineSupport.java | 60 +++++++++++++++---- 2 files changed, 53 insertions(+), 27 deletions(-) diff --git a/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java b/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java index e88881faae9..fec406a7841 100644 --- a/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java +++ b/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java @@ -21,15 +21,10 @@ package org.elasticsearch.test.engine; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; -import java.io.Closeable; -import java.io.IOException; -import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; /** @@ -57,15 +52,6 @@ class AssertingSearcher extends Engine.Searcher { initialRefCount = wrappedSearcher.reader().getRefCount(); this.indexSearcher = indexSearcher; assert initialRefCount > 0 : "IndexReader#getRefCount() was [" + initialRefCount + "] expected a value > [0] - reader is already closed"; - final RuntimeException ex = new RuntimeException("Unreleased Searcher, source [" + wrappedSearcher.source() + "]"); - LuceneTestCase.closeAfterSuite(new Closeable() { - @Override - public void close() throws IOException { - if (closed.get() == false) { - throw ex; - } - } - }); } @Override @@ -74,7 +60,7 @@ class AssertingSearcher extends Engine.Searcher { } @Override - public void close() throws ElasticsearchException { + public void close() { synchronized (lock) { if (closed.compareAndSet(false, true)) { firstReleaseStack = new RuntimeException(); @@ -109,4 +95,8 @@ class AssertingSearcher extends Engine.Searcher { public ShardId shardId() { return shardId; } + + public boolean isOpen() { + return closed.get() == false; + } } diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java index 47c9472d2d6..e0e1d2db4ea 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.AssertingIndexSearcher; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SearcherManager; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; @@ -32,17 +33,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; -import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ElasticsearchIntegrationTest; import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Constructor; -import java.util.Map; +import java.util.IdentityHashMap; import java.util.Random; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; /** @@ -54,15 +52,18 @@ public final class MockEngineSupport { public static final String WRAP_READER_RATIO = "index.engine.mock.random.wrap_reader_ratio"; public static final String READER_WRAPPER_TYPE = "index.engine.mock.random.wrapper"; public static final String FLUSH_ON_CLOSE_RATIO = "index.engine.mock.flush_on_close.ratio"; + private final AtomicBoolean closing = new AtomicBoolean(false); private final ESLogger logger = Loggers.getLogger(Engine.class); private final ShardId shardId; + private final SearcherCloseable searcherCloseable; + private final MockContext mockContext; public static class MockContext { - public final Random random; - public final boolean wrapReader; - public final Class wrapper; - public final Settings indexSettings; + private final Random random; + private final boolean wrapReader; + private final Class wrapper; + private final Settings indexSettings; private final double flushOnClose; public MockContext(Random random, boolean wrapReader, Class wrapper, Settings indexSettings) { @@ -74,9 +75,6 @@ public final class MockEngineSupport { } } - - private final MockContext mockContext; - public MockEngineSupport(EngineConfig config) { Settings indexSettings = config.getIndexSettings(); shardId = config.getShardId(); @@ -89,6 +87,8 @@ public final class MockEngineSupport { logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), config.getShardId(), seed, wrapReader); } mockContext = new MockContext(random, wrapReader, wrapper, indexSettings); + this.searcherCloseable = new SearcherCloseable(); + LuceneTestCase.closeAfterSuite(searcherCloseable); // only one suite closeable per Engine } enum CloseAction { @@ -176,8 +176,44 @@ public final class MockEngineSupport { // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here - return new AssertingSearcher(assertingIndexSearcher, engineSearcher, shardId, logger); + AssertingSearcher assertingSearcher = new AssertingSearcher(assertingIndexSearcher, engineSearcher, shardId, logger) { + @Override + public void close() { + try { + searcherCloseable.remove(this); + } finally { + super.close(); + } + } + }; + searcherCloseable.add(assertingSearcher, engineSearcher.source()); + return assertingSearcher; } + private static final class SearcherCloseable implements Closeable { + private final IdentityHashMap openSearchers = new IdentityHashMap<>(); + + @Override + public synchronized void close() throws IOException { + if (openSearchers.isEmpty() == false) { + AssertionError error = new AssertionError("Unreleased searchers found"); + for (RuntimeException ex : openSearchers.values()) { + error.addSuppressed(ex); + } + throw error; + } + } + + void add(AssertingSearcher searcher, String source) { + final RuntimeException ex = new RuntimeException("Unreleased Searcher, source [" + source+ "]"); + synchronized (this) { + openSearchers.put(searcher, ex); + } + } + + synchronized void remove(AssertingSearcher searcher) { + openSearchers.remove(searcher); + } + } } From ab11be55a2276e66226cd84b0de65d0696a4fb94 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Wed, 29 Apr 2015 04:39:41 -0400 Subject: [PATCH 185/236] add another delete-by-query deprecation --- .../index/engine/DeleteByQueryFailedEngineException.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java index 28b57701349..d555cbc1a43 100644 --- a/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java +++ b/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java @@ -21,6 +21,8 @@ package org.elasticsearch.index.engine; import org.elasticsearch.index.shard.ShardId; +/** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */ +@Deprecated public class DeleteByQueryFailedEngineException extends EngineException { public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) { From 4f14af21c5b9213e4b1add84d6d880eb20451e7d Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 28 Apr 2015 22:41:15 +0200 Subject: [PATCH 186/236] Remove ElasticsearchIAE and ElasticsearchISE This commit removes ElasticsearchIAE and ElasticsearchISE in favor of the JDKs IAE and ISE. Closes #10794 --- .../lucene/analysis/PrefixAnalyzer.java | 4 +- .../lucene/store/StoreRateLimiting.java | 8 +-- ...ElasticsearchIllegalArgumentException.java | 45 ------------- .../ElasticsearchIllegalStateException.java | 38 ----------- src/main/java/org/elasticsearch/Version.java | 4 +- .../elasticsearch/action/ActionFuture.java | 10 +-- .../ActionRequestValidationException.java | 8 ++- .../elasticsearch/action/ThreadingModel.java | 4 +- .../action/WriteConsistencyLevel.java | 6 +- .../cluster/health/ClusterHealthStatus.java | 4 +- .../health/TransportClusterHealthAction.java | 4 +- .../put/PutRepositoryRequest.java | 12 ++-- .../shards/ClusterSearchShardsRequest.java | 6 +- .../create/CreateSnapshotRequest.java | 12 ++-- .../restore/RestoreSnapshotRequest.java | 24 +++---- .../status/SnapshotIndexShardStage.java | 4 +- .../status/SnapshotIndexShardStatus.java | 4 +- .../snapshots/status/SnapshotShardsStats.java | 4 +- .../TransportSnapshotsStatusAction.java | 6 +- .../action/admin/indices/alias/Alias.java | 4 +- .../analyze/TransportAnalyzeAction.java | 27 ++++---- .../indices/create/CreateIndexRequest.java | 4 +- .../admin/indices/get/GetIndexRequest.java | 12 ++-- .../indices/get/TransportGetIndexAction.java | 4 +- .../mapping/put/PutMappingRequest.java | 10 +-- .../template/put/PutIndexTemplateRequest.java | 16 ++--- .../action/bulk/BulkProcessor.java | 4 +- .../action/bulk/BulkRequest.java | 6 +- .../action/bulk/TransportShardBulkAction.java | 8 +-- .../action/count/CountRequest.java | 4 +- .../action/delete/TransportDeleteAction.java | 4 +- .../TransportFieldStatsTransportAction.java | 8 +-- .../action/get/MultiGetRequest.java | 8 +-- .../action/index/IndexRequest.java | 16 ++--- .../action/mlt/MoreLikeThisRequest.java | 4 +- .../mlt/MoreLikeThisRequestBuilder.java | 4 +- .../mlt/TransportMoreLikeThisAction.java | 4 +- .../percolate/MultiPercolateRequest.java | 7 +- .../action/search/MultiSearchRequest.java | 7 +- .../action/search/SearchRequest.java | 8 +-- .../action/search/SearchRequestBuilder.java | 4 +- .../action/search/SearchType.java | 8 +-- .../action/search/TransportSearchAction.java | 4 +- .../search/TransportSearchScrollAction.java | 4 +- .../search/type/TransportSearchHelper.java | 14 ++-- .../type/TransportSearchTypeAction.java | 4 +- .../suggest/TransportSuggestAction.java | 4 +- .../action/support/AdapterActionFuture.java | 12 ++-- .../action/support/DestructiveOperations.java | 8 +-- .../action/support/IndicesOptions.java | 8 +-- .../termvectors/MultiTermVectorsRequest.java | 6 +- .../termvectors/TermVectorsResponse.java | 4 +- .../action/update/TransportUpdateAction.java | 4 +- .../action/update/UpdateHelper.java | 6 +- .../cache/recycler/PageCacheRecycler.java | 4 +- .../TransportClientNodesService.java | 6 +- .../elasticsearch/cluster/ClusterService.java | 6 +- .../elasticsearch/cluster/ClusterState.java | 8 +-- .../action/index/MappingUpdatedAction.java | 4 +- .../cluster/block/ClusterBlockLevel.java | 4 +- .../cluster/metadata/AliasAction.java | 4 +- .../cluster/metadata/AliasValidator.java | 26 ++++---- .../cluster/metadata/IndexMetaData.java | 14 ++-- .../cluster/metadata/MappingMetaData.java | 4 +- .../cluster/metadata/MetaData.java | 30 ++++----- .../metadata/MetaDataCreateIndexService.java | 14 ++-- .../metadata/MetaDataIndexStateService.java | 6 +- .../MetaDataIndexTemplateService.java | 6 +- .../MetaDataUpdateSettingsService.java | 8 +-- .../cluster/metadata/RestoreMetaData.java | 4 +- .../cluster/metadata/SnapshotMetaData.java | 4 +- .../cluster/node/DiscoveryNode.java | 4 +- .../cluster/node/DiscoveryNodes.java | 8 +-- .../cluster/routing/IndexRoutingTable.java | 6 +- .../cluster/routing/OperationRouting.java | 8 +-- .../cluster/routing/Preference.java | 4 +- .../cluster/routing/RoutingNode.java | 4 +- .../cluster/routing/ShardRoutingState.java | 4 +- .../routing/allocation/AllocationService.java | 5 +- .../allocator/BalancedShardsAllocator.java | 6 +- .../command/AllocateAllocationCommand.java | 14 ++-- .../command/AllocationCommands.java | 6 +- .../command/CancelAllocationCommand.java | 8 +-- .../command/MoveAllocationCommand.java | 8 +-- .../ClusterRebalanceAllocationDecider.java | 12 ++-- .../routing/allocation/decider/Decision.java | 6 +- .../decider/EnableAllocationDecider.java | 12 ++-- .../service/InternalClusterService.java | 10 +-- .../org/elasticsearch/common/Booleans.java | 4 +- .../org/elasticsearch/common/ParseField.java | 4 +- .../org/elasticsearch/common/PidFile.java | 6 +- .../elasticsearch/common/Preconditions.java | 28 ++++---- .../org/elasticsearch/common/Priority.java | 4 +- .../common/RandomBasedUUIDGenerator.java | 4 +- .../java/org/elasticsearch/common/Table.java | 14 ++-- .../common/TimeBasedUUIDGenerator.java | 6 +- .../common/breaker/CircuitBreaker.java | 4 +- .../common/bytes/BytesArray.java | 4 +- .../common/bytes/PagedBytesReference.java | 4 +- .../org/elasticsearch/common/cli/CliTool.java | 4 +- .../common/collect/HppcMaps.java | 6 +- .../common/component/Lifecycle.java | 34 +++++----- .../elasticsearch/common/geo/GeoDistance.java | 4 +- .../common/geo/GeoHashUtils.java | 6 +- .../common/geo/builders/ShapeBuilder.java | 6 +- .../elasticsearch/common/lucene/Lucene.java | 18 +++--- .../common/lucene/all/AllEntries.java | 4 +- .../common/lucene/docset/DocIdSets.java | 8 +-- .../lucene/index/FilterableTermsEnum.java | 4 +- .../search/function/WeightFactorFunction.java | 4 +- .../common/recycler/DequeRecycler.java | 4 +- .../common/recycler/NoneRecycler.java | 4 +- .../common/recycler/Recyclers.java | 6 +- .../org/elasticsearch/common/regex/Regex.java | 4 +- .../common/rounding/TimeZoneRounding.java | 6 +- .../common/settings/ImmutableSettings.java | 8 +-- .../TransportAddressSerializers.java | 4 +- .../common/unit/ByteSizeValue.java | 6 +- .../common/unit/DistanceUnit.java | 8 +-- .../elasticsearch/common/unit/Fuzziness.java | 6 +- .../common/util/BloomFilter.java | 4 +- .../common/util/LocaleUtils.java | 4 +- .../common/util/MultiDataPathUpgrader.java | 4 +- .../common/util/concurrent/CountDown.java | 4 +- .../common/util/concurrent/EsAbortPolicy.java | 6 +- .../util/concurrent/EsThreadPoolExecutor.java | 5 +- .../common/util/concurrent/KeyedLock.java | 8 +-- .../util/concurrent/SizeBlockingQueue.java | 6 +- .../UncategorizedExecutionException.java | 4 -- .../common/xcontent/XContentFactory.java | 6 +- .../xcontent/json/JsonXContentParser.java | 6 +- .../discovery/DiscoverySettings.java | 4 +- .../discovery/local/LocalDiscovery.java | 6 +- .../discovery/zen/NotMasterException.java | 12 +--- .../discovery/zen/ZenDiscovery.java | 16 ++--- .../zen/fd/MasterFaultDetection.java | 8 +-- .../discovery/zen/fd/NodesFaultDetection.java | 6 +- .../discovery/zen/ping/ZenPingService.java | 4 +- .../zen/ping/multicast/MulticastZenPing.java | 6 +- .../zen/ping/unicast/UnicastZenPing.java | 8 +-- .../elasticsearch/env/NodeEnvironment.java | 20 +++--- .../gateway/GatewayMetaState.java | 8 +-- .../gateway/MetaDataStateFormat.java | 4 +- .../elasticsearch/index/IndexException.java | 2 +- .../org/elasticsearch/index/IndexService.java | 4 +- .../org/elasticsearch/index/VersionType.java | 6 +- .../index/analysis/Analysis.java | 16 ++--- .../index/analysis/AnalysisModule.java | 18 +++--- .../index/analysis/AnalysisService.java | 6 +- .../analysis/ChineseAnalyzerProvider.java | 2 - .../CommonGramsTokenFilterFactory.java | 4 +- .../analysis/EdgeNGramTokenizerFactory.java | 8 +-- .../analysis/HunspellTokenFilterFactory.java | 6 +- .../analysis/KeepTypesFilterFactory.java | 4 +- .../index/analysis/KeepWordFilterFactory.java | 6 +- .../KeywordMarkerTokenFilterFactory.java | 4 +- .../analysis/LengthTokenFilterFactory.java | 4 +- .../analysis/LowerCaseTokenFilterFactory.java | 4 +- .../analysis/MappingCharFilterFactory.java | 4 +- .../index/analysis/NGramTokenizerFactory.java | 7 +- .../PathHierarchyTokenizerFactory.java | 6 +- .../analysis/PatternAnalyzerProvider.java | 4 +- ...PatternCaptureGroupTokenFilterFactory.java | 4 +- .../PatternReplaceCharFilterFactory.java | 4 +- .../PatternReplaceTokenFilterFactory.java | 4 +- .../analysis/PatternTokenizerFactory.java | 5 +- .../StemmerOverrideTokenFilterFactory.java | 4 +- .../analysis/StopTokenFilterFactory.java | 4 +- .../analysis/SynonymTokenFilterFactory.java | 9 ++- .../analysis/TrimTokenFilterFactory.java | 4 +- .../analysis/TruncateTokenFilterFactory.java | 4 +- ...bstractCompoundWordTokenFilterFactory.java | 4 +- ...enationCompoundWordTokenFilterFactory.java | 8 +-- .../index/codec/CodecService.java | 6 +- .../elasticsearch/index/engine/Engine.java | 6 +- .../index/engine/EngineSearcher.java | 7 +- .../fielddata/IndexFieldDataService.java | 8 +-- .../fielddata/IndexNumericFieldData.java | 4 +- .../plain/AbstractIndexGeoPointFieldData.java | 4 +- .../plain/BinaryDVAtomicFieldData.java | 4 +- .../plain/BinaryDVNumericIndexFieldData.java | 8 +-- .../plain/BytesBinaryDVIndexFieldData.java | 8 +-- .../plain/DisabledIndexFieldData.java | 8 +-- .../plain/DocValuesIndexFieldData.java | 4 +- .../plain/GeoPointBinaryDVIndexFieldData.java | 8 +-- .../plain/NumericDVIndexFieldData.java | 4 +- .../plain/ParentChildIndexFieldData.java | 4 +- .../plain/SortedNumericDVIndexFieldData.java | 8 +-- .../SortedSetDVBytesAtomicFieldData.java | 4 +- .../index/get/ShardGetService.java | 6 +- .../index/mapper/DocumentMapper.java | 4 +- .../index/mapper/DocumentParser.java | 5 +- .../index/mapper/MapperService.java | 4 +- .../index/mapper/ParseContext.java | 10 +-- .../mapper/core/AbstractFieldMapper.java | 4 +- .../index/mapper/core/BooleanFieldMapper.java | 4 +- .../index/mapper/core/ByteFieldMapper.java | 4 +- .../mapper/core/CompletionFieldMapper.java | 20 +++--- .../index/mapper/core/DateFieldMapper.java | 4 +- .../index/mapper/core/DoubleFieldMapper.java | 4 +- .../index/mapper/core/FloatFieldMapper.java | 4 +- .../index/mapper/core/IntegerFieldMapper.java | 4 +- .../index/mapper/core/LongFieldMapper.java | 4 +- .../index/mapper/core/NumberFieldMapper.java | 4 +- .../index/mapper/core/ShortFieldMapper.java | 4 +- .../index/mapper/core/StringFieldMapper.java | 4 +- .../index/mapper/geo/GeoPointFieldMapper.java | 10 +-- .../index/mapper/geo/GeoShapeFieldMapper.java | 6 +- .../internal/FieldNamesFieldMapper.java | 4 +- .../index/mapper/ip/IpFieldMapper.java | 16 ++--- .../index/mapper/object/DynamicTemplate.java | 4 +- .../index/mapper/object/ObjectMapper.java | 2 +- .../policy/AbstractMergePolicyProvider.java | 6 +- .../index/query/BoostingQueryBuilder.java | 8 +-- .../index/query/CommonTermsQueryBuilder.java | 6 +- .../index/query/CommonTermsQueryParser.java | 4 +- .../query/GeoBoundingBoxFilterBuilder.java | 10 +-- .../index/query/GeoShapeQueryParser.java | 4 +- .../index/query/GeohashCellFilter.java | 4 +- .../index/query/MoreLikeThisQueryBuilder.java | 4 +- .../index/query/MoreLikeThisQueryParser.java | 10 +-- .../elasticsearch/index/query/RegexpFlag.java | 4 +- .../index/query/ScriptFilterParser.java | 4 +- .../index/query/SimpleQueryStringFlag.java | 4 +- .../index/query/SpanNearQueryBuilder.java | 6 +- .../index/query/SpanNotQueryBuilder.java | 8 +-- .../index/query/SpanOrQueryBuilder.java | 4 +- .../index/query/TemplateQueryBuilder.java | 4 +- .../functionscore/DecayFunctionBuilder.java | 4 +- .../functionscore/DecayFunctionParser.java | 8 +-- .../FunctionScoreQueryBuilder.java | 9 ++- .../functionscore/factor/FactorBuilder.java | 4 +- .../support/InnerHitsQueryParserHelper.java | 8 +-- .../index/query/support/QueryParsers.java | 12 +--- .../index/search/MatchQuery.java | 8 +-- .../index/search/MultiMatchQuery.java | 4 +- .../index/search/child/ScoreType.java | 4 +- .../index/search/child/TopChildrenQuery.java | 4 +- .../index/search/geo/GeoDistanceFilter.java | 4 +- .../search/geo/GeoDistanceRangeFilter.java | 4 +- .../geo/IndexedGeoBoundingBoxFilter.java | 4 +- .../index/search/shape/ShapeFetchService.java | 8 +-- .../elasticsearch/index/shard/IndexShard.java | 8 +-- .../index/shard/IndexShardState.java | 6 +- .../index/shard/ShadowIndexShard.java | 4 +- .../elasticsearch/index/shard/ShardPath.java | 6 +- .../shard/TranslogRecoveryPerformer.java | 5 +- .../AbstractSimilarityProvider.java | 4 +- .../similarity/DFRSimilarityProvider.java | 6 +- .../similarity/IBSimilarityProvider.java | 6 +- .../index/similarity/SimilarityModule.java | 4 +- .../org/elasticsearch/index/store/Store.java | 10 +-- .../index/translog/Translog.java | 10 +-- .../index/translog/fs/FsTranslogFile.java | 6 +- .../elasticsearch/indices/IndicesService.java | 18 +++--- .../HierarchyCircuitBreakerService.java | 8 +-- .../cache/filter/IndicesFilterCache.java | 8 +-- .../cache/query/IndicesQueryCache.java | 4 +- .../cluster/IndicesClusterStateService.java | 6 +- .../cache/IndicesFieldDataCache.java | 4 +- .../indices/recovery/RecoveryState.java | 16 ++--- .../SharedFSRecoverySourceHandler.java | 4 +- .../elasticsearch/monitor/jvm/HotThreads.java | 4 +- .../java/org/elasticsearch/node/Node.java | 4 +- .../percolator/PercolatorService.java | 8 +-- .../elasticsearch/plugins/PluginManager.java | 16 ++--- .../elasticsearch/plugins/PluginsService.java | 6 +- .../repositories/RepositoriesService.java | 7 +- .../blobstore/BlobStoreRepository.java | 1 - .../elasticsearch/rest/BaseRestHandler.java | 6 -- .../elasticsearch/rest/RestController.java | 8 +-- .../org/elasticsearch/rest/RestRequest.java | 8 +-- .../alias/RestIndicesAliasesAction.java | 6 +- .../alias/put/RestIndexPutAliasAction.java | 4 +- .../indices/analyze/RestAnalyzeAction.java | 16 ++--- .../indices/get/RestGetIndicesAction.java | 4 +- .../rest/action/count/RestCountAction.java | 4 +- .../action/explain/RestExplainAction.java | 5 +- .../rest/action/index/RestIndexAction.java | 4 +- .../script/RestPutIndexedScriptAction.java | 4 +- .../action/search/RestClearScrollAction.java | 13 ++-- .../rest/action/search/RestSearchAction.java | 6 +- .../action/search/RestSearchScrollAction.java | 11 ++-- .../action/suggest/RestSuggestAction.java | 4 +- .../rest/action/support/RestActions.java | 4 +- .../script/NativeScriptEngineService.java | 4 +- .../java/org/elasticsearch/script/Script.java | 6 +- .../elasticsearch/script/ScriptContext.java | 6 +- .../script/ScriptContextRegistry.java | 8 +-- .../org/elasticsearch/script/ScriptMode.java | 4 +- .../org/elasticsearch/script/ScriptModes.java | 4 +- .../elasticsearch/script/ScriptModule.java | 4 +- .../elasticsearch/script/ScriptService.java | 48 +++++++------- .../script/expression/ExpressionScript.java | 4 +- .../elasticsearch/search/MultiValueMode.java | 10 +-- .../elasticsearch/search/SearchService.java | 4 +- .../aggregations/AggregatorFactories.java | 4 +- .../aggregations/InternalAggregations.java | 4 +- .../InternalMultiBucketAggregation.java | 8 +-- .../bucket/BestBucketsDeferringCollector.java | 14 ++-- .../bucket/BestDocsDeferringCollector.java | 6 +- .../bucket/DeferringBucketCollector.java | 8 +-- .../InternalSingleBucketAggregation.java | 6 +- .../children/ParentToChildrenAggregator.java | 4 +- .../bucket/geogrid/GeoHashGridBuilder.java | 4 +- .../bucket/global/GlobalAggregator.java | 4 +- .../bucket/histogram/InternalHistogram.java | 6 +- ...DiversifiedBytesHashSamplerAggregator.java | 4 +- .../DiversifiedMapSamplerAggregator.java | 4 +- .../DiversifiedNumericSamplerAggregator.java | 4 +- .../DiversifiedOrdinalsSamplerAggregator.java | 4 +- .../bucket/sampler/SamplerAggregator.java | 4 +- .../SignificantTermsAggregatorFactory.java | 5 +- .../significant/SignificantTermsBuilder.java | 14 ++-- .../significant/heuristics/JLHScore.java | 1 - .../heuristics/NXYSignificanceHeuristic.java | 8 +-- .../heuristics/SignificanceHeuristic.java | 8 +-- .../bucket/terms/InternalTerms.java | 4 +- .../bucket/terms/TermsAggregatorFactory.java | 5 +- .../bucket/terms/TermsBuilder.java | 18 +++--- .../bucket/terms/support/IncludeExclude.java | 4 +- .../InternalNumericMetricsAggregation.java | 6 +- .../metrics/geobounds/InternalGeoBounds.java | 10 +-- .../AbstractInternalPercentiles.java | 4 +- .../scripted/InternalScriptedMetric.java | 4 +- .../metrics/stats/StatsAggegator.java | 4 +- .../extended/ExtendedStatsAggregator.java | 4 +- .../metrics/tophits/InternalTopHits.java | 4 +- .../aggregations/support/AggregationPath.java | 14 ++-- .../support/format/ValueFormatterStreams.java | 4 +- .../search/builder/SearchSourceBuilder.java | 4 +- .../search/fetch/FetchPhase.java | 8 +-- .../FieldDataFieldsParseElement.java | 4 +- .../innerhits/InnerHitsParseElement.java | 28 ++++---- .../script/ScriptFieldsFetchSubPhase.java | 4 +- .../highlight/FastVectorHighlighter.java | 4 +- .../search/highlight/HighlightPhase.java | 7 +- .../highlight/HighlighterParseElement.java | 10 +-- .../search/highlight/PlainHighlighter.java | 4 +- .../search/highlight/PostingsHighlighter.java | 4 +- .../search/lookup/LeafDocLookup.java | 4 +- .../search/lookup/LeafFieldsLookup.java | 4 +- .../query/TerminateAfterParseElement.java | 4 +- .../search/rescore/QueryRescorer.java | 10 +-- .../search/rescore/RescoreParseElement.java | 6 +- .../search/sort/FieldSortBuilder.java | 4 +- .../search/sort/GeoDistanceSortParser.java | 6 +- .../search/sort/SortParseElement.java | 12 ++-- .../elasticsearch/search/suggest/Suggest.java | 8 +-- .../search/suggest/SuggestBuilder.java | 6 +- .../search/suggest/SuggestParseElement.java | 10 +-- .../search/suggest/SuggestUtils.java | 18 +++--- .../suggest/SuggestionSearchContext.java | 6 +- .../Completion090PostingsFormat.java | 4 +- .../completion/CompletionSuggestParser.java | 10 +-- .../phrase/DirectCandidateGenerator.java | 4 +- .../suggest/phrase/PhraseSuggestParser.java | 64 +++++++++---------- .../phrase/PhraseSuggestionBuilder.java | 8 +-- .../phrase/PhraseSuggestionContext.java | 4 +- .../search/suggest/phrase/WordScorer.java | 4 +- .../suggest/term/TermSuggestParser.java | 6 +- .../snapshots/SnapshotState.java | 4 +- .../elasticsearch/threadpool/ThreadPool.java | 8 +-- .../transport/PlainTransportFuture.java | 6 +- .../transport/RequestHandlerRegistry.java | 6 +- .../transport/TransportRequestOptions.java | 4 +- .../transport/TransportService.java | 4 +- .../netty/MessageChannelHandler.java | 4 +- .../transport/netty/NettyTransport.java | 12 ++-- .../watcher/ResourceWatcherService.java | 6 +- .../ElasticsearchExceptionTests.java | 15 +++-- .../java/org/elasticsearch/VersionTests.java | 2 +- .../indices/create/CreateIndexTests.java | 14 ++-- .../action/index/IndexRequestTests.java | 4 +- .../aliases/IndexAliasesTests.java | 22 +++---- .../BasicBackwardsCompatibilityTest.java | 4 +- .../UpdateSettingsValidationTests.java | 4 +- .../cluster/metadata/MetaDataTests.java | 8 +-- .../allocation/AllocationCommandsTests.java | 16 ++--- .../decider/DiskThresholdDeciderTests.java | 5 +- .../elasticsearch/common/BooleansTests.java | 4 +- .../elasticsearch/common/ParseFieldTests.java | 8 +-- .../elasticsearch/common/PidFileTests.java | 4 +- .../org/elasticsearch/common/TableTests.java | 16 ++--- .../collect/CopyOnWriteHashMapTests.java | 6 +- .../collect/CopyOnWriteHashSetTests.java | 4 +- .../common/geo/GeoJSONShapeParserTests.java | 7 +- .../recycler/AbstractRecyclerTests.java | 4 +- .../common/unit/SizeValueTests.java | 4 +- .../discovery/zen/ZenDiscoveryUnitTest.java | 4 +- .../env/NodeEnvironmentTests.java | 5 +- .../FieldStatsIntegrationTests.java | 4 +- .../gateway/MetaDataStateFormatTest.java | 8 +-- .../org/elasticsearch/get/GetActionTests.java | 5 +- .../analysis/KeepFilterFactoryTests.java | 6 +- .../analysis/NGramTokenizerFactoryTests.java | 6 +- .../PatternCaptureTokenFilterTests.java | 5 +- .../CommonGramsTokenFilterFactoryTests.java | 4 +- .../engine/InternalEngineSettingsTest.java | 8 +-- .../mapper/date/SimpleDateMappingTests.java | 4 +- .../index/mapper/ip/SimpleIpMappingTests.java | 12 ++-- .../merge/policy/MergePolicySettingsTest.java | 9 ++- .../query/SimpleIndexQueryParserTests.java | 6 +- .../index/search/child/ScoreTypeTests.java | 8 +-- .../index/shard/IndexShardTests.java | 8 +-- .../index/shard/ShardPathTests.java | 6 +- .../IndicesOptionsIntegrationTests.java | 4 +- .../indices/IndicesServiceTest.java | 8 +-- .../indices/analyze/AnalyzeActionTests.java | 10 +-- .../indices/recovery/RecoveryStateTest.java | 4 +- .../settings/UpdateNumberOfReplicasTests.java | 4 +- .../indices/settings/UpdateSettingsTests.java | 4 +- .../state/CloseIndexDisableCloseAllTests.java | 12 ++-- .../template/SimpleIndexTemplateTests.java | 13 ++-- ...DestructiveOperationsIntegrationTests.java | 19 +++--- .../plugins/PluginManagerTests.java | 12 ++-- .../routing/AliasResolveRoutingTests.java | 6 +- .../script/CustomScriptContextTests.java | 6 +- .../script/IndexedScriptTests.java | 2 +- .../script/ScriptContextRegistryTests.java | 12 ++-- .../script/ScriptModesTests.java | 4 +- .../script/ScriptServiceTests.java | 8 +-- .../SignificanceHeuristicTests.java | 30 ++++----- .../child/SimpleChildQuerySearchTests.java | 6 +- .../DecayFunctionScoreTests.java | 14 ++-- .../preference/SearchPreferenceTests.java | 5 +- .../search/scroll/SearchScrollTests.java | 14 ++-- .../search/simple/SimpleSearchTests.java | 8 +-- .../suggest/ContextSuggestSearchTests.java | 6 +- .../search/suggest/SuggestSearchTests.java | 12 ++-- .../SharedClusterSnapshotRestoreTests.java | 4 +- .../test/InternalTestCluster.java | 5 +- .../org/elasticsearch/test/TestCluster.java | 4 +- .../test/cluster/NoopClusterService.java | 6 +- .../test/cluster/TestClusterService.java | 6 +- .../transport/netty/KeyedLockTests.java | 8 +-- 436 files changed, 1502 insertions(+), 1665 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java delete mode 100644 src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java diff --git a/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java b/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java index 8a8f1fce31d..6e7c718769c 100644 --- a/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java +++ b/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java @@ -21,7 +21,7 @@ package org.apache.lucene.analysis; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.io.IOException; import java.util.Collections; @@ -97,7 +97,7 @@ public class PrefixAnalyzer extends Analyzer { this.currentPrefix = null; this.separator = separator; if (prefixes == null || !prefixes.iterator().hasNext()) { - throw new ElasticsearchIllegalArgumentException("one or more prefixes needed"); + throw new IllegalArgumentException("one or more prefixes needed"); } } diff --git a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java index ae021b07b09..b474817183b 100644 --- a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java +++ b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java @@ -19,7 +19,7 @@ package org.apache.lucene.store; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.ByteSizeValue; @@ -42,7 +42,7 @@ public class StoreRateLimiting { MERGE, ALL; - public static Type fromString(String type) throws ElasticsearchIllegalArgumentException { + public static Type fromString(String type) throws IllegalArgumentException { if ("none".equalsIgnoreCase(type)) { return NONE; } else if ("merge".equalsIgnoreCase(type)) { @@ -50,7 +50,7 @@ public class StoreRateLimiting { } else if ("all".equalsIgnoreCase(type)) { return ALL; } - throw new ElasticsearchIllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]"); + throw new IllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]"); } } @@ -88,7 +88,7 @@ public class StoreRateLimiting { this.type = type; } - public void setType(String type) throws ElasticsearchIllegalArgumentException { + public void setType(String type) throws IllegalArgumentException { this.type = Type.fromString(type); } } diff --git a/src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java b/src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java deleted file mode 100644 index fc6d110bf16..00000000000 --- a/src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch; - -import org.elasticsearch.rest.RestStatus; - -/** - * - */ -public class ElasticsearchIllegalArgumentException extends ElasticsearchException { - - public ElasticsearchIllegalArgumentException() { - super(null); - } - - public ElasticsearchIllegalArgumentException(String msg) { - super(msg); - } - - public ElasticsearchIllegalArgumentException(String msg, Throwable cause) { - super(msg, cause); - } - - @Override - public RestStatus status() { - return RestStatus.BAD_REQUEST; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java b/src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java deleted file mode 100644 index d837d699402..00000000000 --- a/src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch; - -/** - * - */ -public class ElasticsearchIllegalStateException extends ElasticsearchException { - - public ElasticsearchIllegalStateException() { - super(null); - } - - public ElasticsearchIllegalStateException(String msg) { - super(msg); - } - - public ElasticsearchIllegalStateException(String msg, Throwable cause) { - super(msg, cause); - } -} diff --git a/src/main/java/org/elasticsearch/Version.java b/src/main/java/org/elasticsearch/Version.java index 794dbdf49ab..7f420e6bb5d 100644 --- a/src/main/java/org/elasticsearch/Version.java +++ b/src/main/java/org/elasticsearch/Version.java @@ -459,12 +459,12 @@ public class Version { /** * Return the {@link Version} of Elasticsearch that has been used to create an index given its settings. * - * @throws ElasticsearchIllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED} + * @throws IllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED} */ public static Version indexCreated(Settings indexSettings) { final Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null); if (indexVersion == null) { - throw new ElasticsearchIllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]"); + throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]"); } return indexVersion; } diff --git a/src/main/java/org/elasticsearch/action/ActionFuture.java b/src/main/java/org/elasticsearch/action/ActionFuture.java index bca3730b61b..60115143e9d 100644 --- a/src/main/java/org/elasticsearch/action/ActionFuture.java +++ b/src/main/java/org/elasticsearch/action/ActionFuture.java @@ -35,7 +35,7 @@ public interface ActionFuture extends Future { /** * Similar to {@link #get()}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

    *

    Note, the actual cause is unwrapped to the actual failure (for example, unwrapped @@ -46,7 +46,7 @@ public interface ActionFuture extends Future { /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

    *

    Note, the actual cause is unwrapped to the actual failure (for example, unwrapped @@ -57,7 +57,7 @@ public interface ActionFuture extends Future { /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

    *

    Note, the actual cause is unwrapped to the actual failure (for example, unwrapped @@ -70,7 +70,7 @@ public interface ActionFuture extends Future { /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

    *

    Note, the actual cause is unwrapped to the actual failure (for example, unwrapped @@ -81,7 +81,7 @@ public interface ActionFuture extends Future { /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

    *

    Note, the actual cause is unwrapped to the actual failure (for example, unwrapped diff --git a/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java b/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java index b353e75aa3b..b4fd7c30059 100644 --- a/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java +++ b/src/main/java/org/elasticsearch/action/ActionRequestValidationException.java @@ -19,7 +19,9 @@ package org.elasticsearch.action; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.ElasticsearchException; + +import java.lang.IllegalArgumentException; import java.util.ArrayList; import java.util.List; @@ -27,12 +29,12 @@ import java.util.List; /** * */ -public class ActionRequestValidationException extends ElasticsearchIllegalArgumentException { +public class ActionRequestValidationException extends IllegalArgumentException { private final List validationErrors = new ArrayList<>(); public ActionRequestValidationException() { - super(null); + super("validation failed"); } public void addValidationError(String error) { diff --git a/src/main/java/org/elasticsearch/action/ThreadingModel.java b/src/main/java/org/elasticsearch/action/ThreadingModel.java index 5f87d82c528..f812a5c7777 100644 --- a/src/main/java/org/elasticsearch/action/ThreadingModel.java +++ b/src/main/java/org/elasticsearch/action/ThreadingModel.java @@ -19,7 +19,7 @@ package org.elasticsearch.action; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; /** * @@ -108,7 +108,7 @@ public enum ThreadingModel { } else if (id == 3) { return OPERATION_LISTENER; } else { - throw new ElasticsearchIllegalArgumentException("No threading model for [" + id + "]"); + throw new IllegalArgumentException("No threading model for [" + id + "]"); } } } diff --git a/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java b/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java index eeabdb99cb3..1ce18498eab 100644 --- a/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java +++ b/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java @@ -19,7 +19,7 @@ package org.elasticsearch.action; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; /** * Write Consistency Level control how many replicas should be active for a write operation to occur (a write operation @@ -53,7 +53,7 @@ public enum WriteConsistencyLevel { } else if (value == 3) { return ALL; } - throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]"); + throw new IllegalArgumentException("No write consistency match [" + value + "]"); } public static WriteConsistencyLevel fromString(String value) { @@ -66,6 +66,6 @@ public enum WriteConsistencyLevel { } else if (value.equals("all")) { return ALL; } - throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]"); + throw new IllegalArgumentException("No write consistency match [" + value + "]"); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java index 50479ee2df0..9a41708693f 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.cluster.health; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; /** * @@ -48,7 +48,7 @@ public enum ClusterHealthStatus { case 2: return RED; default: - throw new ElasticsearchIllegalArgumentException("No cluster health status for value [" + value + "]"); + throw new IllegalArgumentException("No cluster health status for value [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 9d5812a3810..fdcdcb0a97a 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.admin.cluster.health; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; @@ -141,7 +141,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati @Override public void onClusterServiceClose() { - listener.onFailure(new ElasticsearchIllegalStateException("ClusterService was close during health call")); + listener.onFailure(new IllegalStateException("ClusterService was close during health call")); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 8682ee1611f..cc7538ce1c5 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -20,8 +20,8 @@ package org.elasticsearch.action.admin.cluster.repositories.put; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.Version; +import java.lang.IllegalArgumentException; + import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.bytes.BytesReference; @@ -218,7 +218,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest) entry.getValue()); } @@ -236,7 +236,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest) entry.getValue()); } else { - throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings"); + throw new IllegalArgumentException("malformed indices section, should be an array of strings"); } } else if (name.equals("partial")) { partial(nodeBooleanValue(entry.getValue())); } else if (name.equals("settings")) { if (!(entry.getValue() instanceof Map)) { - throw new ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object"); + throw new IllegalArgumentException("malformed settings section, should indices an inner object"); } settings((Map) entry.getValue()); } else if (name.equals("include_global_state")) { @@ -407,7 +407,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest) entry.getValue()); } else { - throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings"); + throw new IllegalArgumentException("malformed indices section, should be an array of strings"); } } else if (name.equals("partial")) { partial(nodeBooleanValue(entry.getValue())); } else if (name.equals("settings")) { if (!(entry.getValue() instanceof Map)) { - throw new ElasticsearchIllegalArgumentException("malformed settings section"); + throw new IllegalArgumentException("malformed settings section"); } settings((Map) entry.getValue()); } else if (name.equals("include_global_state")) { @@ -529,17 +529,17 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest) entry.getValue()); } else if (name.equals("ignore_index_settings")) { @@ -548,10 +548,10 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest) entry.getValue()); } else { - throw new ElasticsearchIllegalArgumentException("malformed ignore_index_settings section, should be an array of strings"); + throw new IllegalArgumentException("malformed ignore_index_settings section, should be an array of strings"); } } else { - throw new ElasticsearchIllegalArgumentException("Unknown parameter " + name); + throw new IllegalArgumentException("Unknown parameter " + name); } } indicesOptions(IndicesOptions.fromMap((Map) source, IndicesOptions.lenientExpandOpen())); @@ -571,7 +571,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest fieldMapper = indexService.mapperService().smartNameFieldMapper(request.field()); if (fieldMapper != null) { if (fieldMapper.isNumeric()) { - throw new ElasticsearchIllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields"); + throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields"); } analyzer = fieldMapper.indexAnalyzer(); field = fieldMapper.names().indexName(); @@ -134,20 +133,20 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction analyzer = indexService.analysisService().analyzer(request.analyzer()); } if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]"); + throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]"); } } else if (request.tokenizer() != null) { TokenizerFactory tokenizerFactory; if (indexService == null) { TokenizerFactoryFactory tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(request.tokenizer()); if (tokenizerFactoryFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]"); + throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]"); } tokenizerFactory = tokenizerFactoryFactory.create(request.tokenizer(), DEFAULT_SETTINGS); } else { tokenizerFactory = indexService.analysisService().tokenizer(request.tokenizer()); if (tokenizerFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]"); + throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]"); } } @@ -159,17 +158,17 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction if (indexService == null) { TokenFilterFactoryFactory tokenFilterFactoryFactory = indicesAnalysisService.tokenFilterFactoryFactory(tokenFilterName); if (tokenFilterFactoryFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]"); + throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]"); } tokenFilterFactories[i] = tokenFilterFactoryFactory.create(tokenFilterName, DEFAULT_SETTINGS); } else { tokenFilterFactories[i] = indexService.analysisService().tokenFilter(tokenFilterName); if (tokenFilterFactories[i] == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]"); + throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]"); } } if (tokenFilterFactories[i] == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]"); + throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]"); } } } @@ -182,17 +181,17 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction if (indexService == null) { CharFilterFactoryFactory charFilterFactoryFactory = indicesAnalysisService.charFilterFactoryFactory(charFilterName); if (charFilterFactoryFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find global char filter under [" + charFilterName + "]"); + throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]"); } charFilterFactories[i] = charFilterFactoryFactory.create(charFilterName, DEFAULT_SETTINGS); } else { charFilterFactories[i] = indexService.analysisService().charFilter(charFilterName); if (charFilterFactories[i] == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token char under [" + charFilterName + "]"); + throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]"); } } if (charFilterFactories[i] == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token char under [" + charFilterName + "]"); + throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]"); } } } @@ -207,7 +206,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction } } if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("failed to find analyzer"); + throw new IllegalArgumentException("failed to find analyzer"); } List tokens = Lists.newArrayList(); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 0be9e2767c1..38782547440 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.create; import com.google.common.base.Charsets; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -239,7 +239,7 @@ public class CreateIndexRequest extends AcknowledgedRequest try { mappings.put(type, source.string()); } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e); + throw new IllegalArgumentException("Failed to build json for mapping request", e); } return this; } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 417744176be..03ca578bedb 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.indices.get; import com.google.common.collect.ObjectArrays; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; import org.elasticsearch.common.io.stream.StreamInput; @@ -74,18 +74,18 @@ public class GetIndexRequest extends ClusterInfoRequest { return this.validNames.contains(name); } - public static Feature fromName(String name) throws ElasticsearchIllegalArgumentException { + public static Feature fromName(String name) throws IllegalArgumentException { for (Feature feature : Feature.values()) { if (feature.validName(name)) { return feature; } } - throw new ElasticsearchIllegalArgumentException("No feature for name [" + name + "]"); + throw new IllegalArgumentException("No feature for name [" + name + "]"); } - public static Feature fromId(byte id) throws ElasticsearchIllegalArgumentException { + public static Feature fromId(byte id) throws IllegalArgumentException { if (id < 0 || id >= FEATURES.length) { - throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]"); + throw new IllegalArgumentException("No mapping for id [" + id + "]"); } return FEATURES[id]; } @@ -104,7 +104,7 @@ public class GetIndexRequest extends ClusterInfoRequest { public GetIndexRequest features(Feature... features) { if (features == null) { - throw new ElasticsearchIllegalArgumentException("features cannot be null"); + throw new IllegalArgumentException("features cannot be null"); } else { this.features = features; } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 096878c61f6..4b47e95c2bc 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.get; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.support.ActionFilters; @@ -112,7 +112,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction im for (String s : s1) { String[] s2 = Strings.split(s, "="); if (s2.length != 2) { - throw new ElasticsearchIllegalArgumentException("malformed " + s); + throw new IllegalArgumentException("malformed " + s); } builder.field(s2[0], s2[1]); } @@ -190,7 +190,7 @@ public class PutMappingRequest extends AcknowledgedRequest im for (String s : s1) { String[] s2 = Strings.split(s, "="); if (s2.length != 2) { - throw new ElasticsearchIllegalArgumentException("malformed " + s); + throw new IllegalArgumentException("malformed " + s); } builder.field(s2[0], s2[1]); } @@ -203,7 +203,7 @@ public class PutMappingRequest extends AcknowledgedRequest im builder.endObject(); return builder; } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to generate simplified mapping definition", e); + throw new IllegalArgumentException("failed to generate simplified mapping definition", e); } } @@ -214,7 +214,7 @@ public class PutMappingRequest extends AcknowledgedRequest im try { return source(mappingBuilder.string()); } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e); + throw new IllegalArgumentException("Failed to build json for mapping request", e); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index b728abf934e..9d8619219f8 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -215,7 +215,7 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest) entry.getValue()); } else if (name.equals("mappings")) { Map mappings = (Map) entry.getValue(); for (Map.Entry entry1 : mappings.entrySet()) { if (!(entry1.getValue() instanceof Map)) { - throw new ElasticsearchIllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping"); + throw new IllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping"); } mapping(entry1.getKey(), (Map) entry1.getValue()); } @@ -313,7 +313,7 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest implements Composite } else if (request instanceof UpdateRequest) { add((UpdateRequest) request, payload); } else { - throw new ElasticsearchIllegalArgumentException("No support for request [" + request + "]"); + throw new IllegalArgumentException("No support for request [" + request + "]"); } return this; } @@ -294,7 +294,7 @@ public class BulkRequest extends ActionRequest implements Composite } else if (token.isValue()) { if ("_index".equals(currentFieldName)) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in bulk is not allowed"); + throw new IllegalArgumentException("explicit index in bulk is not allowed"); } index = parser.text(); } else if ("_type".equals(currentFieldName)) { diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 3a96f3aeff3..799c7c073b1 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionWriteResponse; @@ -292,7 +292,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } } } else { - throw new ElasticsearchIllegalStateException("Unexpected index operation: " + item.request()); + throw new IllegalStateException("Unexpected index operation: " + item.request()); } assert item.getPrimaryResponse() != null; @@ -502,7 +502,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation indexShard.indexingService().noopUpdate(updateRequest.type()); return new UpdateResult(translate, updateResponse); default: - throw new ElasticsearchIllegalStateException("Illegal update operation " + translate.operation()); + throw new IllegalStateException("Illegal update operation " + translate.operation()); } } @@ -556,7 +556,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } } } else { - throw new ElasticsearchIllegalStateException("Unexpected index operation: " + item.request()); + throw new IllegalStateException("Unexpected index operation: " + item.request()); } } diff --git a/src/main/java/org/elasticsearch/action/count/CountRequest.java b/src/main/java/org/elasticsearch/action/count/CountRequest.java index 7233d5ae7fc..6ed17c8c11a 100644 --- a/src/main/java/org/elasticsearch/action/count/CountRequest.java +++ b/src/main/java/org/elasticsearch/action/count/CountRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.count; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest; @@ -217,7 +217,7 @@ public class CountRequest extends BroadcastOperationRequest { */ public CountRequest terminateAfter(int terminateAfterCount) { if (terminateAfterCount <= 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be > 0"); } this.terminateAfter = terminateAfterCount; return this; diff --git a/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 747a9aa8953..47c88906165 100644 --- a/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.delete; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.RoutingMissingException; @@ -103,7 +103,7 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct if (request.request().routing() == null) { if (request.request().versionType() != VersionType.INTERNAL) { // TODO: implement this feature - throw new ElasticsearchIllegalArgumentException("routing value is required for deleting documents of type [" + request.request().type() + throw new IllegalArgumentException("routing value is required for deleting documents of type [" + request.request().type() + "] while using version_type [" + request.request().versionType() + "]"); } throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id()); diff --git a/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java b/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java index 63be6434fe1..aa9d806c812 100644 --- a/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java +++ b/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java @@ -23,8 +23,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.Terms; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; @@ -87,7 +87,7 @@ public class TransportFieldStatsTransportAction extends TransportBroadcastOperat indexName = shardResponse.getIndex(); } else { // should already have been catched by the FieldStatsRequest#validate(...) - throw new ElasticsearchIllegalArgumentException("Illegal level option [" + request.level() + "]"); + throw new IllegalArgumentException("Illegal level option [" + request.level() + "]"); } Map indexMergedFieldStats = indicesMergedFieldStats.get(indexName); @@ -100,7 +100,7 @@ public class TransportFieldStatsTransportAction extends TransportBroadcastOperat FieldStats existing = indexMergedFieldStats.get(entry.getKey()); if (existing != null) { if (existing.getType() != entry.getValue().getType()) { - throw new ElasticsearchIllegalStateException( + throw new IllegalStateException( "trying to merge the field stats of field [" + entry.getKey() + "] from index [" + shardResponse.getIndex() + "] but the field type is incompatible, try to set the 'level' option to 'indices'" ); } diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 74e76253ca2..e4c8c22d0bd 100644 --- a/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.get; import com.google.common.collect.Iterators; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.*; import org.elasticsearch.action.support.IndicesOptions; @@ -376,7 +376,7 @@ public class MultiGetRequest extends ActionRequest implements I XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("docs array element should include an object"); + throw new IllegalArgumentException("docs array element should include an object"); } String index = defaultIndex; String type = defaultType; @@ -395,7 +395,7 @@ public class MultiGetRequest extends ActionRequest implements I } else if (token.isValue()) { if ("_index".equals(currentFieldName)) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi get is not allowed"); + throw new IllegalArgumentException("explicit index in multi get is not allowed"); } index = parser.text(); } else if ("_type".equals(currentFieldName)) { @@ -486,7 +486,7 @@ public class MultiGetRequest extends ActionRequest implements I XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (!token.isValue()) { - throw new ElasticsearchIllegalArgumentException("ids array element should only contain ids"); + throw new IllegalArgumentException("ids array element should only contain ids"); } items.add(new Item(defaultIndex, defaultType, parser.text()).fields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting)); } diff --git a/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 5dcd790a422..b8d3026feac 100644 --- a/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.index; import com.google.common.base.Charsets; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.*; import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; @@ -109,11 +109,11 @@ public class IndexRequest extends ShardReplicationOperationRequest } else if (id == 1) { return CREATE; } else { - throw new ElasticsearchIllegalArgumentException("No type match for [" + id + "]"); + throw new IllegalArgumentException("No type match for [" + id + "]"); } } - public static OpType fromString(String sOpType) throws ElasticsearchIllegalArgumentException { + public static OpType fromString(String sOpType) throws IllegalArgumentException { String lowersOpType = sOpType.toLowerCase(Locale.ROOT); switch(lowersOpType){ case "create": @@ -121,7 +121,7 @@ public class IndexRequest extends ShardReplicationOperationRequest case "index": return OpType.INDEX; default: - throw new ElasticsearchIllegalArgumentException("opType [" + sOpType + "] not allowed, either [index] or [create] are allowed"); + throw new IllegalArgumentException("opType [" + sOpType + "] not allowed, either [index] or [create] are allowed"); } } @@ -326,7 +326,7 @@ public class IndexRequest extends ShardReplicationOperationRequest return this; } if (ttl <= 0) { - throw new ElasticsearchIllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]"); + throw new IllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]"); } this.ttl = ttl; return this; @@ -490,7 +490,7 @@ public class IndexRequest extends ShardReplicationOperationRequest * Sets a string representation of the {@link #opType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can * be either "index" or "create". */ - public IndexRequest opType(String opType) throws ElasticsearchIllegalArgumentException { + public IndexRequest opType(String opType) throws IllegalArgumentException { return opType(OpType.fromString(opType)); } @@ -608,11 +608,11 @@ public class IndexRequest extends ShardReplicationOperationRequest } if (parent != null && !mappingMd.hasParentField()) { - throw new ElasticsearchIllegalArgumentException("Can't specify parent if no parent field has been configured"); + throw new IllegalArgumentException("Can't specify parent if no parent field has been configured"); } } else { if (parent != null) { - throw new ElasticsearchIllegalArgumentException("Can't specify parent if no parent field has been configured"); + throw new IllegalArgumentException("Can't specify parent if no parent field has been configured"); } } diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java index 202383dbf3c..88be29f5de1 100644 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java +++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java @@ -21,7 +21,7 @@ package org.elasticsearch.action.mlt; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.*; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; @@ -475,7 +475,7 @@ public class MoreLikeThisRequest extends ActionRequest impl /** * The search type of the mlt search query. */ - public MoreLikeThisRequest searchType(String searchType) throws ElasticsearchIllegalArgumentException { + public MoreLikeThisRequest searchType(String searchType) throws IllegalArgumentException { return searchType(SearchType.fromString(searchType)); } diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java index 5b10269f3c2..de3d5d73f2d 100644 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.mlt; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -213,7 +213,7 @@ public class MoreLikeThisRequestBuilder extends ActionRequestBuilder header.put("id", entry.getValue()); } else if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed"); + throw new IllegalArgumentException("explicit index in multi percolate is not allowed"); } getRequest.index(nodeStringValue(value, null)); } else if ("type".equals(entry.getKey())) { @@ -225,7 +224,7 @@ public class MultiPercolateRequest extends ActionRequest Object value = entry.getValue(); if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed"); + throw new IllegalArgumentException("explicit index in multi percolate is not allowed"); } percolateRequest.indices(nodeStringArrayValue(value)); } else if ("type".equals(entry.getKey())) { diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index a27c4a590ee..0208b519efe 100644 --- a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -20,15 +20,14 @@ package org.elasticsearch.action.search; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchParseException; +import java.lang.IllegalArgumentException; + import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -125,7 +124,7 @@ public class MultiSearchRequest extends ActionRequest implem Object value = entry.getValue(); if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed"); + throw new IllegalArgumentException("explicit index in multi percolate is not allowed"); } searchRequest.indices(nodeStringArrayValue(value)); } else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) { diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 1c16f3a8b35..8fee22267c2 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -152,11 +152,11 @@ public class SearchRequest extends ActionRequest implements Indic @Override public SearchRequest indices(String... indices) { if (indices == null) { - throw new ElasticsearchIllegalArgumentException("indices must not be null"); + throw new IllegalArgumentException("indices must not be null"); } else { for (int i = 0; i < indices.length; i++) { if (indices[i] == null) { - throw new ElasticsearchIllegalArgumentException("indices[" + i + "] must not be null"); + throw new IllegalArgumentException("indices[" + i + "] must not be null"); } } } @@ -241,7 +241,7 @@ public class SearchRequest extends ActionRequest implements Indic * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch", * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch". */ - public SearchRequest searchType(String searchType) throws ElasticsearchIllegalArgumentException { + public SearchRequest searchType(String searchType) throws IllegalArgumentException { return searchType(SearchType.fromString(searchType)); } diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index e51bbb6234f..0ea1b41a5dd 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestBuilder; @@ -86,7 +86,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder[] context = new Tuple[contextSize]; @@ -110,7 +110,7 @@ public abstract class TransportSearchHelper { String element = elements[index++]; int sep = element.indexOf(':'); if (sep == -1) { - throw new ElasticsearchIllegalArgumentException("Malformed scrollId [" + scrollId + "]"); + throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]"); } context[i] = new Tuple<>(element.substring(sep + 1), Long.parseLong(element.substring(0, sep))); } diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java index 189b035a918..f2d6340fd40 100644 --- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java +++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.search.ReduceSearchPhaseException; @@ -194,7 +194,7 @@ public abstract class TransportSearchTypeAction extends TransportAction expectedTotalOps) { - raiseEarlyFailure(new ElasticsearchIllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared to expected [" + expectedTotalOps + "]")); + raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared to expected [" + expectedTotalOps + "]")); } } diff --git a/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java index 237dd64ee31..2054cad61cb 100644 --- a/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java +++ b/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.suggest; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; @@ -141,7 +141,7 @@ public class TransportSuggestAction extends TransportBroadcastOperationAction 0) { parser = XContentFactory.xContent(suggest).createParser(suggest); if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("suggest content missing"); + throw new IllegalArgumentException("suggest content missing"); } final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(), request.shardId().getIndex(), request.shardId().id()); final Suggest result = suggestPhase.execute(context, searcher.reader()); diff --git a/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java b/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java index 1aa0ead9217..98dca680ef5 100644 --- a/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java +++ b/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.support; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; @@ -44,7 +44,7 @@ public abstract class AdapterActionFuture extends BaseFuture implements try { return get(); } catch (InterruptedException e) { - throw new ElasticsearchIllegalStateException("Future got interrupted", e); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { throw rethrowExecutionException(e); } @@ -72,20 +72,24 @@ public abstract class AdapterActionFuture extends BaseFuture implements } catch (TimeoutException e) { throw new ElasticsearchTimeoutException(e.getMessage()); } catch (InterruptedException e) { - throw new ElasticsearchIllegalStateException("Future got interrupted", e); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { throw rethrowExecutionException(e); } } - static ElasticsearchException rethrowExecutionException(ExecutionException e) { + static RuntimeException rethrowExecutionException(ExecutionException e) { if (e.getCause() instanceof ElasticsearchException) { ElasticsearchException esEx = (ElasticsearchException) e.getCause(); Throwable root = esEx.unwrapCause(); if (root instanceof ElasticsearchException) { return (ElasticsearchException) root; + } else if (root instanceof RuntimeException) { + return (RuntimeException) root; } return new UncategorizedExecutionException("Failed execution", root); + } else if (e.getCause() instanceof RuntimeException) { + return (RuntimeException) e.getCause(); } else { return new UncategorizedExecutionException("Failed execution", e); } diff --git a/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 0324114bf53..5d32b631ae6 100644 --- a/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.settings.NodeSettingsService; @@ -55,15 +55,15 @@ public final class DestructiveOperations implements NodeSettingsService.Listener } if (aliasesOrIndices == null || aliasesOrIndices.length == 0) { - throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed"); + throw new IllegalArgumentException("Wildcard expressions or all indices are not allowed"); } else if (aliasesOrIndices.length == 1) { if (hasWildcardUsage(aliasesOrIndices[0])) { - throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed"); + throw new IllegalArgumentException("Wildcard expressions or all indices are not allowed"); } } else { for (String aliasesOrIndex : aliasesOrIndices) { if (hasWildcardUsage(aliasesOrIndex)) { - throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed"); + throw new IllegalArgumentException("Wildcard expressions or all indices are not allowed"); } } } diff --git a/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 84e458ded21..ccd458acb1f 100644 --- a/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.action.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.common.Strings; +import java.lang.IllegalArgumentException; + import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestRequest; @@ -121,7 +121,7 @@ public class IndicesOptions { //we just receive the old corresponding value with the new flag set to true (default) byte id = in.readByte(); if (id >= VALUES.length) { - throw new ElasticsearchIllegalArgumentException("No valid missing index type id: " + id); + throw new IllegalArgumentException("No valid missing index type id: " + id); } return VALUES[id]; } @@ -179,7 +179,7 @@ public class IndicesOptions { expandWildcardsOpen = true; expandWildcardsClosed = true; } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); + throw new IllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); } } } diff --git a/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index a413edfa232..858b55d1c5b 100644 --- a/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.termvectors; import com.google.common.collect.Iterators; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.*; import org.elasticsearch.common.Nullable; @@ -98,7 +98,7 @@ public class MultiTermVectorsRequest extends ActionRequest) script.unwrap(ctx); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to execute script", e); + throw new IllegalArgumentException("failed to execute script", e); } //Allow the script to set TTL using ctx._ttl ttl = getTTLFromScriptContext(ctx); @@ -200,7 +200,7 @@ public class UpdateHelper extends AbstractComponent { // we need to unwrap the ctx... ctx = (Map) script.unwrap(ctx); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to execute script", e); + throw new IllegalArgumentException("failed to execute script", e); } operation = (String) ctx.get("op"); diff --git a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java index 96f6a098ab8..2f059ac2f41 100644 --- a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java +++ b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java @@ -20,7 +20,7 @@ package org.elasticsearch.cache.recycler; import com.google.common.base.Strings; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.recycler.AbstractRecyclerC; @@ -213,7 +213,7 @@ public class PageCacheRecycler extends AbstractComponent { try { return Type.valueOf(type.toUpperCase(Locale.ROOT)); } catch (IllegalArgumentException e) { - throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]"); + throw new IllegalArgumentException("no type support [" + type + "]"); } } diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index ced572ecd15..4fe1d48a2e9 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -24,7 +24,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -145,7 +145,7 @@ public class TransportClientNodesService extends AbstractComponent { public TransportClientNodesService addTransportAddresses(TransportAddress... transportAddresses) { synchronized (mutex) { if (closed) { - throw new ElasticsearchIllegalStateException("transport client is closed, can't add an address"); + throw new IllegalStateException("transport client is closed, can't add an address"); } List filtered = Lists.newArrayListWithExpectedSize(transportAddresses.length); for (TransportAddress transportAddress : transportAddresses) { @@ -180,7 +180,7 @@ public class TransportClientNodesService extends AbstractComponent { public TransportClientNodesService removeTransportAddress(TransportAddress transportAddress) { synchronized (mutex) { if (closed) { - throw new ElasticsearchIllegalStateException("transport client is closed, can't remove an address"); + throw new IllegalStateException("transport client is closed, can't remove an address"); } ImmutableList.Builder builder = ImmutableList.builder(); for (DiscoveryNode otherNode : listedNodes) { diff --git a/src/main/java/org/elasticsearch/cluster/ClusterService.java b/src/main/java/org/elasticsearch/cluster/ClusterService.java index 805419ccc99..bf351e46758 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterService.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.OperationRouting; @@ -50,12 +50,12 @@ public interface ClusterService extends LifecycleComponent { /** * Adds an initial block to be set on the first cluster state created. */ - void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException; + void addInitialStateBlock(ClusterBlock block) throws IllegalStateException; /** * Remove an initial block to be set on the first cluster state created. */ - void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException; + void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException; /** * The operation routing. diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java index ef4d67740dc..1f1b8769efa 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -22,7 +22,7 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -103,10 +103,10 @@ public class ClusterState implements ToXContent { return customFactories.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { + public static Custom.Factory lookupFactorySafe(String type) throws IllegalArgumentException { Custom.Factory factory = customFactories.get(type); if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom state factory registered for type [" + type + "]"); + throw new IllegalArgumentException("No custom state factory registered for type [" + type + "]"); } return factory; } @@ -288,7 +288,7 @@ public class ClusterState implements ToXContent { Metric m = valueToEnum.get(metric); if (m == null) { if (!ignoreUnknown) { - throw new ElasticsearchIllegalArgumentException("Unknown metric [" + metric + "]"); + throw new IllegalArgumentException("Unknown metric [" + metric + "]"); } } else { result.add(m); diff --git a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 44727699354..532f829a06f 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster.action.index; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -71,7 +71,7 @@ public class MappingUpdatedAction extends AbstractComponent { private PutMappingRequestBuilder updateMappingRequest(String index, String type, Mapping mappingUpdate, final TimeValue timeout) { if (type.equals(MapperService.DEFAULT_MAPPING)) { - throw new ElasticsearchIllegalArgumentException("_default_ mapping should not be updated"); + throw new IllegalArgumentException("_default_ mapping should not be updated"); } return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString()) .setMasterNodeTimeout(timeout).setTimeout(timeout); diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java index dace9e8b4f5..3fc77b4ee4c 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster.block; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.util.EnumSet; @@ -55,6 +55,6 @@ public enum ClusterBlockLevel { } else if (id == 3) { return METADATA_WRITE; } - throw new ElasticsearchIllegalArgumentException("No cluster block level matching [" + id + "]"); + throw new IllegalArgumentException("No cluster block level matching [" + id + "]"); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java index 29bcba360bb..5d2156ef56e 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -59,7 +59,7 @@ public class AliasAction implements Streamable { } else if (value == 1) { return REMOVE; } else { - throw new ElasticsearchIllegalArgumentException("No type for action [" + value + "]"); + throw new IllegalArgumentException("No type for action [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index 72f7a599488..8a5ae1b128a 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; @@ -48,7 +48,7 @@ public class AliasValidator extends AbstractComponent { /** * Allows to validate an {@link org.elasticsearch.cluster.metadata.AliasAction} and make sure * it's valid before it gets added to the index metadata. Doesn't validate the alias filter. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the alias is not valid + * @throws IllegalArgumentException if the alias is not valid */ public void validateAliasAction(AliasAction aliasAction, MetaData metaData) { validateAlias(aliasAction.alias(), aliasAction.index(), aliasAction.indexRouting(), metaData); @@ -57,7 +57,7 @@ public class AliasValidator extends AbstractComponent { /** * Allows to validate an {@link org.elasticsearch.action.admin.indices.alias.Alias} and make sure * it's valid before it gets added to the index metadata. Doesn't validate the alias filter. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the alias is not valid + * @throws IllegalArgumentException if the alias is not valid */ public void validateAlias(Alias alias, String index, MetaData metaData) { validateAlias(alias.name(), index, alias.indexRouting(), metaData); @@ -66,7 +66,7 @@ public class AliasValidator extends AbstractComponent { /** * Allows to validate an {@link org.elasticsearch.cluster.metadata.AliasMetaData} and make sure * it's valid before it gets added to the index metadata. Doesn't validate the alias filter. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the alias is not valid + * @throws IllegalArgumentException if the alias is not valid */ public void validateAliasMetaData(AliasMetaData aliasMetaData, String index, MetaData metaData) { validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), metaData); @@ -77,7 +77,7 @@ public class AliasValidator extends AbstractComponent { * Useful with index templates containing aliases. Checks also that it is possible to parse * the alias filter via {@link org.elasticsearch.common.xcontent.XContentParser}, * without validating it as a filter though. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the alias is not valid + * @throws IllegalArgumentException if the alias is not valid */ public void validateAliasStandalone(Alias alias) { validateAliasStandalone(alias.name(), alias.indexRouting()); @@ -86,7 +86,7 @@ public class AliasValidator extends AbstractComponent { XContentParser parser = XContentFactory.xContent(alias.filter()).createParser(alias.filter()); parser.mapAndClose(); } catch (Throwable e) { - throw new ElasticsearchIllegalArgumentException("failed to parse filter for alias [" + alias.name() + "]", e); + throw new IllegalArgumentException("failed to parse filter for alias [" + alias.name() + "]", e); } } } @@ -95,7 +95,7 @@ public class AliasValidator extends AbstractComponent { validateAliasStandalone(alias, indexRouting); if (!Strings.hasText(index)) { - throw new ElasticsearchIllegalArgumentException("index name is required"); + throw new IllegalArgumentException("index name is required"); } assert metaData != null; @@ -106,17 +106,17 @@ public class AliasValidator extends AbstractComponent { private void validateAliasStandalone(String alias, String indexRouting) { if (!Strings.hasText(alias)) { - throw new ElasticsearchIllegalArgumentException("alias name is required"); + throw new IllegalArgumentException("alias name is required"); } if (indexRouting != null && indexRouting.indexOf(',') != -1) { - throw new ElasticsearchIllegalArgumentException("alias [" + alias + "] has several index routing values associated with it"); + throw new IllegalArgumentException("alias [" + alias + "] has several index routing values associated with it"); } } /** * Validates an alias filter by parsing it using the * provided {@link org.elasticsearch.index.query.IndexQueryParserService} - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the filter is not valid + * @throws IllegalArgumentException if the filter is not valid */ public void validateAliasFilter(String alias, String filter, IndexQueryParserService indexQueryParserService) { assert indexQueryParserService != null; @@ -124,14 +124,14 @@ public class AliasValidator extends AbstractComponent { XContentParser parser = XContentFactory.xContent(filter).createParser(filter); validateAliasFilter(parser, indexQueryParserService); } catch (Throwable e) { - throw new ElasticsearchIllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); + throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); } } /** * Validates an alias filter by parsing it using the * provided {@link org.elasticsearch.index.query.IndexQueryParserService} - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the filter is not valid + * @throws IllegalArgumentException if the filter is not valid */ public void validateAliasFilter(String alias, byte[] filter, IndexQueryParserService indexQueryParserService) { assert indexQueryParserService != null; @@ -139,7 +139,7 @@ public class AliasValidator extends AbstractComponent { XContentParser parser = XContentFactory.xContent(filter).createParser(filter); validateAliasFilter(parser, indexQueryParserService); } catch (Throwable e) { - throw new ElasticsearchIllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); + throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 1543151fad0..8cea5faf7a4 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -22,8 +22,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -109,10 +109,10 @@ public class IndexMetaData { return customFactories.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { + public static Custom.Factory lookupFactorySafe(String type) throws IllegalArgumentException { Custom.Factory factory = customFactories.get(type); if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom index metadata factoy registered for type [" + type + "]"); + throw new IllegalArgumentException("No custom index metadata factoy registered for type [" + type + "]"); } return factory; } @@ -142,7 +142,7 @@ public class IndexMetaData { } else if (id == 1) { return CLOSE; } - throw new ElasticsearchIllegalStateException("No state match for id [" + id + "]"); + throw new IllegalStateException("No state match for id [" + id + "]"); } public static State fromString(String state) { @@ -151,7 +151,7 @@ public class IndexMetaData { } else if ("close".equals(state)) { return CLOSE; } - throw new ElasticsearchIllegalStateException("No state match for [" + state + "]"); + throw new IllegalStateException("No state match for [" + state + "]"); } } public static final String INDEX_SETTING_PREFIX = "index."; @@ -237,7 +237,7 @@ public class IndexMetaData { try { routingHashFunction = hashFunctionClass.newInstance(); } catch (InstantiationException | IllegalAccessException e) { - throw new ElasticsearchIllegalStateException("Cannot instantiate hash function", e); + throw new IllegalStateException("Cannot instantiate hash function", e); } } useTypeForRouting = settings.getAsBoolean(SETTING_LEGACY_ROUTING_USE_TYPE, false); diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index f2ace98caeb..6dc595be51c 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.TimestampParsingException; import org.elasticsearch.common.Nullable; @@ -297,7 +297,7 @@ public class MappingMetaData { this.source = mapping; Map mappingMap = XContentHelper.createParser(mapping.compressed(), 0, mapping.compressed().length).mapOrderedAndClose(); if (mappingMap.size() != 1) { - throw new ElasticsearchIllegalStateException("Can't derive type from mapping, no root type: " + mapping.string()); + throw new IllegalStateException("Can't derive type from mapping, no root type: " + mapping.string()); } this.type = mappingMap.keySet().iterator().next(); initMappers((Map) mappingMap.get(this.type)); diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 51793b1d27b..7dd6dc8f7bf 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -25,7 +25,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; import com.google.common.collect.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -116,10 +116,10 @@ public class MetaData implements Iterable { return customFactories.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { + public static Custom.Factory lookupFactorySafe(String type) throws IllegalArgumentException { Custom.Factory factory = customFactories.get(type); if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom index metadata factory registered for type [" + type + "]"); + throw new IllegalArgumentException("No custom index metadata factory registered for type [" + type + "]"); } return factory; } @@ -474,20 +474,20 @@ public class MetaData implements Iterable { return routing; } if (indexAliases.size() > 1) { - throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexAliases.keys().toArray(String.class)) + "], can't execute a single index op"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexAliases.keys().toArray(String.class)) + "], can't execute a single index op"); } AliasMetaData aliasMd = indexAliases.values().iterator().next().value; if (aliasMd.indexRouting() != null) { if (routing != null) { if (!routing.equals(aliasMd.indexRouting())) { - throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); } } routing = aliasMd.indexRouting(); } if (routing != null) { if (routing.indexOf(',') != -1) { - throw new ElasticsearchIllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + routing + "] that resolved to several routing values, rejecting operation"); + throw new IllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + routing + "] that resolved to several routing values, rejecting operation"); } } return routing; @@ -651,10 +651,10 @@ public class MetaData implements Iterable { * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options * don't allow such a case. - * @throws ElasticsearchIllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided * indices options don't allow such a case. */ - public String[] concreteIndices(IndicesOptions indicesOptions, String... aliasesOrIndices) throws IndexMissingException, ElasticsearchIllegalArgumentException { + public String[] concreteIndices(IndicesOptions indicesOptions, String... aliasesOrIndices) throws IndexMissingException, IllegalArgumentException { if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed()) { if (isAllIndices(aliasesOrIndices)) { String[] concreteIndices; @@ -677,7 +677,7 @@ public class MetaData implements Iterable { if (aliasesOrIndices == null || aliasesOrIndices.length == 0) { if (!indicesOptions.allowNoIndices()) { - throw new ElasticsearchIllegalArgumentException("no indices were specified and wildcard expansion is disabled."); + throw new IllegalArgumentException("no indices were specified and wildcard expansion is disabled."); } else { return Strings.EMPTY_ARRAY; } @@ -734,23 +734,23 @@ public class MetaData implements Iterable { * Utility method that allows to resolve an index or alias to its corresponding single concrete index. * Callers should make sure they provide proper {@link org.elasticsearch.action.support.IndicesOptions} * that require a single index as a result. The indices resolution must in fact return a single index when - * using this method, an {@link org.elasticsearch.ElasticsearchIllegalArgumentException} gets thrown otherwise. + * using this method, an {@link IllegalArgumentException} gets thrown otherwise. * * @param indexOrAlias the index or alias to be resolved to concrete index * @param indicesOptions the indices options to be used for the index resolution * @return the concrete index obtained as a result of the index resolution * @throws IndexMissingException if the index or alias provided doesn't exist - * @throws ElasticsearchIllegalArgumentException if the index resolution lead to more than one index + * @throws IllegalArgumentException if the index resolution lead to more than one index */ - public String concreteSingleIndex(String indexOrAlias, IndicesOptions indicesOptions) throws IndexMissingException, ElasticsearchIllegalArgumentException { + public String concreteSingleIndex(String indexOrAlias, IndicesOptions indicesOptions) throws IndexMissingException, IllegalArgumentException { String[] indices = concreteIndices(indicesOptions, indexOrAlias); if (indices.length != 1) { - throw new ElasticsearchIllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); + throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); } return indices[0]; } - private String[] concreteIndices(String aliasOrIndex, IndicesOptions options, boolean failNoIndices) throws IndexMissingException, ElasticsearchIllegalArgumentException { + private String[] concreteIndices(String aliasOrIndex, IndicesOptions options, boolean failNoIndices) throws IndexMissingException, IllegalArgumentException { boolean failClosed = options.forbidClosedIndices() && !options.ignoreUnavailable(); // a quick check, if this is an actual index, if so, return it @@ -772,7 +772,7 @@ public class MetaData implements Iterable { throw new IndexMissingException(new Index(aliasOrIndex)); } if (indices.length > 1 && !options.allowAliasesToMultipleIndices()) { - throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one indices associated with it [" + Arrays.toString(indices) + "], can't execute a single index op"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one indices associated with it [" + Arrays.toString(indices) + "], can't execute a single index op"); } // No need to check whether indices referred by aliases are closed, because there are no closed indices. diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 244c598f0a3..67ddf9ea81b 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -26,7 +26,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -425,7 +425,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { } for (Alias alias : request.aliases()) { AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()) - .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); + .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); indexMetaDataBuilder.putAlias(aliasMetaData); } @@ -444,11 +444,11 @@ public class MetaDataCreateIndexService extends AbstractComponent { } indexService.indicesLifecycle().beforeIndexAddedToCluster(new Index(request.index()), - indexMetaData.settings()); + indexMetaData.settings()); MetaData newMetaData = MetaData.builder(currentState.metaData()) - .put(indexMetaData, false) - .build(); + .put(indexMetaData, false) + .build(); logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}], mappings {}", request.index(), request.cause(), templateNames, indexMetaData.numberOfShards(), indexMetaData.numberOfReplicas(), mappings.keySet()); @@ -466,7 +466,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (request.state() == State.OPEN) { RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) - .addAsNew(updatedState.metaData().index(request.index())); + .addAsNew(updatedState.metaData().index(request.index())); RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder).build()); updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build(); } @@ -572,7 +572,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { } if (validationErrors.isEmpty() == false) { throw new IndexCreationException(new Index(indexName), - new ElasticsearchIllegalArgumentException(getMessage(validationErrors))); + new IllegalArgumentException(getMessage(validationErrors))); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 5c556b86246..8377f3ae41d 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; @@ -68,7 +68,7 @@ public class MetaDataIndexStateService extends AbstractComponent { public void closeIndex(final CloseIndexClusterStateUpdateRequest request, final ActionListener listener) { if (request.indices() == null || request.indices().length == 0) { - throw new ElasticsearchIllegalArgumentException("Index name is required"); + throw new IllegalArgumentException("Index name is required"); } final String indicesAsString = Arrays.toString(request.indices()); @@ -128,7 +128,7 @@ public class MetaDataIndexStateService extends AbstractComponent { public void openIndex(final OpenIndexClusterStateUpdateRequest request, final ActionListener listener) { if (request.indices() == null || request.indices().length == 0) { - throw new ElasticsearchIllegalArgumentException("Index name is required"); + throw new IllegalArgumentException("Index name is required"); } final String indicesAsString = Arrays.toString(request.indices()); diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 95c806f7d49..66ac3b17fb6 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -23,7 +23,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeOperationRequest; import org.elasticsearch.cluster.ClusterService; @@ -111,11 +111,11 @@ public class MetaDataIndexTemplateService extends AbstractComponent { request.settings(updatedSettingsBuilder.build()); if (request.name == null) { - listener.onFailure(new ElasticsearchIllegalArgumentException("index_template must provide a name")); + listener.onFailure(new IllegalArgumentException("index_template must provide a name")); return; } if (request.template == null) { - listener.onFailure(new ElasticsearchIllegalArgumentException("index_template must provide a template")); + listener.onFailure(new IllegalArgumentException("index_template must provide a template")); return; } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index bc7539a7ff1..3caa1a9e0e4 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -172,7 +172,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // never allow to change the number of shards for (String key : updatedSettingsBuilder.internalMap().keySet()) { if (key.equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) { - listener.onFailure(new ElasticsearchIllegalArgumentException("can't change the number of shards for an index")); + listener.onFailure(new IllegalArgumentException("can't change the number of shards for an index")); return; } } @@ -193,7 +193,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements } if (!errors.isEmpty()) { - listener.onFailure(new ElasticsearchIllegalArgumentException("can't process the settings: " + errors.toString())); + listener.onFailure(new IllegalArgumentException("can't process the settings: " + errors.toString())); return; } @@ -230,7 +230,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements } if (!removedSettings.isEmpty() && !openIndices.isEmpty()) { - throw new ElasticsearchIllegalArgumentException(String.format(Locale.ROOT, + throw new IllegalArgumentException(String.format(Locale.ROOT, "Can't update non dynamic settings[%s] for open indices[%s]", removedSettings, openIndices diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java index 373d5ff858c..51aa9198e2f 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -389,7 +389,7 @@ public class RestoreMetaData implements MetaData.Custom { case 3: return FAILURE; default: - throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]"); + throw new IllegalArgumentException("No snapshot state for value [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java index b759fe5daeb..1622c247812 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -301,7 +301,7 @@ public class SnapshotMetaData implements MetaData.Custom { case 6: return WAITING; default: - throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]"); + throw new IllegalArgumentException("No snapshot state for value [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 13b0a62f0e6..0ea050c2fe8 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.node; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; @@ -59,7 +59,7 @@ public class DiscoveryNode implements Streamable, Serializable { } else if ("network".equals(nodeMode)) { return false; } else { - throw new ElasticsearchIllegalArgumentException("unsupported node.mode [" + nodeMode + "]. Should be one of [local, network]."); + throw new IllegalArgumentException("unsupported node.mode [" + nodeMode + "]. Should be one of [local, network]."); } } return false; diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 0a4986476e5..ec2a6abc402 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -24,7 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.UnmodifiableIterator; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; @@ -311,15 +311,15 @@ public class DiscoveryNodes implements Iterable { * * @param node id of the node to discover * @return discovered node matching the given id - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if more than one node matches the request or no nodes have been resolved + * @throws IllegalArgumentException if more than one node matches the request or no nodes have been resolved */ public DiscoveryNode resolveNode(String node) { String[] resolvedNodeIds = resolveNodesIds(node); if (resolvedNodeIds.length > 1) { - throw new ElasticsearchIllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node"); + throw new IllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node"); } if (resolvedNodeIds.length == 0) { - throw new ElasticsearchIllegalArgumentException("failed to resolve [" + node + " ], no matching nodes"); + throw new IllegalArgumentException("failed to resolve [" + node + " ], no matching nodes"); } return nodes.get(resolvedNodeIds[0]); } diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 5f0356d3572..afa92bae30d 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -25,7 +25,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenIntMap; @@ -398,7 +398,7 @@ public class IndexRoutingTable implements Iterable { */ private Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards, boolean asNew) { if (!shards.isEmpty()) { - throw new ElasticsearchIllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); + throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); } for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) { IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), asNew ? false : true); @@ -420,7 +420,7 @@ public class IndexRoutingTable implements Iterable { */ private Builder initializeEmpty(IndexMetaData indexMetaData, boolean asNew) { if (!shards.isEmpty()) { - throw new ElasticsearchIllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); + throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); } for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) { IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), asNew ? false : true); diff --git a/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index ee9f4270c8c..9f55d7d8893 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.routing; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -185,7 +185,7 @@ public class OperationRouting extends AbstractComponent { ensureNodeIdExists(nodes, nodeId); return indexShard.onlyNodeActiveInitializingShardsIt(nodeId); default: - throw new ElasticsearchIllegalArgumentException("unknown preference [" + preferenceType + "]"); + throw new IllegalArgumentException("unknown preference [" + preferenceType + "]"); } } // if not, then use it as the index @@ -260,14 +260,14 @@ public class OperationRouting extends AbstractComponent { @Deprecated protected int hash(HashFunction hashFunction, String type, String id) { if (type == null || "_all".equals(type)) { - throw new ElasticsearchIllegalArgumentException("Can't route an operation with no type and having type part of the routing (for backward comp)"); + throw new IllegalArgumentException("Can't route an operation with no type and having type part of the routing (for backward comp)"); } return hashFunction.hash(type, id); } private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) { if (!nodes.dataNodes().keys().contains(nodeId)) { - throw new ElasticsearchIllegalArgumentException("No data node with id[" + nodeId + "] found"); + throw new IllegalArgumentException("No data node with id[" + nodeId + "] found"); } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/Preference.java b/src/main/java/org/elasticsearch/cluster/routing/Preference.java index 534b13b9a70..480ea700157 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/Preference.java +++ b/src/main/java/org/elasticsearch/cluster/routing/Preference.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.cluster.routing; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; /** * Routing Preference Type @@ -99,7 +99,7 @@ public enum Preference { case "_onlyLocal": return ONLY_LOCAL; default: - throw new ElasticsearchIllegalArgumentException("no Preference for [" + preferenceType + "]"); + throw new IllegalArgumentException("no Preference for [" + preferenceType + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java index 6f8341087e7..9231f0782b4 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java +++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.routing; import com.google.common.collect.Iterators; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.node.DiscoveryNode; import java.util.ArrayList; @@ -90,7 +90,7 @@ public class RoutingNode implements Iterable { // TODO use Set with ShardIds for faster lookup. for (MutableShardRouting shardRouting : shards) { if (shardRouting.shardId().equals(shard.shardId())) { - throw new ElasticsearchIllegalStateException("Trying to add a shard [" + shard.shardId().index().name() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists"); + throw new IllegalStateException("Trying to add a shard [" + shard.shardId().index().name() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists"); } } shards.add(shard); diff --git a/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java b/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java index 7bea17c3c69..d5a1f25f984 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java +++ b/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java @@ -19,7 +19,7 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; /** * Represents the current state of a {@link ShardRouting} as defined by the @@ -69,7 +69,7 @@ public enum ShardRoutingState { case 4: return RELOCATING; default: - throw new ElasticsearchIllegalStateException("No routing state mapped for [" + value + "]"); + throw new IllegalStateException("No routing state mapped for [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 58f3ae58f50..21aa3f82164 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -21,10 +21,9 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.collect.ImmutableList; -import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -501,7 +500,7 @@ public class AllocationService extends AbstractComponent { logger.debug("failed shard {} not found in routingNodes, ignoring it", failedShard); } } else { - throw new ElasticsearchIllegalStateException("illegal state for a failed shard, relocating node id is set, but state does not match: " + failedShard); + throw new IllegalStateException("illegal state for a failed shard, relocating node id is set, but state does not match: " + failedShard); } } else { // the shard is not relocating, its either started, or initializing, just cancel it and move on... diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 6dde296c186..941cae71c98 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -22,7 +22,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import com.google.common.base.Predicate; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.MutableShardRouting; import org.elasticsearch.cluster.routing.RoutingNode; @@ -78,7 +78,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance); float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold); if (threshold <= 0.0f) { - throw new ElasticsearchIllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); + throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); } BalancedShardsAllocator.this.threshold = threshold; BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance); @@ -180,7 +180,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards public WeightFunction(float indexBalance, float shardBalance) { float sum = indexBalance + shardBalance; if (sum <= 0.0f) { - throw new ElasticsearchIllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); + throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); } theta = new float[]{shardBalance / sum, indexBalance / sum}; this.indexBalance = indexBalance; diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java index 0bbb7e750da..515d0a60e9f 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java @@ -20,8 +20,8 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.MutableShardRouting; @@ -185,7 +185,7 @@ public class AllocateAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command", "failed to find " + shardId + " on the list of unassigned shards")); } - throw new ElasticsearchIllegalArgumentException("[allocate] failed to find " + shardId + " on the list of unassigned shards"); + throw new IllegalArgumentException("[allocate] failed to find " + shardId + " on the list of unassigned shards"); } if (shardRouting.primary() && !allowPrimary) { @@ -193,7 +193,7 @@ public class AllocateAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command", "trying to allocate a primary shard " + shardId + ", which is disabled")); } - throw new ElasticsearchIllegalArgumentException("[allocate] trying to allocate a primary shard " + shardId + ", which is disabled"); + throw new IllegalArgumentException("[allocate] trying to allocate a primary shard " + shardId + ", which is disabled"); } RoutingNode routingNode = allocation.routingNodes().node(discoNode.id()); @@ -203,13 +203,13 @@ public class AllocateAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command", "Allocation can only be done on data nodes, not [" + node + "]")); } - throw new ElasticsearchIllegalArgumentException("Allocation can only be done on data nodes, not [" + node + "]"); + throw new IllegalArgumentException("Allocation can only be done on data nodes, not [" + node + "]"); } else { if (explain) { return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command", "Could not find [" + node + "] among the routing nodes")); } - throw new ElasticsearchIllegalStateException("Could not find [" + node + "] among the routing nodes"); + throw new IllegalStateException("Could not find [" + node + "] among the routing nodes"); } } @@ -218,7 +218,7 @@ public class AllocateAllocationCommand implements AllocationCommand { if (explain) { return new RerouteExplanation(this, decision); } - throw new ElasticsearchIllegalArgumentException("[allocate] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision); + throw new IllegalArgumentException("[allocate] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision); } // go over and remove it from the unassigned for (Iterator it = allocation.routingNodes().unassigned().iterator(); it.hasNext(); ) { diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java index 40a75cdfd52..f73a6711587 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -61,10 +61,10 @@ public class AllocationCommands { } @SuppressWarnings("unchecked") - public static AllocationCommand.Factory lookupFactorySafe(String name) throws ElasticsearchIllegalArgumentException { + public static AllocationCommand.Factory lookupFactorySafe(String name) throws IllegalArgumentException { AllocationCommand.Factory factory = factories.get(name); if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No allocation command factory registered for name [" + name + "]"); + throw new IllegalArgumentException("No allocation command factory registered for name [" + name + "]"); } return factory; } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java index be01ccb6190..6ba8c88d21b 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.MutableShardRouting; @@ -197,7 +197,7 @@ public class CancelAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command", "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and initializing its state")); } - throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " + + throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " + discoNode + ", shard is primary and initializing its state"); } it.moveToUnassigned(); @@ -220,7 +220,7 @@ public class CancelAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command", "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and started")); } - throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " + + throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " + discoNode + ", shard is primary and started"); } it.remove(); @@ -233,7 +233,7 @@ public class CancelAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command", "can't cancel " + shardId + ", failed to find it on node " + discoNode)); } - throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + ", failed to find it on node " + discoNode); + throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + ", failed to find it on node " + discoNode); } return new RerouteExplanation(this, allocation.decision(Decision.YES, "cancel_allocation_command", "shard " + shardId + " on node " + discoNode + " can be cancelled")); diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java index 0a52f6cc6f4..bdfad07a9cd 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.MutableShardRouting; @@ -165,7 +165,7 @@ public class MoveAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "move_allocation_command", "shard " + shardId + " has not been started")); } - throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId + + throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", shard is not started (state = " + shardRouting.state() + "]"); } @@ -175,7 +175,7 @@ public class MoveAllocationCommand implements AllocationCommand { if (explain) { return new RerouteExplanation(this, decision); } - throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + toDiscoNode + ", since its not allowed, reason: " + decision); + throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + toDiscoNode + ", since its not allowed, reason: " + decision); } if (decision.type() == Decision.Type.THROTTLE) { // its being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it... @@ -193,7 +193,7 @@ public class MoveAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "move_allocation_command", "shard " + shardId + " not found")); } - throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId + ", failed to find it on node " + fromDiscoNode); + throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", failed to find it on node " + fromDiscoNode); } return new RerouteExplanation(this, decision); } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index f0480c4af7c..59ef855816d 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -19,8 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.settings.Validator; @@ -58,7 +58,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { try { ClusterRebalanceType.parseString(value); return null; - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { return "the value of " + setting + " must be one of: [always, indices_primaries_active, indices_all_active]"; } } @@ -89,7 +89,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { } else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) { return ClusterRebalanceType.INDICES_ALL_ACTIVE; } - throw new ElasticsearchIllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString); + throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString); } } @@ -101,7 +101,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { String allowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "indices_all_active"); try { type = ClusterRebalanceType.parseString(allowRebalance); - } catch (ElasticsearchIllegalStateException e) { + } catch (IllegalStateException e) { logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, allowRebalance); type = ClusterRebalanceType.INDICES_ALL_ACTIVE; } @@ -119,7 +119,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { ClusterRebalanceType newType = null; try { newType = ClusterRebalanceType.parseString(newAllowRebalance); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // ignore } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java index 4f05fd196fb..7068d3f0fd9 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -117,7 +117,7 @@ public abstract class Decision implements ToXContent { case 2: return THROTTLE; default: - throw new ElasticsearchIllegalArgumentException("No Type for integer [" + i + "]"); + throw new IllegalArgumentException("No Type for integer [" + i + "]"); } } @@ -133,7 +133,7 @@ public abstract class Decision implements ToXContent { out.writeVInt(2); break; default: - throw new ElasticsearchIllegalArgumentException("Invalid Type [" + type + "]"); + throw new IllegalArgumentException("Invalid Type [" + type + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index bcbc1231e4b..08fa64390df 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -19,8 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -112,7 +112,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden"); } default: - throw new ElasticsearchIllegalStateException("Unknown allocation option"); + throw new IllegalStateException("Unknown allocation option"); } } @@ -148,7 +148,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe return allocation.decision(Decision.NO, NAME, "primary rebalancing is forbidden"); } default: - throw new ElasticsearchIllegalStateException("Unknown rebalance option"); + throw new IllegalStateException("Unknown rebalance option"); } } @@ -188,7 +188,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe try { return Allocation.valueOf(strValue); } catch (IllegalArgumentException e) { - throw new ElasticsearchIllegalArgumentException("Illegal allocation.enable value [" + strValue + "]"); + throw new IllegalArgumentException("Illegal allocation.enable value [" + strValue + "]"); } } } @@ -214,7 +214,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe try { return Rebalance.valueOf(strValue); } catch (IllegalArgumentException e) { - throw new ElasticsearchIllegalArgumentException("Illegal rebalance.enable value [" + strValue + "]"); + throw new IllegalArgumentException("Illegal rebalance.enable value [" + strValue + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index eb527e07fe4..21c6f1361d5 100644 --- a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.service; import com.google.common.collect.Iterables; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ClusterState.Builder; @@ -129,17 +129,17 @@ public class InternalClusterService extends AbstractLifecycleComponent { case 3: return LOW; case 4: return LANGUID; default: - throw new ElasticsearchIllegalArgumentException("can't find priority for [" + b + "]"); + throw new IllegalArgumentException("can't find priority for [" + b + "]"); } } diff --git a/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java b/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java index 3e9f3a74803..7992ddb6935 100644 --- a/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java +++ b/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java @@ -19,7 +19,7 @@ package org.elasticsearch.common; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import java.io.IOException; import java.util.Random; @@ -62,7 +62,7 @@ class RandomBasedUUIDGenerator implements UUIDGenerator { assert encoded[encoded.length - 2] == '='; return new String(encoded, 0, encoded.length - 2, Base64.PREFERRED_ENCODING); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("should not be thrown"); + throw new IllegalStateException("should not be thrown"); } } } diff --git a/src/main/java/org/elasticsearch/common/Table.java b/src/main/java/org/elasticsearch/common/Table.java index 18ba34c2487..183e51c7eca 100644 --- a/src/main/java/org/elasticsearch/common/Table.java +++ b/src/main/java/org/elasticsearch/common/Table.java @@ -21,7 +21,7 @@ package org.elasticsearch.common; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import java.util.ArrayList; import java.util.HashMap; @@ -47,7 +47,7 @@ public class Table { public Table endHeaders() { if (currentCells == null || currentCells.isEmpty()) { - throw new ElasticsearchIllegalStateException("no headers added..."); + throw new IllegalStateException("no headers added..."); } inHeaders = false; headers = currentCells; @@ -73,7 +73,7 @@ public class Table { public Table startRow() { if (headers.isEmpty()) { - throw new ElasticsearchIllegalStateException("no headers added..."); + throw new IllegalStateException("no headers added..."); } currentCells = new ArrayList<>(headers.size()); return this; @@ -81,7 +81,7 @@ public class Table { public Table endRow(boolean check) { if (currentCells == null) { - throw new ElasticsearchIllegalStateException("no row started..."); + throw new IllegalStateException("no row started..."); } if (check && (currentCells.size() != headers.size())) { StringBuilder s = new StringBuilder(); @@ -89,7 +89,7 @@ public class Table { s.append(currentCells.size()); s.append(" in a row compared to header "); s.append(headers.size()); - throw new ElasticsearchIllegalStateException(s.toString()); + throw new IllegalStateException(s.toString()); } rows.add(currentCells); currentCells = null; @@ -107,11 +107,11 @@ public class Table { public Table addCell(Object value, String attributes) { if (currentCells == null) { - throw new ElasticsearchIllegalStateException("no block started..."); + throw new IllegalStateException("no block started..."); } if (!inHeaders) { if (currentCells.size() == headers.size()) { - throw new ElasticsearchIllegalStateException("can't add more cells to a row than the header"); + throw new IllegalStateException("can't add more cells to a row than the header"); } } Map mAttr; diff --git a/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index b354a3c063e..0561956552c 100644 --- a/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -19,12 +19,10 @@ package org.elasticsearch.common; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import java.io.IOException; -import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; /** These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but * we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. */ @@ -87,7 +85,7 @@ class TimeBasedUUIDGenerator implements UUIDGenerator { try { encoded = Base64.encodeBytesToBytes(uuidBytes, 0, uuidBytes.length, Base64.URL_SAFE); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("should not be thrown", e); + throw new IllegalStateException("should not be thrown", e); } // We are a multiple of 3 bytes so we should not see any padding: diff --git a/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java b/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java index 6c099cfe014..394d2e6fbcd 100644 --- a/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java +++ b/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.breaker; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.util.Locale; @@ -50,7 +50,7 @@ public interface CircuitBreaker { case "memory": return Type.MEMORY; default: - throw new ElasticsearchIllegalArgumentException("No CircuitBreaker with type: " + value); + throw new IllegalArgumentException("No CircuitBreaker with type: " + value); } } } diff --git a/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index c748ade21f9..aa2860eb2b3 100644 --- a/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -21,7 +21,7 @@ package org.elasticsearch.common.bytes; import com.google.common.base.Charsets; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.BytesStreamInput; import org.elasticsearch.common.io.stream.StreamInput; @@ -90,7 +90,7 @@ public class BytesArray implements BytesReference { @Override public BytesReference slice(int from, int length) { if (from < 0 || (from + length) > this.length) { - throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" + from + "], length [" + length + "]"); + throw new IllegalArgumentException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new BytesArray(bytes, offset + from, length); } diff --git a/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index 6c74029f9f5..ab3f1e8ae91 100644 --- a/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -21,7 +21,7 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.netty.NettyUtils; @@ -74,7 +74,7 @@ public class PagedBytesReference implements BytesReference { @Override public BytesReference slice(int from, int length) { if (from < 0 || (from + length) > length()) { - throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + length() + "], with slice parameters from [" + from + "], length [" + length + "]"); + throw new IllegalArgumentException("can't slice a buffer with length [" + length() + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new PagedBytesReference(bigarrays, bytearray, offset + from, length); diff --git a/src/main/java/org/elasticsearch/common/cli/CliTool.java b/src/main/java/org/elasticsearch/common/cli/CliTool.java index eb0faf91d0a..092fd0e2e3a 100644 --- a/src/main/java/org/elasticsearch/common/cli/CliTool.java +++ b/src/main/java/org/elasticsearch/common/cli/CliTool.java @@ -23,7 +23,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -146,7 +146,7 @@ public abstract class CliTool { } catch (IOException ioe) { terminal.printError(ioe); return ExitStatus.IO_ERROR.status; - } catch (IllegalArgumentException | ElasticsearchIllegalArgumentException ilae) { + } catch (IllegalArgumentException ilae) { terminal.printError(ilae); return ExitStatus.USAGE.status; } catch (Throwable t) { diff --git a/src/main/java/org/elasticsearch/common/collect/HppcMaps.java b/src/main/java/org/elasticsearch/common/collect/HppcMaps.java index 6d3070cfbab..28cc9a5483c 100644 --- a/src/main/java/org/elasticsearch/common/collect/HppcMaps.java +++ b/src/main/java/org/elasticsearch/common/collect/HppcMaps.java @@ -23,7 +23,7 @@ import com.carrotsearch.hppc.ObjectIntOpenHashMap; import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.ObjectObjectOpenHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.util.Iterator; @@ -72,7 +72,7 @@ public final class HppcMaps { @Override public V put(K key, V value) { if (key == null) { - throw new ElasticsearchIllegalArgumentException("Map key must not be null"); + throw new IllegalArgumentException("Map key must not be null"); } return super.put(key, value); } @@ -134,7 +134,7 @@ public final class HppcMaps { @Override public int put(V key, int value) { if (key == null) { - throw new ElasticsearchIllegalArgumentException("Map key must not be null"); + throw new IllegalArgumentException("Map key must not be null"); } return super.put(key, value); } diff --git a/src/main/java/org/elasticsearch/common/component/Lifecycle.java b/src/main/java/org/elasticsearch/common/component/Lifecycle.java index 92bc7aa4782..146a4adcb20 100644 --- a/src/main/java/org/elasticsearch/common/component/Lifecycle.java +++ b/src/main/java/org/elasticsearch/common/component/Lifecycle.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.component; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; /** * Lifecycle state. Allows the following transitions: @@ -105,7 +105,7 @@ public class Lifecycle { return state == State.STOPPED || state == State.CLOSED; } - public boolean canMoveToStarted() throws ElasticsearchIllegalStateException { + public boolean canMoveToStarted() throws IllegalStateException { State localState = this.state; if (localState == State.INITIALIZED || localState == State.STOPPED) { return true; @@ -114,13 +114,13 @@ public class Lifecycle { return false; } if (localState == State.CLOSED) { - throw new ElasticsearchIllegalStateException("Can't move to started state when closed"); + throw new IllegalStateException("Can't move to started state when closed"); } - throw new ElasticsearchIllegalStateException("Can't move to started with unknown state"); + throw new IllegalStateException("Can't move to started with unknown state"); } - public boolean moveToStarted() throws ElasticsearchIllegalStateException { + public boolean moveToStarted() throws IllegalStateException { State localState = this.state; if (localState == State.INITIALIZED || localState == State.STOPPED) { state = State.STARTED; @@ -130,12 +130,12 @@ public class Lifecycle { return false; } if (localState == State.CLOSED) { - throw new ElasticsearchIllegalStateException("Can't move to started state when closed"); + throw new IllegalStateException("Can't move to started state when closed"); } - throw new ElasticsearchIllegalStateException("Can't move to started with unknown state"); + throw new IllegalStateException("Can't move to started with unknown state"); } - public boolean canMoveToStopped() throws ElasticsearchIllegalStateException { + public boolean canMoveToStopped() throws IllegalStateException { State localState = state; if (localState == State.STARTED) { return true; @@ -144,12 +144,12 @@ public class Lifecycle { return false; } if (localState == State.CLOSED) { - throw new ElasticsearchIllegalStateException("Can't move to started state when closed"); + throw new IllegalStateException("Can't move to started state when closed"); } - throw new ElasticsearchIllegalStateException("Can't move to started with unknown state"); + throw new IllegalStateException("Can't move to started with unknown state"); } - public boolean moveToStopped() throws ElasticsearchIllegalStateException { + public boolean moveToStopped() throws IllegalStateException { State localState = state; if (localState == State.STARTED) { state = State.STOPPED; @@ -159,30 +159,30 @@ public class Lifecycle { return false; } if (localState == State.CLOSED) { - throw new ElasticsearchIllegalStateException("Can't move to started state when closed"); + throw new IllegalStateException("Can't move to started state when closed"); } - throw new ElasticsearchIllegalStateException("Can't move to started with unknown state"); + throw new IllegalStateException("Can't move to started with unknown state"); } - public boolean canMoveToClosed() throws ElasticsearchIllegalStateException { + public boolean canMoveToClosed() throws IllegalStateException { State localState = state; if (localState == State.CLOSED) { return false; } if (localState == State.STARTED) { - throw new ElasticsearchIllegalStateException("Can't move to closed before moving to stopped mode"); + throw new IllegalStateException("Can't move to closed before moving to stopped mode"); } return true; } - public boolean moveToClosed() throws ElasticsearchIllegalStateException { + public boolean moveToClosed() throws IllegalStateException { State localState = state; if (localState == State.CLOSED) { return false; } if (localState == State.STARTED) { - throw new ElasticsearchIllegalStateException("Can't move to closed before moving to stopped mode"); + throw new IllegalStateException("Can't move to closed before moving to stopped mode"); } state = State.CLOSED; return true; diff --git a/src/main/java/org/elasticsearch/common/geo/GeoDistance.java b/src/main/java/org/elasticsearch/common/geo/GeoDistance.java index 96dac682de5..0f44b5738a6 100644 --- a/src/main/java/org/elasticsearch/common/geo/GeoDistance.java +++ b/src/main/java/org/elasticsearch/common/geo/GeoDistance.java @@ -21,7 +21,7 @@ package org.elasticsearch.common.geo; import org.apache.lucene.util.Bits; import org.apache.lucene.util.SloppyMath; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.GeoPointValues; @@ -203,7 +203,7 @@ public enum GeoDistance { } else if ("factor".equals(name)) { return FACTOR; } - throw new ElasticsearchIllegalArgumentException("No geo distance for [" + name + "]"); + throw new IllegalArgumentException("No geo distance for [" + name + "]"); } public static interface FixedSourceDistance { diff --git a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index d679d2dd943..8e908012d8a 100644 --- a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -17,7 +17,7 @@ package org.elasticsearch.common.geo; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.util.ArrayList; import java.util.Collection; @@ -285,7 +285,7 @@ public class GeoHashUtils { case 'z': return 31; default: - throw new ElasticsearchIllegalArgumentException("the character '" + geo + "' is not a valid geohash character"); + throw new IllegalArgumentException("the character '" + geo + "' is not a valid geohash character"); } } @@ -354,7 +354,7 @@ public class GeoHashUtils { public static long encodeAsLong(double latitude, double longitude, int precision) { if((precision>12)||(precision<1)) { - throw new ElasticsearchIllegalArgumentException("Illegal precision length of "+precision+ + throw new IllegalArgumentException("Illegal precision length of "+precision+ ". Long-based geohashes only support precisions between 1 and 12"); } double latInterval0 = -90.0; diff --git a/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 22b6ee074ff..ead682293b6 100644 --- a/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -27,7 +27,7 @@ import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; import org.apache.commons.lang3.tuple.Pair; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -256,7 +256,7 @@ public abstract class ShapeBuilder implements ToXContent { } return new CoordinateNode(new Coordinate(lon, lat)); } else if (token == XContentParser.Token.VALUE_NULL) { - throw new ElasticsearchIllegalArgumentException("coordinates cannot contain NULL values)"); + throw new IllegalArgumentException("coordinates cannot contain NULL values)"); } List nodes = new ArrayList<>(); @@ -703,7 +703,7 @@ public abstract class ShapeBuilder implements ToXContent { return type; } } - throw new ElasticsearchIllegalArgumentException("unknown geo_shape ["+geoshapename+"]"); + throw new IllegalArgumentException("unknown geo_shape ["+geoshapename+"]"); } public static ShapeBuilder parse(XContentParser parser) throws IOException { diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java index af21524aa50..991e74d3080 100644 --- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -61,8 +61,8 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Counter; import org.apache.lucene.util.Version; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -513,7 +513,7 @@ public class Lucene { public static void writeScoreDoc(StreamOutput out, ScoreDoc scoreDoc) throws IOException { if (!scoreDoc.getClass().equals(ScoreDoc.class)) { - throw new ElasticsearchIllegalArgumentException("This method can only be used to serialize a ScoreDoc, not a " + scoreDoc.getClass()); + throw new IllegalArgumentException("This method can only be used to serialize a ScoreDoc, not a " + scoreDoc.getClass()); } out.writeVInt(scoreDoc.doc); out.writeFloat(scoreDoc.score); @@ -699,27 +699,27 @@ public class Lucene { return new Scorer(null) { @Override public float score() throws IOException { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public int freq() throws IOException { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public int advance(int arg0) throws IOException { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public long cost() { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public int docID() { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public int nextDoc() throws IOException { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } }; } diff --git a/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java b/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java index 2ff71b2f1de..77ef6feeb64 100644 --- a/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java +++ b/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.lucene.all; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.io.FastCharArrayWriter; import org.elasticsearch.common.io.FastStringReader; @@ -111,7 +111,7 @@ public class AllEntries extends Reader { entry.reader().reset(); } } catch (IOException e) { - throw new ElasticsearchIllegalStateException("should not happen"); + throw new IllegalStateException("should not happen"); } it = entries.iterator(); if (it.hasNext()) { diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java index 82b57e4e452..9afdc2dd90b 100644 --- a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java +++ b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java @@ -28,8 +28,8 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.RoaringDocIdSet; import org.apache.lucene.util.SparseFixedBitSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import java.io.IOException; @@ -129,7 +129,7 @@ public class DocIdSets { @Override public boolean get(int index) { if (index < previous) { - throw new ElasticsearchIllegalArgumentException("This Bits instance can only be consumed in order. " + throw new IllegalArgumentException("This Bits instance can only be consumed in order. " + "Got called on [" + index + "] while previously called on [" + previous + "]"); } previous = index; @@ -139,7 +139,7 @@ public class DocIdSets { try { doc = iterator.advance(index); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Cannot advance iterator", e); + throw new IllegalStateException("Cannot advance iterator", e); } } return index == doc; diff --git a/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index 9531f8ea77f..fc956703826 100644 --- a/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.docset.DocIdSets; @@ -69,7 +69,7 @@ public class FilterableTermsEnum extends TermsEnum { public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable final Filter filter) throws IOException { if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) { - throw new ElasticsearchIllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag); + throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag); } this.docsEnumFlag = docsEnumFlag; if (filter == null) { diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java index 6a36198e386..a1bc0d61108 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java @@ -21,7 +21,7 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.io.IOException; @@ -37,7 +37,7 @@ public class WeightFactorFunction extends ScoreFunction { public WeightFactorFunction(float weight, ScoreFunction scoreFunction) { super(CombineFunction.MULT); if (scoreFunction instanceof BoostScoreFunction) { - throw new ElasticsearchIllegalArgumentException(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE); + throw new IllegalArgumentException(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE); } if (scoreFunction == null) { this.scoreFunction = SCORE_ONE; diff --git a/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java b/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java index 07298b8e3e2..179237a5d27 100644 --- a/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java +++ b/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.recycler; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import java.util.Deque; @@ -89,7 +89,7 @@ public class DequeRecycler extends AbstractRecycler { @Override public void close() { if (value == null) { - throw new ElasticsearchIllegalStateException("recycler entry already released..."); + throw new IllegalStateException("recycler entry already released..."); } final boolean recycle = beforeRelease(); if (recycle) { diff --git a/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java b/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java index f347a739607..35a943da48c 100644 --- a/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java +++ b/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.recycler; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; /** */ @@ -60,7 +60,7 @@ public class NoneRecycler extends AbstractRecycler { @Override public void close() { if (value == null) { - throw new ElasticsearchIllegalStateException("recycler entry already released..."); + throw new IllegalStateException("recycler entry already released..."); } value = null; } diff --git a/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index da526664513..67f29c21130 100644 --- a/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -22,9 +22,7 @@ package org.elasticsearch.common.recycler; import com.carrotsearch.hppc.hash.MurmurHash3; import com.google.common.collect.Queues; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; - -import java.lang.ref.SoftReference; +import java.lang.IllegalArgumentException; public enum Recyclers { ; @@ -155,7 +153,7 @@ public enum Recyclers { */ public static Recycler concurrent(final Recycler.Factory factory, final int concurrencyLevel) { if (concurrencyLevel < 1) { - throw new ElasticsearchIllegalArgumentException("concurrencyLevel must be >= 1"); + throw new IllegalArgumentException("concurrencyLevel must be >= 1"); } if (concurrencyLevel == 1) { return locked(factory.build()); diff --git a/src/main/java/org/elasticsearch/common/regex/Regex.java b/src/main/java/org/elasticsearch/common/regex/Regex.java index 67f4f13c420..63bbb573227 100644 --- a/src/main/java/org/elasticsearch/common/regex/Regex.java +++ b/src/main/java/org/elasticsearch/common/regex/Regex.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.regex; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import java.util.Locale; @@ -154,7 +154,7 @@ public class Regex { } else if ("UNICODE_CHAR_CLASS".equals(s)) { pFlags |= UNICODE_CHARACTER_CLASS; } else { - throw new ElasticsearchIllegalArgumentException("Unknown regex flag [" + s + "]"); + throw new IllegalArgumentException("Unknown regex flag [" + s + "]"); } } return pFlags; diff --git a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java index 02a9725b37b..98b7f0ecfb7 100644 --- a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.rounding; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -60,7 +60,7 @@ public abstract class TimeZoneRounding extends Rounding { public Builder(TimeValue interval) { this.unit = null; if (interval.millis() < 1) - throw new ElasticsearchIllegalArgumentException("Zero or negative time interval not supported"); + throw new IllegalArgumentException("Zero or negative time interval not supported"); this.interval = interval.millis(); } @@ -169,7 +169,7 @@ public abstract class TimeZoneRounding extends Rounding { TimeIntervalRounding(long interval, DateTimeZone timeZone) { if (interval < 1) - throw new ElasticsearchIllegalArgumentException("Zero or negative time interval not supported"); + throw new IllegalArgumentException("Zero or negative time interval not supported"); this.interval = interval; this.timeZone = timeZone; } diff --git a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java index eca908ddae9..296a0e9b513 100644 --- a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java +++ b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java @@ -25,7 +25,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Classes; @@ -506,7 +506,7 @@ public class ImmutableSettings implements Settings { @Override public Map getGroups(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException { if (!Strings.hasLength(settingPrefix)) { - throw new ElasticsearchIllegalArgumentException("illegal setting prefix " + settingPrefix); + throw new IllegalArgumentException("illegal setting prefix " + settingPrefix); } if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') { settingPrefix = settingPrefix + "."; @@ -695,7 +695,7 @@ public class ImmutableSettings implements Settings { } } if ((settings.length % 2) != 0) { - throw new ElasticsearchIllegalArgumentException("array settings of key + value order doesn't hold correct number of arguments (" + settings.length + ")"); + throw new IllegalArgumentException("array settings of key + value order doesn't hold correct number of arguments (" + settings.length + ")"); } for (int i = 0; i < settings.length; i++) { put(settings[i++].toString(), settings[i].toString()); @@ -919,7 +919,7 @@ public class ImmutableSettings implements Settings { for (String s : values) { int index = s.indexOf('='); if (index == -1) { - throw new ElasticsearchIllegalArgumentException("value [" + s + "] for settings loaded with delimiter [" + delimiter + "] is malformed, missing ="); + throw new IllegalArgumentException("value [" + s + "] for settings loaded with delimiter [" + delimiter + "] is malformed, missing ="); } map.put(s.substring(0, index), s.substring(index + 1)); } diff --git a/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java b/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java index 242000baad2..c75cedd1d6a 100644 --- a/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java +++ b/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.transport; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; @@ -57,7 +57,7 @@ public abstract class TransportAddressSerializers { public static synchronized void addAddressType(TransportAddress address) throws Exception { if (addressConstructors.containsKey(address.uniqueAddressTypeId())) { - throw new ElasticsearchIllegalStateException("Address [" + address.uniqueAddressTypeId() + "] already bound"); + throw new IllegalStateException("Address [" + address.uniqueAddressTypeId() + "] already bound"); } Constructor constructor = address.getClass().getDeclaredConstructor(); constructor.setAccessible(true); diff --git a/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 539b25de9b2..37c76d2b865 100644 --- a/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,10 +52,10 @@ public class ByteSizeValue implements Serializable, Streamable { this.sizeUnit = sizeUnit; } - public int bytesAsInt() throws ElasticsearchIllegalArgumentException { + public int bytesAsInt() throws IllegalArgumentException { long bytes = bytes(); if (bytes > Integer.MAX_VALUE) { - throw new ElasticsearchIllegalArgumentException("size [" + toString() + "] is bigger than max int"); + throw new IllegalArgumentException("size [" + toString() + "] is bigger than max int"); } return (int) bytes; } diff --git a/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java b/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java index 25eb6680aca..8ffec08d4c4 100644 --- a/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java +++ b/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -179,7 +179,7 @@ public enum DistanceUnit { * * @param unit name of the unit * @return unit matching the given name - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if no unit matches the given name + * @throws IllegalArgumentException if no unit matches the given name */ public static DistanceUnit fromString(String unit) { for (DistanceUnit dunit : values()) { @@ -189,7 +189,7 @@ public enum DistanceUnit { } } } - throw new ElasticsearchIllegalArgumentException("No distance unit match [" + unit + "]"); + throw new IllegalArgumentException("No distance unit match [" + unit + "]"); } /** @@ -233,7 +233,7 @@ public enum DistanceUnit { byte b = in.readByte(); if(b<0 || b>=values().length) { - throw new ElasticsearchIllegalArgumentException("No type for distance unit matching [" + b + "]"); + throw new IllegalArgumentException("No type for distance unit matching [" + b + "]"); } else { return values()[b]; } diff --git a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java index 712b37abcf9..a1d1bb76d21 100644 --- a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java +++ b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.unit; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.util.automaton.LevenshteinAutomata; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.xcontent.ToXContent; @@ -111,7 +111,7 @@ public final class Fuzziness implements ToXContent { } default: - throw new ElasticsearchIllegalArgumentException("Can't parse fuzziness on token: [" + token + "]"); + throw new IllegalArgumentException("Can't parse fuzziness on token: [" + token + "]"); } } @@ -243,7 +243,7 @@ public final class Fuzziness implements ToXContent { return similarity; } } - throw new ElasticsearchIllegalArgumentException("Can't get similarity from fuzziness [" + fuzziness + "]"); + throw new IllegalArgumentException("Can't get similarity from fuzziness [" + fuzziness + "]"); } private int termLen(String text) { diff --git a/src/main/java/org/elasticsearch/common/util/BloomFilter.java b/src/main/java/org/elasticsearch/common/util/BloomFilter.java index 6df7477a9cb..5137a043210 100644 --- a/src/main/java/org/elasticsearch/common/util/BloomFilter.java +++ b/src/main/java/org/elasticsearch/common/util/BloomFilter.java @@ -25,7 +25,7 @@ import org.apache.lucene.store.DataOutput; import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.hash.MurmurHash3; @@ -486,7 +486,7 @@ public class BloomFilter { } if (type == 1) { return Hashing.V1; } else { - throw new ElasticsearchIllegalArgumentException("no hashing type matching " + type); + throw new IllegalArgumentException("no hashing type matching " + type); } } } diff --git a/src/main/java/org/elasticsearch/common/util/LocaleUtils.java b/src/main/java/org/elasticsearch/common/util/LocaleUtils.java index 50f494f2d5a..65c99da43c5 100644 --- a/src/main/java/org/elasticsearch/common/util/LocaleUtils.java +++ b/src/main/java/org/elasticsearch/common/util/LocaleUtils.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.util; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.util.Locale; @@ -47,7 +47,7 @@ public class LocaleUtils { // lang return new Locale(parts[0]); default: - throw new ElasticsearchIllegalArgumentException("Can't parse locale: [" + localeStr + "]"); + throw new IllegalArgumentException("Can't parse locale: [" + localeStr + "]"); } } diff --git a/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java b/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java index 02d35ac0dcb..527bb58f2b8 100644 --- a/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java +++ b/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java @@ -27,7 +27,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -127,7 +127,7 @@ public class MultiDataPathUpgrader { out.flush(); if (!status.clean) { logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8)); - throw new ElasticsearchIllegalStateException("index check failure"); + throw new IllegalStateException("index check failure"); } } } diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java b/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java index bfd530c856e..29247e5939c 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; @@ -36,7 +36,7 @@ public final class CountDown { public CountDown(int count) { if (count < 0) { - throw new ElasticsearchIllegalArgumentException("count must be greater or equal to 0 but was: " + count); + throw new IllegalArgumentException("count must be greater or equal to 0 but was: " + count); } this.originalCount = count; this.countDown = new AtomicInteger(count); diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java index cf6239445d8..77f81b85d39 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.metrics.CounterMetric; import java.util.concurrent.BlockingQueue; @@ -38,13 +38,13 @@ public class EsAbortPolicy implements XRejectedExecutionHandler { if (((AbstractRunnable) r).isForceExecution()) { BlockingQueue queue = executor.getQueue(); if (!(queue instanceof SizeBlockingQueue)) { - throw new ElasticsearchIllegalStateException("forced execution, but expected a size queue"); + throw new IllegalStateException("forced execution, but expected a size queue"); } try { ((SizeBlockingQueue) queue).forcePut(r); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new ElasticsearchIllegalStateException("forced execution, but got interrupted", e); + throw new IllegalStateException("forced execution, but got interrupted", e); } return; } diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java index e727d35304d..94d82b77362 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java @@ -19,8 +19,7 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.action.ActionRunnable; +import java.lang.IllegalStateException; import java.util.concurrent.*; @@ -44,7 +43,7 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor { public void shutdown(ShutdownListener listener) { synchronized (monitor) { if (this.listener != null) { - throw new ElasticsearchIllegalStateException("Shutdown was already called on this thread pool"); + throw new IllegalStateException("Shutdown was already called on this thread pool"); } if (isTerminated()) { listener.onTerminated(); diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index 523f7b92f74..c9fb994985d 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; @@ -60,7 +60,7 @@ public class KeyedLock { while (true) { if (threadLocal.get() != null) { // if we are here, the thread already has the lock - throw new ElasticsearchIllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId() + throw new IllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId() + " for key " + key); } KeyLock perNodeLock = map.get(key); @@ -86,7 +86,7 @@ public class KeyedLock { public void release(T key) { KeyLock lock = threadLocal.get(); if (lock == null) { - throw new ElasticsearchIllegalStateException("Lock not acquired"); + throw new IllegalStateException("Lock not acquired"); } release(key, lock); } @@ -152,7 +152,7 @@ public class KeyedLock { public void release(T key) { KeyLock keyLock = threadLocal.get(); if (keyLock == null) { - throw new ElasticsearchIllegalStateException("Lock not acquired"); + throw new IllegalStateException("Lock not acquired"); } try { release(key, keyLock); diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java b/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java index f174d2e82b6..4d979698161 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import java.util.AbstractQueue; import java.util.Collection; @@ -146,13 +146,13 @@ public class SizeBlockingQueue extends AbstractQueue implements BlockingQu @Override public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException { // note, not used in ThreadPoolExecutor - throw new ElasticsearchIllegalStateException("offer with timeout not allowed on size queue"); + throw new IllegalStateException("offer with timeout not allowed on size queue"); } @Override public void put(E e) throws InterruptedException { // note, not used in ThreadPoolExecutor - throw new ElasticsearchIllegalStateException("put not allowed on size queue"); + throw new IllegalStateException("put not allowed on size queue"); } @Override diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java b/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java index a0e7c9e0742..89b71b60d57 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java @@ -26,10 +26,6 @@ import org.elasticsearch.ElasticsearchException; */ public class UncategorizedExecutionException extends ElasticsearchException { - public UncategorizedExecutionException(String msg) { - super(msg); - } - public UncategorizedExecutionException(String msg, Throwable cause) { super(msg, cause); } diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java index 61cdc84f0b8..e43eb769e65 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java +++ b/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java @@ -21,7 +21,7 @@ package org.elasticsearch.common.xcontent; import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -111,7 +111,7 @@ public class XContentFactory { } else if (type == XContentType.CBOR) { return cborBuilder(outputStream); } - throw new ElasticsearchIllegalArgumentException("No matching content type for " + type); + throw new IllegalArgumentException("No matching content type for " + type); } /** @@ -127,7 +127,7 @@ public class XContentFactory { } else if (type == XContentType.CBOR) { return CborXContent.contentBuilder(); } - throw new ElasticsearchIllegalArgumentException("No matching content type for " + type); + throw new IllegalArgumentException("No matching content type for " + type); } /** diff --git a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java index 321b3bbe225..ccd2ca1ae2c 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java +++ b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.AbstractXContentParser; @@ -204,7 +204,7 @@ public class JsonXContentParser extends AbstractXContentParser { case DOUBLE: return NumberType.DOUBLE; } - throw new ElasticsearchIllegalStateException("No matching token for number_type [" + numberType + "]"); + throw new IllegalStateException("No matching token for number_type [" + numberType + "]"); } private Token convertToken(JsonToken token) { @@ -235,6 +235,6 @@ public class JsonXContentParser extends AbstractXContentParser { case VALUE_EMBEDDED_OBJECT: return Token.VALUE_EMBEDDED_OBJECT; } - throw new ElasticsearchIllegalStateException("No matching token for json_token [" + token + "]"); + throw new IllegalStateException("No matching token for json_token [" + token + "]"); } } diff --git a/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index 420e6a399d1..95b1c0ba2d9 100644 --- a/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -19,7 +19,7 @@ package org.elasticsearch.discovery; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.component.AbstractComponent; @@ -95,7 +95,7 @@ public class DiscoverySettings extends AbstractComponent { case "write": return NO_MASTER_BLOCK_WRITES; default: - throw new ElasticsearchIllegalArgumentException("invalid master block [" + value + "]"); + throw new IllegalArgumentException("invalid master block [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java index e3f4b17910b..c04cd6f8194 100644 --- a/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java @@ -21,7 +21,7 @@ package org.elasticsearch.discovery.local; import com.google.common.base.Objects; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -276,7 +276,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem @Override public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { if (!master) { - throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); + throw new IllegalStateException("Shouldn't publish state when not master"); } LocalDiscovery[] members = members(); if (members.length > 0) { @@ -379,7 +379,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } catch (Exception e) { // failure to marshal or un-marshal - throw new ElasticsearchIllegalStateException("Cluster state failed to serialize", e); + throw new IllegalStateException("Cluster state failed to serialize", e); } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java b/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java index 40f5f56dc80..2f8ac0073d3 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java +++ b/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java @@ -18,27 +18,19 @@ */ package org.elasticsearch.discovery.zen; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; /** * Thrown when a node join request or a master ping reaches a node which is not * currently acting as a master. */ -public class NotMasterException extends ElasticsearchIllegalStateException { - - public NotMasterException() { - super(null); - } +public class NotMasterException extends IllegalStateException { public NotMasterException(String msg) { super(msg); } - public NotMasterException(String msg, Throwable cause) { - super(msg, cause); - } - @Override public Throwable fillInStackTrace() { return null; diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 9ad4945844a..d2271a92b6b 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -23,8 +23,8 @@ import com.google.common.base.Objects; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -173,10 +173,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen this.rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, true); if (this.joinRetryAttempts < 1) { - throw new ElasticsearchIllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]"); + throw new IllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]"); } if (this.maxPingsFromAnotherMaster < 1) { - throw new ElasticsearchIllegalArgumentException("'" + SETTING_MAX_PINGS_FROM_ANOTHER_MASTER + "' must be a positive number. got [" + this.maxPingsFromAnotherMaster + "]"); + throw new IllegalArgumentException("'" + SETTING_MAX_PINGS_FROM_ANOTHER_MASTER + "' must be a positive number. got [" + this.maxPingsFromAnotherMaster + "]"); } logger.debug("using ping.timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); @@ -333,7 +333,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Override public void publish(ClusterState clusterState, AckListener ackListener) { if (!clusterState.getNodes().localNodeMaster()) { - throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); + throw new IllegalStateException("Shouldn't publish state when not master"); } nodesFD.updateNodesAndPing(clusterState); publishClusterState.publish(clusterState, ackListener); @@ -706,7 +706,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen /* The cluster name can still be null if the state comes from a node that is prev 1.1.1*/ if (incomingClusterName != null && !incomingClusterName.equals(this.clusterName)) { logger.warn("received cluster state from [{}] which is also master but with a different cluster name [{}]", newClusterState.nodes().masterNode(), incomingClusterName); - newStateProcessed.onNewClusterStateFailed(new ElasticsearchIllegalStateException("received state from a node that is not part of the cluster")); + newStateProcessed.onNewClusterStateFailed(new IllegalStateException("received state from a node that is not part of the cluster")); return; } if (localNodeMaster()) { @@ -733,7 +733,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } else { if (newClusterState.nodes().localNode() == null) { logger.warn("received a cluster state from [{}] and not part of the cluster, should not happen", newClusterState.nodes().masterNode()); - newStateProcessed.onNewClusterStateFailed(new ElasticsearchIllegalStateException("received state from a node that is not part of the cluster")); + newStateProcessed.onNewClusterStateFailed(new IllegalStateException("received state from a node that is not part of the cluster")); } else { final ProcessClusterState processClusterState = new ProcessClusterState(newClusterState); @@ -879,7 +879,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } if (!currentState.nodes().masterNodeId().equals(newClusterState.nodes().masterNodeId())) { logger.warn("received a cluster state from a different master then the current one, rejecting (received {}, current {})", newClusterState.nodes().masterNode(), currentState.nodes().masterNode()); - throw new ElasticsearchIllegalStateException("cluster state from a different master then the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + currentState.nodes().masterNode() + ")"); + throw new IllegalStateException("cluster state from a different master then the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + currentState.nodes().masterNode() + ")"); } else if (newClusterState.version() < currentState.version()) { // if the new state has a smaller version, and it has the same master node, then no need to process it logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})", newClusterState.version(), currentState.version()); diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java index d713b6d0b39..06a72665130 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java @@ -20,7 +20,7 @@ package org.elasticsearch.discovery.zen.fd; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; @@ -295,7 +295,7 @@ public class MasterFaultDetection extends FaultDetection { } /** Thrown when a ping reaches the wrong node */ - static class ThisIsNotTheMasterYouAreLookingForException extends ElasticsearchIllegalStateException { + static class ThisIsNotTheMasterYouAreLookingForException extends IllegalStateException { ThisIsNotTheMasterYouAreLookingForException(String msg) { super(msg); @@ -310,7 +310,7 @@ public class MasterFaultDetection extends FaultDetection { } } - static class NodeDoesNotExistOnMasterException extends ElasticsearchIllegalStateException { + static class NodeDoesNotExistOnMasterException extends IllegalStateException { @Override public Throwable fillInStackTrace() { return null; @@ -351,7 +351,7 @@ public class MasterFaultDetection extends FaultDetection { // if we are no longer master, fail... DiscoveryNodes nodes = currentState.nodes(); if (!nodes.localNodeMaster()) { - throw new NotMasterException(); + throw new NotMasterException("local node is not master"); } if (!nodes.nodeExists(request.nodeId)) { throw new NodeDoesNotExistOnMasterException(); diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java index f367db8ca8c..667c4289921 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java @@ -19,7 +19,7 @@ package org.elasticsearch.discovery.zen.fd; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -245,13 +245,13 @@ public class NodesFaultDetection extends FaultDetection { // if we are not the node we are supposed to be pinged, send an exception // this can happen when a kill -9 is sent, and another node is started using the same port if (!localNode.id().equals(request.nodeId)) { - throw new ElasticsearchIllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + localNode.id() + "]"); + throw new IllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + localNode.id() + "]"); } // PingRequest will have clusterName set to null if it came from a node of version <1.4.0 if (request.clusterName != null && !request.clusterName.equals(clusterName)) { // Don't introduce new exception for bwc reasons - throw new ElasticsearchIllegalStateException("Got pinged with cluster name [" + request.clusterName + "], but I'm part of cluster [" + clusterName + "]"); + throw new IllegalStateException("Got pinged with cluster name [" + request.clusterName + "], but I'm part of cluster [" + clusterName + "]"); } notifyPingReceived(request); diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java index 18b6899c02d..ed32b22d37f 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java @@ -21,7 +21,7 @@ package org.elasticsearch.discovery.zen.ping; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Nullable; @@ -71,7 +71,7 @@ public class ZenPingService extends AbstractLifecycleComponent implemen @Override public void setPingContextProvider(PingContextProvider contextProvider) { if (lifecycle.started()) { - throw new ElasticsearchIllegalStateException("Can't set nodes provider when started"); + throw new IllegalStateException("Can't set nodes provider when started"); } for (ZenPing zenPing : zenPings) { zenPing.setPingContextProvider(contextProvider); diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java index e5f0f2ffad3..8f06abcaca0 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java @@ -21,7 +21,7 @@ package org.elasticsearch.discovery.zen.ping.multicast; import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -113,7 +113,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem @Override public void setPingContextProvider(PingContextProvider nodesProvider) { if (lifecycle.started()) { - throw new ElasticsearchIllegalStateException("Can't set nodes provider when started"); + throw new IllegalStateException("Can't set nodes provider when started"); } this.contextProvider = nodesProvider; } @@ -406,7 +406,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem .createParser(data) .mapAndClose(); } else { - throw new ElasticsearchIllegalStateException("failed multicast message, probably message from previous version"); + throw new IllegalStateException("failed multicast message, probably message from previous version"); } } if (externalPingData != null) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index 3dd2ad6a51d..6842808c669 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -23,8 +23,8 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.collect.Lists; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -130,7 +130,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#", addresses[i], version.minimumCompatibilityVersion())); } } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("Failed to resolve address for [" + host + "]", e); + throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e); } } this.configuredTargetNodes = configuredTargetNodes.toArray(new DiscoveryNode[configuredTargetNodes.size()]); @@ -462,7 +462,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen private UnicastPingResponse handlePingRequest(final UnicastPingRequest request) { if (!lifecycle.started()) { - throw new ElasticsearchIllegalStateException("received ping request while not started"); + throw new IllegalStateException("received ping request while not started"); } temporalResponses.add(request.pingResponse); threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() { diff --git a/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/src/main/java/org/elasticsearch/env/NodeEnvironment.java index c9dbf42d3fd..2b7db7d7937 100644 --- a/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -23,8 +23,8 @@ import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; import org.apache.lucene.store.*; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; @@ -172,7 +172,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } if (locks[0] == null) { - throw new ElasticsearchIllegalStateException("Failed to obtain node lock, is the following location writable?: " + throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: " + Arrays.toString(environment.dataWithClusterFiles()), lastException); } @@ -536,12 +536,12 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { /** * Returns an array of all of the nodes data locations. - * @throws org.elasticsearch.ElasticsearchIllegalStateException if the node is not configured to store local locations + * @throws IllegalStateException if the node is not configured to store local locations */ public Path[] nodeDataPaths() { assert assertEnvIsLocked(); if (nodePaths == null || locks == null) { - throw new ElasticsearchIllegalStateException("node is not configured to store local location"); + throw new IllegalStateException("node is not configured to store local location"); } Path[] paths = new Path[nodePaths.length]; for(int i=0;i findAllIndices() throws IOException { if (nodePaths == null || locks == null) { - throw new ElasticsearchIllegalStateException("node is not configured to store local location"); + throw new IllegalStateException("node is not configured to store local location"); } assert assertEnvIsLocked(); Set indices = Sets.newHashSet(); @@ -643,7 +643,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { /** * This method tries to write an empty file and moves it using an atomic move operation. - * This method throws an {@link ElasticsearchIllegalStateException} if this operation is + * This method throws an {@link IllegalStateException} if this operation is * not supported by the filesystem. This test is executed on each of the data directories. * This method cleans up all files even in the case of an error. */ @@ -657,7 +657,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { try { Files.move(src, target, StandardCopyOption.ATOMIC_MOVE); } catch (AtomicMoveNotSupportedException ex) { - throw new ElasticsearchIllegalStateException("atomic_move is not supported by the filesystem on path [" + throw new IllegalStateException("atomic_move is not supported by the filesystem on path [" + nodePath.path + "] atomic_move is required for elasticsearch to work correctly.", ex); } finally { @@ -703,7 +703,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { return PathUtils.get(customDataDir); } } else { - throw new ElasticsearchIllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available"); + throw new IllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available"); } } diff --git a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 34503b08ad8..c67c91dd6b9 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -20,7 +20,7 @@ package org.elasticsearch.gateway; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -175,7 +175,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } final String name = stateFile.getFileName().toString(); if (name.startsWith("metadata-")) { - throw new ElasticsearchIllegalStateException("Detected pre 0.19 metadata file please upgrade to a version before " + throw new IllegalStateException("Detected pre 0.19 metadata file please upgrade to a version before " + Version.CURRENT.minimumCompatibilityVersion() + " first to upgrade state structures - metadata found: [" + stateFile.getParent().toAbsolutePath()); } @@ -225,7 +225,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0)) { if (indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) != null || indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE) != null) { - throw new ElasticsearchIllegalStateException("Indices created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION + throw new IllegalStateException("Indices created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION + "] + or [" + IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE + "] in their index settings"); } } @@ -244,7 +244,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL if (Files.exists(stateLocation)) { try (DirectoryStream stream = Files.newDirectoryStream(stateLocation, "shards-*")) { for (Path stateFile : stream) { - throw new ElasticsearchIllegalStateException("Detected pre 0.19 shard state file please upgrade to a version before " + throw new IllegalStateException("Detected pre 0.19 shard state file please upgrade to a version before " + Version.CURRENT.minimumCompatibilityVersion() + " first to upgrade state structures - shard state found: [" + stateFile.getParent().toAbsolutePath()); } diff --git a/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 8bfbc5a4e06..fe7bdd5ef73 100644 --- a/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.*; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.logging.ESLogger; @@ -300,7 +300,7 @@ public abstract class MetaDataStateFormat { ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); if (files.size() > 0) { // We have some state files but none of them gave us a usable state - throw new ElasticsearchIllegalStateException("Could not find a state file to recover from among " + files); + throw new IllegalStateException("Could not find a state file to recover from among " + files); } return state; } diff --git a/src/main/java/org/elasticsearch/index/IndexException.java b/src/main/java/org/elasticsearch/index/IndexException.java index 0f100dcd4f0..2cd1edcd897 100644 --- a/src/main/java/org/elasticsearch/index/IndexException.java +++ b/src/main/java/org/elasticsearch/index/IndexException.java @@ -35,7 +35,7 @@ public class IndexException extends ElasticsearchException { this(index, msg, null); } - protected IndexException(Index index, String msg, Throwable cause) { + public IndexException(Index index, String msg, Throwable cause) { super(msg, cause); this.index = index; } diff --git a/src/main/java/org/elasticsearch/index/IndexService.java b/src/main/java/org/elasticsearch/index/IndexService.java index 878d8162437..cc9a571cd93 100644 --- a/src/main/java/org/elasticsearch/index/IndexService.java +++ b/src/main/java/org/elasticsearch/index/IndexService.java @@ -24,7 +24,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -279,7 +279,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone * keep it synced. */ if (closed.get()) { - throw new ElasticsearchIllegalStateException("Can't create shard [" + index.name() + "][" + sShardId + "], closed"); + throw new IllegalStateException("Can't create shard [" + index.name() + "][" + sShardId + "], closed"); } final ShardId shardId = new ShardId(index, sShardId); ShardLock lock = null; diff --git a/src/main/java/org/elasticsearch/index/VersionType.java b/src/main/java/org/elasticsearch/index/VersionType.java index 327cd6565db..ad31f4d17d0 100644 --- a/src/main/java/org/elasticsearch/index/VersionType.java +++ b/src/main/java/org/elasticsearch/index/VersionType.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.lucene.uid.Versions; /** @@ -283,7 +283,7 @@ public enum VersionType { } else if ("force".equals(versionType)) { return FORCE; } - throw new ElasticsearchIllegalArgumentException("No version type match [" + versionType + "]"); + throw new IllegalArgumentException("No version type match [" + versionType + "]"); } public static VersionType fromString(String versionType, VersionType defaultVersionType) { @@ -303,6 +303,6 @@ public enum VersionType { } else if (value == 3) { return FORCE; } - throw new ElasticsearchIllegalArgumentException("No version type match [" + value + "]"); + throw new IllegalArgumentException("No version type match [" + value + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/src/main/java/org/elasticsearch/index/analysis/Analysis.java index 7285840f83c..b994d5c2307 100644 --- a/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -58,8 +58,8 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.metadata.IndexMetaData; +import java.lang.IllegalArgumentException; + import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.io.FileSystemUtils; @@ -71,12 +71,8 @@ import org.elasticsearch.index.settings.IndexSettings; import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStreamReader; import java.io.Reader; import java.net.URL; -import java.nio.charset.Charset; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.*; /** @@ -216,7 +212,7 @@ public class Analysis { * Fetches a list of words from the specified settings file. The list should either be available at the key * specified by settingsPrefix or in a file specified by settingsPrefix + _path. * - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException + * @throws IllegalArgumentException * If the word list cannot be found at either key. */ public static List getWordList(Environment env, Settings settings, String settingPrefix) { @@ -237,7 +233,7 @@ public class Analysis { return loadWordList(reader, "#"); } catch (IOException ioe) { String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, ioe.getMessage()); - throw new ElasticsearchIllegalArgumentException(message); + throw new IllegalArgumentException(message); } } @@ -268,7 +264,7 @@ public class Analysis { /** * @return null If no settings set for "settingsPrefix" then return null. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException + * @throws IllegalArgumentException * If the Reader can not be instantiated. */ public static Reader getReaderFromFile(Environment env, Settings settings, String settingPrefix) { @@ -284,7 +280,7 @@ public class Analysis { return FileSystemUtils.newBufferedReader(fileUrl, Charsets.UTF_8); } catch (IOException ioe) { String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, ioe.getMessage()); - throw new ElasticsearchIllegalArgumentException(message); + throw new IllegalArgumentException(message); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java b/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java index 0bd527a2c22..d6c881d2f11 100644 --- a/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java +++ b/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Scopes; @@ -186,12 +186,12 @@ public class AnalysisModule extends AbstractModule { } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("failed to find char filter type [" + charFilterSettings.get("type") + "] for [" + charFilterName + "]", e); + throw new IllegalArgumentException("failed to find char filter type [" + charFilterSettings.get("type") + "] for [" + charFilterName + "]", e); } } if (type == null) { // nothing found, see if its in bindings as a binding name - throw new ElasticsearchIllegalArgumentException("Char Filter [" + charFilterName + "] must have a type associated with it"); + throw new IllegalArgumentException("Char Filter [" + charFilterName + "] must have a type associated with it"); } charFilterBinder.addBinding(charFilterName).toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, type)).in(Scopes.SINGLETON); } @@ -246,11 +246,11 @@ public class AnalysisModule extends AbstractModule { } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token filter type [" + tokenFilterSettings.get("type") + "] for [" + tokenFilterName + "]", e); + throw new IllegalArgumentException("failed to find token filter type [" + tokenFilterSettings.get("type") + "] for [" + tokenFilterName + "]", e); } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("token filter [" + tokenFilterName + "] must have a type associated with it"); + throw new IllegalArgumentException("token filter [" + tokenFilterName + "] must have a type associated with it"); } tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, type)).in(Scopes.SINGLETON); } @@ -305,11 +305,11 @@ public class AnalysisModule extends AbstractModule { } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("failed to find tokenizer type [" + tokenizerSettings.get("type") + "] for [" + tokenizerName + "]", e); + throw new IllegalArgumentException("failed to find tokenizer type [" + tokenizerSettings.get("type") + "] for [" + tokenizerName + "]", e); } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("token filter [" + tokenizerName + "] must have a type associated with it"); + throw new IllegalArgumentException("token filter [" + tokenizerName + "] must have a type associated with it"); } tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, type)).in(Scopes.SINGLETON); } @@ -369,7 +369,7 @@ public class AnalysisModule extends AbstractModule { // we have a tokenizer, use the CustomAnalyzer type = CustomAnalyzerProvider.class; } else { - throw new ElasticsearchIllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]", e); + throw new IllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]", e); } } } @@ -380,7 +380,7 @@ public class AnalysisModule extends AbstractModule { // we have a tokenizer, use the CustomAnalyzer type = CustomAnalyzerProvider.class; } else { - throw new ElasticsearchIllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]"); + throw new IllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]"); } } analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, type)).in(Scopes.SINGLETON); diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java b/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java index 3fe8cbd27e5..055f3993936 100644 --- a/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java +++ b/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis; import com.google.common.collect.ImmutableMap; import org.apache.lucene.analysis.Analyzer; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; @@ -222,7 +222,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable } Analyzer analyzerF = analyzerFactory.get(); if (analyzerF == null) { - throw new ElasticsearchIllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer"); + throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer"); } NamedAnalyzer analyzer; // if we got a named analyzer back, use it... @@ -247,7 +247,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable defaultAnalyzer = analyzers.get("default"); if (defaultAnalyzer == null) { - throw new ElasticsearchIllegalArgumentException("no default analyzer configured"); + throw new IllegalArgumentException("no default analyzer configured"); } defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : analyzers.get("default"); defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : analyzers.get("default"); diff --git a/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java index fe56627554f..93a3e0f21bf 100644 --- a/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java +++ b/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java @@ -19,9 +19,7 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.util.Version; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; diff --git a/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java index 3215bec1d23..82cbd7316f0 100644 --- a/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; /** * @@ -51,7 +51,7 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory { this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase); if (this.words == null) { - throw new ElasticsearchIllegalArgumentException("mising or empty [common_words] or [common_words_path] configuration for common_grams token filter"); + throw new IllegalArgumentException("mising or empty [common_words] or [common_words_path] configuration for common_grams token filter"); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java index 6b14aad567e..d54b4097490 100644 --- a/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java @@ -24,16 +24,14 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenizer; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.metadata.IndexMetaData; +import java.lang.IllegalArgumentException; + import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.settings.IndexSettings; -import java.io.Reader; - import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars; /** @@ -71,7 +69,7 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version. */ if (side == Lucene43EdgeNGramTokenizer.Side.BACK) { - throw new ElasticsearchIllegalArgumentException("side=back is not supported anymore. Please fix your analysis chain or use" + throw new IllegalArgumentException("side=back is not supported anymore. Please fix your analysis chain or use" + " an older compatibility version (<=4.2) but beware that it might cause highlighting bugs." + " To obtain the same behavior as the previous version please use \"edgeNGram\" filter which still supports side=back" + " in combination with a \"keyword\" tokenizer"); diff --git a/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java index 6bdba6617f4..9cfa70f284d 100644 --- a/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.analysis.hunspell.HunspellStemFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -44,12 +44,12 @@ public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory { String locale = settings.get("locale", settings.get("language", settings.get("lang", null))); if (locale == null) { - throw new ElasticsearchIllegalArgumentException("missing [locale | language | lang] configuration for hunspell token filter"); + throw new IllegalArgumentException("missing [locale | language | lang] configuration for hunspell token filter"); } dictionary = hunspellService.getDictionary(locale); if (dictionary == null) { - throw new ElasticsearchIllegalArgumentException(String.format(Locale.ROOT, "Unknown hunspell dictionary for locale [%s]", locale)); + throw new IllegalArgumentException(String.format(Locale.ROOT, "Unknown hunspell dictionary for locale [%s]", locale)); } dedup = settings.getAsBoolean("dedup", true); diff --git a/src/main/java/org/elasticsearch/index/analysis/KeepTypesFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/KeepTypesFilterFactory.java index 47e03fdcbf0..60d907ee9d4 100644 --- a/src/main/java/org/elasticsearch/index/analysis/KeepTypesFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/KeepTypesFilterFactory.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.TypeTokenFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -56,7 +56,7 @@ public class KeepTypesFilterFactory extends AbstractTokenFilterFactory { final String[] arrayKeepTypes = settings.getAsArray(KEEP_TYPES_KEY, null); if ((arrayKeepTypes == null)) { - throw new ElasticsearchIllegalArgumentException("keep_types requires `" + KEEP_TYPES_KEY + "` to be configured"); + throw new IllegalArgumentException("keep_types requires `" + KEEP_TYPES_KEY + "` to be configured"); } this.keepTypes = new HashSet<>(Arrays.asList(arrayKeepTypes)); diff --git a/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java index e2605d8ea23..eda197eb210 100644 --- a/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java @@ -24,7 +24,7 @@ import org.apache.lucene.analysis.miscellaneous.KeepWordFilter; import org.apache.lucene.analysis.miscellaneous.Lucene43KeepWordFilter; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -75,11 +75,11 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory { final String keepWordsPath = settings.get(KEEP_WORDS_PATH_KEY, null); if ((arrayKeepWords == null && keepWordsPath == null) || (arrayKeepWords != null && keepWordsPath != null)) { // we don't allow both or none - throw new ElasticsearchIllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `" + throw new IllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `" + KEEP_WORDS_PATH_KEY + "` to be configured"); } if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) { - throw new ElasticsearchIllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" + throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); } enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true); diff --git a/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java index 73f41cdc305..af5ee8baa1b 100644 --- a/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java @@ -24,7 +24,7 @@ import java.util.Set; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -44,7 +44,7 @@ public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory boolean ignoreCase = settings.getAsBoolean("ignore_case", false); Set rules = Analysis.getWordSet(env, settings, "keywords"); if (rules == null) { - throw new ElasticsearchIllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured"); + throw new IllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured"); } keywordLookup = new CharArraySet(rules, ignoreCase); } diff --git a/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java index aa9868a4fef..1b38f8f36a6 100644 --- a/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java @@ -23,7 +23,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.LengthFilter; import org.apache.lucene.analysis.miscellaneous.Lucene43LengthFilter; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -46,7 +46,7 @@ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory { min = settings.getAsInt("min", 0); max = settings.getAsInt("max", Integer.MAX_VALUE); if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) { - throw new ElasticsearchIllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" + throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); } enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true); diff --git a/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java index 8c08b82a7bd..9a5b26184c8 100644 --- a/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java @@ -24,7 +24,7 @@ import org.apache.lucene.analysis.core.LowerCaseFilter; import org.apache.lucene.analysis.el.GreekLowerCaseFilter; import org.apache.lucene.analysis.ga.IrishLowerCaseFilter; import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -61,7 +61,7 @@ public class LowerCaseTokenFilterFactory extends AbstractTokenFilterFactory { } else if (lang.equalsIgnoreCase("turkish")) { return new TurkishLowerCaseFilter(tokenStream); } else { - throw new ElasticsearchIllegalArgumentException("language [" + lang + "] not support for lower case"); + throw new IllegalArgumentException("language [" + lang + "] not support for lower case"); } } } diff --git a/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java index 3b298e65e56..d5a05155619 100644 --- a/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.charfilter.MappingCharFilter; import org.apache.lucene.analysis.charfilter.NormalizeCharMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -45,7 +45,7 @@ public class MappingCharFilterFactory extends AbstractCharFilterFactory { List rules = Analysis.getWordList(env, settings, "mappings"); if (rules == null) { - throw new ElasticsearchIllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured"); + throw new IllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured"); } NormalizeCharMap.Builder normMapBuilder = new NormalizeCharMap.Builder(); diff --git a/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java index 99da029a98e..8a9eaebf52e 100644 --- a/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java @@ -24,15 +24,14 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenizer; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.metadata.IndexMetaData; +import java.lang.IllegalArgumentException; + import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.settings.IndexSettings; -import java.io.Reader; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.util.Locale; @@ -83,7 +82,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory { characterClass = characterClass.toLowerCase(Locale.ROOT).trim(); CharMatcher matcher = MATCHERS.get(characterClass); if (matcher == null) { - throw new ElasticsearchIllegalArgumentException("Unknown token type: '" + characterClass + "', must be one of " + MATCHERS.keySet()); + throw new IllegalArgumentException("Unknown token type: '" + characterClass + "', must be one of " + MATCHERS.keySet()); } builder.or(matcher); } diff --git a/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java index fb1fda8ac9d..000b2b31d4a 100644 --- a/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java @@ -22,7 +22,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.path.PathHierarchyTokenizer; import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -46,7 +46,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory { if (delimiter == null) { this.delimiter = PathHierarchyTokenizer.DEFAULT_DELIMITER; } else if (delimiter.length() > 1) { - throw new ElasticsearchIllegalArgumentException("delimiter can only be a one char value"); + throw new IllegalArgumentException("delimiter can only be a one char value"); } else { this.delimiter = delimiter.charAt(0); } @@ -55,7 +55,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory { if (replacement == null) { this.replacement = this.delimiter; } else if (replacement.length() > 1) { - throw new ElasticsearchIllegalArgumentException("replacement can only be a one char value"); + throw new IllegalArgumentException("replacement can only be a one char value"); } else { this.replacement = replacement.charAt(0); } diff --git a/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java index 1996aff8c92..3b06a96baf2 100644 --- a/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java +++ b/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java @@ -22,7 +22,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; @@ -57,7 +57,7 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider rules = Analysis.getWordList(env, settings, "rules"); if (rules == null) { - throw new ElasticsearchIllegalArgumentException("stemmer override filter requires either `rules` or `rules_path` to be configured"); + throw new IllegalArgumentException("stemmer override filter requires either `rules` or `rules_path` to be configured"); } StemmerOverrideFilter.Builder builder = new StemmerOverrideFilter.Builder(false); diff --git a/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java index 745044d0019..07fda901e1d 100644 --- a/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java @@ -26,7 +26,7 @@ import org.apache.lucene.analysis.core.StopFilter; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -56,7 +56,7 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory { this.removeTrailing = settings.getAsBoolean("remove_trailing", true); this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase); if (version.onOrAfter(Version.LUCENE_4_4) && settings.get("enable_position_increments") != null) { - throw new ElasticsearchIllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams." + throw new IllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams." + " Please fix your analysis chain or use an older compatibility version (<= 4.3)."); } this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true); diff --git a/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java index 87ee3e9adf2..5ef4de1ea45 100644 --- a/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java @@ -28,11 +28,10 @@ import org.apache.lucene.analysis.synonym.SolrSynonymParser; import org.apache.lucene.analysis.synonym.SynonymFilter; import org.apache.lucene.analysis.synonym.SynonymMap; import org.apache.lucene.analysis.synonym.WordnetSynonymParser; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.io.FastStringReader; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -66,7 +65,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { } else if (settings.get("synonyms_path") != null) { rulesReader = Analysis.getReaderFromFile(env, settings, "synonyms_path"); } else { - throw new ElasticsearchIllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured"); + throw new IllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured"); } this.ignoreCase = settings.getAsBoolean("ignore_case", false); @@ -79,7 +78,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(tokenizerName); } if (tokenizerFactoryFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find tokenizer [" + tokenizerName + "] for synonym token filter"); + throw new IllegalArgumentException("failed to find tokenizer [" + tokenizerName + "] for synonym token filter"); } final TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.create(tokenizerName, ImmutableSettings.builder().put(indexSettings).put(settings).build()); @@ -106,7 +105,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { synonymMap = parser.build(); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to build synonyms", e); + throw new IllegalArgumentException("failed to build synonyms", e); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java index a3d0d4b0fb5..0a9972b3577 100644 --- a/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.miscellaneous.Lucene43TrimFilter; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.TrimFilter; @@ -44,7 +44,7 @@ public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { public TrimTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) { super(index, indexSettings, name, settings); if (version.onOrAfter(Version.LUCENE_4_4_0) && settings.get(UPDATE_OFFSETS_KEY) != null) { - throw new ElasticsearchIllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain or use" + throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain or use" + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); } this.updateOffsets = settings.getAsBoolean("update_offsets", false); diff --git a/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java index 69a37bc0a6b..56d43acdfdb 100644 --- a/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -42,7 +42,7 @@ public class TruncateTokenFilterFactory extends AbstractTokenFilterFactory { super(index, indexSettings, name, settings); this.length = settings.getAsInt("length", -1); if (length <= 0) { - throw new ElasticsearchIllegalArgumentException("length parameter must be provided"); + throw new IllegalArgumentException("length parameter must be provided"); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java index 0d5ceb77a99..4f1513c9fb7 100644 --- a/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis.compound; import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -52,7 +52,7 @@ public abstract class AbstractCompoundWordTokenFilterFactory extends AbstractTok onlyLongestMatch = settings.getAsBoolean("only_longest_match", false); wordList = Analysis.getWordSet(env, settings, "word_list"); if (wordList == null) { - throw new ElasticsearchIllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly"); + throw new IllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly"); } } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java index acea0c2abc8..394e97b7025 100644 --- a/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java @@ -25,7 +25,7 @@ import org.apache.lucene.analysis.compound.Lucene43HyphenationCompoundWordTokenF import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -36,8 +36,6 @@ import org.elasticsearch.index.settings.IndexSettings; import org.xml.sax.InputSource; import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Path; /** * Uses the {@link org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter} to decompound tokens based on hyphenation rules. @@ -55,7 +53,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW String hyphenationPatternsPath = settings.get("hyphenation_patterns_path", null); if (hyphenationPatternsPath == null) { - throw new ElasticsearchIllegalArgumentException("hyphenation_patterns_path is a required setting."); + throw new IllegalArgumentException("hyphenation_patterns_path is a required setting."); } URL hyphenationPatternsFile = env.resolveConfig(hyphenationPatternsPath); @@ -63,7 +61,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW try { hyphenationTree = HyphenationCompoundWordTokenFilter.getHyphenationTree(new InputSource(hyphenationPatternsFile.toExternalForm())); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("Exception while reading hyphenation_patterns_path: " + e.getMessage()); + throw new IllegalArgumentException("Exception while reading hyphenation_patterns_path: " + e.getMessage()); } } diff --git a/src/main/java/org/elasticsearch/index/codec/CodecService.java b/src/main/java/org/elasticsearch/index/codec/CodecService.java index cd1940eb8da..783f304346f 100644 --- a/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -24,7 +24,7 @@ import com.google.common.collect.ImmutableMap; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene50.Lucene50Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ImmutableSettings; @@ -86,10 +86,10 @@ public class CodecService extends AbstractIndexComponent { return mapperService; } - public Codec codec(String name) throws ElasticsearchIllegalArgumentException { + public Codec codec(String name) throws IllegalArgumentException { Codec codec = codecs.get(name); if (codec == null) { - throw new ElasticsearchIllegalArgumentException("failed to find codec [" + name + "]"); + throw new IllegalArgumentException("failed to find codec [" + name + "]"); } return codec; } diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java index 01398aa74ee..3409138a4d8 100644 --- a/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Preconditions; @@ -105,7 +105,7 @@ public abstract class Engine implements Closeable { /** * Tries to extract a segment reader from the given index reader. - * If no SegmentReader can be extracted an {@link org.elasticsearch.ElasticsearchIllegalStateException} is thrown. + * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown. */ protected static SegmentReader segmentReader(LeafReader reader) { if (reader instanceof SegmentReader) { @@ -115,7 +115,7 @@ public abstract class Engine implements Closeable { return segmentReader(FilterLeafReader.unwrap(fReader)); } // hard fail - we can't get a SegmentReader - throw new ElasticsearchIllegalStateException("Can not extract segment reader from given index reader [" + reader + "]"); + throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]"); } /** diff --git a/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java b/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java index 53bff1d8ef6..d43a8268de3 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java @@ -23,9 +23,8 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.store.Store; import java.io.IOException; @@ -54,13 +53,13 @@ public class EngineSearcher extends Engine.Searcher { * when it might happen though: when the request and the Reaper thread would both try to release it in a very short amount * of time, this is why we only log a warning instead of throwing an exception. */ - logger.warn("Searcher was released twice", new ElasticsearchIllegalStateException("Double release")); + logger.warn("Searcher was released twice", new IllegalStateException("Double release")); return; } try { manager.release(this.searcher()); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Cannot close", e); + throw new IllegalStateException("Cannot close", e); } catch (AlreadyClosedException e) { /* this one can happen if we already closed the * underlying store / directory and we call into the diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 35c578ac4a7..63885c4c539 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.fielddata; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; @@ -230,7 +230,7 @@ public class IndexFieldDataService extends AbstractIndexComponent { final FieldMapper.Names fieldNames = mapper.names(); final FieldDataType type = mapper.fieldDataType(); if (type == null) { - throw new ElasticsearchIllegalArgumentException("found no fielddata type for field [" + fieldNames.fullName() + "]"); + throw new IllegalArgumentException("found no fielddata type for field [" + fieldNames.fullName() + "]"); } final boolean docValues = mapper.hasDocValues(); final String key = fieldNames.indexName(); @@ -259,7 +259,7 @@ public class IndexFieldDataService extends AbstractIndexComponent { builder = buildersByType.get(type.getType()); } if (builder == null) { - throw new ElasticsearchIllegalArgumentException("failed to find field data builder for field " + fieldNames.fullName() + ", and type " + type.getType()); + throw new IllegalArgumentException("failed to find field data builder for field " + fieldNames.fullName() + ", and type " + type.getType()); } IndexFieldDataCache cache = fieldDataCaches.get(fieldNames.indexName()); @@ -272,7 +272,7 @@ public class IndexFieldDataService extends AbstractIndexComponent { } else if ("none".equals(cacheType)){ cache = new IndexFieldDataCache.None(); } else { - throw new ElasticsearchIllegalArgumentException("cache type not supported [" + cacheType + "] for field [" + fieldNames.fullName() + "]"); + throw new IllegalArgumentException("cache type not supported [" + cacheType + "] for field [" + fieldNames.fullName() + "]"); } fieldDataCaches.put(fieldNames.indexName(), cache); } diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java index cd20656fa51..fb0a1ab4385 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; @@ -41,7 +41,7 @@ public interface IndexNumericFieldData extends IndexFieldData { @@ -61,8 +61,8 @@ public final class DisabledIndexFieldData extends AbstractIndexFieldData filter = fdSettings.getGroups("filter"); if (filter != null && !filter.isEmpty()) { - throw new ElasticsearchIllegalArgumentException("Doc values field data doesn't support filters [" + fieldNames.name() + "]"); + throw new IllegalArgumentException("Doc values field data doesn't support filters [" + fieldNames.name() + "]"); } if (BINARY_INDEX_FIELD_NAMES.contains(fieldNames.indexName())) { diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java index d308f98e3f9..62a70b101d0 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java @@ -21,8 +21,8 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.DocValues; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -44,7 +44,7 @@ public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData impl @Override public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { - throw new ElasticsearchIllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance"); + throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance"); } @Override @@ -52,7 +52,7 @@ public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData impl try { return new GeoPointBinaryDVAtomicFieldData(DocValues.getBinary(context.reader(), fieldNames.indexName())); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Cannot load doc values", e); + throw new IllegalStateException("Cannot load doc values", e); } } diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java index 3aad56e27cd..5c400fb0d73 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -57,7 +57,7 @@ public class NumericDVIndexFieldData extends DocValuesIndexFieldData implements final Bits docsWithField = DocValues.getDocsWithField(reader, field); return DocValues.singleton(values, docsWithField); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Cannot load doc values", e); + throw new IllegalStateException("Cannot load doc values", e); } } diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java index 98cc061f05a..7a4201d45cf 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java @@ -39,7 +39,7 @@ import org.apache.lucene.util.PagedBytes; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -505,7 +505,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData values = searchLookup.source().extractRawValues(field); @@ -367,7 +367,7 @@ public class ShardGetService extends AbstractIndexShardComponent { if (fieldMapper == null) { if (docMapper.objectMappers().get(field) != null) { // Only fail if we know it is a object field, missing paths / fields shouldn't fail. - throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field"); + throw new IllegalArgumentException("field [" + field + "] isn't a leaf field"); } } else if (!fieldMapper.fieldType().stored() && !fieldMapper.isGenerated()) { if (searchLookup == null) { diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 51b8119e59c..5c9cea416ec 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BitDocIdSet; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; @@ -562,7 +562,7 @@ public class DocumentMapper implements ToXContent { ctx = (Map) executable.unwrap(ctx); return (Map) ctx.get("_source"); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to execute script", e); + throw new IllegalArgumentException("failed to execute script", e); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 2cd958fce7b..9520bb117f1 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -25,7 +25,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.joda.FormatDateTimeFormatter; @@ -576,7 +575,7 @@ class DocumentParser implements Closeable { mapper = builder.build(builderContext); } else { // TODO how do we identify dynamically that its a binary value? - throw new ElasticsearchIllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); + throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); } } @@ -652,7 +651,7 @@ class DocumentParser implements Closeable { parent = context.docMapper().objectMappers().get(parentPath); } if (parent == null) { - throw new ElasticsearchIllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]"); + throw new IllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]"); } update = parent.mappingUpdate(update); objectPath = parentPath; diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 236083f1e77..b820aad9820 100755 --- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -39,7 +39,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; @@ -717,7 +717,7 @@ public class MapperService extends AbstractIndexComponent { final Mapper.TypeParser.ParserContext parserContext = documentMapperParser().parserContext(); Mapper.TypeParser typeParser = parserContext.typeParser(type); if (typeParser == null) { - throw new ElasticsearchIllegalArgumentException("No mapper found for type [" + type + "]"); + throw new IllegalArgumentException("No mapper found for type [" + type + "]"); } final Mapper.Builder builder = typeParser.parse("__anonymous_" + type, ImmutableMap.of(), parserContext); final BuilderContext builderContext = new BuilderContext(indexSettings, new ContentPath(1)); diff --git a/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index 6530af5e0c6..3ddb3276b24 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -27,8 +27,8 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.all.AllEntries; @@ -110,7 +110,7 @@ public abstract class ParseContext { if (keyedFields == null) { keyedFields = new ObjectObjectOpenHashMap<>(); } else if (keyedFields.containsKey(key)) { - throw new ElasticsearchIllegalStateException("Only one field can be stored per key"); + throw new IllegalStateException("Only one field can be stored per key"); } keyedFields.put(key, field); add(field); @@ -790,7 +790,7 @@ public abstract class ParseContext { } public Object externalValue() { - throw new ElasticsearchIllegalStateException("External value is not set"); + throw new IllegalStateException("External value is not set"); } /** @@ -804,7 +804,7 @@ public abstract class ParseContext { } if (!clazz.isInstance(externalValue())) { - throw new ElasticsearchIllegalArgumentException("illegal external value class [" + throw new IllegalArgumentException("illegal external value class [" + externalValue().getClass().getName() + "]. Should be " + clazz.getName()); } return clazz.cast(externalValue()); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index 40dc4a77313..7e97c86b4b6 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -40,8 +40,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Nullable; @@ -767,7 +765,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { case DOCS: return TypeParsers.INDEX_OPTIONS_DOCS; default: - throw new ElasticsearchIllegalArgumentException("Unknown IndexOptions [" + indexOption + "]"); + throw new IllegalArgumentException("Unknown IndexOptions [" + indexOption + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index d3c5eae809f..ae914de0597 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -94,7 +94,7 @@ public class BooleanFieldMapper extends AbstractFieldMapper { @Override public Builder tokenized(boolean tokenized) { if (tokenized) { - throw new ElasticsearchIllegalArgumentException("bool field can't be tokenized"); + throw new IllegalArgumentException("bool field can't be tokenized"); } return super.tokenized(tokenized); } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 5c54934ad5b..64b9bbde985 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -30,7 +30,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -296,7 +296,7 @@ public class ByteFieldMapper extends NumberFieldMapper { } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index baba0f42f87..0d6e4f4bee3 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -28,8 +28,8 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -137,7 +137,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { public Builder maxInputLength(int maxInputLength) { if (maxInputLength <= 0) { - throw new ElasticsearchIllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]"); + throw new IllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]"); } this.maxInputLength = maxInputLength; return this; @@ -215,7 +215,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) { NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]"); + throw new IllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]"); } return analyzer; } @@ -257,7 +257,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { public synchronized PostingsFormat postingsFormat(PostingsFormat in) { if (in instanceof Completion090PostingsFormat) { - throw new ElasticsearchIllegalStateException("Double wrapping of " + Completion090PostingsFormat.class); + throw new IllegalStateException("Double wrapping of " + Completion090PostingsFormat.class); } if (postingsFormat == null) { postingsFormat = new Completion090PostingsFormat(in, analyzingSuggestLookupProvider); @@ -286,7 +286,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); if (!ALLOWED_CONTENT_FIELD_NAMES.contains(currentFieldName)) { - throw new ElasticsearchIllegalArgumentException("Unknown field name[" + currentFieldName + "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES); + throw new IllegalArgumentException("Unknown field name[" + currentFieldName + "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES); } } else if (Fields.CONTEXT.equals(currentFieldName)) { SortedMap configs = Maps.newTreeMap(); @@ -335,7 +335,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { try { weightValue = Long.parseLong(parser.text()); } catch (NumberFormatException e) { - throw new ElasticsearchIllegalArgumentException("Weight must be a string representing a numeric value, but was [" + parser.text() + "]"); + throw new IllegalArgumentException("Weight must be a string representing a numeric value, but was [" + parser.text() + "]"); } weight = weightValue.longValue(); // always parse a long to make sure we don't get overflow checkWeight(weight); @@ -344,7 +344,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { if (Fields.CONTENT_FIELD_NAME_WEIGHT.equals(currentFieldName)) { NumberType numberType = parser.numberType(); if (NumberType.LONG != numberType && NumberType.INT != numberType) { - throw new ElasticsearchIllegalArgumentException("Weight must be an integer, but was [" + parser.numberValue() + "]"); + throw new IllegalArgumentException("Weight must be an integer, but was [" + parser.numberValue() + "]"); } weight = parser.longValue(); // always parse a long to make sure we don't get overflow checkWeight(weight); @@ -387,7 +387,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { private void checkWeight(long weight) { if (weight < 0 || weight > Integer.MAX_VALUE) { - throw new ElasticsearchIllegalArgumentException("Weight must be in the interval [0..2147483647], but was [" + weight + "]"); + throw new IllegalArgumentException("Weight must be in the interval [0..2147483647], but was [" + weight + "]"); } } @@ -415,7 +415,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { } for (int i = 0; i < input.length(); i++) { if (isReservedChar(input.charAt(i))) { - throw new ElasticsearchIllegalArgumentException("Illegal input [" + originalInput + "] UTF-16 codepoint [0x" + throw new IllegalArgumentException("Illegal input [" + originalInput + "] UTF-16 codepoint [0x" + Integer.toHexString((int) input.charAt(i)).toUpperCase(Locale.ROOT) + "] at position " + i + " is a reserved character"); } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index b10c4aff1da..75c1e8ac483 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -31,7 +31,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.ToStringUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -458,7 +458,7 @@ public class DateFieldMapper extends NumberFieldMapper { } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 7381855fb1a..490455431e8 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -34,7 +34,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -290,7 +290,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index b87da803a7e..d1259d24598 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -34,7 +34,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -296,7 +296,7 @@ public class FloatFieldMapper extends NumberFieldMapper { } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index c6ebc8ed640..05015e25793 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -290,7 +290,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index e08f84c2192..afaefb4a31e 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -280,7 +280,7 @@ public class LongFieldMapper extends NumberFieldMapper { } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 280f139d459..edb3a12291a 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -39,7 +39,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -238,7 +238,7 @@ public abstract class NumberFieldMapper extends AbstractFieldM RuntimeException e = null; try { innerParseCreateField(context, fields); - } catch (IllegalArgumentException | ElasticsearchIllegalArgumentException e1) { + } catch (IllegalArgumentException e1) { e = e1; } catch (MapperParsingException e2) { e = e2; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index 9d0b960c040..99478906195 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -296,7 +296,7 @@ public class ShortFieldMapper extends NumberFieldMapper { } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index fdd994fa671..3d9d871f3a8 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -26,7 +26,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -339,7 +339,7 @@ public class StringFieldMapper extends AbstractFieldMapper implements Al } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index a17941802b4..634b3c5d943 100644 --- a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -28,8 +28,8 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -301,7 +301,7 @@ public class GeoPointFieldMapper extends AbstractFieldMapper implement public static final Encoding of(int numBytesPerValue) { final Encoding instance = INSTANCES[numBytesPerValue]; if (instance == null) { - throw new ElasticsearchIllegalStateException("No encoding for " + numBytesPerValue + " bytes per value"); + throw new IllegalStateException("No encoding for " + numBytesPerValue + " bytes per value"); } return instance; } @@ -593,12 +593,12 @@ public class GeoPointFieldMapper extends AbstractFieldMapper implement if (validateLat) { if (point.lat() > 90.0 || point.lat() < -90.0) { - throw new ElasticsearchIllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name()); + throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name()); } } if (validateLon) { if (point.lon() > 180.0 || point.lon() < -180) { - throw new ElasticsearchIllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name()); + throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name()); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index d54cb4e2e6b..b34d4fea36d 100644 --- a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -29,7 +29,7 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; @@ -168,7 +168,7 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper { .QUADTREE_LEVELS, false)); } } else { - throw new ElasticsearchIllegalArgumentException("Unknown prefix tree type [" + tree + "]"); + throw new IllegalArgumentException("Unknown prefix tree type [" + tree + "]"); } return new GeoShapeFieldMapper(names, prefixTree, strategyName, distanceErrorPct, orientation, fieldType, @@ -384,7 +384,7 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper { if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) { return termStrategy; } - throw new ElasticsearchIllegalArgumentException("Unknown prefix tree strategy [" + strategyName + "]"); + throw new IllegalArgumentException("Unknown prefix tree strategy [" + strategyName + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 46cef4cfeb5..565f1dcc00b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -26,7 +26,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -110,7 +110,7 @@ public class FieldNamesFieldMapper extends AbstractFieldMapper implement @Override public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { if (parserContext.indexVersionCreated().before(Version.V_1_3_0)) { - throw new ElasticsearchIllegalArgumentException("type="+CONTENT_TYPE+" is not supported on indices created before version 1.3.0. Is your cluster running multiple datanode versions?"); + throw new IllegalArgumentException("type="+CONTENT_TYPE+" is not supported on indices created before version 1.3.0. Is your cluster running multiple datanode versions?"); } FieldNamesFieldMapper.Builder builder = fieldNames(); diff --git a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 61188cd4bbf..1abf2d38c66 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -83,22 +83,22 @@ public class IpFieldMapper extends NumberFieldMapper { private static final Pattern pattern = Pattern.compile("\\."); - public static long ipToLong(String ip) throws ElasticsearchIllegalArgumentException { + public static long ipToLong(String ip) throws IllegalArgumentException { try { if (!InetAddresses.isInetAddress(ip)) { - throw new ElasticsearchIllegalArgumentException("failed to parse ip [" + ip + "], not a valid ip address"); + throw new IllegalArgumentException("failed to parse ip [" + ip + "], not a valid ip address"); } String[] octets = pattern.split(ip); if (octets.length != 4) { - throw new ElasticsearchIllegalArgumentException("failed to parse ip [" + ip + "], not a valid ipv4 address (4 dots)"); + throw new IllegalArgumentException("failed to parse ip [" + ip + "], not a valid ipv4 address (4 dots)"); } return (Long.parseLong(octets[0]) << 24) + (Integer.parseInt(octets[1]) << 16) + (Integer.parseInt(octets[2]) << 8) + Integer.parseInt(octets[3]); } catch (Exception e) { - if (e instanceof ElasticsearchIllegalArgumentException) { - throw (ElasticsearchIllegalArgumentException) e; + if (e instanceof IllegalArgumentException) { + throw (IllegalArgumentException) e; } - throw new ElasticsearchIllegalArgumentException("failed to parse ip [" + ip + "]", e); + throw new IllegalArgumentException("failed to parse ip [" + ip + "]", e); } } @@ -235,7 +235,7 @@ public class IpFieldMapper extends NumberFieldMapper { long iSim; try { iSim = ipToLong(fuzziness.asString()); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { iSim = fuzziness.asLong(); } return NumericRangeQuery.newLongRange(names.indexName(), precisionStep, diff --git a/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java index 08888e9aea2..2edd773a81c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.mapper.object; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.mapper.ContentPath; @@ -46,7 +46,7 @@ public class DynamicTemplate { } else if ("regex".equals(value)) { return REGEX; } - throw new ElasticsearchIllegalArgumentException("No matching pattern matched on [" + value + "]"); + throw new IllegalArgumentException("No matching pattern matched on [" + value + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index 67c3636be7d..9d591156b99 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -241,7 +241,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea builder.pathType(parsePathType(name, fieldNode.toString())); return true; } - return false; + return false; } protected static void parseNested(String name, Map node, ObjectMapper.Builder builder) { diff --git a/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java index c3aa5e16606..6ea8d71c4f2 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.merge.policy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.TieredMergePolicy; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.store.Store; @@ -46,11 +46,11 @@ public abstract class AbstractMergePolicyProvider extend try { double value = Double.parseDouble(noCFSRatio); if (value < 0.0 || value > 1.0) { - throw new ElasticsearchIllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]"); + throw new IllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]"); } return value; } catch (NumberFormatException ex) { - throw new ElasticsearchIllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex); + throw new IllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex); } } } diff --git a/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java index aaf36915577..57f444e4eab 100644 --- a/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -74,13 +74,13 @@ public class BoostingQueryBuilder extends BaseQueryBuilder implements BoostableQ @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { if (positiveQuery == null) { - throw new ElasticsearchIllegalArgumentException("boosting query requires positive query to be set"); + throw new IllegalArgumentException("boosting query requires positive query to be set"); } if (negativeQuery == null) { - throw new ElasticsearchIllegalArgumentException("boosting query requires negative query to be set"); + throw new IllegalArgumentException("boosting query requires negative query to be set"); } if (negativeBoost == -1) { - throw new ElasticsearchIllegalArgumentException("boosting query requires negativeBoost to be set"); + throw new IllegalArgumentException("boosting query requires negativeBoost to be set"); } builder.startObject(BoostingQueryParser.NAME); builder.field("positive"); diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index 16cb0bf7e57..62b73012fb6 100644 --- a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -73,10 +73,10 @@ public class CommonTermsQueryBuilder extends BaseQueryBuilder implements Boostab */ public CommonTermsQueryBuilder(String name, Object text) { if (name == null) { - throw new ElasticsearchIllegalArgumentException("Field name must not be null"); + throw new IllegalArgumentException("Field name must not be null"); } if (text == null) { - throw new ElasticsearchIllegalArgumentException("Query must not be null"); + throw new IllegalArgumentException("Query must not be null"); } this.text = text; this.name = name; diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java index 46997ffb7fd..eb2038e8a55 100644 --- a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; @@ -187,7 +187,7 @@ public class CommonTermsQueryParser implements QueryParser { } else { analyzer = parseContext.mapperService().analysisService().analyzer(queryAnalyzer); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("No analyzer found for [" + queryAnalyzer + "]"); + throw new IllegalArgumentException("No analyzer found for [" + queryAnalyzer + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java index c2133f10d77..3083056cc42 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -167,13 +167,13 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { protected void doXContent(XContentBuilder builder, Params params) throws IOException { // check values if(Double.isNaN(box[TOP])) { - throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires top latitude to be set"); + throw new IllegalArgumentException("geo_bounding_box requires top latitude to be set"); } else if(Double.isNaN(box[BOTTOM])) { - throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires bottom latitude to be set"); + throw new IllegalArgumentException("geo_bounding_box requires bottom latitude to be set"); } else if(Double.isNaN(box[RIGHT])) { - throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires right longitude to be set"); + throw new IllegalArgumentException("geo_bounding_box requires right longitude to be set"); } else if(Double.isNaN(box[LEFT])) { - throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires left longitude to be set"); + throw new IllegalArgumentException("geo_bounding_box requires left longitude to be set"); } builder.startObject(GeoBoundingBoxFilterParser.NAME); diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java index ac732bc99aa..b26eaee2848 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java @@ -28,7 +28,7 @@ import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.ShapeRelation; @@ -191,7 +191,7 @@ public class GeoShapeQueryParser implements QueryParser { case WITHIN: return new SpatialArgs(SpatialOperation.IsWithin, shape.build()); default: - throw new ElasticsearchIllegalArgumentException(""); + throw new IllegalArgumentException(""); } } diff --git a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java index 9e69bc25a89..18d6ffead1c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java +++ b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.QueryCachingPolicy; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -78,7 +78,7 @@ public class GeohashCellFilter { */ public static Filter create(QueryParseContext context, GeoPointFieldMapper fieldMapper, String geohash, @Nullable List geohashes) { if (fieldMapper.geoHashStringMapper() == null) { - throw new ElasticsearchIllegalArgumentException("geohash filter needs geohash_prefix to be enabled"); + throw new IllegalArgumentException("geohash filter needs geohash_prefix to be enabled"); } StringFieldMapper geoHashMapper = fieldMapper.geoHashStringMapper(); diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index ea814e6a15f..9c964d0bce5 100644 --- a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; @@ -383,7 +383,7 @@ public class MoreLikeThisQueryBuilder extends BaseQueryBuilder implements Boosta builder.endArray(); } if (this.docs.isEmpty()) { - throw new ElasticsearchIllegalArgumentException("more_like_this requires '" + likeFieldName + "' to be provided"); + throw new IllegalArgumentException("more_like_this requires '" + likeFieldName + "' to be provided"); } else { builder.field(likeFieldName, docs); } diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java index 9ef53961a9e..9325d0f72f8 100644 --- a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; @@ -173,14 +173,14 @@ public class MoreLikeThisQueryParser implements QueryParser { } else if (Fields.DOCUMENT_IDS.match(currentFieldName, parseContext.parseFlags())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (!token.isValue()) { - throw new ElasticsearchIllegalArgumentException("ids array element should only contain ids"); + throw new IllegalArgumentException("ids array element should only contain ids"); } likeItems.add(newTermVectorsRequest().id(parser.text())); } } else if (Fields.DOCUMENTS.match(currentFieldName, parseContext.parseFlags())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("docs array element should include an object"); + throw new IllegalArgumentException("docs array element should include an object"); } likeItems.add(parseDocument(parser)); } @@ -313,7 +313,7 @@ public class MoreLikeThisQueryParser implements QueryParser { } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) { items.add(parseDocument(parser)); } else { - throw new ElasticsearchIllegalArgumentException("Content of 'like' parameter should either be a string or an object"); + throw new IllegalArgumentException("Content of 'like' parameter should either be a string or an object"); } } @@ -331,7 +331,7 @@ public class MoreLikeThisQueryParser implements QueryParser { final String fieldName = it.next(); if (!Analysis.generatesCharacterTokenStream(analyzer, fieldName)) { if (failOnUnsupportedField) { - throw new ElasticsearchIllegalArgumentException("more_like_this doesn't support binary/numeric fields: [" + fieldName + "]"); + throw new IllegalArgumentException("more_like_this doesn't support binary/numeric fields: [" + fieldName + "]"); } else { it.remove(); } diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFlag.java b/src/main/java/org/elasticsearch/index/query/RegexpFlag.java index 454d0264b67..68c024b032a 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFlag.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFlag.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.query; import java.util.Locale; import org.apache.lucene.util.automaton.RegExp; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; /** @@ -128,7 +128,7 @@ public enum RegexpFlag { } magic |= flag.value(); } catch (IllegalArgumentException iae) { - throw new ElasticsearchIllegalArgumentException("Unknown regexp flag [" + s + "]"); + throw new IllegalArgumentException("Unknown regexp flag [" + s + "]"); } } return magic; diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java index b52b6c7544e..e821fab8e0a 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.DocValuesDocIdSet; import org.apache.lucene.search.Filter; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; @@ -197,7 +197,7 @@ public class ScriptFilterParser implements FilterParser { if (val instanceof Number) { return ((Number) val).longValue() != 0; } - throw new ElasticsearchIllegalArgumentException("Can't handle type [" + val + "] in script filter"); + throw new IllegalArgumentException("Can't handle type [" + val + "] in script filter"); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java index 54c6291951d..7fe973ab8c2 100644 --- a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java +++ b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import java.util.Locale; @@ -72,7 +72,7 @@ public enum SimpleQueryStringFlag { magic |= flag.value(); } } catch (IllegalArgumentException iae) { - throw new ElasticsearchIllegalArgumentException("Unknown " + SimpleQueryStringParser.NAME + " flag [" + s + "]"); + throw new IllegalArgumentException("Unknown " + SimpleQueryStringParser.NAME + " flag [" + s + "]"); } } return magic; diff --git a/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java index 286fab968eb..f99253f1bb6 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -79,10 +79,10 @@ public class SpanNearQueryBuilder extends BaseQueryBuilder implements SpanQueryB @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { if (clauses.isEmpty()) { - throw new ElasticsearchIllegalArgumentException("Must have at least one clause when building a spanNear query"); + throw new IllegalArgumentException("Must have at least one clause when building a spanNear query"); } if (slop == null) { - throw new ElasticsearchIllegalArgumentException("Must set the slop when building a spanNear query"); + throw new IllegalArgumentException("Must set the slop when building a spanNear query"); } builder.startObject(SpanNearQueryParser.NAME); builder.startArray("clauses"); diff --git a/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java index cb14dae11d2..bd69ac6dca1 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -87,14 +87,14 @@ public class SpanNotQueryBuilder extends BaseQueryBuilder implements SpanQueryBu @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { if (include == null) { - throw new ElasticsearchIllegalArgumentException("Must specify include when using spanNot query"); + throw new IllegalArgumentException("Must specify include when using spanNot query"); } if (exclude == null) { - throw new ElasticsearchIllegalArgumentException("Must specify exclude when using spanNot query"); + throw new IllegalArgumentException("Must specify exclude when using spanNot query"); } if (dist != null && (pre != null || post != null)) { - throw new ElasticsearchIllegalArgumentException("spanNot can either use [dist] or [pre] & [post] (or none)"); + throw new IllegalArgumentException("spanNot can either use [dist] or [pre] & [post] (or none)"); } builder.startObject(SpanNotQueryParser.NAME); diff --git a/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java index f780ed8b05d..150a3ab6f60 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -58,7 +58,7 @@ public class SpanOrQueryBuilder extends BaseQueryBuilder implements SpanQueryBui @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { if (clauses.isEmpty()) { - throw new ElasticsearchIllegalArgumentException("Must have at least one clause when building a spanOr query"); + throw new IllegalArgumentException("Must have at least one clause when building a spanOr query"); } builder.startObject(SpanOrQueryParser.NAME); builder.startArray("clauses"); diff --git a/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java index 176f97f4fff..ce133743fb7 100644 --- a/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.ScriptService; @@ -71,7 +71,7 @@ public class TemplateQueryBuilder extends BaseQueryBuilder { fieldname = TemplateQueryParser.QUERY; break; default: - throw new ElasticsearchIllegalArgumentException("Unknown template type " + templateType); + throw new IllegalArgumentException("Unknown template type " + templateType); } builder.field(fieldname, template); builder.field(TemplateQueryParser.PARAMS, vars); diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java index ce9f5231949..8c3e8cc8fc9 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query.functionscore; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.MultiValueMode; @@ -48,7 +48,7 @@ public abstract class DecayFunctionBuilder extends ScoreFunctionBuilder { public DecayFunctionBuilder setDecay(double decay) { if (decay <= 0 || decay >= 1.0) { - throw new ElasticsearchIllegalStateException("scale weight parameter must be in range 0..1!"); + throw new IllegalStateException("scale weight parameter must be in range 0..1!"); } this.decay = decay; return this; diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java index 9129a33d064..0c9a779fc7d 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.query.functionscore; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoDistance; @@ -428,16 +428,16 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser { super(CombineFunction.MULT); this.mode = mode; if (userSuppiedScale <= 0.0) { - throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + " : scale must be > 0.0."); + throw new IllegalArgumentException(FunctionScoreQueryParser.NAME + " : scale must be > 0.0."); } if (decay <= 0.0 || decay >= 1.0) { - throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + throw new IllegalArgumentException(FunctionScoreQueryParser.NAME + " : decay must be in the range [0..1]."); } this.scale = func.processScale(userSuppiedScale, decay); this.func = func; if (offset < 0.0d) { - throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + " : offset must be > 0.0"); + throw new IllegalArgumentException(FunctionScoreQueryParser.NAME + " : offset must be > 0.0"); } this.offset = offset; } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java index 53804abfe6c..8c6897d75b1 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java @@ -19,9 +19,8 @@ package org.elasticsearch.index.query.functionscore; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.lucene.search.function.CombineFunction; -import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.BaseQueryBuilder; import org.elasticsearch.index.query.BoostableQueryBuilder; @@ -99,7 +98,7 @@ public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements Boost */ public FunctionScoreQueryBuilder(ScoreFunctionBuilder scoreFunctionBuilder) { if (scoreFunctionBuilder == null) { - throw new ElasticsearchIllegalArgumentException("function_score: function must not be null"); + throw new IllegalArgumentException("function_score: function must not be null"); } queryBuilder = null; filterBuilder = null; @@ -115,7 +114,7 @@ public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements Boost */ public FunctionScoreQueryBuilder add(FilterBuilder filter, ScoreFunctionBuilder scoreFunctionBuilder) { if (scoreFunctionBuilder == null) { - throw new ElasticsearchIllegalArgumentException("function_score: function must not be null"); + throw new IllegalArgumentException("function_score: function must not be null"); } this.filters.add(filter); this.scoreFunctions.add(scoreFunctionBuilder); @@ -129,7 +128,7 @@ public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements Boost */ public FunctionScoreQueryBuilder add(ScoreFunctionBuilder scoreFunctionBuilder) { if (scoreFunctionBuilder == null) { - throw new ElasticsearchIllegalArgumentException("function_score: function must not be null"); + throw new IllegalArgumentException("function_score: function must not be null"); } this.filters.add(null); this.scoreFunctions.add(scoreFunctionBuilder); diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java index 286b32c7f70..352ea1127de 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query.functionscore.factor; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.lucene.search.function.BoostScoreFunction; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -58,7 +58,7 @@ public class FactorBuilder extends ScoreFunctionBuilder { @Override public ScoreFunctionBuilder setWeight(float weight) { - throw new ElasticsearchIllegalArgumentException(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE); + throw new IllegalArgumentException(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java b/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java index 4dfba5fb3ec..cf8f629d6e7 100644 --- a/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java +++ b/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; @@ -95,7 +95,7 @@ public class InnerHitsQueryParserHelper { scriptFieldsParseElement.parse(parser, subSearchContext); break; default: - throw new ElasticsearchIllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); + throw new IllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { switch (fieldName) { @@ -115,7 +115,7 @@ public class InnerHitsQueryParserHelper { } break; default: - throw new ElasticsearchIllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); + throw new IllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); } } else if (token.isValue()) { switch (fieldName) { @@ -139,7 +139,7 @@ public class InnerHitsQueryParserHelper { subSearchContext.fieldNames().add(parser.text()); break; default: - throw new ElasticsearchIllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); + throw new IllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); } } } diff --git a/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java b/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java index 86b9e371784..2a0efa8aadb 100644 --- a/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java +++ b/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java @@ -19,16 +19,10 @@ package org.elasticsearch.index.query.support; -import com.google.common.collect.ImmutableList; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.MultiTermQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; + +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.query.QueryParseContext; /** * @@ -89,7 +83,7 @@ public final class QueryParsers { int size = Integer.parseInt(rewriteMethod.substring("topTerms".length())); return new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(size); } - throw new ElasticsearchIllegalArgumentException("Failed to parse rewrite_method [" + rewriteMethod + "]"); + throw new IllegalArgumentException("Failed to parse rewrite_method [" + rewriteMethod + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/src/main/java/org/elasticsearch/index/search/MatchQuery.java index 730719739cd..baecbf6f265 100644 --- a/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -25,8 +25,8 @@ import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.*; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.util.QueryBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.lucene.search.Queries; @@ -156,7 +156,7 @@ public class MatchQuery { } else { analyzer = parseContext.mapperService().analysisService().analyzer(this.analyzer); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("No analyzer found for [" + this.analyzer + "]"); + throw new IllegalArgumentException("No analyzer found for [" + this.analyzer + "]"); } } return analyzer; @@ -204,7 +204,7 @@ public class MatchQuery { query = builder.createPhrasePrefixQuery(field, value.toString(), phraseSlop, maxExpansions); break; default: - throw new ElasticsearchIllegalStateException("No type found for [" + type + "]"); + throw new IllegalStateException("No type found for [" + type + "]"); } if (query == null) { diff --git a/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 0fdf44add48..2d315c214fc 100644 --- a/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -27,7 +27,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.FieldMapper; @@ -83,7 +83,7 @@ public class MultiMatchQuery extends MatchQuery { queryBuilder = new CrossFieldsQueryBuilder(tieBreaker); break; default: - throw new ElasticsearchIllegalStateException("No such type: " + type); + throw new IllegalStateException("No such type: " + type); } final List queries = queryBuilder.buildGroupedQueries(type, fieldNames, value, minimumShouldMatch); return queryBuilder.conbineGrouped(queries); diff --git a/src/main/java/org/elasticsearch/index/search/child/ScoreType.java b/src/main/java/org/elasticsearch/index/search/child/ScoreType.java index e0b84a14274..736744f7de2 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ScoreType.java +++ b/src/main/java/org/elasticsearch/index/search/child/ScoreType.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.search.child; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; /** * Defines how scores from child documents are mapped into the parent document. @@ -66,7 +66,7 @@ public enum ScoreType { } else if ("total".equals(type)) { // This name is consistent with: ScoreMode.Total return SUM; } - throw new ElasticsearchIllegalArgumentException("No score type for child query [" + type + "] found"); + throw new IllegalArgumentException("No score type for child query [" + type + "] found"); } } diff --git a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java index 330ebe33f09..3c4d28b90d9 100644 --- a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.util.*; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.search.EmptyScorer; import org.apache.lucene.search.join.BitDocIdSetFilter; @@ -361,7 +361,7 @@ public class TopChildrenQuery extends Query { }; } - throw new ElasticsearchIllegalStateException("No support for score type [" + scoreType + "]"); + throw new IllegalStateException("No support for score type [" + scoreType + "]"); } return new EmptyScorer(this); } diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java index d3b9a4c0757..ec1580ec7ab 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocValuesDocIdSet; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; @@ -73,7 +73,7 @@ public class GeoDistanceFilter extends Filter { boundingBoxFilter = IndexedGeoBoundingBoxFilter.create(distanceBoundingCheck.topLeft(), distanceBoundingCheck.bottomRight(), mapper); distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter } else { - throw new ElasticsearchIllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); + throw new IllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); } } else { distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java index dcc08a01b63..59cb14364a3 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java @@ -27,7 +27,7 @@ import org.apache.lucene.search.DocValuesDocIdSet; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; @@ -92,7 +92,7 @@ public class GeoDistanceRangeFilter extends Filter { boundingBoxFilter = IndexedGeoBoundingBoxFilter.create(distanceBoundingCheck.topLeft(), distanceBoundingCheck.bottomRight(), mapper); distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter } else { - throw new ElasticsearchIllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); + throw new IllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); } } else { distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; diff --git a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java index 6c57c251771..f0b4cd64545 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java @@ -22,7 +22,7 @@ package org.elasticsearch.index.search.geo; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; @@ -33,7 +33,7 @@ public class IndexedGeoBoundingBoxFilter { public static Filter create(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapper fieldMapper) { if (!fieldMapper.isEnableLatLon()) { - throw new ElasticsearchIllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldMapper.name() + "], can't use indexed filter on it"); + throw new IllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldMapper.name() + "], can't use indexed filter on it"); } //checks to see if bounding box crosses 180 degrees if (topLeft.lon() > bottomRight.lon()) { diff --git a/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java b/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java index bf8aa8eb4b3..feb4cea8f00 100644 --- a/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java +++ b/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java @@ -19,8 +19,8 @@ package org.elasticsearch.index.search.shape; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; @@ -60,7 +60,7 @@ public class ShapeFetchService extends AbstractComponent { public ShapeBuilder fetch(String id, String type, String index, String path) throws IOException { GetResponse response = client.get(new GetRequest(index, type, id).preference("_local").operationThreaded(false)).actionGet(); if (!response.isExists()) { - throw new ElasticsearchIllegalArgumentException("Shape with ID [" + id + "] in type [" + type + "] not found"); + throw new IllegalArgumentException("Shape with ID [" + id + "] in type [" + type + "] not found"); } String[] pathElements = Strings.splitStringToArray(path, '.'); @@ -83,7 +83,7 @@ public class ShapeFetchService extends AbstractComponent { } } } - throw new ElasticsearchIllegalStateException("Shape with name [" + id + "] found but missing " + path + " field"); + throw new IllegalStateException("Shape with name [" + id + "] found but missing " + path + " field"); } finally { if (parser != null) { parser.close(); diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index e7b2bbf9ec6..4768b4eef8b 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -31,8 +31,8 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.ThreadInterruptedException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; @@ -340,7 +340,7 @@ public class IndexShard extends AbstractIndexShardComponent { public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) { final ShardRouting currentRouting = this.shardRouting; if (!newRouting.shardId().equals(shardId())) { - throw new ElasticsearchIllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]"); + throw new IllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]"); } try { if (currentRouting != null) { @@ -997,7 +997,7 @@ public class IndexShard extends AbstractIndexShardComponent { */ public void deleteShardState() throws IOException { if (this.routingEntry() != null && this.routingEntry().active()) { - throw new ElasticsearchIllegalStateException("Can't delete shard state on an active shard"); + throw new IllegalStateException("Can't delete shard state on an active shard"); } MetaDataStateFormat.deleteMetaState(shardPath().getDataPath()); } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardState.java b/src/main/java/org/elasticsearch/index/shard/IndexShardState.java index 1b4edcb10ba..a97cc450f97 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShardState.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShardState.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; /** * @@ -51,9 +51,9 @@ public enum IndexShardState { return this.id; } - public static IndexShardState fromId(byte id) throws ElasticsearchIllegalArgumentException { + public static IndexShardState fromId(byte id) throws IllegalArgumentException { if (id < 0 || id >= IDS.length) { - throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]"); + throw new IllegalArgumentException("No mapping for id [" + id + "]"); } return IDS[id]; } diff --git a/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index a68d1289ff1..64f250678ac 100644 --- a/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; @@ -98,7 +98,7 @@ public final class ShadowIndexShard extends IndexShard { @Override public void updateRoutingEntry(ShardRouting newRouting, boolean persistState) { if (newRouting.primary() == true) {// becoming a primary - throw new ElasticsearchIllegalStateException("can't promote shard to primary"); + throw new IllegalStateException("can't promote shard to primary"); } super.updateRoutingEntry(newRouting, persistState); } diff --git a/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/src/main/java/org/elasticsearch/index/shard/ShardPath.java index f519aa546aa..102d565d679 100644 --- a/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLogger; @@ -91,12 +91,12 @@ public final class ShardPath { ShardStateMetaData load = ShardStateMetaData.FORMAT.loadLatestState(logger, path); if (load != null) { if ((load.indexUUID.equals(indexUUID) || IndexMetaData.INDEX_UUID_NA_VALUE.equals(load.indexUUID)) == false) { - throw new ElasticsearchIllegalStateException(shardId + " index UUID in shard state was: " + load.indexUUID + " excepted: " + indexUUID + " on shard path: " + path); + throw new IllegalStateException(shardId + " index UUID in shard state was: " + load.indexUUID + " excepted: " + indexUUID + " on shard path: " + path); } if (loadedPath == null) { loadedPath = path; } else{ - throw new ElasticsearchIllegalStateException(shardId + " more than one shard state found"); + throw new IllegalStateException(shardId + " more than one shard state found"); } } diff --git a/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index cf933e8cf0c..7d63b2cdd17 100644 --- a/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -19,14 +19,13 @@ package org.elasticsearch.index.shard; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.index.aliases.IndexAliasesService; import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperAnalyzer; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperUtils; @@ -128,7 +127,7 @@ public class TranslogRecoveryPerformer { deleteByQuery.source(), deleteByQuery.filteringAliases(), Engine.Operation.Origin.RECOVERY, deleteByQuery.types())); break; default: - throw new ElasticsearchIllegalStateException("No operation defined for [" + operation + "]"); + throw new IllegalStateException("No operation defined for [" + operation + "]"); } } catch (ElasticsearchException e) { boolean hasIgnoreOnRecoveryException = false; diff --git a/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java index 43f6091a70d..48c5dc6f521 100644 --- a/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.Settings; /** @@ -73,7 +73,7 @@ public abstract class AbstractSimilarityProvider implements SimilarityProvider { float z = settings.getAsFloat("normalization.z.z", 0.30f); return new NormalizationZ(z); } else { - throw new ElasticsearchIllegalArgumentException("Unsupported Normalization [" + normalization + "]"); + throw new IllegalArgumentException("Unsupported Normalization [" + normalization + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java index c6c6a48a77b..9c81bf0415a 100644 --- a/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.similarities.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; @@ -82,7 +82,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider { String basicModel = settings.get("basic_model"); BasicModel model = MODEL_CACHE.get(basicModel); if (model == null) { - throw new ElasticsearchIllegalArgumentException("Unsupported BasicModel [" + basicModel + "]"); + throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "]"); } return model; } @@ -97,7 +97,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider { String afterEffect = settings.get("after_effect"); AfterEffect effect = EFFECT_CACHE.get(afterEffect); if (effect == null) { - throw new ElasticsearchIllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]"); + throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]"); } return effect; } diff --git a/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java index c5e9d2901e5..68eaccc741b 100644 --- a/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.similarities.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; @@ -76,7 +76,7 @@ public class IBSimilarityProvider extends AbstractSimilarityProvider { String rawDistribution = settings.get("distribution"); Distribution distribution = DISTRIBUTION_CACHE.get(rawDistribution); if (distribution == null) { - throw new ElasticsearchIllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); + throw new IllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); } return distribution; } @@ -91,7 +91,7 @@ public class IBSimilarityProvider extends AbstractSimilarityProvider { String rawLambda = settings.get("lambda"); Lambda lambda = LAMBDA_CACHE.get(rawLambda); if (lambda == null) { - throw new ElasticsearchIllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); + throw new IllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); } return lambda; } diff --git a/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java b/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java index cbd7729e105..4a1c4578cdb 100644 --- a/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java +++ b/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Scopes; import org.elasticsearch.common.inject.assistedinject.FactoryProvider; @@ -71,7 +71,7 @@ public class SimilarityModule extends AbstractModule { Class type = settings.getAsClass("type", null, "org.elasticsearch.index.similarity.", "SimilarityProvider"); if (type == null) { - throw new ElasticsearchIllegalArgumentException("SimilarityProvider [" + name + "] must have an associated type"); + throw new IllegalArgumentException("SimilarityProvider [" + name + "] must have an associated type"); } providers.put(name, type); } diff --git a/src/main/java/org/elasticsearch/index/store/Store.java b/src/main/java/org/elasticsearch/index/store/Store.java index 117547c98df..166bd7d96c3 100644 --- a/src/main/java/org/elasticsearch/index/store/Store.java +++ b/src/main/java/org/elasticsearch/index/store/Store.java @@ -27,7 +27,7 @@ import org.apache.lucene.index.*; import org.apache.lucene.store.*; import org.apache.lucene.util.*; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -532,7 +532,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref * @param reason the reason for this cleanup operation logged for each deleted file * @param sourceMetaData the metadata used for cleanup. all files in this metadata should be kept around. * @throws IOException if an IOException occurs - * @throws ElasticsearchIllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. + * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. */ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException { metadataLock.writeLock().lock(); @@ -553,7 +553,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { // TODO do we need to also fail this if we can't delete the pending commit file? // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around? - throw new ElasticsearchIllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); + throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); } logger.debug("failed to delete file [{}]", ex, existingFile); // ignore, we don't really care, will get deleted later on @@ -592,12 +592,12 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref final boolean consistent = hashAndLengthEqual || same; if (consistent == false) { logger.debug("Files are different on the recovery target: {} ", recoveryDiff); - throw new ElasticsearchIllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null); + throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null); } } } else { logger.debug("Files are missing on the recovery target: {} ", recoveryDiff); - throw new ElasticsearchIllegalStateException("Files are missing on the recovery target: [different=" + throw new IllegalStateException("Files are missing on the recovery target: [different=" + recoveryDiff.different + ", missing=" + recoveryDiff.missing + ']', null); } } diff --git a/src/main/java/org/elasticsearch/index/translog/Translog.java b/src/main/java/org/elasticsearch/index/translog/Translog.java index 7d6a4b0a02a..bcf707c795b 100644 --- a/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -23,8 +23,8 @@ import org.apache.lucene.index.Term; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -269,7 +269,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { case 4: return DELETE_BY_QUERY; default: - throw new ElasticsearchIllegalArgumentException("No type mapped for [" + id + "]"); + throw new IllegalArgumentException("No type mapped for [" + id + "]"); } } } @@ -635,7 +635,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { @Override public Source getSource(){ - throw new ElasticsearchIllegalStateException("trying to read doc source from delete operation"); + throw new IllegalStateException("trying to read doc source from delete operation"); } @Override @@ -709,7 +709,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { @Override public Source getSource() { - throw new ElasticsearchIllegalStateException("trying to read doc source from delete_by_query operation"); + throw new IllegalStateException("trying to read doc source from delete_by_query operation"); } @Override diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java index a6539847c45..775c5cd7c47 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.translog.fs; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; @@ -49,13 +49,13 @@ public interface FsTranslogFile extends Closeable { public abstract FsTranslogFile create(ShardId shardId, long id, ChannelReference raf, int bufferSize) throws IOException; - public static Type fromString(String type) throws ElasticsearchIllegalArgumentException { + public static Type fromString(String type) throws IllegalArgumentException { if (SIMPLE.name().equalsIgnoreCase(type)) { return SIMPLE; } else if (BUFFERED.name().equalsIgnoreCase(type)) { return BUFFERED; } - throw new ElasticsearchIllegalArgumentException("No translog fs type [" + type + "]"); + throw new IllegalArgumentException("No translog fs type [" + type + "]"); } } diff --git a/src/main/java/org/elasticsearch/indices/IndicesService.java b/src/main/java/org/elasticsearch/indices/IndicesService.java index a78a0b4fd79..579eca91b72 100644 --- a/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -25,8 +25,8 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -282,7 +282,7 @@ public class IndicesService extends AbstractLifecycleComponent i public synchronized IndexService createIndex(String sIndexName, @IndexSettings Settings settings, String localNodeId) throws ElasticsearchException { if (!lifecycle.started()) { - throw new ElasticsearchIllegalStateException("Can't create an index [" + sIndexName + "], node is closed"); + throw new IllegalStateException("Can't create an index [" + sIndexName + "], node is closed"); } Index index = new Index(sIndexName); if (indices.containsKey(index.name())) { @@ -449,7 +449,7 @@ public class IndicesService extends AbstractLifecycleComponent i try { if (clusterState.metaData().hasIndex(indexName)) { final IndexMetaData index = clusterState.metaData().index(indexName); - throw new ElasticsearchIllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); } deleteIndexStore(reason, metaData, clusterState); } catch (IOException e) { @@ -468,13 +468,13 @@ public class IndicesService extends AbstractLifecycleComponent i String indexName = metaData.index(); if (indices.containsKey(indexName)) { String localUUid = indices.get(indexName).v1().indexUUID(); - throw new ElasticsearchIllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]"); } if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { // we do not delete the store if it is a master eligible node and the index is still in the cluster state // because we want to keep the meta data for indices around even if no shards are left here final IndexMetaData index = clusterState.metaData().index(indexName); - throw new ElasticsearchIllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); } } Index index = new Index(metaData.index()); @@ -533,7 +533,7 @@ public class IndicesService extends AbstractLifecycleComponent i public void deleteShardStore(String reason, ShardId shardId, IndexMetaData metaData) throws IOException { final Settings indexSettings = buildIndexSettings(metaData); if (canDeleteShardContent(shardId, indexSettings) == false) { - throw new ElasticsearchIllegalStateException("Can't delete shard " + shardId); + throw new IllegalStateException("Can't delete shard " + shardId); } nodeEnv.deleteShardDirectorySafe(shardId, indexSettings); logger.trace("{} deleting shard reason [{}]", shardId, reason); @@ -614,10 +614,10 @@ public class IndicesService extends AbstractLifecycleComponent i */ public void addPendingDelete(ShardId shardId, @IndexSettings Settings settings) { if (shardId == null) { - throw new ElasticsearchIllegalArgumentException("shardId must not be null"); + throw new IllegalArgumentException("shardId must not be null"); } if (settings == null) { - throw new ElasticsearchIllegalArgumentException("settings must not be null"); + throw new IllegalArgumentException("settings must not be null"); } PendingDelete pendingDelete = new PendingDelete(shardId, settings, false); addPendingDelete(shardId.index(), pendingDelete); diff --git a/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index db9717356ac..34e1feec607 100644 --- a/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -19,7 +19,7 @@ package org.elasticsearch.indices.breaker; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; @@ -164,9 +164,9 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { /** * Validate that child settings are valid - * @throws ElasticsearchIllegalStateException + * @throws IllegalStateException */ - public static void validateSettings(BreakerSettings[] childrenSettings) throws ElasticsearchIllegalStateException { + public static void validateSettings(BreakerSettings[] childrenSettings) throws IllegalStateException { for (BreakerSettings childSettings : childrenSettings) { // If the child is disabled, ignore it if (childSettings.getLimit() == -1) { @@ -174,7 +174,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { } if (childSettings.getOverhead() < 0) { - throw new ElasticsearchIllegalStateException("Child breaker overhead " + childSettings + " must be non-negative"); + throw new IllegalStateException("Child breaker overhead " + childSettings + " must be non-negative"); } } } diff --git a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java index 9fdc0c8ea34..c0b9458f952 100644 --- a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java +++ b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java @@ -26,7 +26,7 @@ import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; import org.apache.lucene.search.DocIdSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -87,7 +87,7 @@ public class IndicesFilterCache extends AbstractComponent implements RemovalList } final int concurrencyLevel = settings.getAsInt(INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, IndicesFilterCache.this.concurrencyLevel); if (concurrencyLevel <= 0) { - throw new ElasticsearchIllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); + throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); } if (!Objects.equal(concurrencyLevel, IndicesFilterCache.this.concurrencyLevel)) { logger.info("updating [{}] from [{}] to [{}]", @@ -112,13 +112,13 @@ public class IndicesFilterCache extends AbstractComponent implements RemovalList this.expire = settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, null); this.minimumEntryWeight = settings.getAsInt(INDICES_CACHE_FILTER_MINIMUM_ENTRY_WEIGHT, 1024); // 1k per entry minimum if (minimumEntryWeight <= 0) { - throw new ElasticsearchIllegalArgumentException("minimum_entry_weight must be > 0 but was: " + minimumEntryWeight); + throw new IllegalArgumentException("minimum_entry_weight must be > 0 but was: " + minimumEntryWeight); } this.cleanInterval = settings.getAsTime(INDICES_CACHE_FILTER_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60)); // defaults to 4, but this is a busy map for all indices, increase it a bit this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, 16); if (concurrencyLevel <= 0) { - throw new ElasticsearchIllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); + throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); } computeSizeInBytes(); buildCache(); diff --git a/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java b/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java index 5daf3682a1f..d8a2b101fcc 100644 --- a/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java +++ b/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java @@ -31,7 +31,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -120,7 +120,7 @@ public class IndicesQueryCache extends AbstractComponent implements RemovalListe // defaults to 4, but this is a busy map for all indices, increase it a bit by default this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16); if (concurrencyLevel <= 0) { - throw new ElasticsearchIllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); + throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); } buildCache(); diff --git a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index b10ff5d5bea..7a068641c51 100644 --- a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -25,8 +25,8 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.base.Predicate; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.ExceptionsHelper; +import java.lang.IllegalStateException; + import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -783,7 +783,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent 0 but was: " + concurrencyLevel); + throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); } cacheBuilder.concurrencyLevel(concurrencyLevel); if (expire != null && expire.millis() > 0) { diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 2e9f11efa82..c465aeae70e 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -20,8 +20,8 @@ package org.elasticsearch.indices.recovery; import com.google.common.collect.ImmutableList; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RestoreSource; import org.elasticsearch.common.Nullable; @@ -82,9 +82,9 @@ public class RecoveryState implements ToXContent, Streamable { return id; } - public static Stage fromId(byte id) throws ElasticsearchIllegalArgumentException { + public static Stage fromId(byte id) throws IllegalArgumentException { if (id < 0 || id >= STAGES.length) { - throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]"); + throw new IllegalArgumentException("No mapping for id [" + id + "]"); } return STAGES[id]; } @@ -115,9 +115,9 @@ public class RecoveryState implements ToXContent, Streamable { return id; } - public static Type fromId(byte id) throws ElasticsearchIllegalArgumentException { + public static Type fromId(byte id) throws IllegalArgumentException { if (id < 0 || id >= TYPES.length) { - throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]"); + throw new IllegalArgumentException("No mapping for id [" + id + "]"); } return TYPES[id]; } @@ -170,7 +170,7 @@ public class RecoveryState implements ToXContent, Streamable { private void validateAndSetStage(Stage expected, Stage next) { if (stage != expected) { - throw new ElasticsearchIllegalStateException("can't move recovery to stage [" + next + "]. current stage: [" + throw new IllegalStateException("can't move recovery to stage [" + next + "]. current stage: [" + stage + "] (expected [" + expected + "])"); } stage = next; @@ -209,7 +209,7 @@ public class RecoveryState implements ToXContent, Streamable { getTimer().stop(); break; default: - throw new ElasticsearchIllegalArgumentException("unknown RecoveryState.Stage [" + stage + "]"); + throw new IllegalArgumentException("unknown RecoveryState.Stage [" + stage + "]"); } return this; } diff --git a/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java index b347f662a1e..0fc7c209111 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java +++ b/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java @@ -20,7 +20,7 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.common.logging.ESLogger; @@ -51,7 +51,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { // here we simply fail the primary shard since we can't move them (have 2 writers open at the same time) // by failing the shard we play safe and just go through the entire reallocation procedure of the primary // it would be ideal to make sure we flushed the translog here but that is not possible in the current design. - ElasticsearchIllegalStateException exception = new ElasticsearchIllegalStateException("Can't relocate primary - failing"); + IllegalStateException exception = new IllegalStateException("Can't relocate primary - failing"); shard.failShard("primary_relocation", exception); throw exception; } diff --git a/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index d9b4f5c2462..911d6a6445a 100644 --- a/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -20,7 +20,7 @@ package org.elasticsearch.monitor.jvm; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.unit.TimeValue; @@ -75,7 +75,7 @@ public class HotThreads { if ("cpu".equals(type) || "wait".equals(type) || "block".equals(type)) { this.type = type; } else { - throw new ElasticsearchIllegalArgumentException("type not supported [" + type + "]"); + throw new IllegalArgumentException("type not supported [" + type + "]"); } return this; } diff --git a/src/main/java/org/elasticsearch/node/Node.java b/src/main/java/org/elasticsearch/node/Node.java index b3be8cd5a87..603898dcbae 100644 --- a/src/main/java/org/elasticsearch/node/Node.java +++ b/src/main/java/org/elasticsearch/node/Node.java @@ -21,7 +21,7 @@ package org.elasticsearch.node; import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionModule; import org.elasticsearch.cache.recycler.PageCacheRecycler; @@ -157,7 +157,7 @@ public class Node implements Releasable { try { nodeEnvironment = new NodeEnvironment(this.settings, this.environment); } catch (IOException ex) { - throw new ElasticsearchIllegalStateException("Failed to created node environment", ex); + throw new IllegalStateException("Failed to created node environment", ex); } boolean success = false; diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 6723372f1c8..958b1a55963 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -36,7 +36,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.percolate.PercolateShardRequest; @@ -196,7 +196,7 @@ public class PercolatorService extends AbstractComponent { if (request.docSource() != null && request.docSource().length() != 0) { parsedDocument = parseFetchedDoc(context, request.docSource(), percolateIndexService, request.documentType()); } else if (parsedDocument == null) { - throw new ElasticsearchIllegalArgumentException("Nothing to percolate"); + throw new IllegalArgumentException("Nothing to percolate"); } if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.aggregations() != null) || context.aliasFilter() != null) { @@ -204,11 +204,11 @@ public class PercolatorService extends AbstractComponent { } if (context.doSort && !context.limit) { - throw new ElasticsearchIllegalArgumentException("Can't sort if size isn't specified"); + throw new IllegalArgumentException("Can't sort if size isn't specified"); } if (context.highlight() != null && !context.limit) { - throw new ElasticsearchIllegalArgumentException("Can't highlight if size isn't specified"); + throw new IllegalArgumentException("Can't highlight if size isn't specified"); } if (context.size() < 0) { diff --git a/src/main/java/org/elasticsearch/plugins/PluginManager.java b/src/main/java/org/elasticsearch/plugins/PluginManager.java index fbd89b9f27a..5678c03fdb9 100644 --- a/src/main/java/org/elasticsearch/plugins/PluginManager.java +++ b/src/main/java/org/elasticsearch/plugins/PluginManager.java @@ -119,7 +119,7 @@ public class PluginManager { public void downloadAndExtract(String name) throws IOException { if (name == null) { - throw new ElasticsearchIllegalArgumentException("plugin name must be supplied with --install [name]."); + throw new IllegalArgumentException("plugin name must be supplied with --install [name]."); } HttpDownloadHelper downloadHelper = new HttpDownloadHelper(); boolean downloaded = false; @@ -293,7 +293,7 @@ public class PluginManager { public void removePlugin(String name) throws IOException { if (name == null) { - throw new ElasticsearchIllegalArgumentException("plugin name must be supplied with --remove [name]."); + throw new IllegalArgumentException("plugin name must be supplied with --remove [name]."); } PluginHandle pluginHandle = PluginHandle.parse(name); boolean removed = false; @@ -342,7 +342,7 @@ public class PluginManager { private static void checkForForbiddenName(String name) { if (!hasLength(name) || BLACKLIST.contains(name.toLowerCase(Locale.ROOT))) { - throw new ElasticsearchIllegalArgumentException("Illegal plugin name: " + name); + throw new IllegalArgumentException("Illegal plugin name: " + name); } } @@ -485,7 +485,7 @@ public class PluginManager { pluginManager.log("-> Removing " + Strings.nullToEmpty(pluginName) + "..."); pluginManager.removePlugin(pluginName); exitCode = EXIT_CODE_OK; - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { exitCode = EXIT_CODE_CMD_USAGE; pluginManager.log("Failed to remove " + pluginName + ", reason: " + e.getMessage()); } catch (IOException e) { @@ -526,12 +526,12 @@ public class PluginManager { * @return Never {@code null}. The trimmed value. * @throws NullPointerException if {@code args} is {@code null}. * @throws ArrayIndexOutOfBoundsException if {@code arg} is negative. - * @throws ElasticsearchIllegalStateException if {@code arg} is >= {@code args.length}. - * @throws ElasticsearchIllegalArgumentException if the value evaluates to blank ({@code null} or only whitespace) + * @throws IllegalStateException if {@code arg} is >= {@code args.length}. + * @throws IllegalArgumentException if the value evaluates to blank ({@code null} or only whitespace) */ private static String getCommandValue(String[] args, int arg, String flag) { if (arg >= args.length) { - throw new ElasticsearchIllegalStateException("missing value for " + flag + ". Usage: " + flag + " [value]"); + throw new IllegalStateException("missing value for " + flag + ". Usage: " + flag + " [value]"); } // avoid having to interpret multiple forms of unset @@ -539,7 +539,7 @@ public class PluginManager { // If we had a value that is blank, then fail immediately if (trimmedValue == null) { - throw new ElasticsearchIllegalArgumentException( + throw new IllegalArgumentException( "value for " + flag + "('" + args[arg] + "') must be set. Usage: " + flag + " [value]"); } diff --git a/src/main/java/org/elasticsearch/plugins/PluginsService.java b/src/main/java/org/elasticsearch/plugins/PluginsService.java index 7a7c569acbe..a260a997ced 100644 --- a/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -25,7 +25,7 @@ import com.google.common.collect.*; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.PluginInfo; import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo; @@ -123,7 +123,7 @@ public class PluginsService extends AbstractComponent { try { loadPluginsIntoClassLoader(); } catch (IOException ex) { - throw new ElasticsearchIllegalStateException("Can't load plugins into classloader", ex); + throw new IllegalStateException("Can't load plugins into classloader", ex); } if (loadClasspathPlugins) { tupleBuilder.addAll(loadPluginsFromClasspath(settings)); @@ -147,7 +147,7 @@ public class PluginsService extends AbstractComponent { sitePlugins.add(tuple.v1().getName()); } } catch (IOException ex) { - throw new ElasticsearchIllegalStateException("Can't load site plugins", ex); + throw new IllegalStateException("Can't load site plugins", ex); } // Checking expected plugins diff --git a/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 36fbf05c2ec..08454424ab0 100644 --- a/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -21,8 +21,8 @@ package org.elasticsearch.repositories; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableMap; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; + +import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; @@ -34,7 +34,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.Injectors; import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; @@ -413,7 +412,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta private void ensureRepositoryNotInUse(ClusterState clusterState, String repository) { if (SnapshotsService.isRepositoryInUse(clusterState, repository) || RestoreService.isRepositoryInUse(clusterState, repository)) { - throw new ElasticsearchIllegalStateException("trying to modify or unregister repository that is currently used "); + throw new IllegalStateException("trying to modify or unregister repository that is currently used "); } } diff --git a/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index d8e3569c3d7..d7018947ac3 100644 --- a/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -26,7 +26,6 @@ import com.google.common.collect.Maps; import com.google.common.io.ByteStreams; import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; diff --git a/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index d3939bb8c60..9b83c2648b3 100644 --- a/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -19,19 +19,13 @@ package org.elasticsearch.rest; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.*; import org.elasticsearch.client.Client; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.FilterClient; import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.action.support.RestActions; import java.util.Set; diff --git a/src/main/java/org/elasticsearch/rest/RestController.java b/src/main/java/org/elasticsearch/rest/RestController.java index 3aae11c30f2..9535fa7237a 100644 --- a/src/main/java/org/elasticsearch/rest/RestController.java +++ b/src/main/java/org/elasticsearch/rest/RestController.java @@ -21,8 +21,8 @@ package org.elasticsearch.rest; import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -136,7 +136,7 @@ public class RestController extends AbstractLifecycleComponent { headHandlers.insert(path, handler); break; default: - throw new ElasticsearchIllegalArgumentException("Can't handle [" + method + "] for path [" + path + "]"); + throw new IllegalArgumentException("Can't handle [" + method + "] for path [" + path + "]"); } } @@ -260,7 +260,7 @@ public class RestController extends AbstractLifecycleComponent { try { int loc = index.getAndIncrement(); if (loc > filters.length) { - throw new ElasticsearchIllegalStateException("filter continueProcessing was called more than expected"); + throw new IllegalStateException("filter continueProcessing was called more than expected"); } else if (loc == filters.length) { executionFilter.process(request, channel, this); } else { diff --git a/src/main/java/org/elasticsearch/rest/RestRequest.java b/src/main/java/org/elasticsearch/rest/RestRequest.java index a908e5c55a7..d54195068be 100644 --- a/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.ContextHolder; import org.elasticsearch.common.Nullable; @@ -97,7 +97,7 @@ public abstract class RestRequest extends ContextHolder implements ToXContent.Pa try { return Float.parseFloat(sValue); } catch (NumberFormatException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse float parameter [" + key + "] with value [" + sValue + "]", e); + throw new IllegalArgumentException("Failed to parse float parameter [" + key + "] with value [" + sValue + "]", e); } } @@ -109,7 +109,7 @@ public abstract class RestRequest extends ContextHolder implements ToXContent.Pa try { return Integer.parseInt(sValue); } catch (NumberFormatException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e); + throw new IllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e); } } @@ -121,7 +121,7 @@ public abstract class RestRequest extends ContextHolder implements ToXContent.Pa try { return Long.parseLong(sValue); } catch (NumberFormatException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e); + throw new IllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e); } } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java index 423d1c0f73b..9c697afdfe1 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.admin.indices.alias; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.client.Client; @@ -62,7 +62,7 @@ public class RestIndicesAliasesAction extends BaseRestHandler { indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout())); XContentParser.Token token = parser.nextToken(); if (token == null) { - throw new ElasticsearchIllegalArgumentException("No action is specified"); + throw new IllegalArgumentException("No action is specified"); } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.START_ARRAY) { @@ -75,7 +75,7 @@ public class RestIndicesAliasesAction extends BaseRestHandler { } else if ("remove".equals(action)) { type = AliasAction.Type.REMOVE; } else { - throw new ElasticsearchIllegalArgumentException("Alias action [" + action + "] not supported"); + throw new IllegalArgumentException("Alias action [" + action + "] not supported"); } String index = null; String alias = null; diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java index 227deb11918..0e0e2736b0d 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.rest.action.admin.indices.alias.put; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; @@ -75,7 +75,7 @@ public class RestIndexPutAliasAction extends BaseRestHandler { try (XContentParser parser = XContentFactory.xContent(request.content()).createParser(request.content())) { XContentParser.Token token = parser.nextToken(); if (token == null) { - throw new ElasticsearchIllegalArgumentException("No index alias is specified"); + throw new IllegalArgumentException("No index alias is specified"); } String currentFieldName = null; while ((token = parser.nextToken()) != null) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index f5967596809..77fd1b89fd3 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -19,15 +19,13 @@ package org.elasticsearch.rest.action.admin.indices.analyze; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -86,10 +84,10 @@ public class RestAnalyzeAction extends BaseRestHandler { client.admin().indices().analyze(analyzeRequest, new RestToXContentListener(channel)); } - public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest) throws ElasticsearchIllegalArgumentException { + public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest) throws IllegalArgumentException { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Malforrmed content, must start with an object"); + throw new IllegalArgumentException("Malforrmed content, must start with an object"); } else { XContentParser.Token token; String currentFieldName = null; @@ -110,7 +108,7 @@ public class RestAnalyzeAction extends BaseRestHandler { List filters = Lists.newArrayList(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { - throw new ElasticsearchIllegalArgumentException(currentFieldName + " array element should only contain token filter's name"); + throw new IllegalArgumentException(currentFieldName + " array element should only contain token filter's name"); } filters.add(parser.text()); } @@ -119,18 +117,18 @@ public class RestAnalyzeAction extends BaseRestHandler { List charFilters = Lists.newArrayList(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { - throw new ElasticsearchIllegalArgumentException(currentFieldName + " array element should only contain char filter's name"); + throw new IllegalArgumentException(currentFieldName + " array element should only contain char filter's name"); } charFilters.add(parser.text()); } analyzeRequest.tokenFilters(charFilters.toArray(new String[0])); } else { - throw new ElasticsearchIllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); + throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); } } } } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse request body", e); + throw new IllegalArgumentException("Failed to parse request body", e); } } } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java index dc800f37062..678965adc85 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java @@ -21,7 +21,7 @@ package org.elasticsearch.rest.action.admin.indices.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; @@ -105,7 +105,7 @@ public class RestGetIndicesAction extends BaseRestHandler { writeWarmers(response.warmers().get(index), builder, request); break; default: - throw new ElasticsearchIllegalStateException("feature [" + feature + "] is not valid"); + throw new IllegalStateException("feature [" + feature + "] is not valid"); } } builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java index 091ab27e387..a5d5b89d1a0 100644 --- a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java +++ b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.count; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -75,7 +75,7 @@ public class RestCountAction extends BaseRestHandler { final int terminateAfter = request.paramAsInt("terminate_after", DEFAULT_TERMINATE_AFTER); if (terminateAfter < 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be > 0"); } else if (terminateAfter > 0) { countRequest.terminateAfter(terminateAfter); } diff --git a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java index 39fe00c663a..aeb903aafd2 100644 --- a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java +++ b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java @@ -20,13 +20,12 @@ package org.elasticsearch.rest.action.explain; import org.apache.lucene.search.Explanation; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -81,7 +80,7 @@ public class RestExplainAction extends BaseRestHandler { } else if ("AND".equals(defaultOperator)) { queryStringBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND); } else { - throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); + throw new IllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); } } diff --git a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index e196d954a7c..c519238ea8c 100644 --- a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.index; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; @@ -88,7 +88,7 @@ public class RestIndexAction extends BaseRestHandler { if (sOpType != null) { try { indexRequest.opType(IndexRequest.OpType.fromString(sOpType)); - } catch (ElasticsearchIllegalArgumentException eia){ + } catch (IllegalArgumentException eia){ try { XContentBuilder builder = channel.newBuilder(); channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", eia.getMessage()).endObject())); diff --git a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java index aab5774af9a..46504cbf6bf 100644 --- a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.rest.action.script; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse; @@ -84,7 +84,7 @@ public class RestPutIndexedScriptAction extends BaseRestHandler { if (sOpType != null) { try { putRequest.opType(IndexRequest.OpType.fromString(sOpType)); - } catch (ElasticsearchIllegalArgumentException eia){ + } catch (IllegalArgumentException eia){ try { XContentBuilder builder = channel.newBuilder(); channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", eia.getMessage()).endObject())); diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java index 1dfff7cf4fe..4fc2c2f00ad 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.search; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.client.Client; @@ -27,7 +27,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -79,10 +78,10 @@ public class RestClearScrollAction extends BaseRestHandler { return Strings.splitStringByCommaToArray(scrollIds); } - public static void buildFromContent(BytesReference content, ClearScrollRequest clearScrollRequest) throws ElasticsearchIllegalArgumentException { + public static void buildFromContent(BytesReference content, ClearScrollRequest clearScrollRequest) throws IllegalArgumentException { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Malformed content, must start with an object"); + throw new IllegalArgumentException("Malformed content, must start with an object"); } else { XContentParser.Token token; String currentFieldName = null; @@ -92,17 +91,17 @@ public class RestClearScrollAction extends BaseRestHandler { } else if ("scroll_id".equals(currentFieldName) && token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { - throw new ElasticsearchIllegalArgumentException("scroll_id array element should only contain scroll_id"); + throw new IllegalArgumentException("scroll_id array element should only contain scroll_id"); } clearScrollRequest.addScrollId(parser.text()); } } else { - throw new ElasticsearchIllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); + throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); } } } } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse request body", e); + throw new IllegalArgumentException("Failed to parse request body", e); } } diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index f578d3baa6a..b5037204639 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.search; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -130,7 +130,7 @@ public class RestSearchAction extends BaseRestHandler { } else if ("AND".equals(defaultOperator)) { queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND); } else { - throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); + throw new IllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); } } if (searchSourceBuilder == null) { @@ -179,7 +179,7 @@ public class RestSearchAction extends BaseRestHandler { int terminateAfter = request.paramAsInt("terminate_after", SearchContext.DEFAULT_TERMINATE_AFTER); if (terminateAfter < 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be > 0"); } else if (terminateAfter > 0) { searchSourceBuilder.terminateAfter(terminateAfter); } diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index 98edaa2ccea..86549b93fad 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -19,11 +19,10 @@ package org.elasticsearch.rest.action.search; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -84,10 +83,10 @@ public class RestSearchScrollAction extends BaseRestHandler { client.searchScroll(searchScrollRequest, new RestStatusToXContentListener(channel)); } - public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) throws ElasticsearchIllegalArgumentException { + public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) throws IllegalArgumentException { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Malforrmed content, must start with an object"); + throw new IllegalArgumentException("Malforrmed content, must start with an object"); } else { XContentParser.Token token; String currentFieldName = null; @@ -99,12 +98,12 @@ public class RestSearchScrollAction extends BaseRestHandler { } else if ("scroll".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { searchScrollRequest.scroll(new Scroll(TimeValue.parseTimeValue(parser.text(), null))); } else { - throw new ElasticsearchIllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); + throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); } } } } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse request body", e); + throw new IllegalArgumentException("Failed to parse request body", e); } } } diff --git a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java index eebea7973cd..4af6e7b59e6 100644 --- a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java +++ b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java @@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.suggest; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -64,7 +64,7 @@ public class RestSuggestAction extends BaseRestHandler { if (RestActions.hasBodyContent(request)) { suggestRequest.suggest(RestActions.getRestContent(request)); } else { - throw new ElasticsearchIllegalArgumentException("no content or source provided to execute suggestion"); + throw new IllegalArgumentException("no content or source provided to execute suggestion"); } suggestRequest.routing(request.param("routing")); suggestRequest.preference(request.param("preference")); diff --git a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java index 38dd78bf403..4a17a1b17dc 100644 --- a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java +++ b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest.action.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; @@ -106,7 +106,7 @@ public class RestActions { } else if ("AND".equals(defaultOperator)) { queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND); } else { - throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); + throw new IllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); } } return new QuerySourceBuilder().setQuery(queryBuilder); diff --git a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java index 45b8ee26712..e91567d8e3e 100644 --- a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java @@ -22,7 +22,7 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableMap; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -68,7 +68,7 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri if (scriptFactory != null) { return scriptFactory; } - throw new ElasticsearchIllegalArgumentException("Native script [" + script + "] not found"); + throw new IllegalArgumentException("Native script [" + script + "] not found"); } @Override diff --git a/src/main/java/org/elasticsearch/script/Script.java b/src/main/java/org/elasticsearch/script/Script.java index 4e5e2027f9b..23cc02f529e 100644 --- a/src/main/java/org/elasticsearch/script/Script.java +++ b/src/main/java/org/elasticsearch/script/Script.java @@ -21,7 +21,7 @@ package org.elasticsearch.script; import java.util.Map; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import static org.elasticsearch.script.ScriptService.ScriptType; @@ -46,10 +46,10 @@ public class Script { */ public Script(String lang, String script, ScriptType type, Map params) { if (script == null) { - throw new ElasticsearchIllegalArgumentException("The parameter script (String) must not be null in Script."); + throw new IllegalArgumentException("The parameter script (String) must not be null in Script."); } if (type == null) { - throw new ElasticsearchIllegalArgumentException("The parameter type (ScriptType) must not be null in Script."); + throw new IllegalArgumentException("The parameter type (ScriptType) must not be null in Script."); } this.lang = lang; diff --git a/src/main/java/org/elasticsearch/script/ScriptContext.java b/src/main/java/org/elasticsearch/script/ScriptContext.java index 18224e81483..7fbaaf84684 100644 --- a/src/main/java/org/elasticsearch/script/ScriptContext.java +++ b/src/main/java/org/elasticsearch/script/ScriptContext.java @@ -19,7 +19,7 @@ package org.elasticsearch.script; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; /** @@ -75,10 +75,10 @@ public interface ScriptContext { */ public Plugin(String pluginName, String operation) { if (Strings.hasLength(pluginName) == false) { - throw new ElasticsearchIllegalArgumentException("plugin name cannot be empty when registering a custom script context"); + throw new IllegalArgumentException("plugin name cannot be empty when registering a custom script context"); } if (Strings.hasLength(operation) == false) { - throw new ElasticsearchIllegalArgumentException("operation name cannot be empty when registering a custom script context"); + throw new IllegalArgumentException("operation name cannot be empty when registering a custom script context"); } this.pluginName = pluginName; this.operation = operation; diff --git a/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java b/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java index 40596870b99..643a06ad4bf 100644 --- a/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java +++ b/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java @@ -23,7 +23,7 @@ import com.google.common.collect.ImmutableCollection; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.util.Map; @@ -46,7 +46,7 @@ public final class ScriptContextRegistry { validateScriptContext(customScriptContext); ScriptContext previousContext = scriptContexts.put(customScriptContext.getKey(), customScriptContext); if (previousContext != null) { - throw new ElasticsearchIllegalArgumentException("script context [" + customScriptContext.getKey() + "] cannot be registered twice"); + throw new IllegalArgumentException("script context [" + customScriptContext.getKey() + "] cannot be registered twice"); } } this.scriptContexts = ImmutableMap.copyOf(scriptContexts); @@ -69,10 +69,10 @@ public final class ScriptContextRegistry { //script contexts can be used in fine-grained settings, we need to be careful with what we allow here private void validateScriptContext(ScriptContext.Plugin scriptContext) { if (RESERVED_SCRIPT_CONTEXTS.contains(scriptContext.getPluginName())) { - throw new ElasticsearchIllegalArgumentException("[" + scriptContext.getPluginName() + "] is a reserved name, it cannot be registered as a custom script context"); + throw new IllegalArgumentException("[" + scriptContext.getPluginName() + "] is a reserved name, it cannot be registered as a custom script context"); } if (RESERVED_SCRIPT_CONTEXTS.contains(scriptContext.getOperation())) { - throw new ElasticsearchIllegalArgumentException("[" + scriptContext.getOperation() + "] is a reserved name, it cannot be registered as a custom script context"); + throw new IllegalArgumentException("[" + scriptContext.getOperation() + "] is a reserved name, it cannot be registered as a custom script context"); } } diff --git a/src/main/java/org/elasticsearch/script/ScriptMode.java b/src/main/java/org/elasticsearch/script/ScriptMode.java index 855690b794b..b7fb7474e73 100644 --- a/src/main/java/org/elasticsearch/script/ScriptMode.java +++ b/src/main/java/org/elasticsearch/script/ScriptMode.java @@ -19,7 +19,7 @@ package org.elasticsearch.script; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Booleans; import java.util.Locale; @@ -45,7 +45,7 @@ enum ScriptMode { if (SANDBOX.toString().equals(input)) { return SANDBOX; } - throw new ElasticsearchIllegalArgumentException("script mode [" + input + "] not supported"); + throw new IllegalArgumentException("script mode [" + input + "] not supported"); } diff --git a/src/main/java/org/elasticsearch/script/ScriptModes.java b/src/main/java/org/elasticsearch/script/ScriptModes.java index 854a7ec2e40..36c3c457697 100644 --- a/src/main/java/org/elasticsearch/script/ScriptModes.java +++ b/src/main/java/org/elasticsearch/script/ScriptModes.java @@ -21,7 +21,7 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService.ScriptType; @@ -159,7 +159,7 @@ public class ScriptModes { } ScriptMode scriptMode = scriptModes.get(ENGINE_SETTINGS_PREFIX + "." + lang + "." + scriptType + "." + scriptContext.getKey()); if (scriptMode == null) { - throw new ElasticsearchIllegalArgumentException("script mode not found for lang [" + lang + "], script_type [" + scriptType + "], operation [" + scriptContext.getKey() + "]"); + throw new IllegalArgumentException("script mode not found for lang [" + lang + "], script_type [" + scriptType + "], operation [" + scriptContext.getKey() + "]"); } return scriptMode; } diff --git a/src/main/java/org/elasticsearch/script/ScriptModule.java b/src/main/java/org/elasticsearch/script/ScriptModule.java index a5a4707db04..92f15cf2f65 100644 --- a/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -21,7 +21,7 @@ package org.elasticsearch.script; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.MapBinder; import org.elasticsearch.common.inject.multibindings.Multibinder; @@ -82,7 +82,7 @@ public class ScriptModule extends AbstractModule { String name = entry.getKey(); Class type = entry.getValue().getAsClass("type", NativeScriptFactory.class); if (type == NativeScriptFactory.class) { - throw new ElasticsearchIllegalArgumentException("type is missing for native script [" + name + "]"); + throw new IllegalArgumentException("type is missing for native script [" + name + "]"); } scriptsBinder.addBinding(name).to(type).asEagerSingleton(); } diff --git a/src/main/java/org/elasticsearch/script/ScriptService.java b/src/main/java/org/elasticsearch/script/ScriptService.java index cbe38f12958..53173e639ca 100644 --- a/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/src/main/java/org/elasticsearch/script/ScriptService.java @@ -27,8 +27,8 @@ import com.google.common.cache.RemovalNotification; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; @@ -116,7 +116,7 @@ public class ScriptService extends AbstractComponent implements Closeable { super(settings); if (Strings.hasLength(settings.get(DISABLE_DYNAMIC_SCRIPTING_SETTING))) { - throw new ElasticsearchIllegalArgumentException(DISABLE_DYNAMIC_SCRIPTING_SETTING + " is not a supported setting, replace with fine-grained script settings. \n" + + throw new IllegalArgumentException(DISABLE_DYNAMIC_SCRIPTING_SETTING + " is not a supported setting, replace with fine-grained script settings. \n" + "Dynamic scripts can be enabled for all languages and all operations by replacing `script.disable_dynamic: false` with `script.inline: on` and `script.indexed: on` in elasticsearch.yml"); } @@ -183,7 +183,7 @@ public class ScriptService extends AbstractComponent implements Closeable { private ScriptEngineService getScriptEngineServiceForLang(String lang) { ScriptEngineService scriptEngineService = scriptEnginesByLang.get(lang); if (scriptEngineService == null) { - throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]"); + throw new IllegalArgumentException("script_lang not supported [" + lang + "]"); } return scriptEngineService; } @@ -191,7 +191,7 @@ public class ScriptService extends AbstractComponent implements Closeable { private ScriptEngineService getScriptEngineServiceForFileExt(String fileExtension) { ScriptEngineService scriptEngineService = scriptEnginesByExt.get(fileExtension); if (scriptEngineService == null) { - throw new ElasticsearchIllegalArgumentException("script file extension not supported [" + fileExtension + "]"); + throw new IllegalArgumentException("script file extension not supported [" + fileExtension + "]"); } return scriptEngineService; } @@ -201,10 +201,10 @@ public class ScriptService extends AbstractComponent implements Closeable { */ public CompiledScript compile(Script script, ScriptContext scriptContext) { if (script == null) { - throw new ElasticsearchIllegalArgumentException("The parameter script (Script) must not be null."); + throw new IllegalArgumentException("The parameter script (Script) must not be null."); } if (scriptContext == null) { - throw new ElasticsearchIllegalArgumentException("The parameter scriptContext (ScriptContext) must not be null."); + throw new IllegalArgumentException("The parameter scriptContext (ScriptContext) must not be null."); } String lang = script.getLang(); @@ -225,7 +225,7 @@ public class ScriptService extends AbstractComponent implements Closeable { */ public CompiledScript compileInternal(Script script) { if (script == null) { - throw new ElasticsearchIllegalArgumentException("The parameter script (Script) must not be null."); + throw new IllegalArgumentException("The parameter script (Script) must not be null."); } String lang = script.getLang(); @@ -243,7 +243,7 @@ public class ScriptService extends AbstractComponent implements Closeable { if (script.getType() == ScriptType.FILE) { CompiledScript compiled = staticCache.get(cacheKey); //On disk scripts will be loaded into the staticCache by the listener if (compiled == null) { - throw new ElasticsearchIllegalArgumentException("Unable to find on disk script " + script.getScript()); + throw new IllegalArgumentException("Unable to find on disk script " + script.getScript()); } return compiled; } @@ -279,14 +279,14 @@ public class ScriptService extends AbstractComponent implements Closeable { if (scriptLang == null) { scriptLang = defaultLang; } else if (scriptEnginesByLang.containsKey(scriptLang) == false) { - throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + scriptLang + "]"); + throw new IllegalArgumentException("script_lang not supported [" + scriptLang + "]"); } return scriptLang; } String getScriptFromIndex(String scriptLang, String id) { if (client == null) { - throw new ElasticsearchIllegalArgumentException("Got an indexed script with no Client registered."); + throw new IllegalArgumentException("Got an indexed script with no Client registered."); } scriptLang = validateScriptLanguage(scriptLang); GetRequest getRequest = new GetRequest(SCRIPT_INDEX, scriptLang, id); @@ -294,7 +294,7 @@ public class ScriptService extends AbstractComponent implements Closeable { if (responseFields.isExists()) { return getScriptFromResponse(responseFields); } - throw new ElasticsearchIllegalArgumentException("Unable to find script [" + SCRIPT_INDEX + "/" + throw new IllegalArgumentException("Unable to find script [" + SCRIPT_INDEX + "/" + scriptLang + "/" + id + "]"); } @@ -311,21 +311,21 @@ public class ScriptService extends AbstractComponent implements Closeable { if (isAnyScriptContextEnabled(scriptLang, getScriptEngineServiceForLang(scriptLang), ScriptType.INDEXED)) { CompiledScript compiledScript = compileInternal(new Script(scriptLang, context.template(), ScriptType.INLINE, null)); if (compiledScript == null) { - throw new ElasticsearchIllegalArgumentException("Unable to parse [" + context.template() + + throw new IllegalArgumentException("Unable to parse [" + context.template() + "] lang [" + scriptLang + "] (ScriptService.compile returned null)"); } } else { logger.warn("skipping compile of script [{}], lang [{}] as all scripted operations are disabled for indexed scripts", context.template(), scriptLang); } } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("Unable to parse [" + context.template() + + throw new IllegalArgumentException("Unable to parse [" + context.template() + "] lang [" + scriptLang + "]", e); } } else { - throw new ElasticsearchIllegalArgumentException("Unable to find script in : " + scriptBytes.toUtf8()); + throw new IllegalArgumentException("Unable to find script in : " + scriptBytes.toUtf8()); } } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("failed to parse template script", e); + throw new IllegalArgumentException("failed to parse template script", e); } } @@ -361,7 +361,7 @@ public class ScriptService extends AbstractComponent implements Closeable { return template.toString(); } } catch (IOException | ClassCastException e) { - throw new ElasticsearchIllegalStateException("Unable to parse " + responseFields.getSourceAsString() + " as json", e); + throw new IllegalStateException("Unable to parse " + responseFields.getSourceAsString() + " as json", e); } } else if (source.containsKey("script")) { return source.get("script").toString(); @@ -371,7 +371,7 @@ public class ScriptService extends AbstractComponent implements Closeable { builder.map(responseFields.getSource()); return builder.string(); } catch (IOException|ClassCastException e) { - throw new ElasticsearchIllegalStateException("Unable to parse " + responseFields.getSourceAsString() + " as json", e); + throw new IllegalStateException("Unable to parse " + responseFields.getSourceAsString() + " as json", e); } } } @@ -410,7 +410,7 @@ public class ScriptService extends AbstractComponent implements Closeable { private boolean canExecuteScript(String lang, ScriptEngineService scriptEngineService, ScriptType scriptType, ScriptContext scriptContext) { assert lang != null; if (scriptContextRegistry.isSupportedContext(scriptContext) == false) { - throw new ElasticsearchIllegalArgumentException("script context [" + scriptContext.getKey() + "] not supported"); + throw new IllegalArgumentException("script context [" + scriptContext.getKey() + "] not supported"); } ScriptMode mode = scriptModes.getScriptMode(lang, scriptType, scriptContext); switch (mode) { @@ -421,7 +421,7 @@ public class ScriptService extends AbstractComponent implements Closeable { case SANDBOX: return scriptEngineService.sandboxed(); default: - throw new ElasticsearchIllegalArgumentException("script mode [" + mode + "] not supported"); + throw new IllegalArgumentException("script mode [" + mode + "] not supported"); } } @@ -543,7 +543,7 @@ public class ScriptService extends AbstractComponent implements Closeable { case FILE_VAL: return FILE; default: - throw new ElasticsearchIllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + + throw new IllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + "] expected one of [" + INLINE_VAL + "," + INDEXED_VAL + "," + FILE_VAL + "]"); } } @@ -561,7 +561,7 @@ public class ScriptService extends AbstractComponent implements Closeable { out.writeVInt(FILE_VAL); return; default: - throw new ElasticsearchIllegalStateException("Unknown ScriptType " + scriptType); + throw new IllegalStateException("Unknown ScriptType " + scriptType); } } else { out.writeVInt(INLINE_VAL); //Default to inline @@ -613,11 +613,11 @@ public class ScriptService extends AbstractComponent implements Closeable { this.id = script; } else { if (parts.length != 3) { - throw new ElasticsearchIllegalArgumentException("Illegal index script format [" + script + "]" + + throw new IllegalArgumentException("Illegal index script format [" + script + "]" + " should be /lang/id"); } else { if (!parts[1].equals(this.lang)) { - throw new ElasticsearchIllegalStateException("Conflicting script language, found [" + parts[1] + "] expected + ["+ this.lang + "]"); + throw new IllegalStateException("Conflicting script language, found [" + parts[1] + "] expected + ["+ this.lang + "]"); } this.id = parts[2]; } diff --git a/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java b/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java index d7f43222ec2..d37efc68201 100644 --- a/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java +++ b/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.Scorer; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.SearchScript; @@ -92,7 +92,7 @@ class ExpressionScript implements SearchScript { // We have a new binding for the scorer so we need to reset the values values = source.getValues(Collections.singletonMap("scorer", scorer), leaf); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Can't get values", e); + throw new IllegalStateException("Can't get values", e); } } diff --git a/src/main/java/org/elasticsearch/search/MultiValueMode.java b/src/main/java/org/elasticsearch/search/MultiValueMode.java index c1f41cb92e0..30f2d7504ab 100644 --- a/src/main/java/org/elasticsearch/search/MultiValueMode.java +++ b/src/main/java/org/elasticsearch/search/MultiValueMode.java @@ -27,7 +27,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -375,13 +375,13 @@ public enum MultiValueMode { /** * A case insensitive version of {@link #valueOf(String)} * - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the given string doesn't match a sort mode or is null. + * @throws IllegalArgumentException if the given string doesn't match a sort mode or is null. */ public static MultiValueMode fromString(String sortMode) { try { return valueOf(sortMode.toUpperCase(Locale.ROOT)); } catch (Throwable t) { - throw new ElasticsearchIllegalArgumentException("Illegal sort_mode " + sortMode); + throw new IllegalArgumentException("Illegal sort_mode " + sortMode); } } @@ -615,7 +615,7 @@ public enum MultiValueMode { } protected BytesRef pick(SortedBinaryDocValues values, BytesRef missingValue) { - throw new ElasticsearchIllegalArgumentException("Unsupported sort mode: " + this); + throw new IllegalArgumentException("Unsupported sort mode: " + this); } /** @@ -735,7 +735,7 @@ public enum MultiValueMode { } protected int pick(RandomAccessOrds values) { - throw new ElasticsearchIllegalArgumentException("Unsupported sort mode: " + this); + throw new IllegalArgumentException("Unsupported sort mode: " + this); } /** diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java index d8d85deb0a3..dfbfb84e0fe 100644 --- a/src/main/java/org/elasticsearch/search/SearchService.java +++ b/src/main/java/org/elasticsearch/search/SearchService.java @@ -29,7 +29,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchType; @@ -230,7 +230,7 @@ public class SearchService extends AbstractLifecycleComponent { final int originalSize = context.size(); try { if (context.aggregations() != null) { - throw new ElasticsearchIllegalArgumentException("aggregations are not supported with search_type=scan"); + throw new IllegalArgumentException("aggregations are not supported with search_type=scan"); } if (context.scroll() == null) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 98cc7e39e1a..08c9cef997d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; @@ -115,7 +115,7 @@ public class AggregatorFactories { public Builder add(AggregatorFactory factory) { if (!names.add(factory.name)) { - throw new ElasticsearchIllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]"); + throw new IllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]"); } factories.add(factory); return this; diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index 7537878ae04..091c1361634 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -25,7 +25,7 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -133,7 +133,7 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl String aggName = path.get(0); InternalAggregation aggregation = get(aggName); if (aggregation == null) { - throw new ElasticsearchIllegalArgumentException("Cannot find an aggregation named [" + aggName + "]"); + throw new IllegalArgumentException("Cannot find an aggregation named [" + aggName + "]"); } return aggregation.getProperty(path.subList(1, path.size())); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index 572680c9318..035db21d59d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import java.util.List; @@ -58,18 +58,18 @@ public abstract class InternalMultiBucketAggregation extends InternalAggregation String aggName = path.get(0); if (aggName.equals("_count")) { if (path.size() > 1) { - throw new ElasticsearchIllegalArgumentException("_count must be the last element in the path"); + throw new IllegalArgumentException("_count must be the last element in the path"); } return getDocCount(); } else if (aggName.equals("_key")) { if (path.size() > 1) { - throw new ElasticsearchIllegalArgumentException("_key must be the last element in the path"); + throw new IllegalArgumentException("_key must be the last element in the path"); } return getKey(); } InternalAggregation aggregation = aggregations.get(aggName); if (aggregation == null) { - throw new ElasticsearchIllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + containingAggName + "]"); + throw new IllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + containingAggName + "]"); } return aggregation.getProperty(path.subList(1, path.size())); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index f0c0294b4d7..f4e2c169c9c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -22,7 +22,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongHash; @@ -71,7 +71,7 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { @Override public boolean needsScores() { if (collector == null) { - throw new ElasticsearchIllegalStateException(); + throw new IllegalStateException(); } return collector.needsScores(); } @@ -127,10 +127,10 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { @Override public void prepareSelectedBuckets(long... selectedBuckets) throws IOException { if (!finished) { - throw new ElasticsearchIllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); + throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); } if (this.selectedBuckets != null) { - throw new ElasticsearchIllegalStateException("Already been replayed"); + throw new IllegalStateException("Already been replayed"); } final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE); @@ -141,7 +141,7 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { collector.preCollection(); if (collector.needsScores()) { - throw new ElasticsearchIllegalStateException("Cannot defer if scores are needed"); + throw new IllegalStateException("Cannot defer if scores are needed"); } for (Entry entry : entries) { @@ -176,11 +176,11 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { @Override public InternalAggregation buildAggregation(long bucket) throws IOException { if (selectedBuckets == null) { - throw new ElasticsearchIllegalStateException("Collection has not been replayed yet."); + throw new IllegalStateException("Collection has not been replayed yet."); } final long rebasedBucket = selectedBuckets.find(bucket); if (rebasedBucket == -1) { - throw new ElasticsearchIllegalStateException("Cannot build for a bucket which has not been collected"); + throw new IllegalStateException("Cannot build for a bucket which has not been collected"); } return in.buildAggregation(rebasedBucket); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java index 437e642d7e6..51b7e968f0f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java @@ -27,7 +27,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopScoreDocCollector; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollector; @@ -122,10 +122,10 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector { @Override public void prepareSelectedBuckets(long... selectedBuckets) throws IOException { if (!finished) { - throw new ElasticsearchIllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); + throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); } if (selectedBuckets.length > 1) { - throw new ElasticsearchIllegalStateException("Collection only supported on a single bucket"); + throw new IllegalStateException("Collection only supported on a single bucket"); } deferred.preCollection(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java index b0f2693e9eb..9f0ce6c3c33 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -110,19 +110,19 @@ public abstract class DeferringBucketCollector extends BucketCollector { @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { - throw new ElasticsearchIllegalStateException( + throw new IllegalStateException( "Deferred collectors cannot be collected directly. They must be collected through the recording wrapper."); } @Override public void preCollection() throws IOException { - throw new ElasticsearchIllegalStateException( + throw new IllegalStateException( "Deferred collectors cannot be collected directly. They must be collected through the recording wrapper."); } @Override public void postCollection() throws IOException { - throw new ElasticsearchIllegalStateException( + throw new IllegalStateException( "Deferred collectors cannot be collected directly. They must be collected through the recording wrapper."); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java index f278be9f663..a212d107083 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -89,13 +89,13 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio String aggName = path.get(0); if (aggName.equals("_count")) { if (path.size() > 1) { - throw new ElasticsearchIllegalArgumentException("_count must be the last element in the path"); + throw new IllegalArgumentException("_count must be the last element in the path"); } return getDocCount(); } InternalAggregation aggregation = aggregations.get(aggName); if (aggregation == null) { - throw new ElasticsearchIllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + getName() + "]"); + throw new IllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + getName() + "]"); } return aggregation.getProperty(path.subList(1, path.size())); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index 40db0d054c1..0c70f787247 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.util.LongArray; @@ -93,7 +93,7 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { return LeafBucketCollector.NO_OP_COLLECTOR; } if (replay == null) { - throw new ElasticsearchIllegalStateException(); + throw new IllegalStateException(); } final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java index f2c058a8e1c..557b6b85971 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -56,7 +56,7 @@ public class GeoHashGridBuilder extends AggregationBuilder { */ public GeoHashGridBuilder precision(int precision) { if ((precision < 1) || (precision > 12)) { - throw new ElasticsearchIllegalArgumentException("Invalid geohash aggregation precision of " + precision + throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision + "must be between 1 and 12"); } this.precision = precision; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java index 7862eade5d6..36b32d08d39 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.global; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -78,7 +78,7 @@ public class GlobalAggregator extends SingleBucketAggregator { "sub-aggregation [" + name + "]. Global aggregations can only be defined as top level aggregations"); } if (collectsFromSingleBucket == false) { - throw new ElasticsearchIllegalStateException(); + throw new IllegalStateException(); } return new GlobalAggregator(name, factories, context, metaData); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 491422d20cf..fb1d2973201 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -22,7 +22,7 @@ import com.google.common.collect.Lists; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -68,7 +68,7 @@ public class InternalHistogram extends Inter public Bucket readResult(StreamInput in, BucketStreamContext context) throws IOException { Factory factory = (Factory) context.attributes().get("factory"); if (factory == null) { - throw new ElasticsearchIllegalStateException("No factory found for histogram buckets"); + throw new IllegalStateException("No factory found for histogram buckets"); } Bucket histogram = new Bucket(context.keyed(), context.formatter(), factory); histogram.readFrom(in); @@ -461,7 +461,7 @@ public class InternalHistogram extends Inter } else if (factoryType.equals(TYPE.name())) { return new Factory<>(); } else { - throw new ElasticsearchIllegalStateException("Invalid histogram factory type [" + factoryType + "]"); + throw new IllegalStateException("Invalid histogram factory type [" + factoryType + "]"); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java index c74df049d12..5a657154684 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -103,7 +103,7 @@ public class DiversifiedBytesHashSamplerAggregator extends SamplerAggregator { values.setDocument(doc); final int valuesCount = values.count(); if (valuesCount > 1) { - throw new ElasticsearchIllegalArgumentException("Sample diversifying key must be a single valued-field"); + throw new IllegalArgumentException("Sample diversifying key must be a single valued-field"); } if (valuesCount == 1) { final BytesRef bytes = values.valueAt(0); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java index bf196245ce1..3f967400d9b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -110,7 +110,7 @@ public class DiversifiedMapSamplerAggregator extends SamplerAggregator { values.setDocument(doc); final int valuesCount = values.count(); if (valuesCount > 1) { - throw new ElasticsearchIllegalArgumentException("Sample diversifying key must be a single valued-field"); + throw new IllegalArgumentException("Sample diversifying key must be a single valued-field"); } if (valuesCount == 1) { final BytesRef bytes = values.valueAt(0); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java index e5f963ed3ef..750b6740260 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector; import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector; @@ -94,7 +94,7 @@ public class DiversifiedNumericSamplerAggregator extends SamplerAggregator { values.setDocument(doc); final int valuesCount = values.count(); if (valuesCount > 1) { - throw new ElasticsearchIllegalArgumentException("Sample diversifying key must be a single valued-field"); + throw new IllegalArgumentException("Sample diversifying key must be a single valued-field"); } if (valuesCount == 1) { return values.valueAt(0); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java index 808acc49883..6986fef2587 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java @@ -27,7 +27,7 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.search.DiversifiedTopDocsCollector; import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector; @@ -100,7 +100,7 @@ public class DiversifiedOrdinalsSamplerAggregator extends SamplerAggregator { globalOrds.setDocument(doc); final long valuesCount = globalOrds.cardinality(); if (valuesCount > 1) { - throw new ElasticsearchIllegalArgumentException("Sample diversifying key must be a single valued-field"); + throw new IllegalArgumentException("Sample diversifying key must be a single valued-field"); } if (valuesCount == 1) { long result = globalOrds.ordAt(0); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 27bfc8666c5..87897b12e8f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -109,7 +109,7 @@ public class SamplerAggregator extends SingleBucketAggregator { return mode; } } - throw new ElasticsearchIllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values()); + throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values()); } private final ParseField parseField; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index ef837cfab82..adf97c5a65e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.index.FilterableTermsEnum; @@ -32,7 +32,6 @@ import org.elasticsearch.common.lucene.index.FreqTermsEnum; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.AggregatorBase; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.NonCollectingAggregator; @@ -100,7 +99,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac return mode; } } - throw new ElasticsearchIllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values()); + throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values()); } private final ParseField parseField; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java index 48d70c0f9c2..be5de56ec4a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.significant; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -133,7 +133,7 @@ public class SignificantTermsBuilder extends AggregationBuilder supersetFreq) { - throw new ElasticsearchIllegalArgumentException("subsetFreq > supersetFreq" + SCORE_ERROR_MESSAGE); + throw new IllegalArgumentException("subsetFreq > supersetFreq" + SCORE_ERROR_MESSAGE); } if (subsetSize > supersetSize) { - throw new ElasticsearchIllegalArgumentException("subsetSize > supersetSize" + SCORE_ERROR_MESSAGE); + throw new IllegalArgumentException("subsetSize > supersetSize" + SCORE_ERROR_MESSAGE); } if (supersetFreq - subsetFreq > supersetSize - subsetSize) { - throw new ElasticsearchIllegalArgumentException("supersetFreq - subsetFreq > supersetSize - subsetSize" + SCORE_ERROR_MESSAGE); + throw new IllegalArgumentException("supersetFreq - subsetFreq > supersetSize - subsetSize" + SCORE_ERROR_MESSAGE); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java index b3da709323b..f57b1d5ba78 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -40,13 +40,13 @@ public abstract class SignificanceHeuristic { protected void checkFrequencyValidity(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize, String scoreFunctionName) { if (subsetFreq < 0 || subsetSize < 0 || supersetFreq < 0 || supersetSize < 0) { - throw new ElasticsearchIllegalArgumentException("Frequencies of subset and superset must be positive in " + scoreFunctionName + ".getScore()"); + throw new IllegalArgumentException("Frequencies of subset and superset must be positive in " + scoreFunctionName + ".getScore()"); } if (subsetFreq > subsetSize) { - throw new ElasticsearchIllegalArgumentException("subsetFreq > subsetSize, in " + scoreFunctionName); + throw new IllegalArgumentException("subsetFreq > subsetSize, in " + scoreFunctionName); } if (supersetFreq > supersetSize) { - throw new ElasticsearchIllegalArgumentException("supersetFreq > supersetSize, in " + scoreFunctionName); + throw new IllegalArgumentException("supersetFreq > supersetSize, in " + scoreFunctionName); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index ff7cf1ab78d..93985a39d22 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -22,7 +22,7 @@ import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Maps; import com.google.common.collect.Multimap; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; @@ -78,7 +78,7 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple @Override public long getDocCountError() { if (!showDocCountError) { - throw new ElasticsearchIllegalStateException("show_terms_doc_count_error is false"); + throw new IllegalStateException("show_terms_doc_count_error is false"); } return docCountError; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 3fa99d2b7fd..3d6c47b63b8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -19,12 +19,11 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.lucene.search.IndexSearcher; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; -import org.elasticsearch.search.aggregations.AggregatorBase; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.NonCollectingAggregator; @@ -115,7 +114,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory { */ public TermsBuilder include(String regex) { if (includeTerms != null) { - throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of strings or a regex, not both"); + throw new IllegalArgumentException("exclude clause must be an array of strings or a regex, not both"); } this.includePattern = regex; return this; @@ -104,7 +104,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder { */ public TermsBuilder include(String [] terms) { if (includePattern != null) { - throw new ElasticsearchIllegalArgumentException("include clause must be an array of exact values or a regex, not both"); + throw new IllegalArgumentException("include clause must be an array of exact values or a regex, not both"); } this.includeTerms = terms; return this; @@ -115,7 +115,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder { */ public TermsBuilder include(long [] terms) { if (includePattern != null) { - throw new ElasticsearchIllegalArgumentException("include clause must be an array of exact values or a regex, not both"); + throw new IllegalArgumentException("include clause must be an array of exact values or a regex, not both"); } this.includeTerms = longsArrToStringArr(terms); return this; @@ -135,7 +135,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder { */ public TermsBuilder include(double [] terms) { if (includePattern != null) { - throw new ElasticsearchIllegalArgumentException("include clause must be an array of exact values or a regex, not both"); + throw new IllegalArgumentException("include clause must be an array of exact values or a regex, not both"); } this.includeTerms = doubleArrToStringArr(terms); return this; @@ -157,7 +157,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder { */ public TermsBuilder exclude(String regex) { if (excludeTerms != null) { - throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both"); + throw new IllegalArgumentException("exclude clause must be an array of exact values or a regex, not both"); } this.excludePattern = regex; return this; @@ -168,7 +168,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder { */ public TermsBuilder exclude(String [] terms) { if (excludePattern != null) { - throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both"); + throw new IllegalArgumentException("exclude clause must be an array of exact values or a regex, not both"); } this.excludeTerms = terms; return this; @@ -180,7 +180,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder { */ public TermsBuilder exclude(long [] terms) { if (excludePattern != null) { - throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both"); + throw new IllegalArgumentException("exclude clause must be an array of exact values or a regex, not both"); } this.excludeTerms = longsArrToStringArr(terms); return this; @@ -191,7 +191,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder { */ public TermsBuilder exclude(double [] terms) { if (excludePattern != null) { - throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both"); + throw new IllegalArgumentException("exclude clause must be an array of exact values or a regex, not both"); } this.excludeTerms = doubleArrToStringArr(terms); return this; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java index cb1fc48be00..6f92bb85970 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java @@ -34,7 +34,7 @@ import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -293,7 +293,7 @@ public class IncludeExclude { RegExp excludePattern = exclude != null ? new RegExp(exclude) : null; if (includePattern != null || excludePattern != null) { if (includeValues != null || excludeValues != null) { - throw new ElasticsearchIllegalArgumentException("Can only use regular expression include/exclude or a set of values, not both"); + throw new IllegalArgumentException("Can only use regular expression include/exclude or a set of values, not both"); } return new IncludeExclude(includePattern, excludePattern); } else if (includeValues != null || excludeValues != null) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java index 2f94c38d464..e6659358b64 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.util.List; @@ -55,7 +55,7 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA } else if (path.size() == 1 && "value".equals(path.get(0))) { return value(); } else { - throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path); + throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path); } } @@ -86,7 +86,7 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA } else if (path.size() == 1) { return value(path.get(0)); } else { - throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path); + throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java index a59abe1ad2b..972ab7eacd5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.geobounds; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -123,7 +123,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo case "right": return boundingBox.bottomRight.lon(); default: - throw new ElasticsearchIllegalArgumentException("Found unknown path element [" + bBoxSide + "] in [" + getName() + "]"); + throw new IllegalArgumentException("Found unknown path element [" + bBoxSide + "] in [" + getName() + "]"); } } else if (path.size() == 2) { BoundingBox boundingBox = resolveBoundingBox(); @@ -137,7 +137,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo cornerPoint = boundingBox.bottomRight; break; default: - throw new ElasticsearchIllegalArgumentException("Found unknown path element [" + cornerString + "] in [" + getName() + "]"); + throw new IllegalArgumentException("Found unknown path element [" + cornerString + "] in [" + getName() + "]"); } String latLonString = path.get(1); switch (latLonString) { @@ -146,10 +146,10 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo case "lon": return cornerPoint.lon(); default: - throw new ElasticsearchIllegalArgumentException("Found unknown path element [" + latLonString + "] in [" + getName() + "]"); + throw new IllegalArgumentException("Found unknown path element [" + latLonString + "] in [" + getName() + "]"); } } else { - throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path); + throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java index b4916cdd8c9..31bc8d8ac27 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.percentiles; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -80,7 +80,7 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega if (in.getVersion().before(Version.V_1_2_0)) { final byte id = in.readByte(); if (id != 0) { - throw new ElasticsearchIllegalArgumentException("Unexpected percentiles aggregator id [" + id + "]"); + throw new IllegalArgumentException("Unexpected percentiles aggregator id [" + id + "]"); } } keys = new double[in.readInt()]; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index 877b632dbdb..d1508174a64 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.scripted; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -122,7 +122,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement } else if (path.size() == 1 && "value".equals(path.get(0))) { return aggregation; } else { - throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path); + throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java index 8f431578fef..d0af5fa671b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.stats; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -135,7 +135,7 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue { case max: return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd); case avg: return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd); default: - throw new ElasticsearchIllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); + throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java index 75dc354f874..b5863d8e352 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.stats.extended; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -154,7 +154,7 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue if (valuesSource == null) { return Double.NaN; } return (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) - (Math.sqrt(variance(owningBucketOrd)) * this.sigma); default: - throw new ElasticsearchIllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); + throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation"); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java index b33f8bb092e..9bc2b42f2a5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java @@ -22,7 +22,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -132,7 +132,7 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHi if (path.isEmpty()) { return this; } else { - throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path); + throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java b/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java index 6dae0975d91..db4b9590036 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -158,7 +158,7 @@ public class AggregationPath { public AggregationPath(List tokens) { this.pathElements = tokens; if (tokens == null || tokens.size() == 0) { - throw new ElasticsearchIllegalArgumentException("Invalid path [" + this + "]"); + throw new IllegalArgumentException("Invalid path [" + this + "]"); } } @@ -206,13 +206,13 @@ public class AggregationPath { Aggregation agg = parent.getAggregations().get(token.name); if (agg == null) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Cannot find aggregation named [" + token.name + "]"); } if (agg instanceof SingleBucketAggregation) { if (token.key != null && !token.key.equals("doc_count")) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Unknown value key [" + token.key + "] for single-bucket aggregation [" + token.name + "]. Either use [doc_count] as key or drop the key all together"); } @@ -223,13 +223,13 @@ public class AggregationPath { // the agg can only be a metrics agg, and a metrics agg must be at the end of the path if (i != pathElements.size() - 1) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Metrics aggregations cannot have sub-aggregations (at [" + token + ">" + pathElements.get(i + 1) + "]"); } if (agg instanceof InternalNumericMetricsAggregation.SingleValue) { if (token.key != null && !token.key.equals("value")) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Unknown value key [" + token.key + "] for single-value metric aggregation [" + token.name + "]. Either use [value] as key or drop the key all together"); } @@ -240,7 +240,7 @@ public class AggregationPath { // we're left with a multi-value metric agg if (token.key == null) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Missing value key in [" + token + "] which refers to a multi-value metric aggregation"); } parent = null; diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java b/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java index 55176b7c7fc..7f42d0b684e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.support.format; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,7 +39,7 @@ public class ValueFormatterStreams { case ValueFormatter.Number.Pattern.ID: formatter = new ValueFormatter.Number.Pattern(); break; case ValueFormatter.GeoHash.ID: formatter = ValueFormatter.GEOHASH; break; case ValueFormatter.BooleanFormatter.ID: formatter = ValueFormatter.BOOLEAN; break; - default: throw new ElasticsearchIllegalArgumentException("Unknown value formatter with id [" + id + "]"); + default: throw new IllegalArgumentException("Unknown value formatter with id [" + id + "]"); } formatter.readFrom(in); return formatter; diff --git a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 61132aa28ab..b77f86432ca 100644 --- a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -24,7 +24,7 @@ import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.client.Requests; import org.elasticsearch.common.Nullable; @@ -316,7 +316,7 @@ public class SearchSourceBuilder implements ToXContent { */ public SearchSourceBuilder terminateAfter(int terminateAfter) { if (terminateAfter <= 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be > 0"); } this.terminateAfter = terminateAfter; return this; diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 5e44f4c4914..b02ce1584a1 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -28,8 +28,8 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -147,7 +147,7 @@ public class FetchPhase implements SearchPhase { if (x == null) { // Only fail if we know it is a object field, missing paths / fields shouldn't fail. if (context.smartNameObjectMapper(fieldName) != null) { - throw new ElasticsearchIllegalArgumentException("field [" + fieldName + "] isn't a leaf field"); + throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field"); } } else if (x.mapper().fieldType().stored()) { if (fieldNames == null) { @@ -313,7 +313,7 @@ public class FetchPhase implements SearchPhase { // nested field has an object value in the _source. This just means the nested field has just one inner object, which is valid, but uncommon. nestedParsedSource = ImmutableList.of((Map < String, Object >) extractedValue); } else { - throw new ElasticsearchIllegalStateException("extracted source isn't an object or an array"); + throw new IllegalStateException("extracted source isn't an object or an array"); } sourceAsMap = nestedParsedSource.get(nested.getOffset()); nested = nested.getChild(); diff --git a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java index c35398152dc..7951e52bc18 100644 --- a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.fetch.fielddata; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.SearchContext; @@ -47,7 +47,7 @@ public class FieldDataFieldsParseElement implements SearchParseElement { String fieldName = parser.text(); context.fieldDataFields().add(new FieldDataFieldsContext.FieldDataField(fieldName)); } else { - throw new ElasticsearchIllegalStateException("Expected either a VALUE_STRING or an START_ARRAY but got " + token); + throw new IllegalStateException("Expected either a VALUE_STRING or an START_ARRAY but got " + token); } } } diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java index f282439b733..931a00df7d5 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.fetch.innerhits; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; @@ -74,12 +74,12 @@ public class InnerHitsParseElement implements SearchParseElement { Map innerHitsMap = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token != XContentParser.Token.FIELD_NAME) { - throw new ElasticsearchIllegalArgumentException("Unexpected token " + token + " in [inner_hits]: inner_hit definitions must start with the name of the inner_hit."); + throw new IllegalArgumentException("Unexpected token " + token + " in [inner_hits]: inner_hit definitions must start with the name of the inner_hit."); } final String innerHitName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); + throw new IllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); } InnerHitsContext.BaseInnerHits innerHits = parseInnerHit(parser, parseContext, searchContext, innerHitName); if (innerHitsMap == null) { @@ -93,12 +93,12 @@ public class InnerHitsParseElement implements SearchParseElement { private InnerHitsContext.BaseInnerHits parseInnerHit(XContentParser parser, QueryParseContext parseContext, SearchContext searchContext, String innerHitName) throws Exception { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new ElasticsearchIllegalArgumentException("Unexpected token " + token + " inside inner hit definition. Either specify [path] or [type] object"); + throw new IllegalArgumentException("Unexpected token " + token + " inside inner hit definition. Either specify [path] or [type] object"); } String fieldName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); + throw new IllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); } String nestedPath = null; @@ -111,16 +111,16 @@ public class InnerHitsParseElement implements SearchParseElement { type = parser.currentName(); break; default: - throw new ElasticsearchIllegalArgumentException("Either path or type object must be defined"); + throw new IllegalArgumentException("Either path or type object must be defined"); } token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new ElasticsearchIllegalArgumentException("Unexpected token " + token + " inside inner hit definition. Either specify [path] or [type] object"); + throw new IllegalArgumentException("Unexpected token " + token + " inside inner hit definition. Either specify [path] or [type] object"); } fieldName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); + throw new IllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); } final InnerHitsContext.BaseInnerHits innerHits; @@ -129,17 +129,17 @@ public class InnerHitsParseElement implements SearchParseElement { } else if (type != null) { innerHits = parseParentChild(parser, parseContext, searchContext, fieldName); } else { - throw new ElasticsearchIllegalArgumentException("Either [path] or [type] must be defined"); + throw new IllegalArgumentException("Either [path] or [type] must be defined"); } // Completely consume all json objects: token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Expected [" + XContentParser.Token.END_OBJECT + "] token, but got a [" + token + "] token."); + throw new IllegalArgumentException("Expected [" + XContentParser.Token.END_OBJECT + "] token, but got a [" + token + "] token."); } token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Expected [" + XContentParser.Token.END_OBJECT + "] token, but got a [" + token + "] token."); + throw new IllegalArgumentException("Expected [" + XContentParser.Token.END_OBJECT + "] token, but got a [" + token + "] token."); } return innerHits; @@ -149,7 +149,7 @@ public class InnerHitsParseElement implements SearchParseElement { ParseResult parseResult = parseSubSearchContext(searchContext, parseContext, parser); DocumentMapper documentMapper = searchContext.mapperService().documentMapper(type); if (documentMapper == null) { - throw new ElasticsearchIllegalArgumentException("type [" + type + "] doesn't exist"); + throw new IllegalArgumentException("type [" + type + "] doesn't exist"); } return new InnerHitsContext.ParentChildInnerHits(parseResult.context(), parseResult.query(), parseResult.childInnerHits(), documentMapper); } @@ -157,11 +157,11 @@ public class InnerHitsParseElement implements SearchParseElement { private InnerHitsContext.NestedInnerHits parseNested(XContentParser parser, QueryParseContext parseContext, SearchContext searchContext, String nestedPath) throws Exception { MapperService.SmartNameObjectMapper smartNameObjectMapper = searchContext.smartNameObjectMapper(nestedPath); if (smartNameObjectMapper == null || !smartNameObjectMapper.hasMapper()) { - throw new ElasticsearchIllegalArgumentException("path [" + nestedPath +"] doesn't exist"); + throw new IllegalArgumentException("path [" + nestedPath +"] doesn't exist"); } ObjectMapper childObjectMapper = smartNameObjectMapper.mapper(); if (!childObjectMapper.nested().isNested()) { - throw new ElasticsearchIllegalArgumentException("path [" + nestedPath +"] isn't nested"); + throw new IllegalArgumentException("path [" + nestedPath +"] isn't nested"); } ObjectMapper parentObjectMapper = parseContext.nestedScope().nextLevel(childObjectMapper); ParseResult parseResult = parseSubSearchContext(searchContext, parseContext, parser); diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java index fcfe3a99946..b43cf006a60 100644 --- a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.fetch.script; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.search.SearchHitField; @@ -77,7 +77,7 @@ public class ScriptFieldsFetchSubPhase implements FetchSubPhase { try { leafScript = scriptField.script().getLeafSearchScript(hitContext.readerContext()); } catch (IOException e1) { - throw new ElasticsearchIllegalStateException("Failed to load script", e1); + throw new IllegalStateException("Failed to load script", e1); } leafScript.setDocument(hitContext.docId()); diff --git a/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java index 6d34c7a58ee..5a6a934d184 100644 --- a/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java +++ b/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java @@ -22,7 +22,7 @@ import com.google.common.collect.Maps; import org.apache.lucene.search.highlight.Encoder; import org.apache.lucene.search.vectorhighlight.*; import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.StringText; @@ -65,7 +65,7 @@ public class FastVectorHighlighter implements Highlighter { FieldMapper mapper = highlighterContext.mapper; if (!(mapper.fieldType().storeTermVectors() && mapper.fieldType().storeTermVectorOffsets() && mapper.fieldType().storeTermVectorPositions())) { - throw new ElasticsearchIllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with term vector with position offsets to be used with fast vector highlighter"); + throw new IllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with term vector with position offsets to be used with fast vector highlighter"); } Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT; diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java b/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java index 042146addca..a80357a18f4 100644 --- a/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java +++ b/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java @@ -23,14 +23,13 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -89,7 +88,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase { if (context.highlight().forceSource(field)) { SourceFieldMapper sourceFieldMapper = context.mapperService().documentMapper(hitContext.hit().type()).sourceMapper(); if (!sourceFieldMapper.enabled()) { - throw new ElasticsearchIllegalArgumentException("source is forced for fields " + fieldNamesToHighlight + " but type [" + hitContext.hit().type() + "] has disabled _source"); + throw new IllegalArgumentException("source is forced for fields " + fieldNamesToHighlight + " but type [" + hitContext.hit().type() + "] has disabled _source"); } } @@ -113,7 +112,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase { Highlighter highlighter = highlighters.get(highlighterType); if (highlighter == null) { - throw new ElasticsearchIllegalArgumentException("unknown highlighter type [" + highlighterType + "] for the field [" + fieldName + "]"); + throw new IllegalArgumentException("unknown highlighter type [" + highlighterType + "] for the field [" + fieldName + "]"); } HighlighterContext.HighlightQuery highlightQuery; diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java index 24adef3bba5..d05c35988e8 100644 --- a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java +++ b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java @@ -22,7 +22,7 @@ package org.elasticsearch.search.highlight; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.IndexQueryParserService; @@ -70,7 +70,7 @@ public class HighlighterParseElement implements SearchParseElement { public void parse(XContentParser parser, SearchContext context) throws Exception { try { context.highlight(parse(parser, context.queryParserService())); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { throw new SearchParseException(context, "Error while trying to parse Highlighter element in request"); } } @@ -110,7 +110,7 @@ public class HighlighterParseElement implements SearchParseElement { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { if (highlightFieldName != null) { - throw new ElasticsearchIllegalArgumentException("If highlighter fields is an array it must contain objects containing a single field"); + throw new IllegalArgumentException("If highlighter fields is an array it must contain objects containing a single field"); } highlightFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { @@ -118,7 +118,7 @@ public class HighlighterParseElement implements SearchParseElement { } } } else { - throw new ElasticsearchIllegalArgumentException("If highlighter fields is an array it must contain objects containing a single field"); + throw new IllegalArgumentException("If highlighter fields is an array it must contain objects containing a single field"); } } } @@ -181,7 +181,7 @@ public class HighlighterParseElement implements SearchParseElement { final SearchContextHighlight.FieldOptions globalOptions = globalOptionsBuilder.build(); if (globalOptions.preTags() != null && globalOptions.postTags() == null) { - throw new ElasticsearchIllegalArgumentException("Highlighter global preTags are set, but global postTags are not set"); + throw new IllegalArgumentException("Highlighter global preTags are set, but global postTags are not set"); } final List fields = Lists.newArrayList(); diff --git a/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java index e246fd214b9..d1de1f5de8a 100644 --- a/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java +++ b/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java @@ -26,7 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.*; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; @@ -83,7 +83,7 @@ public class PlainHighlighter implements Highlighter { } else if ("span".equals(field.fieldOptions().fragmenter())) { fragmenter = new SimpleSpanFragmenter(queryScorer, field.fieldOptions().fragmentCharSize()); } else { - throw new ElasticsearchIllegalArgumentException("unknown fragmenter option [" + field.fieldOptions().fragmenter() + "] for the field [" + highlighterContext.fieldName + "]"); + throw new IllegalArgumentException("unknown fragmenter option [" + field.fieldOptions().fragmenter() + "] for the field [" + highlighterContext.fieldName + "]"); } Formatter formatter = new SimpleHTMLFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0]); diff --git a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java index 5da34fe5d5e..8ad5a060a73 100644 --- a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java +++ b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java @@ -41,7 +41,7 @@ import org.apache.lucene.search.postingshighlight.WholeBreakIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.UnicodeUtil; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.text.StringText; @@ -75,7 +75,7 @@ public class PostingsHighlighter implements Highlighter { FieldMapper fieldMapper = highlighterContext.mapper; SearchContextHighlight.Field field = highlighterContext.field; if (fieldMapper.fieldType().indexOptions() != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) { - throw new ElasticsearchIllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter"); + throw new IllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter"); } SearchContext context = highlighterContext.context; diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java index bbdeece6ff0..4c7a1906723 100644 --- a/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java +++ b/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.lookup; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ScriptDocValues; @@ -76,7 +76,7 @@ public class LeafDocLookup implements Map { if (scriptValues == null) { FieldMapper mapper = mapperService.smartNameFieldMapper(fieldName, types); if (mapper == null) { - throw new ElasticsearchIllegalArgumentException("No field found for [" + fieldName + "] in mapping with types " + Arrays.toString(types) + ""); + throw new IllegalArgumentException("No field found for [" + fieldName + "] in mapping with types " + Arrays.toString(types) + ""); } scriptValues = fieldDataService.getForField(mapper).load(reader).getScriptValues(); localCacheFieldData.put(fieldName, scriptValues); diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java index af246b8ba15..ccc81a1b684 100644 --- a/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java +++ b/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java @@ -22,7 +22,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import org.apache.lucene.index.LeafReader; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; @@ -139,7 +139,7 @@ public class LeafFieldsLookup implements Map { if (data == null) { FieldMapper mapper = mapperService.smartNameFieldMapper(name, types); if (mapper == null) { - throw new ElasticsearchIllegalArgumentException("No field found for [" + name + "] in mapping with types " + Arrays.toString(types) + ""); + throw new IllegalArgumentException("No field found for [" + name + "] in mapping with types " + Arrays.toString(types) + ""); } data = new FieldLookup(mapper); cachedFieldData.put(name, data); diff --git a/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java b/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java index f4c531e2f27..1d39b432008 100644 --- a/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java +++ b/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.SearchContext; @@ -35,7 +35,7 @@ public class TerminateAfterParseElement implements SearchParseElement { if (token == XContentParser.Token.VALUE_NUMBER) { int terminateAfterCount = parser.intValue(); if (terminateAfterCount <= 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be > 0"); } context.terminateAfter(parser.intValue()); } diff --git a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index e26a6ebc58b..9f63a59cd6b 100644 --- a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -24,8 +24,8 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.ParsedQuery; @@ -211,10 +211,10 @@ public final class QueryRescorer implements Rescorer { } else if ("multiply".equals(sScoreMode)) { rescoreContext.setScoreMode(ScoreMode.Multiply); } else { - throw new ElasticsearchIllegalArgumentException("[rescore] illegal score_mode [" + sScoreMode + "]"); + throw new IllegalArgumentException("[rescore] illegal score_mode [" + sScoreMode + "]"); } } else { - throw new ElasticsearchIllegalArgumentException("rescore doesn't support [" + fieldName + "]"); + throw new IllegalArgumentException("rescore doesn't support [" + fieldName + "]"); } } } @@ -314,7 +314,7 @@ public final class QueryRescorer implements Rescorer { try { context.searcher().createNormalizedWeight(((QueryRescoreContext) rescoreContext).query(), false).extractTerms(termsSet); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Failed to extract terms", e); + throw new IllegalStateException("Failed to extract terms", e); } } diff --git a/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java b/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java index f87165e584b..ecabfe6856f 100644 --- a/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java +++ b/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.rescore; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; @@ -62,12 +62,12 @@ public class RescoreParseElement implements SearchParseElement { if ("window_size".equals(fieldName)) { windowSize = parser.intValue(); } else { - throw new ElasticsearchIllegalArgumentException("rescore doesn't support [" + fieldName + "]"); + throw new IllegalArgumentException("rescore doesn't support [" + fieldName + "]"); } } } if (rescoreContext == null) { - throw new ElasticsearchIllegalArgumentException("missing rescore type"); + throw new IllegalArgumentException("missing rescore type"); } if (windowSize != null) { rescoreContext.setWindowSize(windowSize.intValue()); diff --git a/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 1e1445ec480..fa8fb679097 100644 --- a/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.sort; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.FilterBuilder; @@ -53,7 +53,7 @@ public class FieldSortBuilder extends SortBuilder { */ public FieldSortBuilder(String fieldName) { if (fieldName == null) { - throw new ElasticsearchIllegalArgumentException("fieldName must not be null"); + throw new IllegalArgumentException("fieldName must not be null"); } this.fieldName = fieldName; } diff --git a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java index 7f3aaf63f49..e78d90be38f 100644 --- a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java @@ -27,7 +27,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoDistance.FixedSourceDistance; @@ -136,12 +136,12 @@ public class GeoDistanceSortParser implements SortParser { } if (sortMode == MultiValueMode.SUM) { - throw new ElasticsearchIllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance"); + throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance"); } FieldMapper mapper = context.smartNameFieldMapper(fieldName); if (mapper == null) { - throw new ElasticsearchIllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); + throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); } final MultiValueMode finalSortMode = sortMode; // final reference for use in the anonymous class final IndexGeoPointFieldData geoIndexFieldData = context.fieldData().getForField(mapper); diff --git a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index 17eebafae09..bb7656a9ebc 100644 --- a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitDocIdSetFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lucene.search.Queries; @@ -89,7 +89,7 @@ public class SortParseElement implements SearchParseElement { } else if (token == XContentParser.Token.VALUE_STRING) { addSortField(context, sortFields, parser.text(), false, null, null, null, null); } else { - throw new ElasticsearchIllegalArgumentException("malformed sort format, within the sort array, an object, or an actual string are allowed"); + throw new IllegalArgumentException("malformed sort format, within the sort array, an object, or an actual string are allowed"); } } } else if (token == XContentParser.Token.VALUE_STRING) { @@ -97,7 +97,7 @@ public class SortParseElement implements SearchParseElement { } else if (token == XContentParser.Token.START_OBJECT) { addCompoundSortField(parser, context, sortFields); } else { - throw new ElasticsearchIllegalArgumentException("malformed sort format, either start with array, object, or an actual string"); + throw new IllegalArgumentException("malformed sort format, either start with array, object, or an actual string"); } if (!sortFields.isEmpty()) { // optimize if we just sort on score non reversed, we don't really need sorting @@ -137,7 +137,7 @@ public class SortParseElement implements SearchParseElement { } else if (direction.equals("desc")) { reverse = !SCORE_FIELD_NAME.equals(fieldName); } else { - throw new ElasticsearchIllegalArgumentException("sort direction [" + fieldName + "] not supported"); + throw new IllegalArgumentException("sort direction [" + fieldName + "] not supported"); } addSortField(context, sortFields, fieldName, reverse, unmappedType, missing, sortMode, nestedFilterParseHelper); } else { @@ -174,7 +174,7 @@ public class SortParseElement implements SearchParseElement { } nestedFilterParseHelper.setPath(parser.text()); } else { - throw new ElasticsearchIllegalArgumentException("sort option [" + innerJsonName + "] not supported"); + throw new IllegalArgumentException("sort option [" + innerJsonName + "] not supported"); } } else if (token == XContentParser.Token.START_OBJECT) { if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) { @@ -183,7 +183,7 @@ public class SortParseElement implements SearchParseElement { } nestedFilterParseHelper.filter(); } else { - throw new ElasticsearchIllegalArgumentException("sort option [" + innerJsonName + "] not supported"); + throw new IllegalArgumentException("sort option [" + innerJsonName + "] not supported"); } } } diff --git a/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/src/main/java/org/elasticsearch/search/suggest/Suggest.java index 84b324c92c5..f4057cacc0c 100644 --- a/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -20,8 +20,8 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.Version; +import java.lang.IllegalStateException; + import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -253,7 +253,7 @@ public class Suggest implements Iterable suggestion : toReduce) { if(suggestion.entries.size() != size) { - throw new ElasticsearchIllegalStateException("Can't merge suggest result, this might be caused by suggest calls " + + throw new IllegalStateException("Can't merge suggest result, this might be caused by suggest calls " + "across multiple indices with different analysis chains. Suggest entries have different sizes actual [" + suggestion.entries.size() + "] expected [" + size +"]"); } @@ -375,7 +375,7 @@ public class Suggest implements Iterable leader = toReduce.get(0); for (Entry entry : toReduce) { if (!leader.text.equals(entry.text)) { - throw new ElasticsearchIllegalStateException("Can't merge suggest entries, this might be caused by suggest calls " + + throw new IllegalStateException("Can't merge suggest entries, this might be caused by suggest calls " + "across multiple indices with different analysis chains. Suggest entries have different text actual [" + entry.text + "] expected [" + leader.text +"]"); } diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java b/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java index fa0cfbef139..652d7e3a7ed 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.client.Requests; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; @@ -32,8 +32,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.suggest.context.ContextMapping.ContextQuery; import org.elasticsearch.search.suggest.context.CategoryContextMapping; import org.elasticsearch.search.suggest.context.GeolocationContextMapping; -import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; -import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; /** * Defines how to perform suggesting. This builders allows a number of global options to be specified and @@ -288,7 +286,7 @@ public class SuggestBuilder implements ToXContent { @SuppressWarnings("unchecked") public T size(int size) { if (size <= 0) { - throw new ElasticsearchIllegalArgumentException("Size must be positive"); + throw new IllegalArgumentException("Size must be positive"); } this.size = size; return (T)this; diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java b/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java index 728eabb065c..2a25215eb9a 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -63,7 +63,7 @@ public final class SuggestParseElement implements SearchParseElement { if ("text".equals(fieldName)) { globalText = parser.utf8Bytes(); } else { - throw new ElasticsearchIllegalArgumentException("[suggest] does not support [" + fieldName + "]"); + throw new IllegalArgumentException("[suggest] does not support [" + fieldName + "]"); } } else if (token == XContentParser.Token.START_OBJECT) { String suggestionName = fieldName; @@ -77,14 +77,14 @@ public final class SuggestParseElement implements SearchParseElement { if ("text".equals(fieldName)) { suggestText = parser.utf8Bytes(); } else { - throw new ElasticsearchIllegalArgumentException("[suggest] does not support [" + fieldName + "]"); + throw new IllegalArgumentException("[suggest] does not support [" + fieldName + "]"); } } else if (token == XContentParser.Token.START_OBJECT) { if (suggestionName == null) { - throw new ElasticsearchIllegalArgumentException("Suggestion must have name"); + throw new IllegalArgumentException("Suggestion must have name"); } if (suggesters.get(fieldName) == null) { - throw new ElasticsearchIllegalArgumentException("Suggester[" + fieldName + "] not supported"); + throw new IllegalArgumentException("Suggester[" + fieldName + "] not supported"); } final SuggestContextParser contextParser = suggesters.get(fieldName).getContextParser(); suggestionContext = contextParser.parse(parser, mapperService); diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java b/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java index 1e355ef3fcc..c28201bfd92 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java @@ -29,7 +29,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.automaton.LevenshteinAutomata; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.FastCharArrayReader; import org.elasticsearch.common.xcontent.XContentParser; @@ -65,7 +65,7 @@ public final class SuggestUtils { comparator = LUCENE_FREQUENCY; break; default: - throw new ElasticsearchIllegalArgumentException("Illegal suggest sort: " + suggestion.sort()); + throw new IllegalArgumentException("Illegal suggest sort: " + suggestion.sort()); } directSpellChecker.setComparator(comparator); directSpellChecker.setDistance(suggestion.stringDistance()); @@ -144,7 +144,7 @@ public final class SuggestUtils { } else if ("always".equals(suggestMode)) { return SuggestMode.SUGGEST_ALWAYS; } else { - throw new ElasticsearchIllegalArgumentException("Illegal suggest mode " + suggestMode); + throw new IllegalArgumentException("Illegal suggest mode " + suggestMode); } } @@ -154,7 +154,7 @@ public final class SuggestUtils { } else if ("frequency".equals(sortVal)) { return Suggest.Suggestion.Sort.FREQUENCY; } else { - throw new ElasticsearchIllegalArgumentException("Illegal suggest sort " + sortVal); + throw new IllegalArgumentException("Illegal suggest sort " + sortVal); } } @@ -171,7 +171,7 @@ public final class SuggestUtils { } else if ("ngram".equals(distanceVal)) { return new NGramDistance(); } else { - throw new ElasticsearchIllegalArgumentException("Illegal distance option " + distanceVal); + throw new IllegalArgumentException("Illegal distance option " + distanceVal); } } @@ -203,7 +203,7 @@ public final class SuggestUtils { } else if (Fields.MAX_EDITS.match(fieldName)) { suggestion.maxEdits(parser.intValue()); if (suggestion.maxEdits() < 1 || suggestion.maxEdits() > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) { - throw new ElasticsearchIllegalArgumentException("Illegal max_edits value " + suggestion.maxEdits()); + throw new IllegalArgumentException("Illegal max_edits value " + suggestion.maxEdits()); } } else if (Fields.MAX_INSPECTIONS.match(fieldName)) { suggestion.maxInspections(parser.intValue()); @@ -228,7 +228,7 @@ public final class SuggestUtils { String analyzerName = parser.text(); Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); + throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); } suggestion.setAnalyzer(analyzer); } else if ("field".equals(fieldName)) { @@ -248,11 +248,11 @@ public final class SuggestUtils { public static void verifySuggestion(MapperService mapperService, BytesRef globalText, SuggestionContext suggestion) { // Verify options and set defaults if (suggestion.getField() == null) { - throw new ElasticsearchIllegalArgumentException("The required field option is missing"); + throw new IllegalArgumentException("The required field option is missing"); } if (suggestion.getText() == null) { if (globalText == null) { - throw new ElasticsearchIllegalArgumentException("The required text option is missing"); + throw new IllegalArgumentException("The required text option is missing"); } suggestion.setText(globalText); } diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java b/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java index bf03527f698..59aecba1e06 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import java.util.LinkedHashMap; import java.util.Map; @@ -87,7 +87,7 @@ public class SuggestionSearchContext { public void setSize(int size) { if (size <= 0) { - throw new ElasticsearchIllegalArgumentException("Size must be positive but was: " + size); + throw new IllegalArgumentException("Size must be positive but was: " + size); } this.size = size; } @@ -98,7 +98,7 @@ public class SuggestionSearchContext { public void setShardSize(int shardSize) { if (shardSize <= 0) { - throw new ElasticsearchIllegalArgumentException("ShardSize must be positive but was: " + shardSize); + throw new IllegalArgumentException("ShardSize must be positive but was: " + shardSize); } this.shardSize = shardSize; } diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java b/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java index 4606d824f72..88be9d2a22f 100644 --- a/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java +++ b/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java @@ -44,7 +44,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.mapper.core.CompletionFieldMapper; @@ -176,7 +176,7 @@ public class Completion090PostingsFormat extends PostingsFormat { String providerName = input.readString(); CompletionLookupProvider completionLookupProvider = providers.get(providerName); if (completionLookupProvider == null) { - throw new ElasticsearchIllegalStateException("no provider with name [" + providerName + "] registered"); + throw new IllegalStateException("no provider with name [" + providerName + "] registered"); } // TODO: we could clone the ReadState and make it always forward IOContext.MERGE to prevent unecessary heap usage? delegateProducer = delegatePostingsFormat.fieldsProducer(state); diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java index aad44e31da5..bbf65de6a45 100644 --- a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java +++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.suggest.completion; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.Fuzziness; @@ -93,10 +93,10 @@ public class CompletionSuggestParser implements SuggestContextParser { BytesReference bytes = builder.bytes(); contextParser = parser.contentType().xContent().createParser(bytes); } else { - throw new ElasticsearchIllegalArgumentException("suggester [completion] doesn't support field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester [completion] doesn't support field [" + fieldName + "]"); } } else { - throw new ElasticsearchIllegalArgumentException("suggester[completion] doesn't support field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[completion] doesn't support field [" + fieldName + "]"); } } @@ -106,14 +106,14 @@ public class CompletionSuggestParser implements SuggestContextParser { if (mapper != null) { if (mapper.requiresContext()) { if (contextParser == null) { - throw new ElasticsearchIllegalArgumentException("suggester [completion] requires context to be setup"); + throw new IllegalArgumentException("suggester [completion] requires context to be setup"); } else { contextParser.nextToken(); List contextQueries = ContextQuery.parseQueries(mapper.getContextMapping(), contextParser); suggestion.setContextQuery(contextQueries); } } else if (contextParser != null) { - throw new ElasticsearchIllegalArgumentException("suggester [completion] doesn't expect any context"); + throw new IllegalArgumentException("suggester [completion] doesn't expect any context"); } } return suggestion; diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java index 03ce464b785..9f615eb9f6d 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.spell.SuggestWord; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.search.suggest.SuggestUtils; import java.io.IOException; @@ -58,7 +58,7 @@ public final class DirectCandidateGenerator extends CandidateGenerator { public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader, double nonErrorLikelihood, int numCandidates, Analyzer preFilter, Analyzer postFilter, Terms terms) throws IOException { if (terms == null) { - throw new ElasticsearchIllegalArgumentException("generator field [" + field + "] doesn't exist"); + throw new IllegalArgumentException("generator field [" + field + "] doesn't exist"); } this.spellchecker = spellchecker; this.field = field; diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 5fe964017d9..18e974336b5 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -22,7 +22,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; @@ -63,24 +63,24 @@ public final class PhraseSuggestParser implements SuggestContextParser { if ("real_word_error_likelihood".equals(fieldName) || "realWorldErrorLikelihood".equals(fieldName)) { suggestion.setRealWordErrorLikelihood(parser.floatValue()); if (suggestion.realworldErrorLikelyhood() <= 0.0) { - throw new ElasticsearchIllegalArgumentException("real_word_error_likelihood must be > 0.0"); + throw new IllegalArgumentException("real_word_error_likelihood must be > 0.0"); } } else if ("confidence".equals(fieldName)) { suggestion.setConfidence(parser.floatValue()); if (suggestion.confidence() < 0.0) { - throw new ElasticsearchIllegalArgumentException("confidence must be >= 0.0"); + throw new IllegalArgumentException("confidence must be >= 0.0"); } } else if ("separator".equals(fieldName)) { suggestion.setSeparator(new BytesRef(parser.text())); } else if ("max_errors".equals(fieldName) || "maxErrors".equals(fieldName)) { suggestion.setMaxErrors(parser.floatValue()); if (suggestion.maxErrors() <= 0.0) { - throw new ElasticsearchIllegalArgumentException("max_error must be > 0.0"); + throw new IllegalArgumentException("max_error must be > 0.0"); } } else if ("gram_size".equals(fieldName) || "gramSize".equals(fieldName)) { suggestion.setGramSize(parser.intValue()); if (suggestion.gramSize() < 1) { - throw new ElasticsearchIllegalArgumentException("gram_size must be >= 1"); + throw new IllegalArgumentException("gram_size must be >= 1"); } gramSizeSet = true; } else if ("force_unigrams".equals(fieldName) || "forceUnigrams".equals(fieldName)) { @@ -88,11 +88,11 @@ public final class PhraseSuggestParser implements SuggestContextParser { } else if ("token_limit".equals(fieldName) || "tokenLimit".equals(fieldName)) { int tokenLimit = parser.intValue(); if (tokenLimit <= 0) { - throw new ElasticsearchIllegalArgumentException("token_limit must be >= 1"); + throw new IllegalArgumentException("token_limit must be >= 1"); } suggestion.setTokenLimit(tokenLimit); } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); } } } else if (token == Token.START_ARRAY) { @@ -112,7 +112,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { suggestion.addGenerator(generator); } } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); } } else if (token == Token.START_OBJECT) { if ("smoothing".equals(fieldName)) { @@ -127,7 +127,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { } else if ("post_tag".equals(fieldName) || "postTag".equals(fieldName)) { suggestion.setPostTag(parser.utf8Bytes()); } else { - throw new ElasticsearchIllegalArgumentException( + throw new IllegalArgumentException( "suggester[phrase][highlight] doesn't support field [" + fieldName + "]"); } } @@ -146,13 +146,13 @@ public final class PhraseSuggestParser implements SuggestContextParser { templateNameOrTemplateContent = parser.text(); } if (templateNameOrTemplateContent == null) { - throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] no query/filter found in collate object"); + throw new IllegalArgumentException("suggester[phrase][collate] no query/filter found in collate object"); } if (suggestion.getCollateFilterScript() != null) { - throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] filter already set, doesn't support additional [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase][collate] filter already set, doesn't support additional [" + fieldName + "]"); } if (suggestion.getCollateQueryScript() != null) { - throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); } CompiledScript compiledScript = suggester.scriptService().compile(new Script(MustacheScriptEngineService.NAME, templateNameOrTemplateContent, ScriptType.INLINE, null), ScriptContext.Standard.SEARCH); if ("query".equals(fieldName)) { @@ -168,28 +168,28 @@ public final class PhraseSuggestParser implements SuggestContextParser { if (parser.isBooleanValue()) { suggestion.setCollatePrune(parser.booleanValue()); } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] prune must be either 'true' or 'false'"); + throw new IllegalArgumentException("suggester[phrase][collate] prune must be either 'true' or 'false'"); } } else { - throw new ElasticsearchIllegalArgumentException( + throw new IllegalArgumentException( "suggester[phrase][collate] doesn't support field [" + fieldName + "]"); } } } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); } } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); } } if (suggestion.getField() == null) { - throw new ElasticsearchIllegalArgumentException("The required field option is missing"); + throw new IllegalArgumentException("The required field option is missing"); } FieldMapper fieldMapper = mapperService.smartNameFieldMapper(suggestion.getField()); if (fieldMapper == null) { - throw new ElasticsearchIllegalArgumentException("No mapping found for field [" + suggestion.getField() + "]"); + throw new IllegalArgumentException("No mapping found for field [" + suggestion.getField() + "]"); } else if (suggestion.getAnalyzer() == null) { // no analyzer name passed in, so try the field's analyzer, or the default analyzer if (fieldMapper.searchAnalyzer() == null) { @@ -210,13 +210,13 @@ public final class PhraseSuggestParser implements SuggestContextParser { if (shingleFilterFactory != null) { suggestion.setGramSize(shingleFilterFactory.getMaxShingleSize()); if (suggestion.getAnalyzer() == null && shingleFilterFactory.getMinShingleSize() > 1 && !shingleFilterFactory.getOutputUnigrams()) { - throw new ElasticsearchIllegalArgumentException("The default analyzer for field: [" + suggestion.getField() + "] doesn't emit unigrams. If this is intentional try to set the analyzer explicitly"); + throw new IllegalArgumentException("The default analyzer for field: [" + suggestion.getField() + "] doesn't emit unigrams. If this is intentional try to set the analyzer explicitly"); } } } if (suggestion.generators().isEmpty()) { if (shingleFilterFactory != null && shingleFilterFactory.getMinShingleSize() > 1 && !shingleFilterFactory.getOutputUnigrams() && suggestion.getRequireUnigram()) { - throw new ElasticsearchIllegalArgumentException("The default candidate generator for phrase suggest can't operate on field: [" + suggestion.getField() + "] since it doesn't emit unigrams. If this is intentional try to set the candidate generator field explicitly"); + throw new IllegalArgumentException("The default candidate generator for phrase suggest can't operate on field: [" + suggestion.getField() + "] since it doesn't emit unigrams. If this is intentional try to set the candidate generator field explicitly"); } // use a default generator on the same field DirectCandidateGenerator generator = new DirectCandidateGenerator(); @@ -246,20 +246,20 @@ public final class PhraseSuggestParser implements SuggestContextParser { if ("trigram_lambda".equals(fieldName) || "trigramLambda".equals(fieldName)) { lambdas[0] = parser.doubleValue(); if (lambdas[0] < 0) { - throw new ElasticsearchIllegalArgumentException("trigram_lambda must be positive"); + throw new IllegalArgumentException("trigram_lambda must be positive"); } } else if ("bigram_lambda".equals(fieldName) || "bigramLambda".equals(fieldName)) { lambdas[1] = parser.doubleValue(); if (lambdas[1] < 0) { - throw new ElasticsearchIllegalArgumentException("bigram_lambda must be positive"); + throw new IllegalArgumentException("bigram_lambda must be positive"); } } else if ("unigram_lambda".equals(fieldName) || "unigramLambda".equals(fieldName)) { lambdas[2] = parser.doubleValue(); if (lambdas[2] < 0) { - throw new ElasticsearchIllegalArgumentException("unigram_lambda must be positive"); + throw new IllegalArgumentException("unigram_lambda must be positive"); } } else { - throw new ElasticsearchIllegalArgumentException( + throw new IllegalArgumentException( "suggester[phrase][smoothing][linear] doesn't support field [" + fieldName + "]"); } } @@ -269,7 +269,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { sum += lambdas[i]; } if (Math.abs(sum - 1.0) > 0.001) { - throw new ElasticsearchIllegalArgumentException("linear smoothing lambdas must sum to 1"); + throw new IllegalArgumentException("linear smoothing lambdas must sum to 1"); } suggestion.setModel(new WordScorer.WordScorerFactory() { @Override @@ -321,7 +321,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { }); } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]"); } } } @@ -329,14 +329,14 @@ public final class PhraseSuggestParser implements SuggestContextParser { private void ensureNoSmoothing(PhraseSuggestionContext suggestion) { if (suggestion.model() != null) { - throw new ElasticsearchIllegalArgumentException("only one smoothing model supported"); + throw new IllegalArgumentException("only one smoothing model supported"); } } private void verifyGenerator(PhraseSuggestionContext.DirectCandidateGenerator suggestion) { // Verify options and set defaults if (suggestion.field() == null) { - throw new ElasticsearchIllegalArgumentException("The required field option is missing"); + throw new IllegalArgumentException("The required field option is missing"); } } @@ -346,7 +346,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { if ("field".equals(fieldName)) { generator.setField(parser.text()); if (mapperService.smartNameFieldMapper(generator.field()) == null) { - throw new ElasticsearchIllegalArgumentException("No mapping found for field [" + generator.field() + "]"); + throw new IllegalArgumentException("No mapping found for field [" + generator.field() + "]"); } } else if ("size".equals(fieldName)) { generator.size(parser.intValue()); @@ -354,18 +354,18 @@ public final class PhraseSuggestParser implements SuggestContextParser { String analyzerName = parser.text(); Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); + throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); } generator.preFilter(analyzer); } else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) { String analyzerName = parser.text(); Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); + throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); } generator.postFilter(analyzer); } else { - throw new ElasticsearchIllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]"); + throw new IllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index afa5595688a..a66c715f3bc 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.suggest.phrase; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; @@ -59,7 +59,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder= 1"); + throw new IllegalArgumentException("gramSize must be >= 1"); } this.gramSize = gramSize; return this; @@ -164,7 +164,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder extends BaseFutur return get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new ElasticsearchIllegalStateException("Future got interrupted", e); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { if (e.getCause() instanceof ElasticsearchException) { throw (ElasticsearchException) e.getCause(); @@ -62,7 +62,7 @@ public class PlainTransportFuture extends BaseFutur } catch (TimeoutException e) { throw new ElasticsearchTimeoutException(e.getMessage()); } catch (InterruptedException e) { - throw new ElasticsearchIllegalStateException("Future got interrupted", e); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { if (e.getCause() instanceof ElasticsearchException) { throw (ElasticsearchException) e.getCause(); diff --git a/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 24cea195224..10be853ccc7 100644 --- a/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -19,7 +19,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import java.lang.reflect.Constructor; @@ -40,7 +40,7 @@ public class RequestHandlerRegistry { try { this.requestConstructor = request.getDeclaredConstructor(); } catch (NoSuchMethodException e) { - throw new ElasticsearchIllegalStateException("failed to create constructor (does it have a default constructor?) for request " + request, e); + throw new IllegalStateException("failed to create constructor (does it have a default constructor?) for request " + request, e); } this.requestConstructor.setAccessible(true); assert newRequest() != null; @@ -57,7 +57,7 @@ public class RequestHandlerRegistry { try { return requestConstructor.newInstance(); } catch (Exception e) { - throw new ElasticsearchIllegalStateException("failed to instantiate request ", e); + throw new IllegalStateException("failed to instantiate request ", e); } } diff --git a/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java b/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java index 5452b020be0..f8a1e1fb544 100644 --- a/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java +++ b/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java @@ -19,7 +19,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.unit.TimeValue; /** @@ -52,7 +52,7 @@ public class TransportRequestOptions { } else if ("ping".equalsIgnoreCase(type)) { return PING; } else { - throw new ElasticsearchIllegalArgumentException("failed to match transport type for [" + type + "]"); + throw new IllegalArgumentException("failed to match transport type for [" + type + "]"); } } } diff --git a/src/main/java/org/elasticsearch/transport/TransportService.java b/src/main/java/org/elasticsearch/transport/TransportService.java index ad17baa908d..bc031d71d9a 100644 --- a/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/src/main/java/org/elasticsearch/transport/TransportService.java @@ -21,7 +21,7 @@ package org.elasticsearch.transport; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.settings.ClusterDynamicSettings; import org.elasticsearch.cluster.settings.DynamicSettings; @@ -275,7 +275,7 @@ public class TransportService extends AbstractLifecycleComponent void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request, final TransportRequestOptions options, TransportResponseHandler handler) { if (node == null) { - throw new ElasticsearchIllegalStateException("can't send request to a null node"); + throw new IllegalStateException("can't send request to a null node"); } final long requestId = newRequestId(); final TimeoutHandler timeoutHandler; diff --git a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index 0201829fe08..0a8e7187385 100644 --- a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -19,7 +19,7 @@ package org.elasticsearch.transport.netty; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.compress.Compressor; @@ -101,7 +101,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { sb.append(buffer.getByte(offset + i)).append(","); } sb.append("]"); - throw new ElasticsearchIllegalStateException(sb.toString()); + throw new IllegalStateException(sb.toString()); } wrappedStream = compressor.streamInput(streamIn); } else { diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 1d9914e4b81..f098f7d8556 100644 --- a/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -184,13 +184,13 @@ public class NettyTransport extends AbstractLifecycleComponent implem // we want to have at least 1 for reg/state/ping if (this.connectionsPerNodeReg == 0) { - throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.reg] to 0"); + throw new IllegalArgumentException("can't set [connection_per_node.reg] to 0"); } if (this.connectionsPerNodePing == 0) { - throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.ping] to 0"); + throw new IllegalArgumentException("can't set [connection_per_node.ping] to 0"); } if (this.connectionsPerNodeState == 0) { - throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.state] to 0"); + throw new IllegalArgumentException("can't set [connection_per_node.state] to 0"); } long defaultReceiverPredictor = 512 * 1024; @@ -736,7 +736,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem public void connectToNode(DiscoveryNode node, boolean light) { if (!lifecycle.started()) { - throw new ElasticsearchIllegalStateException("can't add nodes to a stopped transport"); + throw new IllegalStateException("can't add nodes to a stopped transport"); } if (node == null) { throw new ConnectTransportException(null, "can't connect to a null node"); @@ -746,7 +746,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem connectionLock.acquire(node.id()); try { if (!lifecycle.started()) { - throw new ElasticsearchIllegalStateException("can't add nodes to a stopped transport"); + throw new IllegalStateException("can't add nodes to a stopped transport"); } NodeChannels nodeChannels = connectedNodes.get(node); if (nodeChannels != null) { @@ -1107,7 +1107,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } else if (type == TransportRequestOptions.Type.RECOVERY) { return recovery[MathUtils.mod(recoveryCounter.incrementAndGet(), recovery.length)]; } else { - throw new ElasticsearchIllegalArgumentException("no type channel for [" + type + "]"); + throw new IllegalArgumentException("no type channel for [" + type + "]"); } } diff --git a/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java b/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java index 0e59fdae14c..a1f99108883 100644 --- a/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java +++ b/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java @@ -19,7 +19,7 @@ package org.elasticsearch.watcher; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -135,7 +135,7 @@ public class ResourceWatcherService extends AbstractLifecycleComponent allocating to non-data node, should fail"); try { rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node4", true))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> allocating with primary flag set to true"); @@ -158,7 +158,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> allocate the replica shard on on the second node"); @@ -183,7 +183,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node3", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } } @@ -225,7 +225,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> start the primary shard"); @@ -239,7 +239,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> allocate the replica shard on on the second node"); @@ -273,7 +273,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> start the replica shard"); diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index c298067b250..2b782f92d2c 100644 --- a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; @@ -44,7 +44,6 @@ import org.elasticsearch.test.gateway.NoopGatewayAllocator; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ElasticsearchAllocationTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.util.Arrays; @@ -787,7 +786,7 @@ public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase { // throw an exception about not being able to complete strategy.reroute(clusterState, cmds).routingTable(); fail("should not have been able to reroute the shard"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat("can't allocated because there isn't enough room: " + e.getMessage(), e.getMessage().contains("less than required [30.0%] free disk on node, free: [26.0%]"), equalTo(true)); } diff --git a/src/test/java/org/elasticsearch/common/BooleansTests.java b/src/test/java/org/elasticsearch/common/BooleansTests.java index 24058ac5438..0869f3ba772 100644 --- a/src/test/java/org/elasticsearch/common/BooleansTests.java +++ b/src/test/java/org/elasticsearch/common/BooleansTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; import org.junit.Test; @@ -78,7 +78,7 @@ public class BooleansTests extends ElasticsearchTestCase { Booleans.parseBooleanExact(randomFrom(null, "fred", "foo", "barney")); fail("Expected exception while parsing invalid boolean value "); } catch (Exception ex) { - assertTrue(ex instanceof ElasticsearchIllegalArgumentException); + assertTrue(ex instanceof IllegalArgumentException); } } diff --git a/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/src/test/java/org/elasticsearch/common/ParseFieldTests.java index ef4d2e093f6..8897e60e5d7 100644 --- a/src/test/java/org/elasticsearch/common/ParseFieldTests.java +++ b/src/test/java/org/elasticsearch/common/ParseFieldTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common; import org.apache.commons.lang3.ArrayUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -58,14 +58,14 @@ public class ParseFieldTests extends ElasticsearchTestCase { try { withDeprecations.match(randomFrom(deprecated), flags); fail(); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } try { withDeprecations.match("barFoo", flags); fail(); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } } @@ -96,7 +96,7 @@ public class ParseFieldTests extends ElasticsearchTestCase { try { field.match(randomFrom(allValues), flags); fail(); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } } } diff --git a/src/test/java/org/elasticsearch/common/PidFileTests.java b/src/test/java/org/elasticsearch/common/PidFileTests.java index 0c66b411c60..1593ffe512c 100644 --- a/src/test/java/org/elasticsearch/common/PidFileTests.java +++ b/src/test/java/org/elasticsearch/common/PidFileTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.common; import com.google.common.base.Charsets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -35,7 +35,7 @@ import java.nio.file.StandardOpenOption; */ public class PidFileTests extends ElasticsearchTestCase { - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testParentIsFile() throws IOException { Path dir = createTempDir(); Path parent = dir.resolve("foo"); diff --git a/src/test/java/org/elasticsearch/common/TableTests.java b/src/test/java/org/elasticsearch/common/TableTests.java index 919e1c4dd51..d9c608881e6 100644 --- a/src/test/java/org/elasticsearch/common/TableTests.java +++ b/src/test/java/org/elasticsearch/common/TableTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -28,37 +28,37 @@ import java.util.Map; public class TableTests extends ElasticsearchTestCase { - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnStartRowWithoutHeader() { Table table = new Table(); table.startRow(); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnEndHeadersWithoutStart() { Table table = new Table(); table.endHeaders(); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnAddCellWithoutHeader() { Table table = new Table(); table.addCell("error"); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnAddCellWithoutRow() { Table table = this.getTableWithHeaders(); table.addCell("error"); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnEndRowWithoutStart() { Table table = this.getTableWithHeaders(); table.endRow(); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnLessCellsThanDeclared() { Table table = this.getTableWithHeaders(); table.startRow(); @@ -74,7 +74,7 @@ public class TableTests extends ElasticsearchTestCase { table.endRow(false); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnMoreCellsThanDeclared() { Table table = this.getTableWithHeaders(); table.startRow(); diff --git a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java index 077970941f3..66127c0618b 100644 --- a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java +++ b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.collect; import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import java.util.HashMap; @@ -138,14 +138,14 @@ public class CopyOnWriteHashMapTests extends ElasticsearchTestCase { try { new CopyOnWriteHashMap<>().copyAndPut("a", null); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected } try { new CopyOnWriteHashMap<>().copyAndPut(null, "b"); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected } } diff --git a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java index 86e4611c398..8028781bda6 100644 --- a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java +++ b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.collect; import com.google.common.collect.ImmutableSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import java.util.HashSet; @@ -120,7 +120,7 @@ public class CopyOnWriteHashSetTests extends ElasticsearchTestCase { try { new CopyOnWriteHashSet<>().copyAndAdd(null); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected } } diff --git a/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java index 2c767d645bd..4da1582f090 100644 --- a/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java +++ b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java @@ -27,7 +27,7 @@ import com.spatial4j.core.shape.ShapeCollection; import com.spatial4j.core.shape.jts.JtsGeometry; import com.spatial4j.core.shape.jts.JtsPoint; import com.vividsolutions.jts.geom.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -35,7 +35,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions; -import org.junit.Test; import java.io.IOException; import java.util.ArrayList; @@ -539,7 +538,7 @@ public class GeoJSONShapeParserTests extends ElasticsearchTestCase { parser = JsonXContent.jsonXContent.createParser(invalidPoly); parser.nextToken(); - ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchIllegalArgumentException.class); + ElasticsearchGeoAssertions.assertValidException(parser, IllegalArgumentException.class); // test case 5: create an invalid polygon with 1 invalid LinearRing invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon") @@ -550,7 +549,7 @@ public class GeoJSONShapeParserTests extends ElasticsearchTestCase { parser = JsonXContent.jsonXContent.createParser(invalidPoly); parser.nextToken(); - ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchIllegalArgumentException.class); + ElasticsearchGeoAssertions.assertValidException(parser, IllegalArgumentException.class); // test case 6: create an invalid polygon with 0 LinearRings invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon") diff --git a/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java index 524ae7367a6..20c327410f8 100644 --- a/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java +++ b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.recycler; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.recycler.Recycler.V; import org.elasticsearch.test.ElasticsearchTestCase; @@ -121,7 +121,7 @@ public abstract class AbstractRecyclerTests extends ElasticsearchTestCase { v1.close(); try { v1.close(); - } catch (ElasticsearchIllegalStateException e) { + } catch (IllegalStateException e) { // impl has protection against double release: ok return; } diff --git a/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java b/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java index e52b9f8ff81..9bfc5af98f6 100644 --- a/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java +++ b/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.unit; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -63,7 +63,7 @@ public class SizeValueTests extends ElasticsearchTestCase { assertThat(SizeValue.parseSizeValue("1G").toString(), is(new SizeValue(1, SizeUnit.GIGA).toString())); } - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testThatNegativeValuesThrowException() { new SizeValue(-1); } diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java index b276f5787a9..08ffca233a2 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java @@ -19,7 +19,7 @@ package org.elasticsearch.discovery.zen; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -75,7 +75,7 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { try { shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()); fail("should ignore, because current state's master is not equal to new state's master"); - } catch (ElasticsearchIllegalStateException e) { + } catch (IllegalStateException e) { assertThat(e.getMessage(), containsString("cluster state from a different master then the current one, rejecting")); } diff --git a/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 154a043c996..31045dcb451 100644 --- a/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.env; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.ImmutableSettings; @@ -35,7 +35,6 @@ import org.junit.Test; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -61,7 +60,7 @@ public class NodeEnvironmentTests extends ElasticsearchTestCase { try { new NodeEnvironment(settings, new Environment(settings)); fail("env is already locked"); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { } env.close(); diff --git a/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java index 491b281b9f9..ef845b08885 100644 --- a/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.fieldstats; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.action.fieldstats.FieldStatsResponse; @@ -193,7 +193,7 @@ public class FieldStatsIntegrationTests extends ElasticsearchIntegrationTest { try { client().prepareFieldStats().setFields("value").get(); fail(); - } catch (ElasticsearchIllegalStateException e){ + } catch (IllegalStateException e){ assertThat(e.getMessage(), containsString("trying to merge the field stats of field [value]")); } diff --git a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java index 12b699a989b..1eb92f203da 100644 --- a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java +++ b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java @@ -21,7 +21,6 @@ package org.elasticsearch.gateway; import com.google.common.collect.Iterators; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -29,9 +28,8 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestRuleMarkFailure; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -44,10 +42,8 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ElasticsearchTestCase; -import org.junit.Assert; import org.junit.Test; -import java.io.Closeable; import java.io.InputStream; import java.io.IOException; import java.io.OutputStream; @@ -272,7 +268,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { try { format.loadLatestState(logger, dirs); fail("latest version can not be read"); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), startsWith("Could not find a state file to recover from among ")); } // write the next state file in the new format and ensure it get's a higher ID diff --git a/src/test/java/org/elasticsearch/get/GetActionTests.java b/src/test/java/org/elasticsearch/get/GetActionTests.java index 822ab88194c..94b83d94424 100644 --- a/src/test/java/org/elasticsearch/get/GetActionTests.java +++ b/src/test/java/org/elasticsearch/get/GetActionTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.get; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -785,7 +784,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest { try { client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("field1").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { //all well } @@ -794,7 +793,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest { try { client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("field1").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { //all well } } diff --git a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java index cd4ababaaf4..3db8c5039b7 100644 --- a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.FailedToResolveConfigException; @@ -57,7 +57,7 @@ public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase { AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); Assert.fail("path and array are configured"); } catch (Exception e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } } @@ -83,7 +83,7 @@ public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase { AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); fail("expected an exception indicating that you can't use [keep_words_path] with [keep_words] "); } catch (Throwable e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } } diff --git a/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java index 669613b4ebe..1500377fc18 100644 --- a/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java @@ -24,7 +24,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.*; import org.apache.lucene.analysis.reverse.ReverseStringFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings.Builder; @@ -58,7 +58,7 @@ public class NGramTokenizerFactoryTests extends ElasticsearchTokenStreamTestCase try { new NGramTokenizerFactory(index, indexSettings, name, settings).create(); fail(); - } catch (ElasticsearchIllegalArgumentException expected) { + } catch (IllegalArgumentException expected) { // OK } } @@ -153,7 +153,7 @@ public class NGramTokenizerFactoryTests extends ElasticsearchTokenStreamTestCase try { new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create(); fail("should fail side:back is not supported anymore"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } } diff --git a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java index 09a3478d752..0aceb27bc02 100644 --- a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java @@ -19,12 +19,11 @@ package org.elasticsearch.index.analysis; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.ModulesBuilder; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.env.Environment; @@ -68,7 +67,7 @@ public class PatternCaptureTokenFilterTests extends ElasticsearchTokenStreamTest } - @Test(expected=ElasticsearchIllegalArgumentException.class) + @Test(expected=IllegalArgumentException.class) public void testNoPatterns() { new PatternCaptureGroupTokenFilterFactory(new Index("test"), settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), "pattern_capture", settingsBuilder().put("pattern", "foobar").build()); } diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java index 9947a9bd36d..2e7e09f4ab9 100644 --- a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java @@ -22,7 +22,7 @@ package org.elasticsearch.index.analysis.commongrams; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.AnalysisService; @@ -46,7 +46,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); Assert.fail("[common_words] or [common_words_path] is set"); } catch (Exception e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } } @Test diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java index 8d3e7280c26..b4f074498a3 100644 --- a/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java +++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.engine; import org.apache.lucene.index.LiveIndexWriterConfig; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -108,7 +108,7 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest { try { client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get(); fail("settings update didn't fail, but should have"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // good } @@ -118,7 +118,7 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest { try { client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get(); fail("settings update didn't fail, but should have"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // good } @@ -128,7 +128,7 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest { try { client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get(); fail("settings update didn't fail, but should have"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // good } } diff --git a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java index 9036ed97736..eda3d9fd9c8 100644 --- a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.QueryWrapperFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.LocaleUtils; @@ -110,7 +110,7 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest { try { LocaleUtils.parse("de_DE_DE_DE"); fail(); - } catch(ElasticsearchIllegalArgumentException ex) { + } catch(IllegalArgumentException ex) { // expected } assertThat(Locale.ROOT, equalTo(LocaleUtils.parse(""))); diff --git a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java index 94af55f7f3d..f97bb8e7b80 100644 --- a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.mapper.ip; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; @@ -68,7 +68,7 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest { try { IpFieldMapper.ipToLong("127.0.011.1111111"); fail("Expected ip address parsing to fail but did not happen"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("not a valid ip address")); } } @@ -78,7 +78,7 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest { try { IpFieldMapper.ipToLong("2001:db8:0:8d3:0:8a2e:70:7344"); fail("Expected ip address parsing to fail but did not happen"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("not a valid ipv4 address")); } } @@ -100,14 +100,14 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest { try { defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field2", "").endObject().bytes()); } catch (MapperParsingException e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } // Verify that the default is false try { defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field3", "").endObject().bytes()); } catch (MapperParsingException e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } // Unless the global ignore_malformed option is set to true @@ -120,7 +120,7 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest { try { defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field2", "").endObject().bytes()); } catch (MapperParsingException e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } } diff --git a/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java index a045f3bbb6e..41c98311b8e 100644 --- a/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java +++ b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java @@ -20,10 +20,9 @@ package org.elasticsearch.index.merge.policy; import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.LogDocMergePolicy; -import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -94,19 +93,19 @@ public class MergePolicySettingsTest extends ElasticsearchTestCase { try { new LogDocMergePolicyProvider(createStore(build(-0.1)), service).getMergePolicy().getNoCFSRatio(); fail("exception expected"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } try { new LogDocMergePolicyProvider(createStore(build(1.1)), service).getMergePolicy().getNoCFSRatio(); fail("exception expected"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } try { new LogDocMergePolicyProvider(createStore(build("Falsch")), service).getMergePolicy().getNoCFSRatio(); fail("exception expected"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index 3b8a3512aeb..03ed89f4c79 100644 --- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -64,7 +64,7 @@ import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -2411,8 +2411,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { try { functionScoreQuery().add(factorFunction(2.0f).setWeight(2.0f)); fail("Expect exception here because boost_factor must not have a weight"); - } catch (ElasticsearchIllegalArgumentException e) { - assertThat(e.getDetailedMessage(), containsString(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE)); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE)); } query = jsonBuilder().startObject().startObject("function_score") .startArray("functions") diff --git a/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java b/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java index 65727baed2b..ad5b2b1d389 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.search.child; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -57,9 +57,9 @@ public class ScoreTypeTests extends ElasticsearchTestCase { } /** - * Should throw {@link ElasticsearchIllegalArgumentException} instead of NPE. + * Should throw {@link IllegalArgumentException} instead of NPE. */ - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void nullFromString_throwsException() { ScoreType.fromString(null); } @@ -67,7 +67,7 @@ public class ScoreTypeTests extends ElasticsearchTestCase { /** * Failure should not change (and the value should never match anything...). */ - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void unrecognizedFromString_throwsException() { ScoreType.fromString("unrecognized value"); } diff --git a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index d109e558beb..42761ca7a04 100644 --- a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.MutableShardRouting; import org.elasticsearch.cluster.routing.ShardRouting; @@ -58,7 +58,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { try { assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, "FOOBAR").build())); fail("exception expected"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } assertEquals(newValue, shard.isFlushOnClose()); @@ -147,7 +147,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { try { shard.deleteShardState(); fail("shard is active metadata delete must fail"); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { // fine - only delete if non-active } diff --git a/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index 39f48f8d637..eed8769475d 100644 --- a/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; @@ -52,7 +52,7 @@ public class ShardPathTests extends ElasticsearchTestCase { } } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailLoadShardPathOnMultiState() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) { ImmutableSettings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "0xDEADBEEF"); @@ -66,7 +66,7 @@ public class ShardPathTests extends ElasticsearchTestCase { } } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) { ImmutableSettings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "foobar"); diff --git a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java index 265155f74be..c0dd7d1b834 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.indices; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; @@ -774,7 +774,7 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest assertAcked(client().admin().indices().prepareOpen("_all").get()); try { verify(client().admin().indices().prepareUpdateSettings("barbaz").setSettings(ImmutableSettings.builder().put("e", "f")), false); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("Can't update non dynamic settings[[index.e]] for open indices[[barbaz]]")); } verify(client().admin().indices().prepareUpdateSettings("baz*").setSettings(ImmutableSettings.builder().put("a", "b")), true); diff --git a/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java b/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java index 0488c02041e..67ca29e9461 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java +++ b/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java @@ -19,7 +19,7 @@ package org.elasticsearch.indices; import org.apache.lucene.store.LockObtainFailedException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -74,7 +74,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { try { indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); fail(); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { // all good } @@ -101,7 +101,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { try { indicesService.deleteIndexStore("boom", secondMetaData, clusterService.state()); fail(); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { // all good } @@ -111,7 +111,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { try { indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); fail(); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { // all good } assertAcked(client().admin().indices().prepareOpen("test")); diff --git a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java index 7541de9132f..f65aeb64b2c 100644 --- a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java +++ b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.indices.analyze; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; @@ -80,13 +80,13 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { try { client().admin().indices().prepareAnalyze(indexOrAlias(), "123").setField("long").get(); fail("shouldn't get here"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { //all good } try { client().admin().indices().prepareAnalyze(indexOrAlias(), "123.0").setField("double").get(); fail("shouldn't get here"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { //all good } } @@ -235,7 +235,7 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest); fail("shouldn't get here"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); } } @@ -254,7 +254,7 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest); fail("shouldn't get here"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); } } diff --git a/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java b/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java index e1b5c04368b..6b42cbad84b 100644 --- a/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java +++ b/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.indices.recovery; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; @@ -344,7 +344,7 @@ public class RecoveryStateTest extends ElasticsearchTestCase { state.setStage(stage); } fail("succeeded in performing the illegal sequence [" + Strings.arrayToCommaDelimitedString(stages) + "]"); - } catch (ElasticsearchIllegalStateException e) { + } catch (IllegalStateException e) { // cool } diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java index c7e047265c0..8653c16da3e 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.indices.settings; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.count.CountResponse; @@ -278,7 +278,7 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest { ) .execute().actionGet(); fail("should have thrown an exception about the replica shard count"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat("message contains error about shard count: " + e.getMessage(), e.getMessage().contains("the value of the setting index.number_of_replicas must be a non negative integer"), equalTo(true)); } diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java index 0d00ea5291e..263a3394044 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java @@ -23,7 +23,7 @@ import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -61,7 +61,7 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { ) .execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // all is well } diff --git a/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java index 3d9e676d545..a4794649c8f 100644 --- a/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java +++ b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.indices.state; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; @@ -54,35 +54,35 @@ public class CloseIndexDisableCloseAllTests extends ElasticsearchIntegrationTest try { client().admin().indices().prepareClose("_all").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } // Close all wildcard try { client().admin().indices().prepareClose("*").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } // Close all wildcard try { client().admin().indices().prepareClose("test*").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } // Close all wildcard try { client().admin().indices().prepareClose("*", "-test1").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } // Close all wildcard try { client().admin().indices().prepareClose("*", "-test1", "+test1").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test3", "test2").execute().actionGet(); diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index 3d19218495b..1d120ca9f7c 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.indices.template; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; @@ -33,7 +33,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.FilterBuilders; @@ -495,7 +494,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { try { createIndex("test"); fail("index creation should have failed due to invalid alias filter in matching index template"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); assertThat(e.getCause(), instanceOf(QueryParsingException.class)); assertThat(e.getCause().getMessage(), equalTo("No filter registered for [invalid]")); @@ -512,7 +511,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { try { putIndexTemplateRequestBuilder.get(); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); } @@ -546,7 +545,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { try { putIndexTemplateRequestBuilder.get(); fail("put template should have failed due to alias with empty name"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("alias name is required")); } } @@ -560,7 +559,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { try { putIndexTemplateRequestBuilder.get(); fail("put template should have failed due to alias with multiple index routings"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("alias [alias] has several index routing values associated with it")); } } @@ -661,7 +660,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { client().prepareIndex("d1", "test", "test").setSource("{}").get(); fail(); } catch (Exception e) { - assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), containsString("failed to parse filter for alias [alias4]")); } response = client().prepareBulk().add(new IndexRequest("d2", "test", "test").source("{}")).get(); diff --git a/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java index eda46721e36..69dac24aa69 100644 --- a/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java @@ -19,11 +19,9 @@ package org.elasticsearch.operateAllIndices; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -54,12 +52,14 @@ public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrat // should fail since index1 is the only index. client().admin().indices().prepareDelete("i*").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } try { client().admin().indices().prepareDelete("_all").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } settings = ImmutableSettings.builder() .put(DestructiveOperations.REQUIRES_NAME, false) @@ -85,20 +85,22 @@ public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrat try { client().admin().indices().prepareClose("_all").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } try { assertAcked(client().admin().indices().prepareOpen("_all").get()); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } try { client().admin().indices().prepareClose("*").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } try { assertAcked(client().admin().indices().prepareOpen("*").get()); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } settings = ImmutableSettings.builder() @@ -111,5 +113,4 @@ public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrat // end close index: client().admin().indices().prepareDelete("_all").get(); } - } diff --git a/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java b/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java index eaaa8c64cda..37fe6aa04fe 100644 --- a/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java +++ b/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java @@ -23,17 +23,15 @@ import org.apache.http.impl.client.HttpClients; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.info.PluginInfo; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; -import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ElasticsearchIntegrationTest; @@ -68,7 +66,7 @@ import static org.hamcrest.Matchers.notNullValue; // if its in your classpath, then do not use plugins!!!!!! public class PluginManagerTests extends ElasticsearchIntegrationTest { - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testDownloadAndExtract_NullName_ThrowsException() throws IOException { pluginManager(getPluginUrlForResource("plugin_single_folder.zip")).downloadAndExtract(null); } @@ -480,12 +478,12 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest { singlePluginInstallAndRemove("groupid/plugintest", getPluginUrlForResource("plugin_without_folders.zip")); } - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testRemovePlugin_NullName_ThrowsException() throws IOException { pluginManager(getPluginUrlForResource("plugin_single_folder.zip")).removePlugin(null); } - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testRemovePluginWithURLForm() throws Exception { PluginManager pluginManager = pluginManager(null); pluginManager.removePlugin("file://whatever"); @@ -510,7 +508,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest { pluginManager(null).removePlugin(name); fail("this plugin name [" + name + "] should not be allowed"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // We expect that error } } diff --git a/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java index 861fdf7b1af..ef774983872 100644 --- a/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java +++ b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.routing; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Priority; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -65,14 +65,14 @@ public class AliasResolveRoutingTests extends ElasticsearchIntegrationTest { try { clusterService().state().metaData().resolveIndexRouting("1", "alias10"); fail("should fail"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // all is well, we can't have two mappings, one provided, and one in the alias } try { clusterService().state().metaData().resolveIndexRouting(null, "alias0"); fail("should fail"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { // Expected } } diff --git a/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java b/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java index bbbf8b875dc..7041be12e36 100644 --- a/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java +++ b/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; @@ -89,7 +89,7 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest { try { scriptService.compile(new Script(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), null), new ScriptContext.Plugin("test", "unknown")); fail("script compilation should have been rejected"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [test_unknown] not supported")); } } @@ -105,7 +105,7 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest { } }); fail("script compilation should have been rejected"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [test] not supported")); } } diff --git a/src/test/java/org/elasticsearch/script/IndexedScriptTests.java b/src/test/java/org/elasticsearch/script/IndexedScriptTests.java index 1064bf464c7..2eb881156bd 100644 --- a/src/test/java/org/elasticsearch/script/IndexedScriptTests.java +++ b/src/test/java/org/elasticsearch/script/IndexedScriptTests.java @@ -158,7 +158,7 @@ public class IndexedScriptTests extends ElasticsearchIntegrationTest { fail("update script should have been rejected"); } catch(Exception e) { assertThat(e.getMessage(), containsString("failed to execute script")); - assertThat(e.toString(), containsString("scripts of type [indexed], operation [update] and lang [expression] are disabled")); + assertThat(e.getCause().toString(), containsString("scripts of type [indexed], operation [update] and lang [expression] are disabled")); } try { String query = "{ \"script_fields\" : { \"test1\" : { \"script_id\" : \"script1\", \"lang\":\"expression\" }}}"; diff --git a/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java b/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java index bfc775e449a..ec28c3b90b0 100644 --- a/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.script; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; import org.junit.Test; @@ -36,7 +36,7 @@ public class ScriptContextRegistryTests extends ElasticsearchTestCase { //try to register a prohibited script context new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin("test", rejectedContext))); fail("ScriptContextRegistry initialization should have failed"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), Matchers.containsString("[" + rejectedContext + "] is a reserved name, it cannot be registered as a custom script context")); } } @@ -49,18 +49,18 @@ public class ScriptContextRegistryTests extends ElasticsearchTestCase { //try to register a prohibited script context new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin(rejectedContext, "test"))); fail("ScriptContextRegistry initialization should have failed"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), Matchers.containsString("[" + rejectedContext + "] is a reserved name, it cannot be registered as a custom script context")); } } } - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testValidateCustomScriptContextsEmptyPluginName() throws IOException { new ScriptContext.Plugin(randomBoolean() ? null : "", "test"); } - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testValidateCustomScriptContextsEmptyOperation() throws IOException { new ScriptContext.Plugin("test", randomBoolean() ? null : ""); } @@ -71,7 +71,7 @@ public class ScriptContextRegistryTests extends ElasticsearchTestCase { //try to register a prohibited script context new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin("testplugin", "test"), new ScriptContext.Plugin("testplugin", "test"))); fail("ScriptContextRegistry initialization should have failed"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), Matchers.containsString("script context [testplugin_test] cannot be registered twice")); } } diff --git a/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/src/test/java/org/elasticsearch/script/ScriptModesTests.java index 119a5b91106..96cc6fda02e 100644 --- a/src/test/java/org/elasticsearch/script/ScriptModesTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptModesTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.script; import com.google.common.collect.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.script.ScriptService.ScriptType; @@ -106,7 +106,7 @@ public class ScriptModesTests extends ElasticsearchTestCase { assertScriptModesAllOps(ScriptMode.SANDBOX, ALL_LANGS, ScriptType.INDEXED, ScriptType.INLINE); } - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testMissingSetting() { assertAllSettingsWereChecked = false; this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, ImmutableSettings.EMPTY); diff --git a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 0493203e7a5..0be2f2a80b5 100644 --- a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.ImmutableSettings; @@ -116,7 +116,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { try { buildScriptService(ImmutableSettings.builder().put(ScriptService.DISABLE_DYNAMIC_SCRIPTING_SETTING, randomUnicodeOfLength(randomIntBetween(1, 10))).build()); fail("script service should have thrown exception due to non supported script.disable_dynamic setting"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString(ScriptService.DISABLE_DYNAMIC_SCRIPTING_SETTING + " is not a supported setting, replace with fine-grained script settings")); } } @@ -144,7 +144,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { try { scriptService.compile(new Script("test", "test_script", ScriptType.FILE, null), ScriptContext.Standard.SEARCH); fail("the script test_script should no longer exist"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { assertThat(ex.getMessage(), containsString("Unable to find on disk script test_script")); } } @@ -351,7 +351,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { try { scriptService.compile(new Script(type, "test", randomFrom(ScriptType.values()), null), new ScriptContext.Plugin(pluginName, unknownContext)); fail("script compilation should have been rejected"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index a5b38728a0f..e4b226db3f0 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.significant; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -210,35 +210,35 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { try { heuristicIsSuperset.getScore(2, 3, 1, 4); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > supersetFreq")); } try { heuristicIsSuperset.getScore(1, 4, 2, 3); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetSize > supersetSize")); } try { heuristicIsSuperset.getScore(2, 1, 3, 4); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize")); } try { heuristicIsSuperset.getScore(1, 2, 4, 3); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize")); } try { heuristicIsSuperset.getScore(1, 3, 4, 4); fail(); - } catch (ElasticsearchIllegalArgumentException assertionError) { + } catch (IllegalArgumentException assertionError) { assertNotNull(assertionError.getMessage()); assertTrue(assertionError.getMessage().contains("supersetFreq - subsetFreq > supersetSize - subsetSize")); } @@ -248,21 +248,21 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { values[idx] *= -1; heuristicIsSuperset.getScore(values[0], values[1], values[2], values[3]); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive")); } try { heuristicNotSuperset.getScore(2, 1, 3, 4); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize")); } try { heuristicNotSuperset.getScore(1, 2, 4, 3); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize")); } @@ -272,7 +272,7 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { values[idx] *= -1; heuristicNotSuperset.getScore(values[0], values[1], values[2], values[3]); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive")); } @@ -285,21 +285,21 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { values[idx] *= -1; heuristic.getScore(values[0], values[1], values[2], values[3]); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive")); } try { heuristic.getScore(1, 2, 4, 3); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize")); } try { heuristic.getScore(2, 1, 3, 4); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize")); } @@ -339,7 +339,7 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { long c = randomLong(); long d = randomLong(); score = heuristic.getScore(a, b, c, d); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } assertThat(score, greaterThanOrEqualTo(0.0)); } @@ -362,7 +362,7 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { long c = randomLong(); long d = randomLong(); score = heuristic.getScore(a, b, c, d); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } assertThat(score, lessThanOrEqualTo(1.0)); assertThat(score, greaterThanOrEqualTo(0.0)); diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index a3139e507a6..d2f2a905440 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.child; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -1467,13 +1467,13 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { try { client().prepareIndex("test", "child1", "c1").setParent("p1").setSource("c_field", "blue").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured")); } try { client().prepareIndex("test", "child2", "c2").setParent("p1").setSource("c_field", "blue").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured")); } diff --git a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java index 315442a0022..50e693c2c3e 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java @@ -19,15 +19,13 @@ package org.elasticsearch.search.functionscore; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.ElasticsearchParseException; +import java.lang.IllegalArgumentException; +import java.lang.IllegalStateException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -469,7 +467,7 @@ public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest { } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testExceptionThrownIfScaleRefNotBetween0And1() throws Exception { DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder("num1", "2013-05-28", "1d").setDecay(100); } @@ -916,17 +914,17 @@ public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest { // next test java client try { client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery(FilterBuilders.matchAllFilter(), null)).get(); - } catch (ElasticsearchIllegalArgumentException failure) { + } catch (IllegalArgumentException failure) { assertTrue(failure.toString().contains("function must not be null")); } try { client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery().add(FilterBuilders.matchAllFilter(), null)).get(); - } catch (ElasticsearchIllegalArgumentException failure) { + } catch (IllegalArgumentException failure) { assertTrue(failure.toString().contains("function must not be null")); } try { client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery().add(null)).get(); - } catch (ElasticsearchIllegalArgumentException failure) { + } catch (IllegalArgumentException failure) { assertTrue(failure.toString().contains("function must not be null")); } } diff --git a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java index e3215ddd2b4..c1d8e32c517 100644 --- a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java +++ b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java @@ -19,10 +19,9 @@ package org.elasticsearch.search.preference; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.Client; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ElasticsearchIntegrationTest; @@ -112,7 +111,7 @@ public class SearchPreferenceTests extends ElasticsearchIntegrationTest { assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); } - @Test (expected = ElasticsearchIllegalArgumentException.class) + @Test (expected = IllegalArgumentException.class) public void testThatSpecifyingNonExistingNodesReturnsUsefulError() throws Exception { createIndex("test"); ensureGreen(); diff --git a/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java index d23d4da71ac..f16a6d2907c 100644 --- a/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java +++ b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.scroll; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.*; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; @@ -319,13 +319,13 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { try { client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } try { // Fails during base64 decoding (Base64-encoded string must have at least four characters) client().prepareClearScroll().addScrollId("a").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } try { client().prepareClearScroll().addScrollId("abcabc").get(); @@ -513,7 +513,7 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { RestSearchScrollAction.buildFromContent(invalidContent, searchScrollRequest); fail("expected parseContent failure"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); } } @@ -530,7 +530,7 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { RestSearchScrollAction.buildFromContent(invalidContent, searchScrollRequest); fail("expected parseContent failure"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); } } @@ -555,7 +555,7 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { RestClearScrollAction.buildFromContent(invalidContent, clearScrollRequest); fail("expected parseContent failure"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); } } @@ -572,7 +572,7 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { RestClearScrollAction.buildFromContent(invalidContent, clearScrollRequest); fail("expected parseContent failure"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); } } diff --git a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java index f4a2e81df61..5911985cc26 100644 --- a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java +++ b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java @@ -19,15 +19,13 @@ package org.elasticsearch.search.simple; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.util.ArrayList; @@ -49,14 +47,14 @@ public class SimpleSearchTests extends ElasticsearchIntegrationTest { try { client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } try { client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } } diff --git a/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java index b3d8eeeb9bc..336903e67f1 100644 --- a/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java @@ -20,15 +20,13 @@ package org.elasticsearch.search.suggest; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.action.suggest.SuggestRequestBuilder; import org.elasticsearch.action.suggest.SuggestResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MapperParsingException; @@ -602,7 +600,7 @@ public class ContextSuggestSearchTests extends ElasticsearchIntegrationTest { try { index(INDEX, "service", "2", jsonBuilder().startObject().startObject("suggest").field("input", "backback").endObject().endObject()); fail("index operation was not supposed to be successful"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("one or more prefixes needed")); } } diff --git a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java index 6f512a29c81..47802e9d848 100644 --- a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java @@ -19,13 +19,11 @@ package org.elasticsearch.search.suggest; -import com.carrotsearch.randomizedtesting.annotations.Nightly; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.io.Resources; -import org.apache.lucene.util.LuceneTestCase.Slow; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.*; @@ -124,11 +122,11 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { searchSuggest( termSuggest); fail(" can not suggest across multiple indices with different analysis chains"); } catch (ReduceSearchPhaseException ex) { - assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalStateException.class)); + assertThat(ex.getCause(), instanceOf(IllegalStateException.class)); assertThat(ex.getCause().getMessage(), anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"), endsWith("Suggest entries have different sizes actual [2] expected [1]"))); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"), endsWith("Suggest entries have different sizes actual [2] expected [1]"))); } @@ -144,10 +142,10 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { searchSuggest( termSuggest); fail(" can not suggest across multiple indices with different analysis chains"); } catch (ReduceSearchPhaseException ex) { - assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalStateException.class)); + assertThat(ex.getCause(), instanceOf(IllegalStateException.class)); assertThat(ex.getCause().getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"), endsWith("Suggest entries have different text actual [abcd] expected [ABCD]"))); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"), endsWith("Suggest entries have different text actual [abcd] expected [ABCD]"))); } diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index b24d74b84b2..a3abd47e913 100644 --- a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -25,7 +25,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase.Slow; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; @@ -1646,7 +1646,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { .prepareRestoreSnapshot("test-repo", "test-snap") .setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIncorrectReplicasIndexSettings) - .setWaitForCompletion(true), ElasticsearchIllegalArgumentException.class); + .setWaitForCompletion(true), IllegalArgumentException.class); logger.info("--> restore index with correct settings from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster() diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 70bc6b99c11..9777b57e3bf 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -38,7 +38,7 @@ import com.google.common.util.concurrent.SettableFuture; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -132,7 +132,6 @@ import java.util.concurrent.atomic.AtomicInteger; import static junit.framework.Assert.fail; import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; -import static org.apache.lucene.util.LuceneTestCase.random; import static org.apache.lucene.util.LuceneTestCase.rarely; import static org.apache.lucene.util.LuceneTestCase.usually; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; @@ -365,7 +364,7 @@ public final class InternalTestCluster extends TestCluster { Settings settings = settingsSource.node(nodeOrdinal); if (settings != null) { if (settings.get(ClusterName.SETTING) != null) { - throw new ElasticsearchIllegalStateException("Tests must not set a '" + ClusterName.SETTING + "' as a node setting set '" + ClusterName.SETTING + "': [" + settings.get(ClusterName.SETTING) + "]"); + throw new IllegalStateException("Tests must not set a '" + ClusterName.SETTING + "' as a node setting set '" + ClusterName.SETTING + "': [" + settings.get(ClusterName.SETTING) + "]"); } builder.put(settings); } diff --git a/src/test/java/org/elasticsearch/test/TestCluster.java b/src/test/java/org/elasticsearch/test/TestCluster.java index d96dd084b3d..e6b5967f2c9 100644 --- a/src/test/java/org/elasticsearch/test/TestCluster.java +++ b/src/test/java/org/elasticsearch/test/TestCluster.java @@ -20,7 +20,7 @@ package org.elasticsearch.test; import com.carrotsearch.hppc.ObjectArrayList; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -132,7 +132,7 @@ public abstract class TestCluster implements Iterable, Closeable { assertAcked(client().admin().indices().prepareDelete(indices)); } catch (IndexMissingException e) { // ignore - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // Happens if `action.destructive_requires_name` is set to true // which is the case in the CloseIndexDisableCloseAllTests if ("_all".equals(indices[0])) { diff --git a/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java b/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java index 69b3cee96d4..5c604592853 100644 --- a/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java +++ b/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.cluster; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlock; @@ -67,12 +67,12 @@ public class NoopClusterService implements ClusterService { } @Override - public void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException { + public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { } @Override - public void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException { + public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { } diff --git a/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java b/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java index 27f09489763..22e6f2eecdf 100644 --- a/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java +++ b/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.cluster; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlock; @@ -107,13 +107,13 @@ public class TestClusterService implements ClusterService { } @Override - public void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException { + public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { throw new UnsupportedOperationException(); } @Override - public void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException { + public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { throw new UnsupportedOperationException(); } diff --git a/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java index d8be9b704c6..ee845d2199c 100644 --- a/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.transport.netty; -import org.elasticsearch.ElasticsearchIllegalStateException; +import java.lang.IllegalStateException; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; @@ -75,7 +75,7 @@ public class KeyedLockTests extends ElasticsearchTestCase { } } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void checkCannotAcquireTwoLocksGlobal() throws InterruptedException { KeyedLock.GlobalLockable connectionLock = new KeyedLock.GlobalLockable<>(); String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50)); @@ -89,7 +89,7 @@ public class KeyedLockTests extends ElasticsearchTestCase { } } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void checkCannotAcquireTwoLocks() throws InterruptedException { KeyedLock connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable() : new KeyedLock(); String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50)); @@ -97,7 +97,7 @@ public class KeyedLockTests extends ElasticsearchTestCase { connectionLock.acquire(name); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void checkCannotReleaseUnacquiredLock() throws InterruptedException { KeyedLock connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable() : new KeyedLock(); String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50)); From b4efb6c472715756f9920a653be70d3ce16767cc Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 29 Apr 2015 10:08:50 +0200 Subject: [PATCH 187/236] Remove unused / obsolete import and throws statements This commit removes unused thorws statements when RuntimeExceptions are mentioned in the throws statement. It also removes obsolete import statements for java.lang.IllegalArgumentException and java.lang.IllegalStateException --- .../lucene/analysis/PrefixAnalyzer.java | 1 - .../lucene/store/StoreRateLimiting.java | 5 +-- .../elasticsearch/action/ActionFuture.java | 10 ++--- .../action/ActionRequestBuilder.java | 6 +-- .../ActionRequestValidationException.java | 1 - .../elasticsearch/action/ThreadingModel.java | 1 - .../action/WriteConsistencyLevel.java | 1 - .../cluster/health/ClusterHealthStatus.java | 1 - .../health/TransportClusterHealthAction.java | 3 +- .../TransportNodesHotThreadsAction.java | 2 +- .../node/info/TransportNodesInfoAction.java | 2 +- .../node/stats/TransportNodesStatsAction.java | 2 +- .../TransportDeleteRepositoryAction.java | 2 +- .../get/TransportGetRepositoriesAction.java | 2 +- .../put/PutRepositoryRequest.java | 1 - .../put/TransportPutRepositoryAction.java | 2 +- .../TransportVerifyRepositoryAction.java | 2 +- .../TransportClusterRerouteAction.java | 2 +- .../TransportClusterUpdateSettingsAction.java | 2 +- .../shards/ClusterSearchShardsRequest.java | 1 - .../TransportClusterSearchShardsAction.java | 2 +- .../create/CreateSnapshotRequest.java | 1 - .../create/TransportCreateSnapshotAction.java | 2 +- .../delete/TransportDeleteSnapshotAction.java | 2 +- .../get/TransportGetSnapshotsAction.java | 2 +- .../restore/RestoreSnapshotRequest.java | 1 - .../TransportRestoreSnapshotAction.java | 2 +- .../status/SnapshotIndexShardStage.java | 1 - .../status/SnapshotIndexShardStatus.java | 1 - .../snapshots/status/SnapshotShardsStats.java | 1 - .../status/TransportNodesSnapshotsStatus.java | 2 +- .../TransportSnapshotsStatusAction.java | 1 - .../state/TransportClusterStateAction.java | 2 +- .../stats/TransportClusterStatsAction.java | 2 +- .../TransportPendingClusterTasksAction.java | 2 +- .../action/admin/indices/alias/Alias.java | 1 - .../alias/TransportIndicesAliasesAction.java | 2 +- .../exists/TransportAliasesExistAction.java | 2 +- .../alias/get/TransportGetAliasesAction.java | 2 +- .../analyze/TransportAnalyzeAction.java | 3 +- .../TransportClearIndicesCacheAction.java | 2 +- .../close/TransportCloseIndexAction.java | 2 +- .../indices/create/CreateIndexRequest.java | 1 - .../create/TransportCreateIndexAction.java | 2 +- .../delete/TransportDeleteIndexAction.java | 2 +- .../indices/TransportIndicesExistsAction.java | 2 +- .../types/TransportTypesExistsAction.java | 2 +- .../indices/flush/TransportFlushAction.java | 2 +- .../admin/indices/get/GetIndexRequest.java | 5 +-- .../indices/get/TransportGetIndexAction.java | 3 +- .../TransportGetFieldMappingsIndexAction.java | 4 +- .../get/TransportGetMappingsAction.java | 2 +- .../mapping/put/PutMappingRequest.java | 1 - .../put/TransportPutMappingAction.java | 2 +- .../open/TransportOpenIndexAction.java | 2 +- .../optimize/TransportOptimizeAction.java | 2 +- .../recovery/TransportRecoveryAction.java | 2 +- .../refresh/TransportRefreshAction.java | 2 +- .../TransportIndicesSegmentsAction.java | 2 +- .../get/TransportGetSettingsAction.java | 2 +- .../put/TransportUpdateSettingsAction.java | 2 +- .../stats/TransportIndicesStatsAction.java | 2 +- .../TransportDeleteIndexTemplateAction.java | 2 +- .../get/TransportGetIndexTemplatesAction.java | 2 +- .../template/put/PutIndexTemplateRequest.java | 1 - .../put/TransportPutIndexTemplateAction.java | 2 +- .../query/TransportValidateQueryAction.java | 2 +- .../delete/TransportDeleteWarmerAction.java | 2 +- .../warmer/get/TransportGetWarmersAction.java | 2 +- .../warmer/put/TransportPutWarmerAction.java | 2 +- .../action/bulk/BulkProcessor.java | 1 - .../action/bulk/BulkRequest.java | 1 - .../action/bulk/TransportShardBulkAction.java | 1 - .../action/count/CountRequest.java | 1 - .../action/count/TransportCountAction.java | 2 +- .../action/delete/TransportDeleteAction.java | 1 - .../action/exists/TransportExistsAction.java | 2 +- .../explain/TransportExplainAction.java | 4 +- .../TransportFieldStatsTransportAction.java | 4 +- .../action/get/MultiGetRequest.java | 1 - .../action/get/TransportGetAction.java | 2 +- .../get/TransportShardMultiGetAction.java | 2 +- .../action/index/IndexRequest.java | 7 ++-- .../action/mlt/MoreLikeThisRequest.java | 3 +- .../mlt/MoreLikeThisRequestBuilder.java | 3 +- .../mlt/TransportMoreLikeThisAction.java | 1 - .../percolate/MultiPercolateRequest.java | 1 - .../percolate/TransportPercolateAction.java | 2 +- .../TransportShardMultiPercolateAction.java | 4 +- .../action/search/MultiSearchRequest.java | 1 - .../action/search/SearchRequest.java | 3 +- .../action/search/SearchRequestBuilder.java | 3 +- .../action/search/SearchType.java | 3 +- .../action/search/TransportSearchAction.java | 1 - .../search/TransportSearchScrollAction.java | 1 - .../search/type/TransportSearchHelper.java | 2 - .../type/TransportSearchTypeAction.java | 1 - .../suggest/TransportSuggestAction.java | 3 +- .../action/support/AdapterActionFuture.java | 11 +++--- .../action/support/DestructiveOperations.java | 1 - .../action/support/IndicesOptions.java | 1 - .../action/support/TransportAction.java | 2 +- .../TransportBroadcastOperationAction.java | 2 +- .../info/TransportClusterInfoAction.java | 4 +- .../nodes/TransportNodesOperationAction.java | 2 +- ...nsportShardReplicationOperationAction.java | 2 +- .../TransportSingleCustomOperationAction.java | 2 +- ...ransportInstanceSingleOperationAction.java | 6 +-- .../TransportShardSingleOperationAction.java | 4 +- .../termvectors/MultiTermVectorsRequest.java | 1 - .../termvectors/TermVectorsResponse.java | 1 - .../TransportShardMultiTermsVectorAction.java | 2 +- .../TransportTermVectorsAction.java | 2 +- .../dfs/TransportDfsOnlyAction.java | 2 +- .../action/update/TransportUpdateAction.java | 7 ++-- .../action/update/UpdateHelper.java | 1 - .../cache/recycler/PageCacheRecycler.java | 1 - .../client/transport/TransportClient.java | 6 +-- .../TransportClientNodesService.java | 3 +- .../elasticsearch/cluster/ClusterService.java | 1 - .../elasticsearch/cluster/ClusterState.java | 3 +- .../action/index/MappingUpdatedAction.java | 3 +- .../action/index/NodeIndexDeletedAction.java | 2 +- .../index/NodeMappingRefreshAction.java | 2 +- .../action/shard/ShardStateAction.java | 8 ++-- .../cluster/block/ClusterBlockLevel.java | 1 - .../cluster/metadata/AliasAction.java | 1 - .../cluster/metadata/AliasValidator.java | 1 - .../cluster/metadata/IndexMetaData.java | 4 +- .../cluster/metadata/MappingMetaData.java | 1 - .../cluster/metadata/MetaData.java | 3 +- .../metadata/MetaDataCreateIndexService.java | 5 +-- .../metadata/MetaDataIndexStateService.java | 1 - .../MetaDataIndexTemplateService.java | 3 +- .../MetaDataUpdateSettingsService.java | 1 - .../cluster/metadata/RestoreMetaData.java | 1 - .../cluster/metadata/SnapshotMetaData.java | 1 - .../cluster/node/DiscoveryNode.java | 1 - .../cluster/node/DiscoveryNodes.java | 1 - .../cluster/routing/IndexRoutingTable.java | 1 - .../cluster/routing/OperationRouting.java | 1 - .../cluster/routing/Preference.java | 1 - .../cluster/routing/RoutingNode.java | 1 - .../cluster/routing/RoutingService.java | 6 +-- .../cluster/routing/ShardRoutingState.java | 1 - .../routing/allocation/AllocationService.java | 3 +- .../allocator/BalancedShardsAllocator.java | 1 - .../command/AllocateAllocationCommand.java | 4 +- .../allocation/command/AllocationCommand.java | 2 +- .../command/AllocationCommands.java | 6 +-- .../command/CancelAllocationCommand.java | 3 +- .../command/MoveAllocationCommand.java | 3 +- .../ClusterRebalanceAllocationDecider.java | 2 - .../routing/allocation/decider/Decision.java | 1 - .../decider/EnableAllocationDecider.java | 2 - .../service/InternalClusterService.java | 9 ++--- .../org/elasticsearch/common/Booleans.java | 1 - .../org/elasticsearch/common/ParseField.java | 1 - .../org/elasticsearch/common/PidFile.java | 1 - .../elasticsearch/common/Preconditions.java | 2 - .../org/elasticsearch/common/Priority.java | 1 - .../common/RandomBasedUUIDGenerator.java | 1 - .../java/org/elasticsearch/common/Table.java | 1 - .../common/TimeBasedUUIDGenerator.java | 1 - .../common/breaker/CircuitBreaker.java | 1 - .../common/bytes/BytesArray.java | 1 - .../common/bytes/PagedBytesReference.java | 1 - .../bytes/ReleasablePagedBytesReference.java | 2 +- .../org/elasticsearch/common/cli/CliTool.java | 1 - .../common/collect/HppcMaps.java | 1 - .../component/AbstractLifecycleComponent.java | 12 +++--- .../common/component/Lifecycle.java | 1 - .../common/component/LifecycleComponent.java | 4 +- .../elasticsearch/common/geo/GeoDistance.java | 1 - .../common/geo/GeoHashUtils.java | 3 +- .../common/geo/builders/ShapeBuilder.java | 1 - .../common/lease/Releasable.java | 2 +- .../common/lease/Releasables.java | 4 +- .../elasticsearch/common/lucene/Lucene.java | 2 - .../common/lucene/all/AllEntries.java | 1 - .../common/lucene/docset/DocIdSets.java | 2 - .../lucene/index/FilterableTermsEnum.java | 3 +- .../common/lucene/index/FreqTermsEnum.java | 2 +- .../search/function/WeightFactorFunction.java | 1 - .../common/recycler/DequeRecycler.java | 1 - .../common/recycler/NoneRecycler.java | 1 - .../common/recycler/Recyclers.java | 3 +- .../org/elasticsearch/common/regex/Regex.java | 1 - .../common/rounding/TimeZoneRounding.java | 1 - .../common/settings/ImmutableSettings.java | 1 - .../TransportAddressSerializers.java | 1 - .../common/unit/ByteSizeValue.java | 5 +-- .../common/unit/DistanceUnit.java | 1 - .../elasticsearch/common/unit/Fuzziness.java | 1 - .../common/util/BloomFilter.java | 1 - .../common/util/LocaleUtils.java | 1 - .../common/util/LongObjectPagedHashMap.java | 2 +- .../common/util/MultiDataPathUpgrader.java | 1 - .../common/util/concurrent/CountDown.java | 1 - .../common/util/concurrent/EsAbortPolicy.java | 1 - .../util/concurrent/EsThreadPoolExecutor.java | 1 - .../common/util/concurrent/KeyedLock.java | 1 - .../util/concurrent/SizeBlockingQueue.java | 1 - .../common/xcontent/XContentFactory.java | 1 - .../xcontent/json/JsonXContentParser.java | 1 - .../discovery/DiscoveryService.java | 6 +-- .../discovery/DiscoverySettings.java | 1 - .../discovery/local/LocalDiscovery.java | 7 ++-- .../discovery/zen/NotMasterException.java | 1 - .../discovery/zen/ZenDiscovery.java | 8 ++-- .../zen/fd/MasterFaultDetection.java | 1 - .../discovery/zen/fd/NodesFaultDetection.java | 1 - .../zen/membership/MembershipAction.java | 6 +-- .../discovery/zen/ping/ZenPing.java | 2 +- .../discovery/zen/ping/ZenPingService.java | 9 ++--- .../zen/ping/multicast/MulticastZenPing.java | 7 ++-- .../zen/ping/unicast/UnicastZenPing.java | 10 ++--- .../elasticsearch/env/NodeEnvironment.java | 2 - .../gateway/GatewayMetaState.java | 1 - .../elasticsearch/gateway/GatewayService.java | 6 +-- .../gateway/MetaDataStateFormat.java | 1 - .../TransportNodesListGatewayMetaState.java | 2 +- ...ransportNodesListGatewayStartedShards.java | 2 +- .../org/elasticsearch/http/HttpServer.java | 6 +-- .../http/netty/NettyHttpServerTransport.java | 6 +-- .../org/elasticsearch/index/IndexService.java | 5 +-- .../org/elasticsearch/index/VersionType.java | 1 - .../index/analysis/Analysis.java | 1 - .../index/analysis/AnalysisModule.java | 1 - .../index/analysis/AnalysisService.java | 1 - .../CommonGramsTokenFilterFactory.java | 1 - .../analysis/EdgeNGramTokenizerFactory.java | 3 +- .../analysis/HunspellTokenFilterFactory.java | 1 - .../analysis/KeepTypesFilterFactory.java | 1 - .../index/analysis/KeepWordFilterFactory.java | 1 - .../KeywordMarkerTokenFilterFactory.java | 1 - .../analysis/LengthTokenFilterFactory.java | 1 - .../analysis/LowerCaseTokenFilterFactory.java | 1 - .../analysis/MappingCharFilterFactory.java | 1 - .../index/analysis/NGramTokenizerFactory.java | 3 +- .../PathHierarchyTokenizerFactory.java | 1 - .../analysis/PatternAnalyzerProvider.java | 1 - ...PatternCaptureGroupTokenFilterFactory.java | 1 - .../PatternReplaceCharFilterFactory.java | 1 - .../PatternReplaceTokenFilterFactory.java | 3 +- .../analysis/PatternTokenizerFactory.java | 3 +- .../StemmerOverrideTokenFilterFactory.java | 1 - .../analysis/StopTokenFilterFactory.java | 1 - .../analysis/SynonymTokenFilterFactory.java | 1 - .../analysis/TrimTokenFilterFactory.java | 1 - .../analysis/TruncateTokenFilterFactory.java | 1 - ...bstractCompoundWordTokenFilterFactory.java | 3 +- ...enationCompoundWordTokenFilterFactory.java | 3 +- .../index/cache/bitset/BitsetFilterCache.java | 2 +- .../filter/weighted/WeightedFilterCache.java | 2 +- .../index/codec/CodecService.java | 3 +- .../elasticsearch/index/engine/Engine.java | 11 +++--- .../index/engine/EngineSearcher.java | 3 +- .../index/engine/FlushingRecoveryCounter.java | 2 +- .../index/engine/InternalEngine.java | 2 +- .../index/engine/RecoveryCounter.java | 4 +- .../index/engine/ShadowEngine.java | 2 +- .../fielddata/IndexFieldDataService.java | 1 - .../fielddata/IndexNumericFieldData.java | 1 - .../plain/AbstractIndexGeoPointFieldData.java | 1 - .../plain/BinaryDVAtomicFieldData.java | 1 - .../plain/BinaryDVNumericIndexFieldData.java | 2 - .../plain/BytesBinaryDVIndexFieldData.java | 2 - .../plain/DisabledIndexFieldData.java | 1 - .../plain/DocValuesIndexFieldData.java | 1 - .../plain/GeoPointBinaryDVIndexFieldData.java | 2 - .../plain/NumericDVIndexFieldData.java | 1 - .../plain/ParentChildIndexFieldData.java | 3 +- .../plain/SortedNumericDVIndexFieldData.java | 1 - .../SortedSetDVBytesAtomicFieldData.java | 1 - .../index/get/ShardGetService.java | 5 +-- .../index/mapper/DocumentMapper.java | 1 - .../index/mapper/MapperService.java | 1 - .../index/mapper/ParseContext.java | 2 - .../index/mapper/core/BooleanFieldMapper.java | 1 - .../index/mapper/core/ByteFieldMapper.java | 1 - .../mapper/core/CompletionFieldMapper.java | 2 - .../index/mapper/core/DateFieldMapper.java | 1 - .../index/mapper/core/DoubleFieldMapper.java | 1 - .../index/mapper/core/FloatFieldMapper.java | 1 - .../index/mapper/core/IntegerFieldMapper.java | 1 - .../index/mapper/core/LongFieldMapper.java | 1 - .../index/mapper/core/NumberFieldMapper.java | 1 - .../index/mapper/core/ShortFieldMapper.java | 1 - .../index/mapper/core/StringFieldMapper.java | 1 - .../index/mapper/geo/GeoPointFieldMapper.java | 2 - .../index/mapper/geo/GeoShapeFieldMapper.java | 1 - .../internal/FieldNamesFieldMapper.java | 1 - .../index/mapper/ip/IpFieldMapper.java | 5 +-- .../index/mapper/object/DynamicTemplate.java | 1 - .../policy/AbstractMergePolicyProvider.java | 1 - .../LogByteSizeMergePolicyProvider.java | 2 +- .../policy/LogDocMergePolicyProvider.java | 2 +- .../policy/TieredMergePolicyProvider.java | 2 +- .../index/query/BaseFilterBuilder.java | 4 +- .../index/query/BaseQueryBuilder.java | 4 +- .../index/query/BoostingQueryBuilder.java | 1 - .../index/query/CommonTermsQueryBuilder.java | 1 - .../index/query/CommonTermsQueryParser.java | 1 - .../index/query/FilterBuilder.java | 4 +- .../query/GeoBoundingBoxFilterBuilder.java | 1 - .../index/query/GeoShapeQueryParser.java | 1 - .../index/query/GeohashCellFilter.java | 1 - .../index/query/IndexQueryParserService.java | 10 ++--- .../index/query/MoreLikeThisQueryBuilder.java | 1 - .../index/query/MoreLikeThisQueryParser.java | 3 +- .../index/query/QueryBuilder.java | 4 +- .../elasticsearch/index/query/RegexpFlag.java | 1 - .../index/query/ScriptFilterParser.java | 3 +- .../index/query/SimpleQueryStringFlag.java | 3 +- .../index/query/SpanNearQueryBuilder.java | 1 - .../index/query/SpanNotQueryBuilder.java | 1 - .../index/query/SpanOrQueryBuilder.java | 1 - .../index/query/TemplateQueryBuilder.java | 1 - .../functionscore/DecayFunctionBuilder.java | 3 +- .../functionscore/DecayFunctionParser.java | 1 - .../FunctionScoreQueryBuilder.java | 3 +- .../functionscore/factor/FactorBuilder.java | 3 +- .../support/InnerHitsQueryParserHelper.java | 1 - .../index/query/support/QueryParsers.java | 1 - .../index/search/MatchQuery.java | 4 +- .../index/search/MultiMatchQuery.java | 3 +- .../index/search/child/ChildrenQuery.java | 8 ++-- .../child/CustomQueryWrappingFilter.java | 2 +- .../index/search/child/ParentQuery.java | 2 +- .../index/search/child/ScoreType.java | 1 - .../index/search/child/TopChildrenQuery.java | 3 +- .../index/search/geo/GeoDistanceFilter.java | 1 - .../search/geo/GeoDistanceRangeFilter.java | 1 - .../geo/IndexedGeoBoundingBoxFilter.java | 1 - .../index/search/shape/ShapeFetchService.java | 2 - .../elasticsearch/index/shard/IndexShard.java | 38 +++++++++---------- .../index/shard/IndexShardState.java | 3 +- .../index/shard/ShadowIndexShard.java | 1 - .../elasticsearch/index/shard/ShardPath.java | 1 - .../shard/TranslogRecoveryPerformer.java | 3 +- .../AbstractSimilarityProvider.java | 1 - .../similarity/DFRSimilarityProvider.java | 1 - .../similarity/IBSimilarityProvider.java | 1 - .../index/similarity/SimilarityModule.java | 1 - .../elasticsearch/index/store/IndexStore.java | 2 +- .../org/elasticsearch/index/store/Store.java | 1 - .../index/translog/Translog.java | 2 - .../index/translog/fs/FsChannelSnapshot.java | 2 +- .../index/translog/fs/FsTranslog.java | 2 +- .../index/translog/fs/FsTranslogFile.java | 3 +- .../elasticsearch/indices/IndicesService.java | 16 ++++---- .../breaker/CircuitBreakerService.java | 6 +-- .../HierarchyCircuitBreakerService.java | 1 - .../cache/filter/IndicesFilterCache.java | 3 +- .../cache/query/IndicesQueryCache.java | 1 - .../cluster/IndicesClusterStateService.java | 11 +++--- .../cache/IndicesFieldDataCache.java | 1 - .../memory/IndexingMemoryController.java | 6 +-- .../recovery/RecoverySourceHandler.java | 6 +-- .../indices/recovery/RecoveryState.java | 6 +-- .../SharedFSRecoverySourceHandler.java | 5 +-- .../TransportNodesListShardStoreMetaData.java | 2 +- .../indices/ttl/IndicesTTLService.java | 6 +-- .../elasticsearch/monitor/MonitorService.java | 6 +-- .../elasticsearch/monitor/jvm/HotThreads.java | 1 - .../monitor/jvm/JvmMonitorService.java | 6 +-- .../java/org/elasticsearch/node/Node.java | 5 +-- .../MultiDocumentPercolatorIndex.java | 2 +- .../percolator/PercolatorService.java | 3 +- .../SingleDocumentPercolatorIndex.java | 2 +- .../elasticsearch/plugins/PluginsService.java | 1 - .../repositories/RepositoriesService.java | 1 - .../blobstore/BlobStoreRepository.java | 6 +-- .../elasticsearch/rest/RestController.java | 8 ++-- .../org/elasticsearch/rest/RestFilter.java | 2 +- .../org/elasticsearch/rest/RestRequest.java | 1 - .../alias/RestIndicesAliasesAction.java | 1 - .../alias/put/RestIndexPutAliasAction.java | 1 - .../indices/analyze/RestAnalyzeAction.java | 3 +- .../indices/get/RestGetIndicesAction.java | 3 +- .../rest/action/count/RestCountAction.java | 1 - .../action/explain/RestExplainAction.java | 1 - .../rest/action/index/RestIndexAction.java | 1 - .../script/RestPutIndexedScriptAction.java | 1 - .../action/search/RestClearScrollAction.java | 3 +- .../rest/action/search/RestSearchAction.java | 1 - .../action/search/RestSearchScrollAction.java | 3 +- .../action/suggest/RestSuggestAction.java | 1 - .../rest/action/support/RestActions.java | 1 - .../elasticsearch/river/RiversManager.java | 6 +-- .../elasticsearch/river/RiversService.java | 10 ++--- .../river/cluster/RiverClusterService.java | 6 +-- .../river/routing/RiversRouter.java | 6 +-- .../script/NativeScriptEngineService.java | 3 +- .../java/org/elasticsearch/script/Script.java | 1 - .../elasticsearch/script/ScriptContext.java | 1 - .../script/ScriptContextRegistry.java | 1 - .../org/elasticsearch/script/ScriptMode.java | 1 - .../org/elasticsearch/script/ScriptModes.java | 1 - .../elasticsearch/script/ScriptModule.java | 1 - .../elasticsearch/script/ScriptService.java | 2 - .../script/expression/ExpressionScript.java | 1 - .../elasticsearch/search/MultiValueMode.java | 1 - .../org/elasticsearch/search/SearchPhase.java | 2 +- .../elasticsearch/search/SearchService.java | 31 ++++++++------- .../search/aggregations/AggregationPhase.java | 2 +- .../aggregations/AggregatorFactories.java | 1 - .../aggregations/InternalAggregations.java | 1 - .../InternalMultiBucketAggregation.java | 1 - .../bucket/BestBucketsDeferringCollector.java | 1 - .../bucket/BestDocsDeferringCollector.java | 1 - .../bucket/DeferringBucketCollector.java | 5 +-- .../InternalSingleBucketAggregation.java | 1 - .../children/ParentToChildrenAggregator.java | 1 - .../bucket/geogrid/GeoHashGridBuilder.java | 1 - .../bucket/global/GlobalAggregator.java | 1 - .../bucket/histogram/InternalHistogram.java | 1 - ...DiversifiedBytesHashSamplerAggregator.java | 1 - .../DiversifiedMapSamplerAggregator.java | 1 - .../DiversifiedNumericSamplerAggregator.java | 1 - .../DiversifiedOrdinalsSamplerAggregator.java | 1 - .../bucket/sampler/SamplerAggregator.java | 1 - .../SignificantTermsAggregatorFactory.java | 3 +- .../significant/SignificantTermsBuilder.java | 1 - .../heuristics/NXYSignificanceHeuristic.java | 1 - .../heuristics/SignificanceHeuristic.java | 1 - .../bucket/terms/InternalTerms.java | 1 - .../bucket/terms/TermsAggregatorFactory.java | 1 - .../bucket/terms/TermsBuilder.java | 1 - .../bucket/terms/support/IncludeExclude.java | 1 - .../InternalNumericMetricsAggregation.java | 1 - .../cardinality/CardinalityAggregator.java | 6 +-- .../cardinality/HyperLogLogPlusPlus.java | 2 +- .../metrics/geobounds/InternalGeoBounds.java | 1 - .../AbstractInternalPercentiles.java | 1 - .../scripted/InternalScriptedMetric.java | 1 - .../metrics/stats/StatsAggegator.java | 1 - .../extended/ExtendedStatsAggregator.java | 1 - .../metrics/tophits/InternalTopHits.java | 1 - .../aggregations/support/AggregationPath.java | 1 - .../support/format/ValueFormatterStreams.java | 1 - .../search/builder/SearchSourceBuilder.java | 1 - .../search/fetch/FetchPhase.java | 2 - .../search/fetch/FetchSubPhase.java | 4 +- .../fetch/explain/ExplainFetchSubPhase.java | 4 +- .../FieldDataFieldsFetchSubPhase.java | 4 +- .../FieldDataFieldsParseElement.java | 1 - .../innerhits/InnerHitsFetchSubPhase.java | 4 +- .../innerhits/InnerHitsParseElement.java | 1 - .../MatchedQueriesFetchSubPhase.java | 4 +- .../script/ScriptFieldsFetchSubPhase.java | 5 +-- .../fetch/source/FetchSourceSubPhase.java | 4 +- .../fetch/version/VersionFetchSubPhase.java | 4 +- .../highlight/FastVectorHighlighter.java | 1 - .../search/highlight/HighlightPhase.java | 5 +-- .../highlight/HighlighterParseElement.java | 1 - .../search/highlight/PlainHighlighter.java | 1 - .../search/highlight/PostingsHighlighter.java | 1 - .../search/internal/DefaultSearchContext.java | 2 +- .../search/lookup/LeafDocLookup.java | 1 - .../search/lookup/LeafFieldsLookup.java | 1 - .../query/TerminateAfterParseElement.java | 1 - .../search/rescore/QueryRescorer.java | 2 - .../search/rescore/RescoreParseElement.java | 1 - .../search/rescore/RescorePhase.java | 2 +- .../search/sort/FieldSortBuilder.java | 1 - .../search/sort/GeoDistanceSortParser.java | 1 - .../search/sort/SortParseElement.java | 1 - .../elasticsearch/search/suggest/Suggest.java | 1 - .../search/suggest/SuggestBuilder.java | 1 - .../search/suggest/SuggestParseElement.java | 1 - .../search/suggest/SuggestPhase.java | 2 +- .../search/suggest/SuggestUtils.java | 1 - .../suggest/SuggestionSearchContext.java | 1 - .../Completion090PostingsFormat.java | 1 - .../completion/CompletionSuggestParser.java | 1 - .../phrase/DirectCandidateGenerator.java | 1 - .../suggest/phrase/PhraseSuggestParser.java | 1 - .../phrase/PhraseSuggestionBuilder.java | 1 - .../phrase/PhraseSuggestionContext.java | 3 +- .../search/suggest/phrase/WordScorer.java | 1 - .../suggest/term/TermSuggestParser.java | 1 - .../snapshots/SnapshotState.java | 1 - .../snapshots/SnapshotsService.java | 8 ++-- .../elasticsearch/threadpool/ThreadPool.java | 1 - .../transport/PlainTransportFuture.java | 5 +-- .../transport/RequestHandlerRegistry.java | 1 - .../transport/TransportFuture.java | 4 +- .../transport/TransportRequestOptions.java | 1 - .../transport/TransportService.java | 9 ++--- .../transport/local/LocalTransport.java | 6 +-- .../netty/MessageChannelHandler.java | 1 - .../transport/netty/NettyTransport.java | 6 +-- .../org/elasticsearch/tribe/TribeService.java | 6 +-- .../watcher/ResourceWatcherService.java | 7 ++-- .../indices/create/CreateIndexTests.java | 1 - .../action/index/IndexRequestTests.java | 1 - .../ShardReplicationOperationTests.java | 2 +- .../GetTermVectorsCheckDocFreqTests.java | 2 +- .../termvectors/GetTermVectorsTests.java | 20 +++++----- .../aliases/IndexAliasesTests.java | 1 - .../BasicBackwardsCompatibilityTest.java | 1 - .../transport/FailAndRetryMockTransport.java | 6 +-- .../cluster/ClusterServiceTests.java | 6 +-- .../UpdateSettingsValidationTests.java | 1 - .../cluster/metadata/MetaDataTests.java | 1 - .../allocation/AllocationCommandsTests.java | 1 - .../decider/DiskThresholdDeciderTests.java | 1 - .../elasticsearch/common/BooleansTests.java | 1 - .../elasticsearch/common/ParseFieldTests.java | 1 - .../elasticsearch/common/PidFileTests.java | 1 - .../org/elasticsearch/common/TableTests.java | 1 - .../collect/CopyOnWriteHashMapTests.java | 1 - .../collect/CopyOnWriteHashSetTests.java | 1 - .../common/geo/GeoJSONShapeParserTests.java | 1 - .../recycler/AbstractRecyclerTests.java | 1 - .../common/unit/SizeValueTests.java | 1 - .../count/query/CountQueryTests.java | 2 +- .../discovery/zen/ZenDiscoveryUnitTest.java | 1 - .../env/NodeEnvironmentTests.java | 1 - .../FieldStatsIntegrationTests.java | 1 - .../gateway/MetaDataStateFormatTest.java | 1 - .../netty/NettyHttpServerPipeliningTest.java | 2 +- .../analysis/KeepFilterFactoryTests.java | 1 - .../analysis/NGramTokenizerFactoryTests.java | 1 - .../PatternCaptureTokenFilterTests.java | 1 - .../CommonGramsTokenFilterFactoryTests.java | 1 - .../engine/InternalEngineSettingsTest.java | 1 - ...TokenCountFieldMapperIntegrationTests.java | 8 ++-- .../mapper/date/SimpleDateMappingTests.java | 3 +- .../index/mapper/ip/SimpleIpMappingTests.java | 1 - .../merge/policy/MergePolicySettingsTest.java | 1 - .../query/SimpleIndexQueryParserTests.java | 3 +- .../index/search/child/ScoreTypeTests.java | 1 - .../index/shard/IndexShardTests.java | 2 - .../index/shard/ShardPathTests.java | 1 - .../IndicesOptionsIntegrationTests.java | 1 - .../indices/IndicesServiceTest.java | 1 - .../indices/analyze/AnalyzeActionTests.java | 3 +- .../indices/recovery/RecoveryStateTest.java | 1 - .../settings/UpdateNumberOfReplicasTests.java | 1 - .../indices/settings/UpdateSettingsTests.java | 1 - .../state/CloseIndexDisableCloseAllTests.java | 1 - .../template/SimpleIndexTemplateTests.java | 1 - .../plugins/PluginManagerTests.java | 1 - .../HeadersAndContextCopyClientTests.java | 2 +- .../routing/AliasResolveRoutingTests.java | 1 - .../script/CustomScriptContextTests.java | 1 - .../script/ScriptContextRegistryTests.java | 1 - .../script/ScriptModesTests.java | 1 - .../script/ScriptServiceTests.java | 1 - .../SignificanceHeuristicTests.java | 1 - .../child/SimpleChildQuerySearchTests.java | 11 +++--- .../DecayFunctionScoreTests.java | 2 - .../highlight/HighlighterSearchTests.java | 20 +++++----- .../preference/SearchPreferenceTests.java | 1 - .../search/query/SearchQueryTests.java | 10 ++--- .../search/scroll/SearchScrollTests.java | 1 - .../search/simple/SimpleSearchTests.java | 1 - .../search/sort/SimpleSortTests.java | 8 ++-- .../suggest/ContextSuggestSearchTests.java | 1 - .../search/suggest/SuggestSearchTests.java | 11 +++--- .../SharedClusterSnapshotRestoreTests.java | 1 - .../snapshots/mockstore/MockRepository.java | 2 +- .../test/InternalTestCluster.java | 1 - .../org/elasticsearch/test/TestCluster.java | 1 - .../elasticsearch/test/TestSearchContext.java | 2 +- .../cache/recycler/MockPageCacheRecycler.java | 2 +- .../test/cluster/NoopClusterService.java | 9 ++--- .../test/cluster/TestClusterService.java | 7 ++-- .../index/merge/NoMergePolicyProvider.java | 2 +- .../test/transport/CapturingTransport.java | 6 +-- .../test/transport/MockTransportService.java | 6 +-- .../transport/netty/KeyedLockTests.java | 1 - .../elasticsearch/update/UpdateNoopTests.java | 2 +- 576 files changed, 483 insertions(+), 930 deletions(-) diff --git a/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java b/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java index 6e7c718769c..0b7c433da7f 100644 --- a/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java +++ b/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java @@ -21,7 +21,6 @@ package org.apache.lucene.analysis; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import java.lang.IllegalArgumentException; import java.io.IOException; import java.util.Collections; diff --git a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java index b474817183b..e50c92065e2 100644 --- a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java +++ b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java @@ -19,7 +19,6 @@ package org.apache.lucene.store; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.ByteSizeValue; @@ -42,7 +41,7 @@ public class StoreRateLimiting { MERGE, ALL; - public static Type fromString(String type) throws IllegalArgumentException { + public static Type fromString(String type) { if ("none".equalsIgnoreCase(type)) { return NONE; } else if ("merge".equalsIgnoreCase(type)) { @@ -88,7 +87,7 @@ public class StoreRateLimiting { this.type = type; } - public void setType(String type) throws IllegalArgumentException { + public void setType(String type) { this.type = Type.fromString(type); } } diff --git a/src/main/java/org/elasticsearch/action/ActionFuture.java b/src/main/java/org/elasticsearch/action/ActionFuture.java index 60115143e9d..f2b1d87ee5e 100644 --- a/src/main/java/org/elasticsearch/action/ActionFuture.java +++ b/src/main/java/org/elasticsearch/action/ActionFuture.java @@ -42,7 +42,7 @@ public interface ActionFuture extends Future { * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * still accessible using {@link #getRootFailure()}. */ - T actionGet() throws ElasticsearchException; + T actionGet(); /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing @@ -53,7 +53,7 @@ public interface ActionFuture extends Future { * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * still accessible using {@link #getRootFailure()}. */ - T actionGet(String timeout) throws ElasticsearchException; + T actionGet(String timeout); /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing @@ -66,7 +66,7 @@ public interface ActionFuture extends Future { * * @param timeoutMillis Timeout in millis */ - T actionGet(long timeoutMillis) throws ElasticsearchException; + T actionGet(long timeoutMillis); /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing @@ -77,7 +77,7 @@ public interface ActionFuture extends Future { * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * still accessible using {@link #getRootFailure()}. */ - T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException; + T actionGet(long timeout, TimeUnit unit); /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing @@ -88,7 +88,7 @@ public interface ActionFuture extends Future { * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * still accessible using {@link #getRootFailure()}. */ - T actionGet(TimeValue timeout) throws ElasticsearchException; + T actionGet(TimeValue timeout); /** * The root (possibly) wrapped failure. diff --git a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index d52b2cf5923..4335a40e030 100644 --- a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -69,21 +69,21 @@ public abstract class ActionRequestBuilder listener) throws ElasticsearchException { + protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) { if (request.waitForEvents() != null) { final long endTime = System.currentTimeMillis() + request.timeout().millis(); clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() { diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index 15534ff71a9..8b509ef2d94 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -73,7 +73,7 @@ public class TransportNodesHotThreadsAction extends TransportNodesOperationActio } @Override - protected NodeHotThreads nodeOperation(NodeRequest request) throws ElasticsearchException { + protected NodeHotThreads nodeOperation(NodeRequest request) { HotThreads hotThreads = new HotThreads() .busiestThreads(request.request.threads) .type(request.request.type) diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index ab3e4f05d34..e091854ea59 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -77,7 +77,7 @@ public class TransportNodesInfoAction extends TransportNodesOperationAction listener) throws ElasticsearchException { + protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener listener) { repositoriesService.unregisterRepository( new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name()) .masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()), diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index 026a5c404a7..fda863eb271 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -64,7 +64,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadOpera } @Override - protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener listener) { MetaData metaData = state.metaData(); RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); if (request.repositories().length == 0 || (request.repositories().length == 1 && "_all".equals(request.repositories()[0]))) { diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index cc7538ce1c5..e0fdc30b3ad 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.repositories.put; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 53d940a23e8..c9eb8a8c5f5 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -64,7 +64,7 @@ public class TransportPutRepositoryAction extends TransportMasterNodeOperationAc } @Override - protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener listener) { repositoriesService.registerRepository( new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]", diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 7f47a4888b6..6ce60afc74c 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -68,7 +68,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeOperatio } @Override - protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener listener) { repositoriesService.verifyRepository(request.name(), new ActionListener() { @Override public void onResponse(RepositoriesService.VerifyResponse verifyResponse) { diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index e0617f4d041..05c46177d86 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -67,7 +67,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeOperationA } @Override - protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener listener) { clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask(request, listener) { private volatile ClusterState clusterStateToSend; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index efef83a6e49..526b6afa389 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -86,7 +86,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe } @Override - protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener) { final ImmutableSettings.Builder transientUpdates = ImmutableSettings.settingsBuilder(); final ImmutableSettings.Builder persistentUpdates = ImmutableSettings.settingsBuilder(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index faf6101109c..de8e1fcdfab 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.shards; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 921effbc15f..bc83fecf647 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -67,7 +67,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadO } @Override - protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener listener) { ClusterState clusterState = clusterService.state(); String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices()); Map> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index ed8130ec661..4cafbb2e52d 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.create; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index dc72f2dcce4..0441ec41dd2 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -64,7 +64,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeOperationA } @Override - protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener listener) { SnapshotsService.SnapshotRequest snapshotRequest = new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository()) .indices(request.indices()) diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 3ee7fa55814..2b59a09db2b 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -63,7 +63,7 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationA } @Override - protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener listener) { SnapshotId snapshotIds = new SnapshotId(request.repository(), request.snapshot()); snapshotsService.deleteSnapshot(snapshotIds, new SnapshotsService.DeleteSnapshotListener() { @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index c67a4eda73c..dd36800a277 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -66,7 +66,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAct } @Override - protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener listener) { try { ImmutableList.Builder snapshotInfoBuilder = ImmutableList.builder(); if (isAllSnapshots(request.snapshots())) { diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 914331b33ef..4be7b39da9a 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.restore; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index e7ecfbcb1a8..a1ea5f3f2df 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -70,7 +70,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeOperation } @Override - protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener listener) { RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest( "restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot(), request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(), diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java index 8b3694531d3..efbc82c9b6a 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import java.lang.IllegalArgumentException; /** */ diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 44bb9a41638..878ca704345 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java index 6fdbaa5edd9..ba3bd7a8c77 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 82be56b089b..b23010e3ecc 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -94,7 +94,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesOperationAction } @Override - protected NodeSnapshotStatus nodeOperation(NodeRequest request) throws ElasticsearchException { + protected NodeSnapshotStatus nodeOperation(NodeRequest request) { ImmutableMap.Builder> snapshotMapBuilder = ImmutableMap.builder(); try { String nodeId = clusterService.localNode().id(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index d3bcbcf5304..edfc9d5fd32 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index fc1db98c35e..7b114c92d43 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -79,7 +79,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio } @Override - protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener listener) { ClusterState currentState = clusterService.state(); logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index fd689945a58..7f7d00ad529 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -97,7 +97,7 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction shardsStats = new ArrayList<>(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 021be9f80fd..938f176e35c 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -62,7 +62,7 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadO } @Override - protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener listener) { listener.onResponse(new PendingClusterTasksResponse(clusterService.pendingTasks())); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java index b782868b577..15cd244ba23 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.alias; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 0149d32d2fe..e2bf884428a 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -77,7 +77,7 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA } @Override - protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener listener) { //Expand the indices names List actions = request.aliasActions(); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java index 3df818aa8b6..675d0d88922 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java @@ -58,7 +58,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadOperatio } @Override - protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices); listener.onResponse(new AliasesExistResponse(result)); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index fd5c722b012..095aee369aa 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -61,7 +61,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadOperationA } @Override - protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); @SuppressWarnings("unchecked") // ImmutableList to List results incompatible type ImmutableOpenMap> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 0b9c6e6b186..f1c9afe84a4 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -26,7 +26,6 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TypeAttribute; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.custom.TransportSingleCustomOperationAction; @@ -97,7 +96,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction } @Override - protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) throws ElasticsearchException { + protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) { IndexService indexService = null; if (shardId != null) { indexService = indicesService.indexServiceSafe(shardId.getIndex()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index ca03241bf25..f5f27115869 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -97,7 +97,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio } @Override - protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) throws ElasticsearchException { + protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) { IndexService service = indicesService.indexService(request.shardId().getIndex()); if (service != null) { IndexShard shard = service.shard(request.shardId().id()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index c0afb720e89..8c360590a64 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -75,7 +75,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio } @Override - protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 38782547440..d79c2128611 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.create; import com.google.common.base.Charsets; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 14e8e1c9a24..18bca510a6e 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -66,7 +66,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi } @Override - protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener listener) { String cause = request.cause(); if (cause.length() == 0) { cause = "api"; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 67ffc22c400..4fa760c7a9e 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -75,7 +75,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi } @Override - protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new DeleteIndexResponse(true)); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index f388a390690..45572ef801d 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -64,7 +64,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadOperati } @Override - protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener listener) { boolean exists; try { // Similar as the previous behaviour, but now also aliases and wildcards are supported. diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index d5bd8a6bb3a..ced21bd98e0 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -61,7 +61,7 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadOperation } @Override - protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new TypesExistsResponse(false)); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index c0b44d150ec..1d91fd6d800 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -90,7 +90,7 @@ public class TransportFlushAction extends TransportBroadcastOperationAction { return this.validNames.contains(name); } - public static Feature fromName(String name) throws IllegalArgumentException { + public static Feature fromName(String name) { for (Feature feature : Feature.values()) { if (feature.validName(name)) { return feature; @@ -83,7 +82,7 @@ public class GetIndexRequest extends ClusterInfoRequest { throw new IllegalArgumentException("No feature for name [" + name + "]"); } - public static Feature fromId(byte id) throws IllegalArgumentException { + public static Feature fromId(byte id) { if (id < 0 || id >= FEATURES.length) { throw new IllegalArgumentException("No mapping for id [" + id + "]"); } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 4b47e95c2bc..db9bfe99133 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.get; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.support.ActionFilters; @@ -70,7 +69,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction listener) throws ElasticsearchException { + final ActionListener listener) { ImmutableOpenMap> warmersResult = ImmutableOpenMap.of(); ImmutableOpenMap> mappingsResult = ImmutableOpenMap.of(); ImmutableOpenMap> aliasesResult = ImmutableOpenMap.of(); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 5886fe55d65..ab6b31c0bb6 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -87,7 +87,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO } @Override - protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) throws ElasticsearchException { + protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) { assert shardId != null; IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); Collection typeIntersection; @@ -173,7 +173,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO } }; - private ImmutableMap findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) throws ElasticsearchException { + private ImmutableMap findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) { MapBuilder fieldMappings = new MapBuilder<>(); final DocumentFieldMappers allFieldMappers = documentMapper.mappers(); for (String field : request.fields()) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index 8b9f67fcbb5..9867aeaf80c 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -60,7 +60,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction listener) throws ElasticsearchException { + protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener listener) { logger.trace("serving getMapping request based on version {}", state.version()); ImmutableOpenMap> result = state.metaData().findMappings( concreteIndices, request.types() diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index b990a7e90b0..a2f28a9bcfc 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.mapping.put; import com.carrotsearch.hppc.ObjectOpenHashSet; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 700c90c02ce..dffb1780423 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -65,7 +65,7 @@ public class TransportPutMappingAction extends TransportMasterNodeOperationActio } @Override - protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index a704c4a3d59..7a74cc123e1 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -75,7 +75,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeOperationAction } @Override - protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java index 94a7b930e79..3c99a909e19 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java @@ -91,7 +91,7 @@ public class TransportOptimizeAction extends TransportBroadcastOperationAction listener) throws ElasticsearchException { + protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); ImmutableOpenMap.Builder indexToSettingsBuilder = ImmutableOpenMap.builder(); for (String concreteIndex : concreteIndices) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 968c93f3fae..08195d17e38 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -74,7 +74,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeOperationA } @Override - protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest() .indices(concreteIndices) diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index eb2e92370ff..4561c80cfd4 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -119,7 +119,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi } @Override - protected ShardStats shardOperation(IndexShardStatsRequest request) throws ElasticsearchException { + protected ShardStats shardOperation(IndexShardStatsRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.shardSafe(request.shardId().id()); // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 3e7772afd84..4e6fe81b7fd 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -63,7 +63,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeOpera } @Override - protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener listener) { indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() { @Override public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 1a7cc798731..5ff737c4d28 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -63,7 +63,7 @@ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOpe } @Override - protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener listener) { List results; // If we did not ask for a specific name, then we return all templates diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 9d8619219f8..41dd9ec2b45 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index ac345a732e2..d0b771262c2 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -63,7 +63,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeOperatio } @Override - protected void masterOperation(final PutIndexTemplateRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final PutIndexTemplateRequest request, final ClusterState state, final ActionListener listener) { String cause = request.cause(); if (cause.length() == 0) { cause = "api"; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 6a1ba30946f..99bb447bed8 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -156,7 +156,7 @@ public class TransportValidateQueryAction extends TransportBroadcastOperationAct } @Override - protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) throws ElasticsearchException { + protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexQueryParserService queryParserService = indexService.queryParserService(); IndexShard indexShard = indexService.shardSafe(request.shardId().id()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java index 6ba466eae1a..17a30e50ce1 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java @@ -72,7 +72,7 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct } @Override - protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask(request, listener) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java index 2a84efbbfdf..b9fe92ea438 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java @@ -64,7 +64,7 @@ public class TransportGetWarmersAction extends TransportClusterInfoAction listener) throws ElasticsearchException { + protected void doMasterOperation(final GetWarmersRequest request, String[] concreteIndices, final ClusterState state, final ActionListener listener) { ImmutableOpenMap> result = state.metaData().findWarmers( concreteIndices, request.types(), request.warmers() ); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java index 08e5d78d687..e92eb3195d7 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java @@ -85,7 +85,7 @@ public class TransportPutWarmerAction extends TransportMasterNodeOperationAction } @Override - protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener listener) { // first execute the search request, see that its ok... SearchRequest searchRequest = new SearchRequest(request.searchRequest(), request); searchAction.execute(searchRequest, new ActionListener() { diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 607b7a69c65..be26f318625 100644 --- a/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.bulk; -import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.delete.DeleteRequest; diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index b0f12b5996e..617c3fc32bd 100644 --- a/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.bulk; import com.google.common.collect.Lists; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.*; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 799c7c073b1..59c2614af1b 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionWriteResponse; diff --git a/src/main/java/org/elasticsearch/action/count/CountRequest.java b/src/main/java/org/elasticsearch/action/count/CountRequest.java index 6ed17c8c11a..a37ba887fd5 100644 --- a/src/main/java/org/elasticsearch/action/count/CountRequest.java +++ b/src/main/java/org/elasticsearch/action/count/CountRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.count; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest; diff --git a/src/main/java/org/elasticsearch/action/count/TransportCountAction.java b/src/main/java/org/elasticsearch/action/count/TransportCountAction.java index 0c2496cda1a..ed39bbb01ac 100644 --- a/src/main/java/org/elasticsearch/action/count/TransportCountAction.java +++ b/src/main/java/org/elasticsearch/action/count/TransportCountAction.java @@ -143,7 +143,7 @@ public class TransportCountAction extends TransportBroadcastOperationAction fieldStats = new HashMap<>(); IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex()); diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index e4c8c22d0bd..67c6141273c 100644 --- a/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.get; import com.google.common.collect.Iterators; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.*; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 469d5d52d41..939f5c3b27c 100644 --- a/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -89,7 +89,7 @@ public class TransportGetAction extends TransportShardSingleOperationAction } } - public static OpType fromString(String sOpType) throws IllegalArgumentException { + public static OpType fromString(String sOpType) { String lowersOpType = sOpType.toLowerCase(Locale.ROOT); switch(lowersOpType){ case "create": @@ -490,7 +489,7 @@ public class IndexRequest extends ShardReplicationOperationRequest * Sets a string representation of the {@link #opType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can * be either "index" or "create". */ - public IndexRequest opType(String opType) throws IllegalArgumentException { + public IndexRequest opType(String opType) { return opType(OpType.fromString(opType)); } @@ -559,7 +558,7 @@ public class IndexRequest extends ShardReplicationOperationRequest return this.autoGeneratedId; } - public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) throws ElasticsearchException { + public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) { // resolve the routing if needed routing(metaData.resolveIndexRouting(routing, index)); // resolve timestamp if provided externally diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java index 88be29f5de1..b0d6bbb6c41 100644 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java +++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.mlt; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.*; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; @@ -475,7 +474,7 @@ public class MoreLikeThisRequest extends ActionRequest impl /** * The search type of the mlt search query. */ - public MoreLikeThisRequest searchType(String searchType) throws IllegalArgumentException { + public MoreLikeThisRequest searchType(String searchType) { return searchType(SearchType.fromString(searchType)); } diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java index de3d5d73f2d..ad8f7e43da0 100644 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.mlt; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -213,7 +212,7 @@ public class MoreLikeThisRequestBuilder extends ActionRequestBuilder(request.items.size()); diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 0208b519efe..54f8c861bea 100644 --- a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.search; import com.google.common.collect.Lists; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 8fee22267c2..8e1da31affa 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -241,7 +240,7 @@ public class SearchRequest extends ActionRequest implements Indic * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch", * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch". */ - public SearchRequest searchType(String searchType) throws IllegalArgumentException { + public SearchRequest searchType(String searchType) { return searchType(SearchType.fromString(searchType)); } diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 0ea1b41a5dd..0a96133201d 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; -import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestBuilder; @@ -86,7 +85,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder extends BaseFuture implements private Throwable rootFailure; @Override - public T actionGet() throws ElasticsearchException { + public T actionGet() { try { return get(); } catch (InterruptedException e) { @@ -51,22 +50,22 @@ public abstract class AdapterActionFuture extends BaseFuture implements } @Override - public T actionGet(String timeout) throws ElasticsearchException { + public T actionGet(String timeout) { return actionGet(TimeValue.parseTimeValue(timeout, null)); } @Override - public T actionGet(long timeoutMillis) throws ElasticsearchException { + public T actionGet(long timeoutMillis) { return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); } @Override - public T actionGet(TimeValue timeout) throws ElasticsearchException { + public T actionGet(TimeValue timeout) { return actionGet(timeout.millis(), TimeUnit.MILLISECONDS); } @Override - public T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException { + public T actionGet(long timeout, TimeUnit unit) { try { return get(timeout, unit); } catch (TimeoutException e) { diff --git a/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 5d32b631ae6..9722bef5990 100644 --- a/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.support; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.settings.NodeSettingsService; diff --git a/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index ccd458acb1f..c46a00720e5 100644 --- a/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.support; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/action/support/TransportAction.java b/src/main/java/org/elasticsearch/action/support/TransportAction.java index 4637ba00c58..7d3f3564693 100644 --- a/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -47,7 +47,7 @@ public abstract class TransportAction execute(Request request) throws ElasticsearchException { + public final ActionFuture execute(Request request) { PlainActionFuture future = newFuture(); // since we don't have a listener, and we release a possible lock with the future // there is no need to execute it under a listener thread diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java b/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java index 9c8f8ce065c..b0bcad2e86b 100644 --- a/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java @@ -75,7 +75,7 @@ public abstract class TransportBroadcastOperationAction listener) throws ElasticsearchException { + protected final void masterOperation(final Request request, final ClusterState state, final ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); doMasterOperation(request, concreteIndices, state, listener); } - protected abstract void doMasterOperation(Request request, String[] concreteIndices, ClusterState state, final ActionListener listener) throws ElasticsearchException; + protected abstract void doMasterOperation(Request request, String[] concreteIndices, ClusterState state, final ActionListener listener); } diff --git a/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java b/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java index 65bdaeb7c26..9331a5d4a65 100644 --- a/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java @@ -76,7 +76,7 @@ public abstract class TransportNodesOperationAction listener) throws ElasticsearchException; + protected abstract void shardOperation(InternalRequest request, ActionListener listener); protected abstract Response newResponse(); @@ -97,7 +97,7 @@ public abstract class TransportInstanceSingleOperationAction listener) throws ElasticsearchException { + protected void shardOperation(final InternalRequest request, final ActionListener listener) { shardOperation(request, listener, 0); } - protected void shardOperation(final InternalRequest request, final ActionListener listener, final int retryCount) throws ElasticsearchException { + protected void shardOperation(final InternalRequest request, final ActionListener listener, final int retryCount) { IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex()); IndexShard indexShard = indexService.shardSafe(request.request().shardId()); final UpdateHelper.Result result = updateHelper.prepare(request.request(), indexShard); diff --git a/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index f60a09fcd14..ea90ac33f0b 100644 --- a/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.update; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Requests; diff --git a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java index 2f059ac2f41..44c6dc3a4ab 100644 --- a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java +++ b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java @@ -20,7 +20,6 @@ package org.elasticsearch.cache.recycler; import com.google.common.base.Strings; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.recycler.AbstractRecyclerC; diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/src/main/java/org/elasticsearch/client/transport/TransportClient.java index ad42cb764eb..6dd30b02af3 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -107,7 +107,7 @@ public class TransportClient extends AbstractClient { * Constructs a new transport client with settings loaded either from the classpath or the file system (the * elasticsearch.(yml|json) files optionally prefixed with config/). */ - public TransportClient() throws ElasticsearchException { + public TransportClient() { this(ImmutableSettings.Builder.EMPTY_SETTINGS, true); } @@ -136,7 +136,7 @@ public class TransportClient extends AbstractClient { * @param loadConfigSettings true if settings should be loaded from the classpath/file system. * @throws org.elasticsearch.ElasticsearchException */ - public TransportClient(Settings.Builder settings, boolean loadConfigSettings) throws ElasticsearchException { + public TransportClient(Settings.Builder settings, boolean loadConfigSettings) { this(settings.build(), loadConfigSettings); } @@ -149,7 +149,7 @@ public class TransportClient extends AbstractClient { * @param loadConfigSettings true if settings should be loaded from the classpath/file system. * @throws org.elasticsearch.ElasticsearchException */ - public TransportClient(Settings pSettings, boolean loadConfigSettings) throws ElasticsearchException { + public TransportClient(Settings pSettings, boolean loadConfigSettings) { Tuple tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings); Settings settings = settingsBuilder() .put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 4fe1d48a2e9..e2cf962f65a 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -24,7 +24,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -196,7 +195,7 @@ public class TransportClientNodesService extends AbstractComponent { return this; } - public void execute(NodeListenerCallback callback, ActionListener listener) throws ElasticsearchException { + public void execute(NodeListenerCallback callback, ActionListener listener) { ImmutableList nodes = this.nodes; ensureNodesAreAvailable(nodes); int index = getNodeNumber(); diff --git a/src/main/java/org/elasticsearch/cluster/ClusterService.java b/src/main/java/org/elasticsearch/cluster/ClusterService.java index bf351e46758..4ee9afb3e98 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.OperationRouting; diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java index 1f1b8769efa..b90bc0bb2ac 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableSet; -import java.lang.IllegalArgumentException; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -103,7 +102,7 @@ public class ClusterState implements ToXContent { return customFactories.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws IllegalArgumentException { + public static Custom.Factory lookupFactorySafe(String type) { Custom.Factory factory = customFactories.get(type); if (factory == null) { throw new IllegalArgumentException("No custom state factory registered for type [" + type + "]"); diff --git a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 532f829a06f..2010f67d703 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.action.index; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -133,4 +132,4 @@ public class MappingUpdatedAction extends AbstractComponent { void onFailure(Throwable t); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java b/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java index c153bf09b24..1bf3d3b6b90 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java @@ -72,7 +72,7 @@ public class NodeIndexDeletedAction extends AbstractComponent { listeners.remove(listener); } - public void nodeIndexDeleted(final ClusterState clusterState, final String index, final Settings indexSettings, final String nodeId) throws ElasticsearchException { + public void nodeIndexDeleted(final ClusterState clusterState, final String index, final Settings indexSettings, final String nodeId) { final DiscoveryNodes nodes = clusterState.nodes(); transportService.sendRequest(clusterState.nodes().masterNode(), INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME); diff --git a/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index 794e5775852..5623dd96d6f 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -55,7 +55,7 @@ public class NodeMappingRefreshAction extends AbstractComponent { transportService.registerRequestHandler(ACTION_NAME, NodeMappingRefreshRequest.class, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); } - public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) throws ElasticsearchException { + public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) { final DiscoveryNodes nodes = state.nodes(); if (nodes.masterNode() == null) { logger.warn("can't send mapping refresh for [{}][{}], no master known.", request.index(), Strings.arrayToCommaDelimitedString(request.types())); diff --git a/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 7630306ebe1..ecbc27bd266 100644 --- a/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -76,7 +76,7 @@ public class ShardStateAction extends AbstractComponent { transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry.class, ThreadPool.Names.SAME, new ShardFailedTransportHandler()); } - public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason) throws ElasticsearchException { + public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason) { DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); if (masterNode == null) { logger.warn("can't send shard failed for {}, no master known.", shardRouting); @@ -85,7 +85,7 @@ public class ShardStateAction extends AbstractComponent { innerShardFailed(shardRouting, indexUUID, reason, masterNode); } - public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason, final DiscoveryNode masterNode) throws ElasticsearchException { + public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason, final DiscoveryNode masterNode) { logger.trace("{} re-sending failed shard for {}, indexUUID [{}], reason [{}]", shardRouting.shardId(), shardRouting, indexUUID, reason); innerShardFailed(shardRouting, indexUUID, reason, masterNode); } @@ -101,7 +101,7 @@ public class ShardStateAction extends AbstractComponent { }); } - public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) throws ElasticsearchException { + public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); if (masterNode == null) { logger.warn("can't send shard started for {}. no master known.", shardRouting); @@ -110,7 +110,7 @@ public class ShardStateAction extends AbstractComponent { shardStarted(shardRouting, indexUUID, reason, masterNode); } - public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) throws ElasticsearchException { + public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) { ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason); diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java index 3fc77b4ee4c..45ff1d3707b 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.block; -import java.lang.IllegalArgumentException; import java.util.EnumSet; diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java index 5d2156ef56e..878082bf318 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index 8a5ae1b128a..e774f0d82ce 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 8cea5faf7a4..b9713727d6e 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -22,8 +22,6 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -109,7 +107,7 @@ public class IndexMetaData { return customFactories.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws IllegalArgumentException { + public static Custom.Factory lookupFactorySafe(String type) { Custom.Factory factory = customFactories.get(type); if (factory == null) { throw new IllegalArgumentException("No custom index metadata factoy registered for type [" + type + "]"); diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index 6dc595be51c..f80c6072bfc 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.TimestampParsingException; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 7dd6dc8f7bf..ea25a6d5256 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -25,7 +25,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; import com.google.common.collect.*; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -116,7 +115,7 @@ public class MetaData implements Iterable { return customFactories.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws IllegalArgumentException { + public static Custom.Factory lookupFactorySafe(String type) { Custom.Factory factory = customFactories.get(type); if (factory == null) { throw new IllegalArgumentException("No custom index metadata factory registered for type [" + type + "]"); diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 67ddf9ea81b..9fcb5182180 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -26,7 +26,6 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -156,7 +155,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { }); } - public void validateIndexName(String index, ClusterState state) throws ElasticsearchException { + public void validateIndexName(String index, ClusterState state) { if (state.routingTable().hasIndex(index)) { throw new IndexAlreadyExistsException(new Index(index)); } @@ -551,7 +550,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { return templates; } - private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) throws ElasticsearchException { + private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) { validateIndexName(request.index(), state); validateIndexSettings(request.index(), request.settings()); } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 8377f3ae41d..625c8d6e839 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 66ac3b17fb6..755d0077c13 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -23,7 +23,6 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeOperationRequest; import org.elasticsearch.cluster.ClusterService; @@ -178,7 +177,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent { }); } - private void validate(PutRequest request) throws ElasticsearchException { + private void validate(PutRequest request) { if (request.name.contains(" ")) { throw new InvalidIndexTemplateException(request.name, "name must not contain a space"); } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 3caa1a9e0e4..8b4e334bade 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.Sets; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java index 51aa9198e2f..642136d7b7e 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; diff --git a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java index 1622c247812..b1bcc92b8bd 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 0ea050c2fe8..915f0d5599d 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.node; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index ec2a6abc402..2831af8183d 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.UnmodifiableIterator; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index afa92bae30d..de4ed5434e1 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -25,7 +25,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenIntMap; diff --git a/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 9f55d7d8893..ef46b6e8875 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing; import com.google.common.collect.Lists; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/src/main/java/org/elasticsearch/cluster/routing/Preference.java b/src/main/java/org/elasticsearch/cluster/routing/Preference.java index 480ea700157..e8842f0de3e 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/Preference.java +++ b/src/main/java/org/elasticsearch/cluster/routing/Preference.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.cluster.routing; -import java.lang.IllegalArgumentException; /** * Routing Preference Type diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java index 9231f0782b4..deb25938f19 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java +++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing; import com.google.common.collect.Iterators; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.node.DiscoveryNode; import java.util.ArrayList; diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 21378f22b99..33804559e4d 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -75,15 +75,15 @@ public class RoutingService extends AbstractLifecycleComponent i } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { FutureUtils.cancel(scheduledRoutingTableFuture); scheduledRoutingTableFuture = null; clusterService.remove(this); diff --git a/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java b/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java index d5a1f25f984..b36e1fcc88a 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java +++ b/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing; -import java.lang.IllegalStateException; /** * Represents the current state of a {@link ShardRouting} as defined by the diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 21aa3f82164..8e6b68ecf78 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -119,7 +118,7 @@ public class AllocationService extends AbstractComponent { return reroute(clusterState, commands, false); } - public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) throws ElasticsearchException { + public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) { RoutingNodes routingNodes = clusterState.routingNodes(); // we don't shuffle the unassigned shards here, to try and get as close as possible to // a consistent result of the effect the commands have on the routing diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 941cae71c98..11f3e45653f 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import com.google.common.base.Predicate; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; -import java.lang.IllegalArgumentException; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.MutableShardRouting; import org.elasticsearch.cluster.routing.RoutingNode; diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java index 515d0a60e9f..18e729aba89 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java @@ -20,8 +20,6 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.MutableShardRouting; @@ -167,7 +165,7 @@ public class AllocateAllocationCommand implements AllocationCommand { } @Override - public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException { + public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) { DiscoveryNode discoNode = allocation.nodes().resolveNode(node); MutableShardRouting shardRouting = null; diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java index d53151c5dbd..1e835dc4039 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java @@ -88,5 +88,5 @@ public interface AllocationCommand { * @param allocation {@link RoutingAllocation} to modify * @throws org.elasticsearch.ElasticsearchException if something happens during reconfiguration */ - RerouteExplanation execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException; + RerouteExplanation execute(RoutingAllocation allocation, boolean explain); } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java index f73a6711587..db41a759d35 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java @@ -19,8 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.command; -import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -61,7 +59,7 @@ public class AllocationCommands { } @SuppressWarnings("unchecked") - public static AllocationCommand.Factory lookupFactorySafe(String name) throws IllegalArgumentException { + public static AllocationCommand.Factory lookupFactorySafe(String name) { AllocationCommand.Factory factory = factories.get(name); if (factory == null) { throw new IllegalArgumentException("No allocation command factory registered for name [" + name + "]"); @@ -113,7 +111,7 @@ public class AllocationCommands { * @param allocation {@link RoutingAllocation} to apply this command to * @throws org.elasticsearch.ElasticsearchException if something happens during execution */ - public RoutingExplanations execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException { + public RoutingExplanations execute(RoutingAllocation allocation, boolean explain) { RoutingExplanations explanations = new RoutingExplanations(); for (AllocationCommand command : commands) { explanations.add(command.execute(allocation, explain)); diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java index 6ba8c88d21b..5d7a93635b4 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.MutableShardRouting; @@ -165,7 +164,7 @@ public class CancelAllocationCommand implements AllocationCommand { } @Override - public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException { + public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) { DiscoveryNode discoNode = allocation.nodes().resolveNode(node); boolean found = false; for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.id()); it.hasNext(); ) { diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java index bdfad07a9cd..185e74443e1 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.MutableShardRouting; @@ -147,7 +146,7 @@ public class MoveAllocationCommand implements AllocationCommand { } @Override - public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException { + public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) { DiscoveryNode fromDiscoNode = allocation.nodes().resolveNode(fromNode); DiscoveryNode toDiscoNode = allocation.nodes().resolveNode(toNode); Decision decision = null; diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 59ef855816d..b057307cafb 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -19,8 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.settings.Validator; diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java index 7068d3f0fd9..76922ae2462 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.google.common.collect.Lists; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 08fa64390df..7546482d87a 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -19,8 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; diff --git a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index 21c6f1361d5..17350ba6c04 100644 --- a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.service; import com.google.common.collect.Iterables; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ClusterState.Builder; @@ -145,7 +144,7 @@ public class InternalClusterService extends AbstractLifecycleComponent extends AbstractComponent im @SuppressWarnings({"unchecked"}) @Override - public T start() throws ElasticsearchException { + public T start() { if (!lifecycle.canMoveToStarted()) { return (T) this; } @@ -74,11 +74,11 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent im return (T) this; } - protected abstract void doStart() throws ElasticsearchException; + protected abstract void doStart(); @SuppressWarnings({"unchecked"}) @Override - public T stop() throws ElasticsearchException { + public T stop() { if (!lifecycle.canMoveToStopped()) { return (T) this; } @@ -93,10 +93,10 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent im return (T) this; } - protected abstract void doStop() throws ElasticsearchException; + protected abstract void doStop(); @Override - public void close() throws ElasticsearchException { + public void close() { if (lifecycle.started()) { stop(); } @@ -113,5 +113,5 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent im } } - protected abstract void doClose() throws ElasticsearchException; + protected abstract void doClose(); } diff --git a/src/main/java/org/elasticsearch/common/component/Lifecycle.java b/src/main/java/org/elasticsearch/common/component/Lifecycle.java index 146a4adcb20..e6cbf264af3 100644 --- a/src/main/java/org/elasticsearch/common/component/Lifecycle.java +++ b/src/main/java/org/elasticsearch/common/component/Lifecycle.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.component; -import java.lang.IllegalStateException; /** * Lifecycle state. Allows the following transitions: diff --git a/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java b/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java index d8e72bd56d8..452f644462f 100644 --- a/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java +++ b/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java @@ -35,7 +35,7 @@ public interface LifecycleComponent extends Releasable { void removeLifecycleListener(LifecycleListener listener); - T start() throws ElasticsearchException; + T start(); - T stop() throws ElasticsearchException; + T stop(); } diff --git a/src/main/java/org/elasticsearch/common/geo/GeoDistance.java b/src/main/java/org/elasticsearch/common/geo/GeoDistance.java index 0f44b5738a6..fca80970439 100644 --- a/src/main/java/org/elasticsearch/common/geo/GeoDistance.java +++ b/src/main/java/org/elasticsearch/common/geo/GeoDistance.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.geo; import org.apache.lucene.util.Bits; import org.apache.lucene.util.SloppyMath; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.GeoPointValues; diff --git a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index 8e908012d8a..de37ddb4eb4 100644 --- a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -17,7 +17,6 @@ package org.elasticsearch.common.geo; -import java.lang.IllegalArgumentException; import java.util.ArrayList; import java.util.Collection; @@ -479,4 +478,4 @@ public class GeoHashUtils { } return interval; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index ead682293b6..07ceaf1b762 100644 --- a/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -27,7 +27,6 @@ import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; import org.apache.commons.lang3.tuple.Pair; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; diff --git a/src/main/java/org/elasticsearch/common/lease/Releasable.java b/src/main/java/org/elasticsearch/common/lease/Releasable.java index 84b11fd0498..29ee1d59e6f 100644 --- a/src/main/java/org/elasticsearch/common/lease/Releasable.java +++ b/src/main/java/org/elasticsearch/common/lease/Releasable.java @@ -27,5 +27,5 @@ import org.elasticsearch.ElasticsearchException; public interface Releasable extends AutoCloseable { @Override - void close() throws ElasticsearchException; + void close(); } diff --git a/src/main/java/org/elasticsearch/common/lease/Releasables.java b/src/main/java/org/elasticsearch/common/lease/Releasables.java index 0322d66adcc..c91494a235d 100644 --- a/src/main/java/org/elasticsearch/common/lease/Releasables.java +++ b/src/main/java/org/elasticsearch/common/lease/Releasables.java @@ -104,7 +104,7 @@ public enum Releasables { return new Releasable() { @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(releasables); } @@ -116,7 +116,7 @@ public enum Releasables { return new Releasable() { @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(releasables); } diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 991e74d3080..92593d13479 100644 --- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -61,8 +61,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Counter; import org.apache.lucene.util.Version; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; diff --git a/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java b/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java index 77ef6feeb64..fb6ebc28e1c 100644 --- a/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java +++ b/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.lucene.all; import com.google.common.collect.Lists; -import java.lang.IllegalStateException; import org.elasticsearch.common.io.FastCharArrayWriter; import org.elasticsearch.common.io.FastStringReader; diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java index 9afdc2dd90b..e05c11905ec 100644 --- a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java +++ b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java @@ -28,8 +28,6 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.RoaringDocIdSet; import org.apache.lucene.util.SparseFixedBitSet; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import java.io.IOException; diff --git a/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index fc956703826..15a421567e5 100644 --- a/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.docset.DocIdSets; @@ -202,4 +201,4 @@ public class FilterableTermsEnum extends TermsEnum { public BytesRef next() throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/common/lucene/index/FreqTermsEnum.java b/src/main/java/org/elasticsearch/common/lucene/index/FreqTermsEnum.java index 8b33b3ef4ad..a74b1b74645 100644 --- a/src/main/java/org/elasticsearch/common/lucene/index/FreqTermsEnum.java +++ b/src/main/java/org/elasticsearch/common/lucene/index/FreqTermsEnum.java @@ -103,7 +103,7 @@ public class FreqTermsEnum extends FilterableTermsEnum implements Releasable { @Override - public void close() throws ElasticsearchException { + public void close() { try { Releasables.close(cachedTermOrds, termDocFreqs, termsTotalFreqs); } finally { diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java index a1bc0d61108..db651ab8012 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; -import java.lang.IllegalArgumentException; import java.io.IOException; diff --git a/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java b/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java index 179237a5d27..026711e2313 100644 --- a/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java +++ b/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.recycler; -import java.lang.IllegalStateException; import java.util.Deque; diff --git a/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java b/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java index 35a943da48c..258e4355b9f 100644 --- a/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java +++ b/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.recycler; -import java.lang.IllegalStateException; /** */ diff --git a/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index 67f29c21130..4344a0689c1 100644 --- a/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -22,7 +22,6 @@ package org.elasticsearch.common.recycler; import com.carrotsearch.hppc.hash.MurmurHash3; import com.google.common.collect.Queues; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; public enum Recyclers { ; @@ -126,7 +125,7 @@ public enum Recyclers { return new Recycler.V() { @Override - public void close() throws ElasticsearchException { + public void close() { synchronized (lock) { delegate.close(); } diff --git a/src/main/java/org/elasticsearch/common/regex/Regex.java b/src/main/java/org/elasticsearch/common/regex/Regex.java index 63bbb573227..f5c3094e31d 100644 --- a/src/main/java/org/elasticsearch/common/regex/Regex.java +++ b/src/main/java/org/elasticsearch/common/regex/Regex.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.regex; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import java.util.Locale; diff --git a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java index 98b7f0ecfb7..107324949db 100644 --- a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.rounding; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; diff --git a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java index 296a0e9b513..8738d30e774 100644 --- a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java +++ b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java @@ -25,7 +25,6 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Classes; diff --git a/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java b/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java index c75cedd1d6a..f3aa0941d46 100644 --- a/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java +++ b/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.transport; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalStateException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; diff --git a/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 37c76d2b865..153417f869f 100644 --- a/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.unit; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,7 +51,7 @@ public class ByteSizeValue implements Serializable, Streamable { this.sizeUnit = sizeUnit; } - public int bytesAsInt() throws IllegalArgumentException { + public int bytesAsInt() { long bytes = bytes(); if (bytes > Integer.MAX_VALUE) { throw new IllegalArgumentException("size [" + toString() + "] is bigger than max int"); @@ -250,4 +249,4 @@ public class ByteSizeValue implements Serializable, Streamable { result = 31 * result + (sizeUnit != null ? sizeUnit.hashCode() : 0); return result; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java b/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java index 8ffec08d4c4..cb89ca83d51 100644 --- a/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java +++ b/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.unit; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java index a1d1bb76d21..7ae494cabb9 100644 --- a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java +++ b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.unit; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.util.automaton.LevenshteinAutomata; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.xcontent.ToXContent; diff --git a/src/main/java/org/elasticsearch/common/util/BloomFilter.java b/src/main/java/org/elasticsearch/common/util/BloomFilter.java index 5137a043210..8a2acba8d28 100644 --- a/src/main/java/org/elasticsearch/common/util/BloomFilter.java +++ b/src/main/java/org/elasticsearch/common/util/BloomFilter.java @@ -25,7 +25,6 @@ import org.apache.lucene.store.DataOutput; import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.hash.MurmurHash3; diff --git a/src/main/java/org/elasticsearch/common/util/LocaleUtils.java b/src/main/java/org/elasticsearch/common/util/LocaleUtils.java index 65c99da43c5..2e6c01a1ca7 100644 --- a/src/main/java/org/elasticsearch/common/util/LocaleUtils.java +++ b/src/main/java/org/elasticsearch/common/util/LocaleUtils.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util; -import java.lang.IllegalArgumentException; import java.util.Locale; diff --git a/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java b/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java index eff145ae80b..7949989c2a0 100644 --- a/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java +++ b/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java @@ -166,7 +166,7 @@ public class LongObjectPagedHashMap extends AbstractPagedHashMap implements I } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(keys, values); } diff --git a/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java b/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java index 527bb58f2b8..1cb700cff60 100644 --- a/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java +++ b/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java @@ -27,7 +27,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java b/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java index 29247e5939c..b2a80fc68db 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import java.lang.IllegalArgumentException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java index 77f81b85d39..8bb16869c47 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import java.lang.IllegalStateException; import org.elasticsearch.common.metrics.CounterMetric; import java.util.concurrent.BlockingQueue; diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java index 94d82b77362..8e21065e2f4 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import java.lang.IllegalStateException; import java.util.concurrent.*; diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index c9fb994985d..fb62a628244 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import java.lang.IllegalStateException; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java b/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java index 4d979698161..bff4ee613e1 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import java.lang.IllegalStateException; import java.util.AbstractQueue; import java.util.Collection; diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java index e43eb769e65..9ae1a03a67d 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java +++ b/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.xcontent; import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; diff --git a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java index ccd2ca1ae2c..08174e30a3e 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java +++ b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java @@ -23,7 +23,6 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -import java.lang.IllegalStateException; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.AbstractXContentParser; diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index f73f2bbb593..1f7207abd5b 100644 --- a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -78,7 +78,7 @@ public class DiscoveryService extends AbstractLifecycleComponent implem } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { synchronized (clusterGroups) { ClusterGroup clusterGroup = clusterGroups.get(clusterName); if (clusterGroup == null) { @@ -192,7 +191,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { synchronized (clusterGroups) { ClusterGroup clusterGroup = clusterGroups.get(clusterName); if (clusterGroup == null) { @@ -250,7 +249,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override diff --git a/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java b/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java index 2f8ac0073d3..d78d22aa983 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java +++ b/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.discovery.zen; -import java.lang.IllegalStateException; /** diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index d2271a92b6b..0defcb7edd5 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -23,8 +23,6 @@ import com.google.common.base.Objects; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -227,7 +225,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { nodesFD.setLocalNode(clusterService.localNode()); joinThreadControl.start(); pingService.start(); @@ -249,7 +247,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { joinThreadControl.stop(); pingService.stop(); masterFD.stop("zen disco stop"); @@ -283,7 +281,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { masterFD.close(); nodesFD.close(); publishClusterState.close(); diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java index 06a72665130..1159f1c4e6a 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen.fd; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java index 667c4289921..41a524b2629 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen.fd; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; diff --git a/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java index b245a6a0d35..7a7567ea3b4 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java @@ -87,7 +87,7 @@ public class MembershipAction extends AbstractComponent { transportService.sendRequest(node, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(masterNode), EmptyTransportResponseHandler.INSTANCE_SAME); } - public void sendLeaveRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) throws ElasticsearchException { + public void sendLeaveRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(masterNode, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); } @@ -95,7 +95,7 @@ public class MembershipAction extends AbstractComponent { transportService.sendRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME); } - public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) throws ElasticsearchException { + public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); } @@ -103,7 +103,7 @@ public class MembershipAction extends AbstractComponent { /** * Validates the join request, throwing a failure if it failed. */ - public void sendValidateJoinRequestBlocking(DiscoveryNode node, TimeValue timeout) throws ElasticsearchException { + public void sendValidateJoinRequestBlocking(DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); } diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java index 370620a9b7b..18f734f7136 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java @@ -43,7 +43,7 @@ public interface ZenPing extends LifecycleComponent { void setPingContextProvider(PingContextProvider contextProvider); - void ping(PingListener listener, TimeValue timeout) throws ElasticsearchException; + void ping(PingListener listener, TimeValue timeout); public interface PingListener { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java index ed32b22d37f..b7bd539c2f7 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java @@ -21,7 +21,6 @@ package org.elasticsearch.discovery.zen.ping; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Nullable; @@ -79,21 +78,21 @@ public class ZenPingService extends AbstractLifecycleComponent implemen } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { for (ZenPing zenPing : zenPings) { zenPing.start(); } } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { for (ZenPing zenPing : zenPings) { zenPing.stop(); } } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { for (ZenPing zenPing : zenPings) { zenPing.close(); } @@ -119,7 +118,7 @@ public class ZenPingService extends AbstractLifecycleComponent implemen } @Override - public void ping(PingListener listener, TimeValue timeout) throws ElasticsearchException { + public void ping(PingListener listener, TimeValue timeout) { ImmutableList zenPings = this.zenPings; CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings); for (ZenPing zenPing : zenPings) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java index 8f06abcaca0..0d5ca7260b4 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java @@ -21,7 +21,6 @@ package org.elasticsearch.discovery.zen.ping.multicast; import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -119,7 +118,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { try { // we know OSX has bugs in the JVM when creating multiple instances of multicast sockets // causing for "socket close" exceptions when receive and/or crashes @@ -138,7 +137,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { if (multicastChannel != null) { multicastChannel.close(); multicastChannel = null; @@ -146,7 +145,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } public PingResponse[] pingAndWait(TimeValue timeout) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index 6842808c669..f9cf98f86fa 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -23,8 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.collect.Lists; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -142,15 +140,15 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { transportService.removeHandler(ACTION_NAME); ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS); try { @@ -200,7 +198,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen } @Override - public void ping(final PingListener listener, final TimeValue timeout) throws ElasticsearchException { + public void ping(final PingListener listener, final TimeValue timeout) { final SendPingsHandler sendPingsHandler = new SendPingsHandler(pingHandlerIdGenerator.incrementAndGet()); try { receivedResponses.put(sendPingsHandler.id(), sendPingsHandler); diff --git a/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 2b7db7d7937..134a15e3f7d 100644 --- a/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -23,8 +23,6 @@ import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; import org.apache.lucene.store.*; import org.apache.lucene.util.IOUtils; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; diff --git a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index c67c91dd6b9..158a3df5d91 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; diff --git a/src/main/java/org/elasticsearch/gateway/GatewayService.java b/src/main/java/org/elasticsearch/gateway/GatewayService.java index add9ea49f9c..da0b2bcabf8 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -102,7 +102,7 @@ public class GatewayService extends AbstractLifecycleComponent i } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { clusterService.addLast(this); // if we received initial state, see if we can recover within the start phase, so we hold the // node from starting until we recovered properly @@ -117,12 +117,12 @@ public class GatewayService extends AbstractLifecycleComponent i } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { clusterService.remove(this); } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override diff --git a/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index fe7bdd5ef73..178b948f093 100644 --- a/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.*; import org.apache.lucene.util.IOUtils; -import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.logging.ESLogger; diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 77ab900ce90..36372009f87 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -100,7 +100,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA } @Override - protected NodeGatewayMetaState nodeOperation(NodeRequest request) throws ElasticsearchException { + protected NodeGatewayMetaState nodeOperation(NodeRequest request) { try { return new NodeGatewayMetaState(clusterService.localNode(), metaState.loadMetaState()); } catch (Exception e) { diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 4a812784e20..04b606cb09a 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -98,7 +98,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesOperat } @Override - protected NodeGatewayStartedShards nodeOperation(NodeRequest request) throws ElasticsearchException { + protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { try { final ShardId shardId = request.getShardId(); final String indexUUID = request.getIndexUUID(); diff --git a/src/main/java/org/elasticsearch/http/HttpServer.java b/src/main/java/org/elasticsearch/http/HttpServer.java index 6a84b0c04dc..a055196b54b 100644 --- a/src/main/java/org/elasticsearch/http/HttpServer.java +++ b/src/main/java/org/elasticsearch/http/HttpServer.java @@ -88,7 +88,7 @@ public class HttpServer extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { transport.start(); if (logger.isInfoEnabled()) { logger.info("{}", transport.boundAddress()); @@ -97,13 +97,13 @@ public class HttpServer extends AbstractLifecycleComponent { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { nodeService.removeAttribute("http_address"); transport.stop(); } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { transport.close(); } diff --git a/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java b/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java index 693e5049f58..a727d3fed0f 100644 --- a/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java +++ b/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -209,7 +209,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent= 0 : "ongoingRecoveries must be >= 0 but was: " + onGoingRecoveries.get(); @@ -60,7 +60,7 @@ public class RecoveryCounter implements Releasable { } @Override - public void close() throws ElasticsearchException { + public void close() { endRecovery(); } } diff --git a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 30db42f502e..bd75cc289d2 100644 --- a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -214,7 +214,7 @@ public class ShadowEngine extends Engine { } @Override - protected void closeNoLock(String reason) throws ElasticsearchException { + protected void closeNoLock(String reason) { if (isClosed.compareAndSet(false, true)) { try { logger.debug("shadow replica close searcher manager refCount: {}", store.refCount()); diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 63885c4c539..19f3be065e8 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.fielddata; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java index fb0a1ab4385..44d90d9dd13 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java index 1c955efef35..1759067f780 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.CharsRefBuilder; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVAtomicFieldData.java index 80f03b90fe7..c82ac69f282 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVAtomicFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVAtomicFieldData.java @@ -24,7 +24,6 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReader; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Bits; -import java.lang.IllegalStateException; import org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericIndexFieldData.java index 40bc77aaefb..4404ae461df 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVNumericIndexFieldData.java @@ -29,8 +29,6 @@ import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java index 6cc9adbfdf2..1010bdffa0b 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.DocValues; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/DisabledIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/DisabledIndexFieldData.java index 4690a3529b6..425f85e81fa 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/DisabledIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/DisabledIndexFieldData.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.index.LeafReaderContext; -import java.lang.IllegalStateException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.*; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java index a211787039d..ba458df6879 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.fielddata.plain; import com.google.common.collect.ImmutableSet; import org.apache.lucene.index.IndexReader; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java index 62a70b101d0..97b2ea1bae0 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.DocValues; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java index 5c400fb0d73..710ddba04a4 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Bits; -import java.lang.IllegalStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java index 7a4201d45cf..6b4fea41894 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java @@ -39,7 +39,6 @@ import org.apache.lucene.util.PagedBytes; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -427,7 +426,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData closeables = new ArrayList<>(); for (OrdinalMapAndAtomicFieldData fds : atomicFD.values()) { closeables.addAll(Arrays.asList(fds.fieldData)); diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java index 07ec2b77403..18995573ee4 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java @@ -29,7 +29,6 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.FieldData; diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java index e7aeb9d9c0f..0bcb8251b98 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java @@ -23,7 +23,6 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.RandomAccessOrds; import org.apache.lucene.util.Accountable; -import java.lang.IllegalStateException; import org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.FieldData; diff --git a/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 23da1c35682..eb8da964983 100644 --- a/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -24,7 +24,6 @@ import com.google.common.collect.Sets; import org.apache.lucene.index.Term; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -99,7 +98,7 @@ public class ShardGetService extends AbstractIndexShardComponent { } public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) - throws ElasticsearchException { + { currentMetric.inc(); try { long now = System.nanoTime(); @@ -167,7 +166,7 @@ public class ShardGetService extends AbstractIndexShardComponent { return FetchSourceContext.DO_NOT_FETCH_SOURCE; } - public GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) throws ElasticsearchException { + public GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) { fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); boolean loadSource = (gFields != null && gFields.length > 0) || fetchSourceContext.fetchSource(); diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 5c9cea416ec..e38f97f8233 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BitDocIdSet; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java index b820aad9820..58f6bb271d7 100755 --- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -39,7 +39,6 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; diff --git a/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index 3ddb3276b24..b608cf8a551 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -27,8 +27,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.all.AllEntries; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index ae914de0597..7df9d2f179f 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 64b9bbde985..fc91a8ac061 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 0d6e4f4bee3..83b8781af9e 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -28,8 +28,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 75c1e8ac483..38309d729c6 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -31,7 +31,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.ToStringUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 490455431e8..d3802650074 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index d1259d24598..ad5faa81c4e 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index 05015e25793..647e7c3b5d1 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index afaefb4a31e..a600769749a 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index edb3a12291a..d042e1ea661 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -39,7 +39,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index 99478906195..b0d15fac59d 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index 3d9d871f3a8..d5c4812b17d 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -26,7 +26,6 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 634b3c5d943..6b7b10f69c6 100644 --- a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -28,8 +28,6 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index b34d4fea36d..5a32182e1b3 100644 --- a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -29,7 +29,6 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 565f1dcc00b..04e715f9942 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -26,7 +26,6 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; diff --git a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 1abf2d38c66..988152f4726 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -83,7 +82,7 @@ public class IpFieldMapper extends NumberFieldMapper { private static final Pattern pattern = Pattern.compile("\\."); - public static long ipToLong(String ip) throws IllegalArgumentException { + public static long ipToLong(String ip) { try { if (!InetAddresses.isInetAddress(ip)) { throw new IllegalArgumentException("failed to parse ip [" + ip + "], not a valid ip address"); @@ -94,7 +93,7 @@ public class IpFieldMapper extends NumberFieldMapper { } return (Long.parseLong(octets[0]) << 24) + (Integer.parseInt(octets[1]) << 16) + (Integer.parseInt(octets[2]) << 8) + Integer.parseInt(octets[3]); - } catch (Exception e) { + } catch (Exception e) if (e instanceof IllegalArgumentException) { throw (IllegalArgumentException) e; } diff --git a/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java index 2edd773a81c..d8a14abbacc 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper.object; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.mapper.ContentPath; diff --git a/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java index 6ea8d71c4f2..d304d24cdbe 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.merge.policy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.TieredMergePolicy; -import java.lang.IllegalArgumentException; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.store.Store; diff --git a/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java index 26b3ec39b91..4dc5b14b883 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java @@ -71,7 +71,7 @@ public class LogByteSizeMergePolicyProvider extends AbstractMergePolicyProvider< } @Override - public void close() throws ElasticsearchException { + public void close() { indexSettingsService.removeListener(applySettings); } diff --git a/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java index b5949086206..03105b1ccda 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java @@ -64,7 +64,7 @@ public class LogDocMergePolicyProvider extends AbstractMergePolicyProvider docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) throws ElasticsearchException { + static Engine.Create prepareCreate(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) { long startTime = System.nanoTime(); ParsedDocument doc = docMapper.v1().parse(source); if (docMapper.v2() != null) { @@ -470,7 +468,7 @@ public class IndexShard extends AbstractIndexShardComponent { return new Engine.Create(docMapper.v1(), docMapper.v1().uidMapper().term(doc.uid().stringValue()), doc, version, versionType, origin, startTime, canHaveDuplicates, autoGeneratedId); } - public void create(Engine.Create create) throws ElasticsearchException { + public void create(Engine.Create create) { writeAllowed(create.origin()); create = indexingService.preCreate(create); mapperAnalyzer.setType(create.type()); @@ -487,11 +485,11 @@ public class IndexShard extends AbstractIndexShardComponent { indexingService.postCreate(create); } - public Engine.Index prepareIndex(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) throws ElasticsearchException { + public Engine.Index prepareIndex(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) { return prepareIndex(docMapper(source.type()), source, version, versionType, origin, state != IndexShardState.STARTED || canHaveDuplicates); } - static Engine.Index prepareIndex(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) throws ElasticsearchException { + static Engine.Index prepareIndex(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) { long startTime = System.nanoTime(); ParsedDocument doc = docMapper.v1().parse(source); if (docMapper.v2() != null) { @@ -504,7 +502,7 @@ public class IndexShard extends AbstractIndexShardComponent { * Index a document and return whether it was created, as opposed to just * updated. */ - public boolean index(Engine.Index index) throws ElasticsearchException { + public boolean index(Engine.Index index) { writeAllowed(index.origin()); index = indexingService.preIndex(index); mapperAnalyzer.setType(index.type()); @@ -523,13 +521,13 @@ public class IndexShard extends AbstractIndexShardComponent { return created; } - public Engine.Delete prepareDelete(String type, String id, long version, VersionType versionType, Engine.Operation.Origin origin) throws ElasticsearchException { + public Engine.Delete prepareDelete(String type, String id, long version, VersionType versionType, Engine.Operation.Origin origin) { long startTime = System.nanoTime(); final DocumentMapper documentMapper = docMapper(type).v1(); return new Engine.Delete(type, id, documentMapper.uidMapper().term(type, id), version, versionType, origin, startTime, false); } - public void delete(Engine.Delete delete) throws ElasticsearchException { + public void delete(Engine.Delete delete) { writeAllowed(delete.origin()); delete = indexingService.preDelete(delete); try { @@ -545,11 +543,11 @@ public class IndexShard extends AbstractIndexShardComponent { indexingService.postDelete(delete); } - public Engine.DeleteByQuery prepareDeleteByQuery(BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... types) throws ElasticsearchException { + public Engine.DeleteByQuery prepareDeleteByQuery(BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... types) { return prepareDeleteByQuery(queryParserService, mapperService, indexAliasesService, indexCache, source, filteringAliases, origin, types); } - static Engine.DeleteByQuery prepareDeleteByQuery(IndexQueryParserService queryParserService, MapperService mapperService, IndexAliasesService indexAliasesService, IndexCache indexCache, BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... types) throws ElasticsearchException { + static Engine.DeleteByQuery prepareDeleteByQuery(IndexQueryParserService queryParserService, MapperService mapperService, IndexAliasesService indexAliasesService, IndexCache indexCache, BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... types) { long startTime = System.nanoTime(); if (types == null) { types = Strings.EMPTY_ARRAY; @@ -565,12 +563,12 @@ public class IndexShard extends AbstractIndexShardComponent { return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, origin, startTime, types); } - public Engine.GetResult get(Engine.Get get) throws ElasticsearchException { + public Engine.GetResult get(Engine.Get get) { readAllowed(); return engine().get(get); } - public void refresh(String source) throws ElasticsearchException { + public void refresh(String source) { verifyNotClosed(); if (logger.isTraceEnabled()) { logger.trace("refresh with source: {}", source); @@ -686,7 +684,7 @@ public class IndexShard extends AbstractIndexShardComponent { return completionStats; } - public void flush(FlushRequest request) throws ElasticsearchException { + public void flush(FlushRequest request) { // we allows flush while recovering, since we allow for operations to happen // while recovering, and we want to keep the translog at bay (up to deletes, which // we don't gc). @@ -699,7 +697,7 @@ public class IndexShard extends AbstractIndexShardComponent { flushMetric.inc(System.nanoTime() - time); } - public void optimize(OptimizeRequest optimize) throws ElasticsearchException { + public void optimize(OptimizeRequest optimize) { verifyStarted(); if (logger.isTraceEnabled()) { logger.trace("optimize with {}", optimize); @@ -780,7 +778,7 @@ public class IndexShard extends AbstractIndexShardComponent { } /** called before starting to copy index files over */ - public void prepareForIndexRecovery() throws ElasticsearchException { + public void prepareForIndexRecovery() { if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); } @@ -803,14 +801,14 @@ public class IndexShard extends AbstractIndexShardComponent { /** * After the store has been recovered, we need to start the engine in order to apply operations */ - public Map performTranslogRecovery() throws ElasticsearchException { + public Map performTranslogRecovery() { final Map recoveredTypes = internalPerformTranslogRecovery(false); assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); return recoveredTypes; } - private Map internalPerformTranslogRecovery(boolean skipTranslogRecovery) throws ElasticsearchException { + private Map internalPerformTranslogRecovery(boolean skipTranslogRecovery) { if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); } @@ -833,7 +831,7 @@ public class IndexShard extends AbstractIndexShardComponent { * the replay of the transaction log which is required in cases where we restore a previous index or recover from * a remote peer. */ - public void skipTranslogRecovery() throws ElasticsearchException { + public void skipTranslogRecovery() { assert engineUnsafe() == null : "engine was already created"; Map recoveredTypes = internalPerformTranslogRecovery(true); assert recoveredTypes.isEmpty(); diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardState.java b/src/main/java/org/elasticsearch/index/shard/IndexShardState.java index a97cc450f97..fa4122cabba 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShardState.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShardState.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.shard; -import java.lang.IllegalArgumentException; /** * @@ -51,7 +50,7 @@ public enum IndexShardState { return this.id; } - public static IndexShardState fromId(byte id) throws IllegalArgumentException { + public static IndexShardState fromId(byte id) { if (id < 0 || id >= IDS.length) { throw new IllegalArgumentException("No mapping for id [" + id + "]"); } diff --git a/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index 64f250678ac..ba7dcdd3976 100644 --- a/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.shard; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/src/main/java/org/elasticsearch/index/shard/ShardPath.java index 102d565d679..870283017c8 100644 --- a/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.shard; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLogger; diff --git a/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index 7d63b2cdd17..b2dbc1022cf 100644 --- a/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.shard; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.index.aliases.IndexAliasesService; import org.elasticsearch.index.cache.IndexCache; @@ -89,7 +88,7 @@ public class TranslogRecoveryPerformer { * Performs a single recovery operation, and returns the indexing operation (or null if its not an indexing operation) * that can then be used for mapping updates (for example) if needed. */ - public void performRecoveryOperation(Engine engine, Translog.Operation operation) throws ElasticsearchException { + public void performRecoveryOperation(Engine engine, Translog.Operation operation) { try { switch (operation.opType()) { case CREATE: diff --git a/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java index 48c5dc6f521..566016b3da1 100644 --- a/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.*; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.Settings; /** diff --git a/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java index 9c81bf0415a..6d30e81c091 100644 --- a/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.similarities.*; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; diff --git a/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java index 68eaccc741b..4741247080c 100644 --- a/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.similarities.*; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; diff --git a/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java b/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java index 4a1c4578cdb..e2bc92adb3f 100644 --- a/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java +++ b/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Scopes; import org.elasticsearch.common.inject.assistedinject.FactoryProvider; diff --git a/src/main/java/org/elasticsearch/index/store/IndexStore.java b/src/main/java/org/elasticsearch/index/store/IndexStore.java index 0cae510f87b..cfef4f594be 100644 --- a/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ b/src/main/java/org/elasticsearch/index/store/IndexStore.java @@ -99,7 +99,7 @@ public class IndexStore extends AbstractIndexComponent implements Closeable { } @Override - public void close() throws ElasticsearchException { + public void close() { settingsService.removeListener(applySettings); } diff --git a/src/main/java/org/elasticsearch/index/store/Store.java b/src/main/java/org/elasticsearch/index/store/Store.java index 166bd7d96c3..71dd77c690e 100644 --- a/src/main/java/org/elasticsearch/index/store/Store.java +++ b/src/main/java/org/elasticsearch/index/store/Store.java @@ -27,7 +27,6 @@ import org.apache.lucene.index.*; import org.apache.lucene.store.*; import org.apache.lucene.util.*; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; diff --git a/src/main/java/org/elasticsearch/index/translog/Translog.java b/src/main/java/org/elasticsearch/index/translog/Translog.java index bcf707c795b..63970ab3aaf 100644 --- a/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -23,8 +23,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java index be2bd5de4b0..bff79bb5dbf 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java @@ -145,7 +145,7 @@ public class FsChannelSnapshot implements Translog.Snapshot { } @Override - public void close() throws ElasticsearchException { + public void close() { if (closed.compareAndSet(false, true)) { channelReference.decRef(); } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java index 58ce5ab5807..15b76333b7a 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java @@ -536,7 +536,7 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog } @Override - public void close() throws ElasticsearchException { + public void close() { try { input.close(); } catch (IOException ex) { diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java index 775c5cd7c47..751bfc3ec5b 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.translog.fs; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; @@ -49,7 +48,7 @@ public interface FsTranslogFile extends Closeable { public abstract FsTranslogFile create(ShardId shardId, long id, ChannelReference raf, int bufferSize) throws IOException; - public static Type fromString(String type) throws IllegalArgumentException { + public static Type fromString(String type) { if (SIMPLE.name().equalsIgnoreCase(type)) { return SIMPLE; } else if (BUFFERED.name().equalsIgnoreCase(type)) { diff --git a/src/main/java/org/elasticsearch/indices/IndicesService.java b/src/main/java/org/elasticsearch/indices/IndicesService.java index 579eca91b72..663fe402729 100644 --- a/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -25,8 +25,6 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -124,11 +122,11 @@ public class IndicesService extends AbstractLifecycleComponent i } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { ImmutableSet indices = ImmutableSet.copyOf(this.indices.keySet()); final CountDownLatch latch = new CountDownLatch(indices.size()); @@ -160,7 +158,7 @@ public class IndicesService extends AbstractLifecycleComponent i } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { IOUtils.closeWhileHandlingException(injector.getInstance(RecoverySettings.class), indicesAnalysisService); } @@ -280,7 +278,7 @@ public class IndicesService extends AbstractLifecycleComponent i return indexService; } - public synchronized IndexService createIndex(String sIndexName, @IndexSettings Settings settings, String localNodeId) throws ElasticsearchException { + public synchronized IndexService createIndex(String sIndexName, @IndexSettings Settings settings, String localNodeId) { if (!lifecycle.started()) { throw new IllegalStateException("Can't create an index [" + sIndexName + "], node is closed"); } @@ -342,11 +340,11 @@ public class IndicesService extends AbstractLifecycleComponent i * @param index the index to remove * @param reason the high level reason causing this removal */ - public void removeIndex(String index, String reason) throws ElasticsearchException { + public void removeIndex(String index, String reason) { removeIndex(index, reason, false); } - private void removeIndex(String index, String reason, boolean delete) throws ElasticsearchException { + private void removeIndex(String index, String reason, boolean delete) { try { final IndexService indexService; final Injector indexInjector; @@ -751,4 +749,4 @@ public class IndicesService extends AbstractLifecycleComponent i return deleteList.size(); } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java b/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java index aaa81a68bec..cf9db03463d 100644 --- a/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java +++ b/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java @@ -57,14 +57,14 @@ public abstract class CircuitBreakerService extends AbstractLifecycleComponent= STAGES.length) { throw new IllegalArgumentException("No mapping for id [" + id + "]"); } @@ -115,7 +113,7 @@ public class RecoveryState implements ToXContent, Streamable { return id; } - public static Type fromId(byte id) throws IllegalArgumentException { + public static Type fromId(byte id) { if (id < 0 || id >= TYPES.length) { throw new IllegalArgumentException("No mapping for id [" + id + "]"); } diff --git a/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java index 0fc7c209111..bacd084d058 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java +++ b/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.common.logging.ESLogger; @@ -46,7 +45,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { } @Override - public void phase1(SnapshotIndexCommit snapshot) throws ElasticsearchException { + public void phase1(SnapshotIndexCommit snapshot) { if (request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary()) { // here we simply fail the primary shard since we can't move them (have 2 writers open at the same time) // by failing the shard we play safe and just go through the entire reallocation procedure of the primary @@ -60,7 +59,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { @Override - protected int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException { + protected int sendSnapshot(Translog.Snapshot snapshot) { logger.trace("{} recovery [phase3] to {}: skipping transaction log operations for file sync", shard.shardId(), request.targetNode()); return 0; } diff --git a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index ab1ba9232aa..f95cfd8ece9 100644 --- a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -110,7 +110,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio } @Override - protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) throws ElasticsearchException { + protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) { if (request.unallocated) { IndexService indexService = indicesService.indexService(request.shardId.index().name()); if (indexService == null) { diff --git a/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java index f70a74521b3..9adbeb88280 100644 --- a/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java +++ b/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java @@ -91,12 +91,12 @@ public class IndicesTTLService extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { jvmMonitorService.start(); } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { jvmMonitorService.stop(); } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { jvmMonitorService.close(); } } diff --git a/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 911d6a6445a..861297f3482 100644 --- a/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -20,7 +20,6 @@ package org.elasticsearch.monitor.jvm; import org.apache.lucene.util.CollectionUtil; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.unit.TimeValue; diff --git a/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java b/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java index 16bcd2077b5..64b0eeeefe3 100644 --- a/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java +++ b/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java @@ -109,7 +109,7 @@ public class JvmMonitorService extends AbstractLifecycleComponent tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings); diff --git a/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java b/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java index 618703af3aa..2f3063a484f 100644 --- a/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java +++ b/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java @@ -107,7 +107,7 @@ class MultiDocumentPercolatorIndex implements PercolatorIndex { } @Override - public void close() throws ElasticsearchException { + public void close() { try { this.reader().close(); rootDocMemoryIndex.reset(); diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 958b1a55963..4c79c2164d8 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -36,7 +36,6 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.percolate.PercolateShardRequest; @@ -247,7 +246,7 @@ public class PercolatorService extends AbstractComponent { } } - private ParsedDocument parseRequest(IndexService documentIndexService, PercolateShardRequest request, PercolateContext context) throws ElasticsearchException { + private ParsedDocument parseRequest(IndexService documentIndexService, PercolateShardRequest request, PercolateContext context) { BytesReference source = request.source(); if (source == null || source.length() == 0) { return null; diff --git a/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java b/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java index 7df3d66e948..165193d35f0 100644 --- a/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java +++ b/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java @@ -77,7 +77,7 @@ class SingleDocumentPercolatorIndex implements PercolatorIndex { } @Override - public void close() throws ElasticsearchException { + public void close() { try { this.reader().close(); memoryIndex.reset(); diff --git a/src/main/java/org/elasticsearch/plugins/PluginsService.java b/src/main/java/org/elasticsearch/plugins/PluginsService.java index a260a997ced..3d4b9f86e8a 100644 --- a/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -25,7 +25,6 @@ import com.google.common.collect.*; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.PluginInfo; import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo; diff --git a/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 08454424ab0..e88ff922d72 100644 --- a/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -22,7 +22,6 @@ package org.elasticsearch.repositories; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; diff --git a/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index d7018947ac3..180f6595521 100644 --- a/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -158,7 +158,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { for (RestFilter filter : filters) { filter.close(); } diff --git a/src/main/java/org/elasticsearch/rest/RestFilter.java b/src/main/java/org/elasticsearch/rest/RestFilter.java index 398f157d7b5..dd86c026b75 100644 --- a/src/main/java/org/elasticsearch/rest/RestFilter.java +++ b/src/main/java/org/elasticsearch/rest/RestFilter.java @@ -37,7 +37,7 @@ public abstract class RestFilter implements Closeable { } @Override - public void close() throws ElasticsearchException { + public void close() { // a no op } diff --git a/src/main/java/org/elasticsearch/rest/RestRequest.java b/src/main/java/org/elasticsearch/rest/RestRequest.java index d54195068be..6f2fd926ab9 100644 --- a/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.ContextHolder; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java index 9c697afdfe1..2019b71426a 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices.alias; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.client.Client; diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java index 0e0e2736b0d..4965f6b218d 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.rest.action.admin.indices.alias.put; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index 77fd1b89fd3..4a5e47b9664 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices.analyze; import com.google.common.collect.Lists; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.client.Client; @@ -84,7 +83,7 @@ public class RestAnalyzeAction extends BaseRestHandler { client.admin().indices().analyze(analyzeRequest, new RestToXContentListener(channel)); } - public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest) throws IllegalArgumentException { + public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest) { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException("Malforrmed content, must start with an object"); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java index 678965adc85..7e4e56710b7 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.rest.action.admin.indices.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; -import java.lang.IllegalStateException; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; @@ -162,4 +161,4 @@ public class RestGetIndicesAction extends BaseRestHandler { static final XContentBuilderString WARMERS = new XContentBuilderString("warmers"); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java index a5d5b89d1a0..58dcfa50973 100644 --- a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java +++ b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.count; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java index aeb903aafd2..ce306c6563d 100644 --- a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java +++ b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.rest.action.explain; import org.apache.lucene.search.Explanation; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.support.QuerySourceBuilder; diff --git a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index c519238ea8c..d95ef3e9498 100644 --- a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.index; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; diff --git a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java index 46504cbf6bf..d63a39ac555 100644 --- a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.rest.action.script; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse; diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java index 4fc2c2f00ad..5efd2584b38 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.search; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.client.Client; @@ -78,7 +77,7 @@ public class RestClearScrollAction extends BaseRestHandler { return Strings.splitStringByCommaToArray(scrollIds); } - public static void buildFromContent(BytesReference content, ClearScrollRequest clearScrollRequest) throws IllegalArgumentException { + public static void buildFromContent(BytesReference content, ClearScrollRequest clearScrollRequest) { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException("Malformed content, must start with an object"); diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index b5037204639..9c3e85a2e94 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.search; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index 86549b93fad..15de56265bc 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.search; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Client; @@ -83,7 +82,7 @@ public class RestSearchScrollAction extends BaseRestHandler { client.searchScroll(searchScrollRequest, new RestStatusToXContentListener(channel)); } - public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) throws IllegalArgumentException { + public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException("Malforrmed content, must start with an object"); diff --git a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java index 4af6e7b59e6..9f9c3946b36 100644 --- a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java +++ b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.rest.action.suggest; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.support.IndicesOptions; diff --git a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java index 4a17a1b17dc..8bd59f1e8a9 100644 --- a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java +++ b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.support; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; diff --git a/src/main/java/org/elasticsearch/river/RiversManager.java b/src/main/java/org/elasticsearch/river/RiversManager.java index 30d67d4cc2f..42d7021a5f3 100644 --- a/src/main/java/org/elasticsearch/river/RiversManager.java +++ b/src/main/java/org/elasticsearch/river/RiversManager.java @@ -46,21 +46,21 @@ public class RiversManager extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { riversRouter.start(); riversService.start(); clusterService.start(); } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { riversRouter.stop(); clusterService.stop(); riversService.stop(); } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { riversRouter.close(); clusterService.close(); riversService.close(); diff --git a/src/main/java/org/elasticsearch/river/RiversService.java b/src/main/java/org/elasticsearch/river/RiversService.java index 0d7863a6468..ed7369d8ad0 100644 --- a/src/main/java/org/elasticsearch/river/RiversService.java +++ b/src/main/java/org/elasticsearch/river/RiversService.java @@ -88,11 +88,11 @@ public class RiversService extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { ImmutableSet indices = ImmutableSet.copyOf(this.rivers.keySet()); final CountDownLatch latch = new CountDownLatch(indices.size()); for (final RiverName riverName : indices) { @@ -117,10 +117,10 @@ public class RiversService extends AbstractLifecycleComponent { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } - public synchronized void createRiver(RiverName riverName, Map settings) throws ElasticsearchException { + public synchronized void createRiver(RiverName riverName, Map settings) { if (riversInjectors.containsKey(riverName)) { logger.warn("ignoring river [{}][{}] creation, already exists", riverName.type(), riverName.name()); return; @@ -182,7 +182,7 @@ public class RiversService extends AbstractLifecycleComponent { } } - public synchronized void closeRiver(RiverName riverName) throws ElasticsearchException { + public synchronized void closeRiver(RiverName riverName) { Injector riverInjector; River river; synchronized (this) { diff --git a/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java b/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java index 49488c889f2..11a0350a40d 100644 --- a/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java +++ b/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java @@ -59,12 +59,12 @@ public class RiverClusterService extends AbstractLifecycleComponent imple } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override diff --git a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java index e91567d8e3e..70bf27b82e4 100644 --- a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java @@ -22,7 +22,6 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableMap; import org.apache.lucene.index.LeafReaderContext; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -108,4 +107,4 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri public void scriptRemoved(CompiledScript script) { // Nothing to do here } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/script/Script.java b/src/main/java/org/elasticsearch/script/Script.java index 23cc02f529e..655ff82c08e 100644 --- a/src/main/java/org/elasticsearch/script/Script.java +++ b/src/main/java/org/elasticsearch/script/Script.java @@ -21,7 +21,6 @@ package org.elasticsearch.script; import java.util.Map; -import java.lang.IllegalArgumentException; import static org.elasticsearch.script.ScriptService.ScriptType; diff --git a/src/main/java/org/elasticsearch/script/ScriptContext.java b/src/main/java/org/elasticsearch/script/ScriptContext.java index 7fbaaf84684..a12fc85a53c 100644 --- a/src/main/java/org/elasticsearch/script/ScriptContext.java +++ b/src/main/java/org/elasticsearch/script/ScriptContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.script; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; /** diff --git a/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java b/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java index 643a06ad4bf..614e41e1c11 100644 --- a/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java +++ b/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java @@ -23,7 +23,6 @@ import com.google.common.collect.ImmutableCollection; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import java.util.Map; diff --git a/src/main/java/org/elasticsearch/script/ScriptMode.java b/src/main/java/org/elasticsearch/script/ScriptMode.java index b7fb7474e73..b35dda716e4 100644 --- a/src/main/java/org/elasticsearch/script/ScriptMode.java +++ b/src/main/java/org/elasticsearch/script/ScriptMode.java @@ -19,7 +19,6 @@ package org.elasticsearch.script; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Booleans; import java.util.Locale; diff --git a/src/main/java/org/elasticsearch/script/ScriptModes.java b/src/main/java/org/elasticsearch/script/ScriptModes.java index 36c3c457697..7411e3a0592 100644 --- a/src/main/java/org/elasticsearch/script/ScriptModes.java +++ b/src/main/java/org/elasticsearch/script/ScriptModes.java @@ -21,7 +21,6 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService.ScriptType; diff --git a/src/main/java/org/elasticsearch/script/ScriptModule.java b/src/main/java/org/elasticsearch/script/ScriptModule.java index 92f15cf2f65..2cc12e13d85 100644 --- a/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -21,7 +21,6 @@ package org.elasticsearch.script; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.MapBinder; import org.elasticsearch.common.inject.multibindings.Multibinder; diff --git a/src/main/java/org/elasticsearch/script/ScriptService.java b/src/main/java/org/elasticsearch/script/ScriptService.java index 53173e639ca..8e363068c30 100644 --- a/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/src/main/java/org/elasticsearch/script/ScriptService.java @@ -27,8 +27,6 @@ import com.google.common.cache.RemovalNotification; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; diff --git a/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java b/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java index d37efc68201..9f4067a5997 100644 --- a/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java +++ b/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.Scorer; -import java.lang.IllegalStateException; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.SearchScript; diff --git a/src/main/java/org/elasticsearch/search/MultiValueMode.java b/src/main/java/org/elasticsearch/search/MultiValueMode.java index 30f2d7504ab..839d4714dbe 100644 --- a/src/main/java/org/elasticsearch/search/MultiValueMode.java +++ b/src/main/java/org/elasticsearch/search/MultiValueMode.java @@ -27,7 +27,6 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import java.lang.IllegalArgumentException; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; diff --git a/src/main/java/org/elasticsearch/search/SearchPhase.java b/src/main/java/org/elasticsearch/search/SearchPhase.java index 30013b5471c..bdab128fecb 100644 --- a/src/main/java/org/elasticsearch/search/SearchPhase.java +++ b/src/main/java/org/elasticsearch/search/SearchPhase.java @@ -36,5 +36,5 @@ public interface SearchPhase { */ void preProcess(SearchContext context); - void execute(SearchContext context) throws ElasticsearchException; + void execute(SearchContext context); } diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java index dfbfb84e0fe..369c2cb499b 100644 --- a/src/main/java/org/elasticsearch/search/SearchService.java +++ b/src/main/java/org/elasticsearch/search/SearchService.java @@ -29,7 +29,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchType; @@ -193,23 +192,23 @@ public class SearchService extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { for (final SearchContext context : activeContexts.values()) { freeContext(context.id()); } } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { doStop(); FutureUtils.cancel(keepAliveReaper); } - public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws ElasticsearchException { + public DfsSearchResult executeDfsPhase(ShardSearchRequest request) { final SearchContext context = createAndPutContext(request); try { contextProcessing(context); @@ -225,7 +224,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QuerySearchResult executeScan(ShardSearchRequest request) throws ElasticsearchException { + public QuerySearchResult executeScan(ShardSearchRequest request) { final SearchContext context = createAndPutContext(request); final int originalSize = context.size(); try { @@ -256,7 +255,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) throws ElasticsearchException { + public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { @@ -297,7 +296,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) throws ElasticsearchException { + public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) { final SearchContext context = createAndPutContext(request); try { context.indexShard().searchService().onPreQueryPhase(context); @@ -328,7 +327,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) throws ElasticsearchException { + public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) { final SearchContext context = findContext(request.id()); try { context.indexShard().searchService().onPreQueryPhase(context); @@ -349,7 +348,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QuerySearchResult executeQueryPhase(QuerySearchRequest request) throws ElasticsearchException { + public QuerySearchResult executeQueryPhase(QuerySearchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { @@ -381,7 +380,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws ElasticsearchException { + public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) { final SearchContext context = createAndPutContext(request); contextProcessing(context); try { @@ -419,7 +418,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) throws ElasticsearchException { + public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { @@ -464,7 +463,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) throws ElasticsearchException { + public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { @@ -503,7 +502,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public FetchSearchResult executeFetchPhase(ShardFetchRequest request) throws ElasticsearchException { + public FetchSearchResult executeFetchPhase(ShardFetchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { @@ -540,7 +539,7 @@ public class SearchService extends AbstractLifecycleComponent { return context; } - final SearchContext createAndPutContext(ShardSearchRequest request) throws ElasticsearchException { + final SearchContext createAndPutContext(ShardSearchRequest request) { SearchContext context = createContext(request, null); boolean success = false; try { @@ -555,7 +554,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws ElasticsearchException { + final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) { IndexService indexService = indicesService.indexServiceSafe(request.index()); IndexShard indexShard = indexService.shardSafe(request.shardId()); diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 118b37e386d..a687eb23bd7 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -93,7 +93,7 @@ public class AggregationPhase implements SearchPhase { } @Override - public void execute(SearchContext context) throws ElasticsearchException { + public void execute(SearchContext context) { if (context.aggregations() == null) { context.queryResult().aggregations(null); return; diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 08c9cef997d..99aa5f6aefd 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations; -import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index 091c1361634..ceefcae41b6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -25,7 +25,6 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index 035db21d59d..f7c4fc9809b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations; -import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import java.util.List; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index f4e2c169c9c..8a379d1ad82 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; -import java.lang.IllegalStateException; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongHash; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java index 51b7e968f0f..95bc28ef35a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopScoreDocCollector; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollector; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java index 9f0ce6c3c33..0b51a53be81 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -74,7 +73,7 @@ public abstract class DeferringBucketCollector extends BucketCollector { } @Override - public void close() throws ElasticsearchException { + public void close() { in.close(); } @@ -128,4 +127,4 @@ public abstract class DeferringBucketCollector extends BucketCollector { } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java index a212d107083..29f39539293 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations.bucket; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index 0c70f787247..2f1864458d7 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; -import java.lang.IllegalStateException; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.util.LongArray; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java index 557b6b85971..9f382d86906 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java index 36b32d08d39..da2f27fe852 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.global; import org.apache.lucene.index.LeafReaderContext; -import java.lang.IllegalStateException; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index fb1d2973201..518240cd1cf 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -22,7 +22,6 @@ import com.google.common.collect.Lists; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.PriorityQueue; -import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java index 5a657154684..be81fc33719 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java index 3f967400d9b..9774dddd391 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java index 750b6740260..c813a4ec084 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector; import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java index 6986fef2587..407ba512f7f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java @@ -27,7 +27,6 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.search.DiversifiedTopDocsCollector; import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; -import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 87897b12e8f..d4a078c2882 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.sampler; import org.apache.lucene.index.LeafReaderContext; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index adf97c5a65e..8f84be12770 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.index.FilterableTermsEnum; @@ -253,7 +252,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } @Override - public void close() throws ElasticsearchException { + public void close() { try { if (termsEnum instanceof Releasable) { ((Releasable) termsEnum).close(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java index be5de56ec4a..961ca04872b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.significant; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java index c50893877de..a8f84c8070a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java index f57b1d5ba78..4f12277ca0f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.InternalAggregation; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 93985a39d22..b1e67ca87c2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -22,7 +22,6 @@ import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Maps; import com.google.common.collect.Multimap; -import java.lang.IllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 3d6c47b63b8..7207c67e742 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.lucene.search.IndexSearcher; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsBuilder.java index adf05d96bae..1125abdc826 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsBuilder.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.lucene.util.automaton.RegExp; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.ValuesSourceAggregationBuilder; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java index 6f92bb85970..c0488011a35 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java @@ -34,7 +34,6 @@ import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.support.ValuesSource; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java index e6659358b64..5fac71fc6f7 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations.metrics; -import java.lang.IllegalArgumentException; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.util.List; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java index e4c2acce93c..c241d690765 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java @@ -188,7 +188,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue } @Override - public void close() throws ElasticsearchException { + public void close() { // no-op } } @@ -218,7 +218,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue } @Override - public void close() throws ElasticsearchException { + public void close() { // no-op } @@ -295,7 +295,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(visitedOrds); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java index 38661326818..b21bd5224c3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java @@ -406,7 +406,7 @@ public final class HyperLogLogPlusPlus implements Releasable { } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(runLens, hashSet.sizes); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java index 972ab7eacd5..1d32f7b52eb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.geobounds; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java index 31bc8d8ac27..87f12155e30 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index d1508174a64..6a076678d4f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.scripted; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java index d0af5fa671b..ca8168bc87c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.stats; import org.apache.lucene.index.LeafReaderContext; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java index b5863d8e352..b8cca425ab4 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.stats.extended; import org.apache.lucene.index.LeafReaderContext; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java index 9bc2b42f2a5..562a6738667 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java @@ -22,7 +22,6 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; -import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java b/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java index db4b9590036..14d6c5e4a7b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.support; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationExecutionException; diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java b/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java index 7f42d0b684e..97699c37b21 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations.support.format; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index b77f86432ca..892cc2085ae 100644 --- a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -24,7 +24,6 @@ import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchGenerationException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.client.Requests; import org.elasticsearch.common.Nullable; diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index b02ce1584a1..1e072a4916d 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -28,8 +28,6 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java index cbb8615da7f..92968309fcc 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java @@ -116,9 +116,9 @@ public interface FetchSubPhase { /** * Executes the hit level phase, with a reader and doc id (note, its a low level reader, and the matching doc). */ - void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException; + void hitExecute(SearchContext context, HitContext hitContext); boolean hitsExecutionNeeded(SearchContext context); - void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException; + void hitsExecute(SearchContext context, InternalSearchHit[] hits); } diff --git a/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java index 02bdfbe3cd7..026e421a589 100644 --- a/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java @@ -47,7 +47,7 @@ public class ExplainFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -56,7 +56,7 @@ public class ExplainFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { try { final int topLevelDocId = hitContext.hit().docId(); Explanation explanation = context.searcher().explain(context.query(), topLevelDocId); diff --git a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java index 2fcf23c3ee3..922d8ce11e7 100644 --- a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java @@ -61,7 +61,7 @@ public class FieldDataFieldsFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -70,7 +70,7 @@ public class FieldDataFieldsFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { for (FieldDataFieldsContext.FieldDataField field : context.fieldDataFields().fields()) { if (hitContext.hit().fieldsOrNull() == null) { hitContext.hit().fields(new HashMap(2)); diff --git a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java index 7951e52bc18..3c45148cdfa 100644 --- a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.fetch.fielddata; -import java.lang.IllegalStateException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.SearchContext; diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java index 74d0215ccfb..2a36797fcc8 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java @@ -77,7 +77,7 @@ public class InnerHitsFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { Map results = new HashMap<>(); for (Map.Entry entry : context.innerHits().getInnerHits().entrySet()) { InnerHitsContext.BaseInnerHits innerHits = entry.getValue(); @@ -117,7 +117,7 @@ public class InnerHitsFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } // To get around cyclic dependency issue diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java index 931a00df7d5..2642b7d862a 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.fetch.innerhits; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; diff --git a/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java index 8e6197eae0b..50ed21776ba 100644 --- a/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java @@ -54,7 +54,7 @@ public class MatchedQueriesFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -64,7 +64,7 @@ public class MatchedQueriesFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { List matchedQueries = Lists.newArrayListWithCapacity(2); try { diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java index b43cf006a60..05ec51efa77 100644 --- a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.fetch.script; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.search.SearchHitField; @@ -62,7 +61,7 @@ public class ScriptFieldsFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -71,7 +70,7 @@ public class ScriptFieldsFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { for (ScriptFieldsContext.ScriptField scriptField : context.scriptFields().fields()) { LeafSearchScript leafScript; try { diff --git a/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java index 76106861c80..445d6801e59 100644 --- a/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java @@ -56,7 +56,7 @@ public class FetchSourceSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -65,7 +65,7 @@ public class FetchSourceSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { FetchSourceContext fetchSourceContext = context.fetchSourceContext(); assert fetchSourceContext.fetchSource(); if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) { diff --git a/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java index 1c2f7d4c846..6a5264dd625 100644 --- a/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java @@ -49,7 +49,7 @@ public class VersionFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -58,7 +58,7 @@ public class VersionFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { // it might make sense to cache the TermDocs on a shared fetch context and just skip here) // it is going to mean we work on the high level multi reader and not the lower level reader as is // the case below... diff --git a/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java index 5a6a934d184..161cdd490f1 100644 --- a/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java +++ b/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java @@ -22,7 +22,6 @@ import com.google.common.collect.Maps; import org.apache.lucene.search.highlight.Encoder; import org.apache.lucene.search.vectorhighlight.*; import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.StringText; diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java b/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java index a80357a18f4..cd3c12591f7 100644 --- a/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java +++ b/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java @@ -23,7 +23,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; @@ -65,7 +64,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -74,7 +73,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { Map highlightFields = newHashMap(); for (SearchContextHighlight.Field field : context.highlight().fields()) { List fieldNamesToHighlight; diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java index d05c35988e8..c4d8aa80ef5 100644 --- a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java +++ b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.highlight; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.IndexQueryParserService; diff --git a/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java index d1de1f5de8a..d7e3580ab4d 100644 --- a/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java +++ b/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java @@ -26,7 +26,6 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.*; import org.apache.lucene.util.CollectionUtil; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; diff --git a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java index 8ad5a060a73..8b227e1d224 100644 --- a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java +++ b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java @@ -41,7 +41,6 @@ import org.apache.lucene.search.postingshighlight.WholeBreakIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.UnicodeUtil; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.text.StringText; diff --git a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index e4b7070b8c3..f0839688761 100644 --- a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -207,7 +207,7 @@ public class DefaultSearchContext extends SearchContext { } @Override - public void doClose() throws ElasticsearchException { + public void doClose() { if (scanContext != null) { scanContext.clear(); } diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java index 4c7a1906723..7ae314e4756 100644 --- a/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java +++ b/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.lookup; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ScriptDocValues; diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java index ccc81a1b684..52e0872742a 100644 --- a/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java +++ b/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java @@ -22,7 +22,6 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import org.apache.lucene.index.LeafReader; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; diff --git a/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java b/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java index 1d39b432008..fc4b64fe9ac 100644 --- a/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java +++ b/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.query; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.SearchContext; diff --git a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index 9f63a59cd6b..f83a6030e7a 100644 --- a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -24,8 +24,6 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.ParsedQuery; diff --git a/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java b/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java index ecabfe6856f..7f9f2725fbc 100644 --- a/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java +++ b/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.rescore; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; diff --git a/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index 1c8f8bab840..88d2b0aae60 100644 --- a/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -54,7 +54,7 @@ public class RescorePhase extends AbstractComponent implements SearchPhase { } @Override - public void execute(SearchContext context) throws ElasticsearchException { + public void execute(SearchContext context) { try { TopDocs topDocs = context.queryResult().topDocs(); for (RescoreSearchContext ctx : context.rescore()) { diff --git a/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index fa8fb679097..9e3f86dcf72 100644 --- a/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.sort; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.FilterBuilder; diff --git a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java index e78d90be38f..4993c426629 100644 --- a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitSet; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoDistance.FixedSourceDistance; diff --git a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index bb7656a9ebc..4723f427dbb 100644 --- a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.BitDocIdSetFilter; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lucene.search.Queries; diff --git a/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/src/main/java/org/elasticsearch/search/suggest/Suggest.java index f4057cacc0c..8a1f5f12636 100644 --- a/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java b/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java index 652d7e3a7ed..5399820dd94 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.lang.IllegalArgumentException; import org.elasticsearch.client.Requests; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java b/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java index 2a25215eb9a..edfe04108a0 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java b/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java index 05023ba0f42..209c7b68ce7 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java @@ -66,7 +66,7 @@ public class SuggestPhase extends AbstractComponent implements SearchPhase { } @Override - public void execute(SearchContext context) throws ElasticsearchException { + public void execute(SearchContext context) { final SuggestionSearchContext suggest = context.suggest(); if (suggest == null) { return; diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java b/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java index c28201bfd92..aaa37ecbdfe 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java @@ -29,7 +29,6 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.automaton.LevenshteinAutomata; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.FastCharArrayReader; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java b/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java index 59aecba1e06..2cb36f53914 100644 --- a/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java +++ b/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import java.util.LinkedHashMap; import java.util.Map; diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java b/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java index 88be9d2a22f..5ffe9501dc9 100644 --- a/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java +++ b/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java @@ -44,7 +44,6 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -import java.lang.IllegalStateException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.mapper.core.CompletionFieldMapper; diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java index bbf65de6a45..05a14291cad 100644 --- a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java +++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.suggest.completion; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.Fuzziness; diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java index 9f615eb9f6d..d97f7cf44e5 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.spell.SuggestWord; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; -import java.lang.IllegalArgumentException; import org.elasticsearch.search.suggest.SuggestUtils; import java.io.IOException; diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 18e974336b5..7905d538848 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -22,7 +22,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index a66c715f3bc..ef320eb60e5 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.suggest.phrase; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java index cc20d356e0a..7115b7a629a 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionContext.java @@ -25,7 +25,6 @@ import java.util.Map; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.search.suggest.DirectSpellcheckerSettings; @@ -230,4 +229,4 @@ class PhraseSuggestionContext extends SuggestionContext { return prune; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java b/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java index ce85a652064..9557715bcb9 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java @@ -24,7 +24,6 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.lucene.index.FreqTermsEnum; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate; diff --git a/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java index b16937bb417..94ef1f6b393 100644 --- a/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java +++ b/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestParser.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.suggest.term; import java.io.IOException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.search.suggest.DirectSpellcheckerSettings; diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotState.java b/src/main/java/org/elasticsearch/snapshots/SnapshotState.java index 41eaa271604..b893a372d13 100644 --- a/src/main/java/org/elasticsearch/snapshots/SnapshotState.java +++ b/src/main/java/org/elasticsearch/snapshots/SnapshotState.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import java.lang.IllegalArgumentException; /** * Represents the state that a snapshot can be in diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 54693726d6d..bc00f9c7f18 100644 --- a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -247,7 +247,7 @@ public class SnapshotsService extends AbstractLifecycleComponent extends BaseFutur } @Override - public V txGet() throws ElasticsearchException { + public V txGet() { try { return get(); } catch (InterruptedException e) { @@ -56,7 +55,7 @@ public class PlainTransportFuture extends BaseFutur } @Override - public V txGet(long timeout, TimeUnit unit) throws ElasticsearchException { + public V txGet(long timeout, TimeUnit unit) { try { return get(timeout, unit); } catch (TimeoutException e) { diff --git a/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 10be853ccc7..2b8caf8f055 100644 --- a/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; -import java.lang.IllegalStateException; import java.lang.reflect.Constructor; diff --git a/src/main/java/org/elasticsearch/transport/TransportFuture.java b/src/main/java/org/elasticsearch/transport/TransportFuture.java index 1b330d2b1dc..c4bfcb7afea 100644 --- a/src/main/java/org/elasticsearch/transport/TransportFuture.java +++ b/src/main/java/org/elasticsearch/transport/TransportFuture.java @@ -33,12 +33,12 @@ public interface TransportFuture extends Future { * Waits if necessary for the computation to complete, and then * retrieves its result. */ - V txGet() throws ElasticsearchException; + V txGet(); /** * Waits if necessary for at most the given time for the computation * to complete, and then retrieves its result, if available. */ - V txGet(long timeout, TimeUnit unit) throws ElasticsearchException; + V txGet(long timeout, TimeUnit unit); } diff --git a/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java b/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java index f8a1e1fb544..0d92d00f144 100644 --- a/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java +++ b/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.unit.TimeValue; /** diff --git a/src/main/java/org/elasticsearch/transport/TransportService.java b/src/main/java/org/elasticsearch/transport/TransportService.java index bc031d71d9a..7b635d0f851 100644 --- a/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/src/main/java/org/elasticsearch/transport/TransportService.java @@ -21,7 +21,6 @@ package org.elasticsearch.transport; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.settings.ClusterDynamicSettings; import org.elasticsearch.cluster.settings.DynamicSettings; @@ -160,7 +159,7 @@ public class TransportService extends AbstractLifecycleComponent implem } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { String address = settings.get(TRANSPORT_LOCAL_ADDRESS); if (address == null) { address = Long.toString(transportAddressIdGenerator.incrementAndGet()); @@ -110,7 +110,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { transports.remove(localAddress); // now, go over all the transports connected to me, and raise disconnected event for (final LocalTransport targetTransport : transports.values()) { @@ -123,7 +123,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { ThreadPool.terminate(workers, 10, TimeUnit.SECONDS); } diff --git a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index 0a8e7187385..b0689df6f2c 100644 --- a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport.netty; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.compress.Compressor; diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index f098f7d8556..e2ad4efe3f5 100644 --- a/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -234,7 +234,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { boolean success = false; try { clientBootstrap = createClientBootstrap(); @@ -487,7 +487,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { final CountDownLatch latch = new CountDownLatch(1); // make sure we run it on another thread than a possible IO handler thread threadPool.generic().execute(new Runnable() { @@ -559,7 +559,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override diff --git a/src/main/java/org/elasticsearch/tribe/TribeService.java b/src/main/java/org/elasticsearch/tribe/TribeService.java index 3994cbacaff..51cb9d0ecab 100644 --- a/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -164,7 +164,7 @@ public class TribeService extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { for (Node node : nodes) { try { node.start(); @@ -186,12 +186,12 @@ public class TribeService extends AbstractLifecycleComponent { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { doClose(); } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { for (Node node : nodes) { try { node.close(); diff --git a/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java b/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java index a1f99108883..90e37447bb5 100644 --- a/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java +++ b/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java @@ -19,7 +19,6 @@ package org.elasticsearch.watcher; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -92,7 +91,7 @@ public class ResourceWatcherService extends AbstractLifecycleComponent imp } @Override - public Transport start() throws ElasticsearchException { + public Transport start() { return null; } @Override - public Transport stop() throws ElasticsearchException { + public Transport stop() { return null; } @Override - public void close() throws ElasticsearchException { + public void close() { } diff --git a/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java b/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java index cd393ff88cd..2abb769f748 100644 --- a/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java +++ b/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java @@ -816,15 +816,15 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override diff --git a/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java b/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java index a91d12fa339..55b4a932c3a 100644 --- a/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java +++ b/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.common.Priority; import org.elasticsearch.test.ElasticsearchIntegrationTest; diff --git a/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index 7aa06ea0c06..8576e0f19b0 100644 --- a/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.Sets; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.IndexMetaData.State; diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 1c38a22c526..7a1ffaecff6 100644 --- a/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 2b782f92d2c..3eb177643cd 100644 --- a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableMap; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; diff --git a/src/test/java/org/elasticsearch/common/BooleansTests.java b/src/test/java/org/elasticsearch/common/BooleansTests.java index 0869f3ba772..3bacaed7f5a 100644 --- a/src/test/java/org/elasticsearch/common/BooleansTests.java +++ b/src/test/java/org/elasticsearch/common/BooleansTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common; -import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/src/test/java/org/elasticsearch/common/ParseFieldTests.java index 8897e60e5d7..7b0dacf8500 100644 --- a/src/test/java/org/elasticsearch/common/ParseFieldTests.java +++ b/src/test/java/org/elasticsearch/common/ParseFieldTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common; import org.apache.commons.lang3.ArrayUtils; -import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/common/PidFileTests.java b/src/test/java/org/elasticsearch/common/PidFileTests.java index 1593ffe512c..02f92f5b979 100644 --- a/src/test/java/org/elasticsearch/common/PidFileTests.java +++ b/src/test/java/org/elasticsearch/common/PidFileTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common; import com.google.common.base.Charsets; -import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/common/TableTests.java b/src/test/java/org/elasticsearch/common/TableTests.java index d9c608881e6..032299cd1b2 100644 --- a/src/test/java/org/elasticsearch/common/TableTests.java +++ b/src/test/java/org/elasticsearch/common/TableTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common; -import java.lang.IllegalStateException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java index 66127c0618b..f86f4cf8556 100644 --- a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java +++ b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.collect; import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableMap; -import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import java.util.HashMap; diff --git a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java index 8028781bda6..f489c47bc4a 100644 --- a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java +++ b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.collect; import com.google.common.collect.ImmutableSet; -import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import java.util.HashSet; diff --git a/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java index 4da1582f090..9a06b10d817 100644 --- a/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java +++ b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java @@ -27,7 +27,6 @@ import com.spatial4j.core.shape.ShapeCollection; import com.spatial4j.core.shape.jts.JtsGeometry; import com.spatial4j.core.shape.jts.JtsPoint; import com.vividsolutions.jts.geom.*; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.xcontent.XContentFactory; diff --git a/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java index 20c327410f8..60d161b4121 100644 --- a/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java +++ b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.recycler; -import java.lang.IllegalStateException; import org.elasticsearch.common.recycler.Recycler.V; import org.elasticsearch.test.ElasticsearchTestCase; diff --git a/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java b/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java index 9bfc5af98f6..5e0ab4f3b08 100644 --- a/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java +++ b/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.common.unit; -import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/count/query/CountQueryTests.java b/src/test/java/org/elasticsearch/count/query/CountQueryTests.java index 0f77e83c03a..3e8b6dd8218 100644 --- a/src/test/java/org/elasticsearch/count/query/CountQueryTests.java +++ b/src/test/java/org/elasticsearch/count/query/CountQueryTests.java @@ -818,7 +818,7 @@ public class CountQueryTests extends ElasticsearchIntegrationTest { } @Test // see #2994 - public void testSimpleSpan() throws ElasticsearchException, IOException { + public void testSimpleSpan() throws IOException { createIndex("test"); ensureGreen(); diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java index 08ffca233a2..169dbdbe4a2 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; diff --git a/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 31045dcb451..f84457dab3b 100644 --- a/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.env; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.ImmutableSettings; diff --git a/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java index ef845b08885..97231bec442 100644 --- a/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.fieldstats; -import java.lang.IllegalStateException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.action.fieldstats.FieldStatsResponse; diff --git a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java index 1eb92f203da..ed160d92b32 100644 --- a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java +++ b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java @@ -29,7 +29,6 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; diff --git a/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java b/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java index c626dc1c966..78d03746b93 100644 --- a/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java +++ b/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java @@ -137,7 +137,7 @@ public class NettyHttpServerPipeliningTest extends ElasticsearchTestCase { } @Override - public HttpServerTransport stop() throws ElasticsearchException { + public HttpServerTransport stop() { executorService.shutdownNow(); return super.stop(); } diff --git a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java index 3db8c5039b7..78f2bd5077c 100644 --- a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.FailedToResolveConfigException; diff --git a/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java index 1500377fc18..7b66c4f41e6 100644 --- a/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.*; import org.apache.lucene.analysis.reverse.ReverseStringFilter; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings.Builder; diff --git a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java index 0aceb27bc02..996471a205c 100644 --- a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.analysis; -import java.lang.IllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Injector; diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java index 2e7e09f4ab9..2792f0c4150 100644 --- a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.analysis.commongrams; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.AnalysisService; diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java index b4f074498a3..1dd88a83a1e 100644 --- a/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java +++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.engine; import org.apache.lucene.index.LiveIndexWriterConfig; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java index c8760ddd561..ffbe671580f 100644 --- a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java @@ -67,7 +67,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrat * It is possible to get the token count in a search response. */ @Test - public void searchReturnsTokenCount() throws ElasticsearchException, IOException { + public void searchReturnsTokenCount() throws IOException { init(); assertSearchReturns(searchById("single"), "single"); @@ -82,7 +82,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrat * It is possible to search by token count. */ @Test - public void searchByTokenCount() throws ElasticsearchException, IOException { + public void searchByTokenCount() throws IOException { init(); assertSearchReturns(searchByNumericRange(4, 4).get(), "single"); @@ -96,7 +96,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrat * It is possible to search by token count. */ @Test - public void facetByTokenCount() throws ElasticsearchException, IOException { + public void facetByTokenCount() throws IOException { init(); String facetField = randomFrom(ImmutableList.of( @@ -109,7 +109,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrat assertThat(terms.getBuckets().size(), equalTo(9)); } - private void init() throws ElasticsearchException, IOException { + private void init() throws IOException { prepareCreate("test").addMapping("test", jsonBuilder().startObject() .startObject("test") .startObject("properties") diff --git a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java index eda3d9fd9c8..b4ecfeeb220 100644 --- a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.QueryWrapperFilter; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.LocaleUtils; @@ -433,4 +432,4 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest { .bytes()); assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(43000L)); } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java index f97bb8e7b80..8923d95f405 100644 --- a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper.ip; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; diff --git a/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java index 41c98311b8e..00cca381db2 100644 --- a/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java +++ b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.LogDocMergePolicy; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index 03ed89f4c79..d37e5cf31c0 100644 --- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -64,7 +64,6 @@ import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -2441,7 +2440,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { } // https://github.com/elasticsearch/elasticsearch/issues/6722 - public void testEmptyBoolSubClausesIsMatchAll() throws ElasticsearchException, IOException { + public void testEmptyBoolSubClausesIsMatchAll() throws IOException { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-query-with-empty-clauses-for-parsing.json"); IndexService indexService = createIndex("testidx", client().admin().indices().prepareCreate("testidx") .addMapping("foo") diff --git a/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java b/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java index ad5b2b1d389..27d8641789a 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.search.child; -import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 42761ca7a04..e17721a0f00 100644 --- a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.index.shard; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.MutableShardRouting; import org.elasticsearch.cluster.routing.ShardRouting; diff --git a/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index eed8769475d..ccf87f2efef 100644 --- a/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.shard; -import java.lang.IllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; diff --git a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java index c0dd7d1b834..4c422448ede 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.indices; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; diff --git a/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java b/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java index 67ca29e9461..415d806278f 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java +++ b/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices; import org.apache.lucene.store.LockObtainFailedException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java index f65aeb64b2c..bf169d254e6 100644 --- a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java +++ b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.analyze; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; @@ -73,7 +72,7 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { } @Test - public void analyzeNumericField() throws ElasticsearchException, IOException { + public void analyzeNumericField() throws IOException { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("test", "long", "type=long", "double", "type=double")); ensureGreen("test"); diff --git a/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java b/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java index 6b42cbad84b..cc8e7c5fae8 100644 --- a/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java +++ b/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.indices.recovery; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java index 8653c16da3e..a12e35a92ee 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.settings; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.count.CountResponse; diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java index 263a3394044..fad19ed7b28 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java @@ -23,7 +23,6 @@ import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; diff --git a/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java index a4794649c8f..b2308b15a8c 100644 --- a/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java +++ b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.indices.state; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index 1d120ca9f7c..ce96576ce15 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.template; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; diff --git a/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java b/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java index 37fe6aa04fe..21fa628f2a8 100644 --- a/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java +++ b/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java @@ -23,7 +23,6 @@ import org.apache.http.impl.client.HttpClients; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.info.PluginInfo; diff --git a/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java b/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java index e8501c40d1f..dc58961cc4b 100644 --- a/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java +++ b/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java @@ -445,7 +445,7 @@ public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase { } @Override - public void close() throws ElasticsearchException { + public void close() { } diff --git a/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java index ef774983872..9b2b11fddad 100644 --- a/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java +++ b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.routing; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Priority; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java b/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java index 7041be12e36..b2acd54a373 100644 --- a/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java +++ b/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableSet; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; diff --git a/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java b/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java index ec28c3b90b0..b96391682fe 100644 --- a/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.script; import com.google.common.collect.Lists; -import java.lang.IllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/src/test/java/org/elasticsearch/script/ScriptModesTests.java index 96cc6fda02e..f0051d188bb 100644 --- a/src/test/java/org/elasticsearch/script/ScriptModesTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptModesTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.script; import com.google.common.collect.*; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.script.ScriptService.ScriptType; diff --git a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 0be2f2a80b5..69aa8685835 100644 --- a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; -import java.lang.IllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.ImmutableSettings; diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index e4b226db3f0..16ce8a964dc 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.significant; import org.apache.lucene.util.BytesRef; -import java.lang.IllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.InputStreamStreamInput; diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index d2f2a905440..c8cc0921bee 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.child; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -180,7 +179,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { @Test // see #6722 - public void test6722() throws ElasticsearchException, IOException { + public void test6722() throws IOException { assertAcked(prepareCreate("test") .addMapping("foo") .addMapping("test", "_parent", "type=foo")); @@ -199,7 +198,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { @Test // see #2744 - public void test2744() throws ElasticsearchException, IOException { + public void test2744() throws IOException { assertAcked(prepareCreate("test") .addMapping("foo") .addMapping("test", "_parent", "type=foo")); @@ -1341,7 +1340,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { } @Test - public void testHasChildNotBeingCached() throws ElasticsearchException, IOException { + public void testHasChildNotBeingCached() throws IOException { assertAcked(prepareCreate("test") .addMapping("parent") .addMapping("child", "_parent", "type=parent")); @@ -1457,7 +1456,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { } @Test - public void indexChildDocWithNoParentMapping() throws ElasticsearchException, IOException { + public void indexChildDocWithNoParentMapping() throws IOException { assertAcked(prepareCreate("test") .addMapping("parent") .addMapping("child1")); @@ -1481,7 +1480,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { } @Test - public void testAddingParentToExistingMapping() throws ElasticsearchException, IOException { + public void testAddingParentToExistingMapping() throws IOException { createIndex("test"); ensureGreen(); diff --git a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java index 50e693c2c3e..bd6052289c6 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.search.functionscore; -import java.lang.IllegalArgumentException; -import java.lang.IllegalStateException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; diff --git a/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java index 9de55b2a304..b67a5c07494 100644 --- a/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java +++ b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java @@ -98,7 +98,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { @Test // see #3486 - public void testHighTermFrequencyDoc() throws ElasticsearchException, IOException { + public void testHighTermFrequencyDoc() throws IOException { assertAcked(prepareCreate("test") .addMapping("test", "name", "type=string,term_vector=with_positions_offsets,store=" + (randomBoolean() ? "yes" : "no"))); ensureYellow(); @@ -115,7 +115,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testNgramHighlightingWithBrokenPositions() throws ElasticsearchException, IOException { + public void testNgramHighlightingWithBrokenPositions() throws IOException { assertAcked(prepareCreate("test") .addMapping("test", jsonBuilder() .startObject() @@ -171,7 +171,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testMultiPhraseCutoff() throws ElasticsearchException, IOException { + public void testMultiPhraseCutoff() throws IOException { /* * MultiPhraseQuery can literally kill an entire node if there are too many terms in the * query. We cut off and extract terms if there are more than 16 terms in the query @@ -203,7 +203,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testNgramHighlightingPreLucene42() throws ElasticsearchException, IOException { + public void testNgramHighlightingPreLucene42() throws IOException { assertAcked(prepareCreate("test") .addMapping("test", @@ -271,7 +271,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testNgramHighlighting() throws ElasticsearchException, IOException { + public void testNgramHighlighting() throws IOException { assertAcked(prepareCreate("test") .addMapping("test", "name", "type=string,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets", @@ -1301,7 +1301,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testBoostingQueryTermVector() throws ElasticsearchException, IOException { + public void testBoostingQueryTermVector() throws IOException { assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog") @@ -1338,7 +1338,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testCommonTermsTermVector() throws ElasticsearchException, IOException { + public void testCommonTermsTermVector() throws IOException { assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping())); ensureGreen(); @@ -1354,7 +1354,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testPhrasePrefix() throws ElasticsearchException, IOException { + public void testPhrasePrefix() throws IOException { Builder builder = settingsBuilder() .put(indexSettings()) .put("index.analysis.analyzer.synonym.tokenizer", "whitespace") @@ -2289,7 +2289,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testPostingsHighlighterBoostingQuery() throws ElasticsearchException, IOException { + public void testPostingsHighlighterBoostingQuery() throws IOException { assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping())); ensureGreen(); client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") @@ -2306,7 +2306,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testPostingsHighlighterCommonTermsQuery() throws ElasticsearchException, IOException { + public void testPostingsHighlighterCommonTermsQuery() throws IOException { assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping())); ensureGreen(); diff --git a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java index c1d8e32c517..85e20e7fcbc 100644 --- a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java +++ b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.preference; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; diff --git a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java index 9d3b887fd19..6a575fc38c7 100644 --- a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java +++ b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java @@ -1460,7 +1460,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test // see #2926 - public void testMustNot() throws ElasticsearchException, IOException, ExecutionException, InterruptedException { + public void testMustNot() throws IOException, ExecutionException, InterruptedException { assertAcked(prepareCreate("test") //issue manifested only with shards>=2 .setSettings(SETTING_NUMBER_OF_SHARDS, between(2, DEFAULT_MAX_NUM_SHARDS))); @@ -1483,7 +1483,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test // see #2994 - public void testSimpleSpan() throws ElasticsearchException, IOException, ExecutionException, InterruptedException { + public void testSimpleSpan() throws IOException, ExecutionException, InterruptedException { createIndex("test"); ensureGreen(); @@ -1505,7 +1505,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test - public void testSpanMultiTermQuery() throws ElasticsearchException, IOException { + public void testSpanMultiTermQuery() throws IOException { createIndex("test"); ensureGreen(); @@ -1538,7 +1538,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test - public void testSpanNot() throws ElasticsearchException, IOException, ExecutionException, InterruptedException { + public void testSpanNot() throws IOException, ExecutionException, InterruptedException { createIndex("test"); ensureGreen(); @@ -1577,7 +1577,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test - public void testSimpleDFSQuery() throws ElasticsearchException, IOException { + public void testSimpleDFSQuery() throws IOException { assertAcked(prepareCreate("test") .addMapping("s", jsonBuilder() .startObject() diff --git a/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java index f16a6d2907c..21dd1f3a1f8 100644 --- a/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java +++ b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.scroll; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.search.*; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; diff --git a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java index 5911985cc26..f95a1422786 100644 --- a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java +++ b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.simple; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; diff --git a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java index 7972df64741..457c9278222 100644 --- a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java +++ b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java @@ -214,7 +214,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { } } - public void testRandomSorting() throws ElasticsearchException, IOException, InterruptedException, ExecutionException { + public void testRandomSorting() throws IOException, InterruptedException, ExecutionException { Random random = getRandom(); assertAcked(prepareCreate("test") .addMapping("type", @@ -1023,7 +1023,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { } @Test @Slow - public void testSortMissingStrings() throws ElasticsearchException, IOException { + public void testSortMissingStrings() throws IOException { assertAcked(prepareCreate("test").addMapping("type1", XContentFactory.jsonBuilder() .startObject() @@ -1458,7 +1458,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { } @Test - public void testSortOnRareField() throws ElasticsearchException, IOException { + public void testSortOnRareField() throws IOException { assertAcked(prepareCreate("test") .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() @@ -1626,7 +1626,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { * Test case for issue 6150: https://github.com/elasticsearch/elasticsearch/issues/6150 */ @Test - public void testNestedSort() throws ElasticsearchException, IOException, InterruptedException, ExecutionException { + public void testNestedSort() throws IOException, InterruptedException, ExecutionException { assertAcked(prepareCreate("test") .addMapping("type", XContentFactory.jsonBuilder() diff --git a/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java index 336903e67f1..a1f95a229cd 100644 --- a/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.suggest; import com.google.common.collect.Sets; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.action.suggest.SuggestRequestBuilder; diff --git a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java index 47802e9d848..86d979df31c 100644 --- a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java @@ -23,7 +23,6 @@ import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.io.Resources; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.*; @@ -410,7 +409,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { } @Test // see #2817 - public void testStopwordsOnlyPhraseSuggest() throws ElasticsearchException, IOException { + public void testStopwordsOnlyPhraseSuggest() throws IOException { assertAcked(prepareCreate("test").addMapping("typ1", "body", "type=string,analyzer=stopwd").setSettings( settingsBuilder() .put("index.analysis.analyzer.stopwd.tokenizer", "whitespace") @@ -428,7 +427,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testPrefixLength() throws ElasticsearchException, IOException { // Stopped here + public void testPrefixLength() throws IOException { // Stopped here CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder() .put(SETTING_NUMBER_OF_SHARDS, 1) .put("index.analysis.analyzer.reverse.tokenizer", "standard") @@ -473,7 +472,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { @Test @Slow @Nightly - public void testMarvelHerosPhraseSuggest() throws ElasticsearchException, IOException { + public void testMarvelHerosPhraseSuggest() throws IOException { CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder() .put(indexSettings()) .put("index.analysis.analyzer.reverse.tokenizer", "standard") @@ -669,7 +668,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { @Test @Nightly - public void testPhraseBoundaryCases() throws ElasticsearchException, IOException { + public void testPhraseBoundaryCases() throws IOException { CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder() .put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1) // to get reliable statistics we should put this all into one shard .put("index.analysis.analyzer.body.tokenizer", "standard") @@ -876,7 +875,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { * score during the reduce phase. Failures don't occur every time - maybe two out of five tries but we don't repeat it to save time. */ @Test - public void testSearchForRarePhrase() throws ElasticsearchException, IOException { + public void testSearchForRarePhrase() throws IOException { // If there isn't enough chaf per shard then shards can become unbalanced, making the cutoff recheck this is testing do more harm then good. int chafPerShard = 100; diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index a3abd47e913..0ca85672ec9 100644 --- a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -25,7 +25,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase.Slow; -import java.lang.IllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index fe2eb17f654..af3fc057d29 100644 --- a/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -123,7 +123,7 @@ public class MockRepository extends FsRepository { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { unblock(); super.doStop(); } diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 9777b57e3bf..5d2d00c4870 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -38,7 +38,6 @@ import com.google.common.util.concurrent.SettableFuture; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; diff --git a/src/test/java/org/elasticsearch/test/TestCluster.java b/src/test/java/org/elasticsearch/test/TestCluster.java index e6b5967f2c9..c8d48521b14 100644 --- a/src/test/java/org/elasticsearch/test/TestCluster.java +++ b/src/test/java/org/elasticsearch/test/TestCluster.java @@ -20,7 +20,6 @@ package org.elasticsearch.test; import com.carrotsearch.hppc.ObjectArrayList; -import java.lang.IllegalArgumentException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/src/test/java/org/elasticsearch/test/TestSearchContext.java b/src/test/java/org/elasticsearch/test/TestSearchContext.java index 77448544b1c..e1ccd525546 100644 --- a/src/test/java/org/elasticsearch/test/TestSearchContext.java +++ b/src/test/java/org/elasticsearch/test/TestSearchContext.java @@ -590,7 +590,7 @@ public class TestSearchContext extends SearchContext { } @Override - public void doClose() throws ElasticsearchException { + public void doClose() { } @Override diff --git a/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java b/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java index b7c2d07d4d2..6613a786449 100644 --- a/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java +++ b/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java @@ -78,7 +78,7 @@ public class MockPageCacheRecycler extends PageCacheRecycler { return new V() { @Override - public void close() throws ElasticsearchException { + public void close() { final Throwable t = ACQUIRED_PAGES.remove(v); if (t == null) { throw new IllegalStateException("Releasing a page that has not been acquired"); diff --git a/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java b/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java index 5c604592853..18f712e7259 100644 --- a/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java +++ b/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java @@ -19,7 +19,6 @@ package org.elasticsearch.test.cluster; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlock; @@ -152,17 +151,17 @@ public class NoopClusterService implements ClusterService { } @Override - public ClusterService start() throws ElasticsearchException { + public ClusterService start() { return null; } @Override - public ClusterService stop() throws ElasticsearchException { + public ClusterService stop() { return null; } @Override - public void close() throws ElasticsearchException { + public void close() { } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java b/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java index 22e6f2eecdf..6a61665e355 100644 --- a/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java +++ b/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -19,7 +19,6 @@ package org.elasticsearch.test.cluster; import org.elasticsearch.ElasticsearchException; -import java.lang.IllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlock; @@ -209,17 +208,17 @@ public class TestClusterService implements ClusterService { } @Override - public ClusterService start() throws ElasticsearchException { + public ClusterService start() { throw new UnsupportedOperationException(); } @Override - public ClusterService stop() throws ElasticsearchException { + public ClusterService stop() { throw new UnsupportedOperationException(); } @Override - public void close() throws ElasticsearchException { + public void close() { throw new UnsupportedOperationException(); } diff --git a/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java b/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java index d73f501a3ab..4d74b049612 100644 --- a/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java +++ b/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java @@ -41,6 +41,6 @@ public class NoMergePolicyProvider extends AbstractMergePolicyProvider Date: Wed, 29 Apr 2015 10:53:33 +0200 Subject: [PATCH 188/236] added missing { --- .../java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 988152f4726..9f69e83edfe 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -93,7 +93,7 @@ public class IpFieldMapper extends NumberFieldMapper { } return (Long.parseLong(octets[0]) << 24) + (Integer.parseInt(octets[1]) << 16) + (Integer.parseInt(octets[2]) << 8) + Integer.parseInt(octets[3]); - } catch (Exception e) + } catch (Exception e) { if (e instanceof IllegalArgumentException) { throw (IllegalArgumentException) e; } From 11bf7074d77dcfb5ea9b9916829c0f53361e8ce6 Mon Sep 17 00:00:00 2001 From: Shay Banon Date: Wed, 29 Apr 2015 11:37:32 +0200 Subject: [PATCH 189/236] Remove Preconditions class We don't need our own anymore, since we removed the Elasticsearch illegal argument exception --- .../cluster/metadata/IndexMetaData.java | 2 +- .../elasticsearch/common/Preconditions.java | 289 ------------------ .../common/collect/CopyOnWriteHashMap.java | 2 +- .../org/elasticsearch/common/io/Streams.java | 4 +- .../common/property/PropertyPlaceholder.java | 4 +- .../elasticsearch/common/unit/Fuzziness.java | 2 +- .../elasticsearch/common/unit/SizeValue.java | 2 +- .../common/util/CollectionUtils.java | 2 +- .../gateway/MetaDataStateFormat.java | 2 +- .../elasticsearch/http/HttpServerModule.java | 8 +- .../elasticsearch/index/engine/Engine.java | 3 +- .../index/mapper/DocumentMapper.java | 2 +- .../LogByteSizeMergePolicyProvider.java | 3 +- .../policy/LogDocMergePolicyProvider.java | 3 +- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../transport/TransportModule.java | 13 +- 16 files changed, 22 insertions(+), 321 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/common/Preconditions.java diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index b9713727d6e..d6bcacf1615 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodeFilters; import org.elasticsearch.cluster.routing.HashFunction; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.compress.CompressedString; diff --git a/src/main/java/org/elasticsearch/common/Preconditions.java b/src/main/java/org/elasticsearch/common/Preconditions.java deleted file mode 100644 index 0dee3753403..00000000000 --- a/src/main/java/org/elasticsearch/common/Preconditions.java +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common; - -import org.elasticsearch.ElasticsearchNullPointerException; - -import java.util.NoSuchElementException; - -/** - * Simple static methods to be called at the start of your own methods to verify - * correct arguments and state. This allows constructs such as - *

    - *     if (count <= 0) {
    - *       throw new ElasticsearchIllegalArgumentException("must be positive: " + count);
    - *     }
    - * - * to be replaced with the more compact - *
    - *     checkArgument(count > 0, "must be positive: %s", count);
    - * - * Note that the sense of the expression is inverted; with {@code Preconditions} - * you declare what you expect to be true, just as you do with an - * - * {@code assert} or a JUnit {@code assertTrue()} call. - * - *

    Take care not to confuse precondition checking with other similar types - * of checks! Precondition exceptions -- including those provided here, but also - * {@link IndexOutOfBoundsException}, {@link NoSuchElementException}, {@link - * UnsupportedOperationException} and others -- are used to signal that the - * calling method has made an error. This tells the caller that it should - * not have invoked the method when it did, with the arguments it did, or - * perhaps ever. Postcondition or other invariant failures should not - * throw these types of exceptions. - * - *

    Note: The methods of the {@code Preconditions} class are highly - * unusual in one way: they are supposed to throw exceptions, and promise - * in their specifications to do so even when given perfectly valid input. That - * is, {@code null} is a valid parameter to the method {@link - * #checkNotNull(Object)} -- and technically this parameter could be even marked - * as Nullable -- yet the method will still throw an exception anyway, - * because that's what its contract says to do. - * - * - */ -public final class Preconditions { - private Preconditions() { - } - - /** - * Ensures the truth of an expression involving one or more parameters to the - * calling method. - * - * @param expression a boolean expression - * @throws IllegalArgumentException - * if {@code expression} is false - */ - public static void checkArgument(boolean expression) { - if (!expression) { - throw new IllegalArgumentException(); - } - } - - /** - * Ensures the truth of an expression involving one or more parameters to the - * calling method. - * - * @param expression a boolean expression - * @param errorMessage the exception message to use if the check fails; will - * be converted to a string using {@link String#valueOf(Object)} - * @throws IllegalArgumentException - * if {@code expression} is false - */ - public static void checkArgument(boolean expression, Object errorMessage) { - if (!expression) { - throw new IllegalArgumentException(String.valueOf(errorMessage)); - } - } - - /** - * Ensures the truth of an expression involving one or more parameters to the - * calling method. - * - * @param expression a boolean expression - * @param errorMessageTemplate a template for the exception message should the - * check fail. The message is formed by replacing each {@code %s} - * placeholder in the template with an argument. These are matched by - * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. - * Unmatched arguments will be appended to the formatted message in square - * braces. Unmatched placeholders will be left as-is. - * @param errorMessageArgs the arguments to be substituted into the message - * template. Arguments are converted to strings using - * {@link String#valueOf(Object)}. - * @throws IllegalArgumentException - * if {@code expression} is false - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if the check fails and either {@code - * errorMessageTemplate} or {@code errorMessageArgs} is null (don't let - * this happen) - */ - public static void checkArgument(boolean expression, - String errorMessageTemplate, Object... errorMessageArgs) { - if (!expression) { - throw new IllegalArgumentException( - format(errorMessageTemplate, errorMessageArgs)); - } - } - - /** - * Ensures the truth of an expression involving the state of the calling - * instance, but not involving any parameters to the calling method. - * - * @param expression a boolean expression - * @throws IllegalStateException - * if {@code expression} is false - */ - public static void checkState(boolean expression) { - if (!expression) { - throw new IllegalStateException(); - } - } - - /** - * Ensures the truth of an expression involving the state of the calling - * instance, but not involving any parameters to the calling method. - * - * @param expression a boolean expression - * @param errorMessage the exception message to use if the check fails; will - * be converted to a string using {@link String#valueOf(Object)} - * @throws IllegalStateException - * if {@code expression} is false - */ - public static void checkState(boolean expression, Object errorMessage) { - if (!expression) { - throw new IllegalStateException(String.valueOf(errorMessage)); - } - } - - /** - * Ensures the truth of an expression involving the state of the calling - * instance, but not involving any parameters to the calling method. - * - * @param expression a boolean expression - * @param errorMessageTemplate a template for the exception message should the - * check fail. The message is formed by replacing each {@code %s} - * placeholder in the template with an argument. These are matched by - * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. - * Unmatched arguments will be appended to the formatted message in square - * braces. Unmatched placeholders will be left as-is. - * @param errorMessageArgs the arguments to be substituted into the message - * template. Arguments are converted to strings using - * {@link String#valueOf(Object)}. - * @throws IllegalStateException - * if {@code expression} is false - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if the check fails and either {@code - * errorMessageTemplate} or {@code errorMessageArgs} is null (don't let - * this happen) - */ - public static void checkState(boolean expression, - String errorMessageTemplate, Object... errorMessageArgs) { - if (!expression) { - throw new IllegalStateException( - format(errorMessageTemplate, errorMessageArgs)); - } - } - - /** - * Ensures that an object reference passed as a parameter to the calling - * method is not null. - * - * @param reference an object reference - * @return the non-null reference that was validated - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if {@code reference} is null - */ - public static T checkNotNull(T reference) { - if (reference == null) { - throw new ElasticsearchNullPointerException(); - } - return reference; - } - - /** - * Ensures that an object reference passed as a parameter to the calling - * method is not null. - * - * @param reference an object reference - * @param errorMessage the exception message to use if the check fails; will - * be converted to a string using {@link String#valueOf(Object)} - * @return the non-null reference that was validated - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if {@code reference} is null - */ - public static T checkNotNull(T reference, Object errorMessage) { - if (reference == null) { - throw new ElasticsearchNullPointerException(String.valueOf(errorMessage)); - } - return reference; - } - - /** - * Ensures that an object reference passed as a parameter to the calling - * method is not null. - * - * @param reference an object reference - * @param errorMessageTemplate a template for the exception message should the - * check fail. The message is formed by replacing each {@code %s} - * placeholder in the template with an argument. These are matched by - * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. - * Unmatched arguments will be appended to the formatted message in square - * braces. Unmatched placeholders will be left as-is. - * @param errorMessageArgs the arguments to be substituted into the message - * template. Arguments are converted to strings using - * {@link String#valueOf(Object)}. - * @return the non-null reference that was validated - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if {@code reference} is null - */ - public static T checkNotNull(T reference, String errorMessageTemplate, - Object... errorMessageArgs) { - if (reference == null) { - // If either of these parameters is null, the right thing happens anyway - throw new ElasticsearchNullPointerException( - format(errorMessageTemplate, errorMessageArgs)); - } - return reference; - } - - /** - * Substitutes each {@code %s} in {@code template} with an argument. These - * are matched by position - the first {@code %s} gets {@code args[0]}, etc. - * If there are more arguments than placeholders, the unmatched arguments will - * be appended to the end of the formatted message in square braces. - * - * @param template a non-null string containing 0 or more {@code %s} - * placeholders. - * @param args the arguments to be substituted into the message - * template. Arguments are converted to strings using - * {@link String#valueOf(Object)}. Arguments can be null. - */ - // VisibleForTesting - static String format(String template, Object... args) { - // start substituting the arguments into the '%s' placeholders - StringBuilder builder = new StringBuilder( - template.length() + 16 * args.length); - int templateStart = 0; - int i = 0; - while (i < args.length) { - int placeholderStart = template.indexOf("%s", templateStart); - if (placeholderStart == -1) { - break; - } - builder.append(template.substring(templateStart, placeholderStart)); - builder.append(args[i++]); - templateStart = placeholderStart + 2; - } - builder.append(template.substring(templateStart)); - - // if we run out of placeholders, append the extra args in square braces - if (i < args.length) { - builder.append(" ["); - builder.append(args[i++]); - while (i < args.length) { - builder.append(", "); - builder.append(args[i++]); - } - builder.append("]"); - } - - return builder.toString(); - } -} diff --git a/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java b/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java index cf5f4ac4f0a..6bd3a5bd45b 100644 --- a/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java +++ b/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java @@ -19,11 +19,11 @@ package org.elasticsearch.common.collect; +import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.google.common.collect.UnmodifiableIterator; import org.apache.commons.lang3.ArrayUtils; import org.apache.lucene.util.mutable.MutableValueInt; -import org.elasticsearch.common.Preconditions; import java.util.*; diff --git a/src/main/java/org/elasticsearch/common/io/Streams.java b/src/main/java/org/elasticsearch/common/io/Streams.java index 63f62015c87..b5f224e72f0 100644 --- a/src/main/java/org/elasticsearch/common/io/Streams.java +++ b/src/main/java/org/elasticsearch/common/io/Streams.java @@ -20,14 +20,12 @@ package org.elasticsearch.common.io; import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.util.Callback; import java.io.*; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.List; /** diff --git a/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java b/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java index 8c41c1b79c1..5b4515f7b96 100644 --- a/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java +++ b/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.property; -import org.elasticsearch.common.Preconditions; +import com.google.common.base.Preconditions; import org.elasticsearch.common.Strings; import java.util.HashSet; @@ -159,7 +159,7 @@ public class PropertyPlaceholder { * * @see PropertyPlaceholder */ - public static interface PlaceholderResolver { + public interface PlaceholderResolver { /** * Resolves the supplied placeholder name into the replacement value. diff --git a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java index 7ae494cabb9..a4ac1fda851 100644 --- a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java +++ b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.common.unit; +import com.google.common.base.Preconditions; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.util.automaton.LevenshteinAutomata; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; diff --git a/src/main/java/org/elasticsearch/common/unit/SizeValue.java b/src/main/java/org/elasticsearch/common/unit/SizeValue.java index 458c855d833..415326f7069 100644 --- a/src/main/java/org/elasticsearch/common/unit/SizeValue.java +++ b/src/main/java/org/elasticsearch/common/unit/SizeValue.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.unit; +import com.google.common.base.Preconditions; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 76189877ca0..de5171d50dc 100644 --- a/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -23,8 +23,8 @@ import com.carrotsearch.hppc.DoubleArrayList; import com.carrotsearch.hppc.FloatArrayList; import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.ObjectArrayList; +import com.google.common.base.Preconditions; import org.apache.lucene.util.*; -import org.elasticsearch.common.Preconditions; import java.util.*; diff --git a/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 178b948f093..9d88d84f64a 100644 --- a/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.gateway; +import com.google.common.base.Preconditions; import com.google.common.base.Predicate; import com.google.common.collect.Collections2; import org.apache.lucene.codecs.CodecUtil; @@ -27,7 +28,6 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.*; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.xcontent.*; diff --git a/src/main/java/org/elasticsearch/http/HttpServerModule.java b/src/main/java/org/elasticsearch/http/HttpServerModule.java index e197c3afbfd..df0be6bcf69 100644 --- a/src/main/java/org/elasticsearch/http/HttpServerModule.java +++ b/src/main/java/org/elasticsearch/http/HttpServerModule.java @@ -19,14 +19,12 @@ package org.elasticsearch.http; +import com.google.common.base.Preconditions; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.http.netty.NettyHttpServerTransport; -import org.elasticsearch.plugins.Plugin; - -import static org.elasticsearch.common.Preconditions.checkNotNull; /** * @@ -60,8 +58,8 @@ public class HttpServerModule extends AbstractModule { } public void setHttpServerTransport(Class httpServerTransport, String source) { - checkNotNull(httpServerTransport, "Configured http server transport may not be null"); - checkNotNull(source, "Plugin, that changes transport may not be null"); + Preconditions.checkNotNull(httpServerTransport, "Configured http server transport may not be null"); + Preconditions.checkNotNull(source, "Plugin, that changes transport may not be null"); this.configuredHttpServerTransport = httpServerTransport; this.configuredHttpServerTransportSource = source; } diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java index 612b5cf7a70..7b4f874d410 100644 --- a/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.engine; +import com.google.common.base.Preconditions; import org.apache.lucene.index.*; import org.apache.lucene.search.Filter; import org.apache.lucene.search.IndexSearcher; @@ -27,10 +28,8 @@ import org.apache.lucene.search.SearcherManager; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index e38f97f8233..ceab117683e 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import org.apache.lucene.document.Field; @@ -27,7 +28,6 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.util.BitDocIdSet; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.MapBuilder; diff --git a/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java index 4dc5b14b883..d3107fa33d0 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java @@ -19,9 +19,8 @@ package org.elasticsearch.index.merge.policy; +import com.google.common.base.Preconditions; import org.apache.lucene.index.LogByteSizeMergePolicy; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; diff --git a/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java index 03105b1ccda..a46c0f0aa9c 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java @@ -19,9 +19,8 @@ package org.elasticsearch.index.merge.policy; +import com.google.common.base.Preconditions; import org.apache.lucene.index.LogDocMergePolicy; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettingsService; diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 4e00df55baa..d8e37e40c2e 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.shard; import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.search.Filter; @@ -42,7 +43,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; diff --git a/src/main/java/org/elasticsearch/transport/TransportModule.java b/src/main/java/org/elasticsearch/transport/TransportModule.java index 484c65ec622..773d7d2296e 100644 --- a/src/main/java/org/elasticsearch/transport/TransportModule.java +++ b/src/main/java/org/elasticsearch/transport/TransportModule.java @@ -19,18 +19,15 @@ package org.elasticsearch.transport; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import com.google.common.base.Preconditions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.local.LocalTransport; import org.elasticsearch.transport.netty.NettyTransport; -import static org.elasticsearch.common.Preconditions.checkNotNull; - /** * */ @@ -78,15 +75,15 @@ public class TransportModule extends AbstractModule { } public void setTransportService(Class transportService, String source) { - checkNotNull(transportService, "Configured transport service may not be null"); - checkNotNull(source, "Plugin, that changes transport service may not be null"); + Preconditions.checkNotNull(transportService, "Configured transport service may not be null"); + Preconditions.checkNotNull(source, "Plugin, that changes transport service may not be null"); this.configuredTransportService = transportService; this.configuredTransportServiceSource = source; } public void setTransport(Class transport, String source) { - checkNotNull(transport, "Configured transport may not be null"); - checkNotNull(source, "Plugin, that changes transport may not be null"); + Preconditions.checkNotNull(transport, "Configured transport may not be null"); + Preconditions.checkNotNull(source, "Plugin, that changes transport may not be null"); this.configuredTransport = transport; this.configuredTransportSource = source; } From 94d8b206116c33d7e0214b011975b538a2020980 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 24 Apr 2015 10:53:03 +0200 Subject: [PATCH 190/236] Add multi data.path to migration guide this commit removes the obsolete settings for distributors and updates the documentation on multiple data.path. It also adds an explain to the migration guide. Relates to #9498 Closes #10770 --- docs/reference/migration/migrate_2_0.asciidoc | 19 +++++++++++ docs/reference/setup/dir-layout.asciidoc | 32 +++++++------------ .../org/elasticsearch/index/IndexService.java | 2 +- .../index/store/StoreModule.java | 8 +---- .../test/ElasticsearchIntegrationTest.java | 3 -- 5 files changed, 33 insertions(+), 31 deletions(-) diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 40290e4cb1f..58282f5a0f2 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -423,3 +423,22 @@ systems and the provided start/stop scripts. The Analyze API return 0 as first Token's position instead of 1. +=== Multiple data.path striping + +Previously, if the `data.path` setting listed multiple data paths, then a +shard would be ``striped'' across all paths by writing a whole file to each +path in turn (in accordance with the `index.store.distributor` setting). The +result was that the files from a single segment in a shard could be spread +across multiple disks, and the failure of any one disk could corrupt multiple +shards. + +This striping is no longer supported. Instead, different shards may be +allocated to different paths, but all of the files in a single shard will be +written to the same path. + +If striping is detected while starting Elasticsearch 2.0.0 or later, all of +the files belonging to the same shard will be migrated to the same path. If +there is not enough disk space to complete this migration, the upgrade will be +cancelled and can only be resumed once enough disk space is made available. + +The `index.store.distributor` setting has also been removed. diff --git a/docs/reference/setup/dir-layout.asciidoc b/docs/reference/setup/dir-layout.asciidoc index 0095f60e332..19f565bec9a 100644 --- a/docs/reference/setup/dir-layout.asciidoc +++ b/docs/reference/setup/dir-layout.asciidoc @@ -18,30 +18,22 @@ on the node. Can hold multiple locations. | {path.home}/data| path.data | plugins | Plugin files location. Each plugin will be contained in a subdirectory. | {path.home}/plugins | path.plugins |======================================================================= -The multiple data locations allows to stripe it. The striping is simple, -placing whole files in one of the locations, and deciding where to place -the file based on the value of the `index.store.distributor` setting: +Multiple `data` paths may be specified, in order to spread data across +multiple disks or locations, but all of the files from a single shard will be +written to the same path. This can be configured as follows: -* `least_used` (default) always selects the directory with the most -available space + - * `random` selects directories at random. The probability of selecting -a particular directory is proportional to amount of available space in -this directory. + --------------------------------- + path.data: /mnt/first,/mnt/second + --------------------------------- -Note, there are no multiple copies of the same data, in that, its -similar to RAID 0. Though simple, it should provide a good solution for -people that don't want to mess with RAID. Here is how it is configured: + Or in an array format: ---------------------------------- -path.data: /mnt/first,/mnt/second ---------------------------------- - -Or the in an array format: - ----------------------------------------- -path.data: ["/mnt/first", "/mnt/second"] ----------------------------------------- + ---------------------------------------- + path.data: ["/mnt/first", "/mnt/second"] + ---------------------------------------- +TIP: To stripe shards across multiple disks, please use a RAID driver +instead. [float] [[default-paths]] diff --git a/src/main/java/org/elasticsearch/index/IndexService.java b/src/main/java/org/elasticsearch/index/IndexService.java index a0d30cc2f78..74c0e87f44c 100644 --- a/src/main/java/org/elasticsearch/index/IndexService.java +++ b/src/main/java/org/elasticsearch/index/IndexService.java @@ -311,7 +311,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone modules.add(new ShardIndexingModule()); modules.add(new ShardSearchModule()); modules.add(new ShardGetModule()); - modules.add(new StoreModule(indexSettings, injector.getInstance(IndexStore.class).shardDirectory(), lock, + modules.add(new StoreModule(injector.getInstance(IndexStore.class).shardDirectory(), lock, new StoreCloseListener(shardId, canDeleteShardContent), path)); modules.add(new DeletionPolicyModule(indexSettings)); modules.add(new MergePolicyModule(indexSettings)); diff --git a/src/main/java/org/elasticsearch/index/store/StoreModule.java b/src/main/java/org/elasticsearch/index/store/StoreModule.java index fd6fe6e11bc..273455bf214 100644 --- a/src/main/java/org/elasticsearch/index/store/StoreModule.java +++ b/src/main/java/org/elasticsearch/index/store/StoreModule.java @@ -29,11 +29,6 @@ import org.elasticsearch.index.shard.ShardPath; */ public class StoreModule extends AbstractModule { - public static final String DISTIBUTOR_KEY = "index.store.distributor"; - public static final String LEAST_USED_DISTRIBUTOR = "least_used"; - public static final String RANDOM_WEIGHT_DISTRIBUTOR = "random"; - - private final Settings settings; private final ShardLock lock; private final Store.OnClose closeCallback; @@ -41,9 +36,8 @@ public class StoreModule extends AbstractModule { private final Class shardDirectory; - public StoreModule(Settings settings, Class shardDirectory, ShardLock lock, Store.OnClose closeCallback, ShardPath path) { + public StoreModule(Class shardDirectory, ShardLock lock, Store.OnClose closeCallback, ShardPath path) { this.shardDirectory = shardDirectory; - this.settings = settings; this.lock = lock; this.closeCallback = closeCallback; this.path = path; diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 64ab72bd1e6..9e68d16caa0 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -487,9 +487,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase builder.put(IndicesStore.INDICES_STORE_THROTTLE_TYPE, RandomPicks.randomFrom(random, StoreRateLimiting.Type.values())); } - if (random.nextBoolean()) { - builder.put(StoreModule.DISTIBUTOR_KEY, random.nextBoolean() ? StoreModule.LEAST_USED_DISTRIBUTOR : StoreModule.RANDOM_WEIGHT_DISTRIBUTOR); - } if (random.nextBoolean()) { builder.put(ConcurrentMergeSchedulerProvider.AUTO_THROTTLE, false); } From 28e5a649b41de815edd6b742eb1f339d21ae251a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Honza=20Kr=C3=A1l?= Date: Wed, 29 Apr 2015 00:11:32 +0200 Subject: [PATCH 191/236] [API] remove global parameter source from individual APIs Same way we don't define pretty anywhere, we shouldn't have source --- rest-api-spec/api/count.json | 4 ---- rest-api-spec/api/count_percolate.json | 4 ---- rest-api-spec/api/explain.json | 4 ---- rest-api-spec/api/indices.analyze.json | 4 ---- rest-api-spec/api/indices.validate_query.json | 4 ---- rest-api-spec/api/mget.json | 4 ---- rest-api-spec/api/mlt.json | 4 ---- rest-api-spec/api/mpercolate.json | 4 ---- rest-api-spec/api/msearch.json | 4 ---- rest-api-spec/api/mtermvectors.json | 6 +----- rest-api-spec/api/percolate.json | 4 ---- rest-api-spec/api/scroll.json | 4 ---- rest-api-spec/api/search.json | 4 ---- rest-api-spec/api/search_exists.json | 4 ---- rest-api-spec/api/search_template.json | 4 ---- rest-api-spec/api/suggest.json | 4 ---- rest-api-spec/api/termvectors.json | 4 ---- 17 files changed, 1 insertion(+), 69 deletions(-) diff --git a/rest-api-spec/api/count.json b/rest-api-spec/api/count.json index 998667c2ef2..c25997633b2 100644 --- a/rest-api-spec/api/count.json +++ b/rest-api-spec/api/count.json @@ -41,10 +41,6 @@ "routing": { "type" : "string", "description" : "Specific routing value" - }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" } } }, diff --git a/rest-api-spec/api/count_percolate.json b/rest-api-spec/api/count_percolate.json index 57d19ae44fe..584f33685d3 100644 --- a/rest-api-spec/api/count_percolate.json +++ b/rest-api-spec/api/count_percolate.json @@ -23,10 +23,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "routing": { "type": "list", "description": "A comma-separated list of specific routing values" diff --git a/rest-api-spec/api/explain.json b/rest-api-spec/api/explain.json index 6e766823ad8..30b5deff1d3 100644 --- a/rest-api-spec/api/explain.json +++ b/rest-api-spec/api/explain.json @@ -69,10 +69,6 @@ "type" : "string", "description" : "Specific routing value" }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" - }, "_source": { "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" diff --git a/rest-api-spec/api/indices.analyze.json b/rest-api-spec/api/indices.analyze.json index d1d87c5a867..372693b794a 100644 --- a/rest-api-spec/api/indices.analyze.json +++ b/rest-api-spec/api/indices.analyze.json @@ -12,10 +12,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "analyzer": { "type" : "string", "description" : "The name of the analyzer to use" diff --git a/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/api/indices.validate_query.json index 2b93c241215..c96cd109b3d 100644 --- a/rest-api-spec/api/indices.validate_query.json +++ b/rest-api-spec/api/indices.validate_query.json @@ -37,10 +37,6 @@ "operation_threading": { "description" : "TODO: ?" }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" - }, "q": { "type" : "string", "description" : "Query in the Lucene query string syntax" diff --git a/rest-api-spec/api/mget.json b/rest-api-spec/api/mget.json index 38d8ce20420..1639f3619b3 100644 --- a/rest-api-spec/api/mget.json +++ b/rest-api-spec/api/mget.json @@ -16,10 +16,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "fields": { "type": "list", "description" : "A comma-separated list of fields to return in the response" diff --git a/rest-api-spec/api/mlt.json b/rest-api-spec/api/mlt.json index b5b405912d4..0dc58782dff 100644 --- a/rest-api-spec/api/mlt.json +++ b/rest-api-spec/api/mlt.json @@ -23,10 +23,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "boost_terms": { "type" : "number", "description" : "The boost factor" diff --git a/rest-api-spec/api/mpercolate.json b/rest-api-spec/api/mpercolate.json index 69c92511ed7..7cbf4f61e43 100644 --- a/rest-api-spec/api/mpercolate.json +++ b/rest-api-spec/api/mpercolate.json @@ -16,10 +16,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "ignore_unavailable": { "type": "boolean", "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git a/rest-api-spec/api/msearch.json b/rest-api-spec/api/msearch.json index f233b304492..3d8297e496d 100644 --- a/rest-api-spec/api/msearch.json +++ b/rest-api-spec/api/msearch.json @@ -16,10 +16,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "search_type": { "type" : "enum", "options" : ["query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", "dfs_query_and_fetch", "count", "scan"], diff --git a/rest-api-spec/api/mtermvectors.json b/rest-api-spec/api/mtermvectors.json index b40ac72860b..58978b7d190 100644 --- a/rest-api-spec/api/mtermvectors.json +++ b/rest-api-spec/api/mtermvectors.json @@ -16,10 +16,6 @@ } }, "params" : { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "ids" : { "type" : "list", "description" : "A comma-separated list of documents ids. You must define ids as parameter or set \"ids\" or \"docs\" in the request body", @@ -97,4 +93,4 @@ } } -} \ No newline at end of file +} diff --git a/rest-api-spec/api/percolate.json b/rest-api-spec/api/percolate.json index 3ea1dca776e..e58655dea5a 100644 --- a/rest-api-spec/api/percolate.json +++ b/rest-api-spec/api/percolate.json @@ -23,10 +23,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "routing": { "type" : "list", "description" : "A comma-separated list of specific routing values" diff --git a/rest-api-spec/api/scroll.json b/rest-api-spec/api/scroll.json index 50cee5fbc2a..885b746d095 100644 --- a/rest-api-spec/api/scroll.json +++ b/rest-api-spec/api/scroll.json @@ -12,10 +12,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "scroll": { "type" : "duration", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" diff --git a/rest-api-spec/api/search.json b/rest-api-spec/api/search.json index 2a6ef4cc481..1f26c3e6a89 100644 --- a/rest-api-spec/api/search.json +++ b/rest-api-spec/api/search.json @@ -101,10 +101,6 @@ "type" : "list", "description" : "A comma-separated list of : pairs" }, - "source": { - "type" : "string", - "description" : "The URL-encoded request definition using the Query DSL (instead of using request body)" - }, "_source": { "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" diff --git a/rest-api-spec/api/search_exists.json b/rest-api-spec/api/search_exists.json index 10ac51eddfb..4f52b272673 100644 --- a/rest-api-spec/api/search_exists.json +++ b/rest-api-spec/api/search_exists.json @@ -41,10 +41,6 @@ "routing": { "type" : "string", "description" : "Specific routing value" - }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" } } }, diff --git a/rest-api-spec/api/search_template.json b/rest-api-spec/api/search_template.json index a210f3aeb33..a1122f19a1e 100644 --- a/rest-api-spec/api/search_template.json +++ b/rest-api-spec/api/search_template.json @@ -16,10 +16,6 @@ } }, "params" : { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "ignore_unavailable": { "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git a/rest-api-spec/api/suggest.json b/rest-api-spec/api/suggest.json index 974ae158077..ca0ae8b4f3c 100644 --- a/rest-api-spec/api/suggest.json +++ b/rest-api-spec/api/suggest.json @@ -33,10 +33,6 @@ "routing": { "type" : "string", "description" : "Specific routing value" - }, - "source": { - "type" : "string", - "description" : "The URL-encoded request definition (instead of using request body)" } } }, diff --git a/rest-api-spec/api/termvectors.json b/rest-api-spec/api/termvectors.json index b0f5af89e94..147d7971c9c 100644 --- a/rest-api-spec/api/termvectors.json +++ b/rest-api-spec/api/termvectors.json @@ -22,10 +22,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "term_statistics" : { "type" : "boolean", "description" : "Specifies if total term frequency and document frequency should be returned.", From ad813c02cbee06ce8944086f3b3f443251f0a9e7 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Wed, 29 Apr 2015 14:32:16 +0200 Subject: [PATCH 192/236] Docs: Add a Requirements section to the README mentioning recent Java Closes #10858 --- README.textile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.textile b/README.textile index e6057f022da..720f357406b 100644 --- a/README.textile +++ b/README.textile @@ -34,6 +34,10 @@ h2. Getting Started First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about. +h3. Requirements + +You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information. + h3. Installation * "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution. From ab83eb036bdaff0c45bc21d040b538d4cb20284d Mon Sep 17 00:00:00 2001 From: Antonio Bonuccelli Date: Wed, 29 Apr 2015 13:50:06 +0200 Subject: [PATCH 193/236] Docs: adding missing single quote on PUT index request Closes #10876 --- .../aggregations/metrics/scripted-metric-aggregation.asciidoc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc index 18dd6f280f9..a775d545409 100644 --- a/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -84,24 +84,28 @@ $ curl -XPUT 'http://localhost:9200/transactions/stock/1' -d ' "type": "sale", "amount": 80 } +' $ curl -XPUT 'http://localhost:9200/transactions/stock/2' -d ' { "type": "cost", "amount": 10 } +' $ curl -XPUT 'http://localhost:9200/transactions/stock/3' -d ' { "type": "cost", "amount": 30 } +' $ curl -XPUT 'http://localhost:9200/transactions/stock/4' -d ' { "type": "sale", "amount": 130 } +' -------------------------------------------------- Lets say that documents 1 and 3 end up on shard A and documents 2 and 4 end up on shard B. The following is a breakdown of what the aggregation result is From 891dfee0d605cec09366add40971438fab46ad77 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 29 Apr 2015 15:06:58 +0200 Subject: [PATCH 194/236] Fix some indentation issues. --- .../bucket/SingleBucketAggregator.java | 2 +- .../children/ParentToChildrenAggregator.java | 2 +- .../bucket/filter/FilterAggregator.java | 8 +- .../bucket/filters/FiltersAggregator.java | 10 +-- .../bucket/geogrid/GeoHashGridAggregator.java | 5 +- .../bucket/global/GlobalAggregator.java | 2 +- .../bucket/histogram/HistogramAggregator.java | 6 +- .../bucket/histogram/InternalHistogram.java | 2 +- .../bucket/missing/MissingAggregator.java | 4 +- .../bucket/nested/NestedAggregator.java | 78 +++++++++---------- .../nested/ReverseNestedAggregator.java | 30 +++---- .../bucket/range/RangeAggregator.java | 18 ++--- ...balOrdinalsSignificantTermsAggregator.java | 2 +- .../SignificantLongTermsAggregator.java | 7 +- .../SignificantStringTermsAggregator.java | 2 +- .../bucket/terms/DoubleTermsAggregator.java | 2 +- .../bucket/terms/LongTermsAggregator.java | 47 ++++++----- .../bucket/terms/StringTermsAggregator.java | 2 +- .../bucket/terms/TermsAggregatorFactory.java | 3 +- .../metrics/avg/AvgAggregator.java | 19 +++-- .../geobounds/GeoBoundsAggregator.java | 10 +-- .../metrics/geobounds/InternalGeoBounds.java | 4 +- .../metrics/max/MaxAggregator.java | 16 ++-- .../metrics/min/MinAggregator.java | 18 ++--- .../AbstractPercentilesAggregator.java | 4 +- .../PercentileRanksAggregator.java | 3 +- .../percentiles/PercentilesAggregator.java | 5 +- .../scripted/InternalScriptedMetric.java | 2 +- .../metrics/stats/StatsAggegator.java | 42 +++++----- .../stats/extended/InternalExtendedStats.java | 3 +- .../metrics/sum/SumAggregator.java | 20 ++--- .../valuecount/ValueCountAggregator.java | 6 +- 32 files changed, 186 insertions(+), 198 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java index 202f02c4a22..2e032640f98 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java @@ -33,7 +33,7 @@ import java.util.Map; public abstract class SingleBucketAggregator extends BucketsAggregator { protected SingleBucketAggregator(String name, AggregatorFactories factories, - AggregationContext aggregationContext, Aggregator parent, + AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index da4c2622331..0a8a136d160 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -65,7 +65,7 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext, Aggregator parent, String parentType, Filter childFilter, Filter parentFilter, - ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, + ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, long maxOrd, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); this.parentType = parentType; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java index da728f1ee04..6459ff83215 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java @@ -48,7 +48,7 @@ public class FilterAggregator extends SingleBucketAggregator { org.apache.lucene.search.Filter filter, AggregatorFactories factories, AggregationContext aggregationContext, - Aggregator parent, List reducers, + Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); this.filter = filter; @@ -61,12 +61,12 @@ public class FilterAggregator extends SingleBucketAggregator { // no need to provide deleted docs to the filter final Bits bits = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.getDocIdSet(ctx, null)); return new LeafBucketCollectorBase(sub, null) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { - if (bits.get(doc)) { + if (bits.get(doc)) { collectBucket(sub, doc, bucket); } - } + } }; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java index 931ead734fb..913d844cb6a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java @@ -61,7 +61,7 @@ public class FiltersAggregator extends BucketsAggregator { private final boolean keyed; public FiltersAggregator(String name, AggregatorFactories factories, List filters, boolean keyed, AggregationContext aggregationContext, - Aggregator parent, List reducers, Map metaData) + Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); this.keyed = keyed; @@ -78,14 +78,14 @@ public class FiltersAggregator extends BucketsAggregator { bits[i] = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].filter.getDocIdSet(ctx, null)); } return new LeafBucketCollectorBase(sub, null) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { - for (int i = 0; i < bits.length; i++) { - if (bits[i].get(doc)) { + for (int i = 0; i < bits.length; i++) { + if (bits[i].get(doc)) { collectBucket(sub, doc, bucketOrd(bucket, i)); } + } } - } }; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index c2c646f5702..36448a103c1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -51,9 +51,8 @@ public class GeoHashGridAggregator extends BucketsAggregator { private final LongHash bucketOrds; public GeoHashGridAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, - int requiredSize, - int shardSize, AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) - throws IOException { + int requiredSize, int shardSize, AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.requiredSize = requiredSize; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java index edecdd749dd..acc1464d349 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java @@ -53,7 +53,7 @@ public class GlobalAggregator extends SingleBucketAggregator { public void collect(int doc, long bucket) throws IOException { assert bucket == 0 : "global aggregator can only be a top level aggregator"; collectBucket(sub, doc, bucket); - } + } }; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index 63325c12aad..44342366b3f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -57,14 +57,12 @@ public class HistogramAggregator extends BucketsAggregator { private final InternalHistogram.Factory histogramFactory; private final LongHash bucketOrds; - private SortedNumericDocValues values; public HistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - InternalHistogram.Factory histogramFactory, - AggregationContext aggregationContext, - Aggregator parent, List reducers, Map metaData) throws IOException { + InternalHistogram.Factory histogramFactory, AggregationContext aggregationContext, + Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); this.rounding = rounding; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 5c10e0d3ad4..9e35ddb97b3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -191,7 +191,7 @@ public class InternalHistogram extends Inter public ValueFormatter getFormatter() { return formatter; - } + } public boolean getKeyed() { return keyed; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java index eb81c6a5ec1..b60c8510238 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java @@ -44,8 +44,8 @@ public class MissingAggregator extends SingleBucketAggregator { private final ValuesSource valuesSource; public MissingAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, - AggregationContext aggregationContext, - Aggregator parent, List reducers, Map metaData) throws IOException { + AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 459802f62a3..3356c089667 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -68,58 +68,58 @@ public class NestedAggregator extends SingleBucketAggregator { this.parentFilter = null; // In ES if parent is deleted, then also the children are deleted. Therefore acceptedDocs can also null here. DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, null); - if (DocIdSets.isEmpty(childDocIdSet)) { - childDocs = null; - } else { - childDocs = childDocIdSet.iterator(); - } + if (DocIdSets.isEmpty(childDocIdSet)) { + childDocs = null; + } else { + childDocs = childDocIdSet.iterator(); + } return new LeafBucketCollectorBase(sub, null) { - @Override + @Override public void collect(int parentDoc, long bucket) throws IOException { - // here we translate the parent doc to a list of its nested docs, and then call super.collect for evey one of them so they'll be collected + // here we translate the parent doc to a list of its nested docs, and then call super.collect for evey one of them so they'll be collected - // if parentDoc is 0 then this means that this parent doesn't have child docs (b/c these appear always before the parent doc), so we can skip: - if (parentDoc == 0 || childDocs == null) { - return; - } - if (parentFilter == null) { - // The aggs are instantiated in reverse, first the most inner nested aggs and lastly the top level aggs - // So at the time a nested 'nested' aggs is parsed its closest parent nested aggs hasn't been constructed. - // So the trick is to set at the last moment just before needed and we can use its child filter as the - // parent filter. + // if parentDoc is 0 then this means that this parent doesn't have child docs (b/c these appear always before the parent doc), so we can skip: + if (parentDoc == 0 || childDocs == null) { + return; + } + if (parentFilter == null) { + // The aggs are instantiated in reverse, first the most inner nested aggs and lastly the top level aggs + // So at the time a nested 'nested' aggs is parsed its closest parent nested aggs hasn't been constructed. + // So the trick is to set at the last moment just before needed and we can use its child filter as the + // parent filter. - // Additional NOTE: Before this logic was performed in the setNextReader(...) method, but the the assumption - // that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during - // aggs execution + // Additional NOTE: Before this logic was performed in the setNextReader(...) method, but the the assumption + // that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during + // aggs execution Filter parentFilterNotCached = findClosestNestedPath(parent()); - if (parentFilterNotCached == null) { - parentFilterNotCached = NonNestedDocsFilter.INSTANCE; - } - parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(parentFilterNotCached); + if (parentFilterNotCached == null) { + parentFilterNotCached = NonNestedDocsFilter.INSTANCE; + } + parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(parentFilterNotCached); BitDocIdSet parentSet = parentFilter.getDocIdSet(ctx); - if (DocIdSets.isEmpty(parentSet)) { - // There are no parentDocs in the segment, so return and set childDocs to null, so we exit early for future invocations. - childDocs = null; - return; - } else { - parentDocs = parentSet.bits(); - } - } + if (DocIdSets.isEmpty(parentSet)) { + // There are no parentDocs in the segment, so return and set childDocs to null, so we exit early for future invocations. + childDocs = null; + return; + } else { + parentDocs = parentSet.bits(); + } + } - final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); - int childDocId = childDocs.docID(); - if (childDocId <= prevParentDoc) { - childDocId = childDocs.advance(prevParentDoc + 1); - } + final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); + int childDocId = childDocs.docID(); + if (childDocId <= prevParentDoc) { + childDocId = childDocs.advance(prevParentDoc + 1); + } - for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) { + for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) { collectBucket(sub, childDocId, bucket); } - } + } }; } - + @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index b64abf55b10..5644c6acf1f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -72,29 +72,29 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { // must belong to parent docs that is alive. For this reason acceptedDocs can be null here. BitDocIdSet docIdSet = parentFilter.getDocIdSet(ctx); final BitSet parentDocs; - if (DocIdSets.isEmpty(docIdSet)) { + if (DocIdSets.isEmpty(docIdSet)) { return LeafBucketCollector.NO_OP_COLLECTOR; - } else { - parentDocs = docIdSet.bits(); - } + } else { + parentDocs = docIdSet.bits(); + } final LongIntOpenHashMap bucketOrdToLastCollectedParentDoc = new LongIntOpenHashMap(32); return new LeafBucketCollectorBase(sub, null) { - @Override + @Override public void collect(int childDoc, long bucket) throws IOException { - // fast forward to retrieve the parentDoc this childDoc belongs to - final int parentDoc = parentDocs.nextSetBit(childDoc); - assert childDoc <= parentDoc && parentDoc != DocIdSetIterator.NO_MORE_DOCS; + // fast forward to retrieve the parentDoc this childDoc belongs to + final int parentDoc = parentDocs.nextSetBit(childDoc); + assert childDoc <= parentDoc && parentDoc != DocIdSetIterator.NO_MORE_DOCS; if (bucketOrdToLastCollectedParentDoc.containsKey(bucket)) { - int lastCollectedParentDoc = bucketOrdToLastCollectedParentDoc.lget(); - if (parentDoc > lastCollectedParentDoc) { + int lastCollectedParentDoc = bucketOrdToLastCollectedParentDoc.lget(); + if (parentDoc > lastCollectedParentDoc) { collectBucket(sub, parentDoc, bucket); - bucketOrdToLastCollectedParentDoc.lset(parentDoc); - } - } else { + bucketOrdToLastCollectedParentDoc.lset(parentDoc); + } + } else { collectBucket(sub, parentDoc, bucket); bucketOrdToLastCollectedParentDoc.put(bucket, parentDoc); - } - } + } + } }; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 14fe9ddd3bc..d6d961a5998 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -105,7 +105,7 @@ public class RangeAggregator extends BucketsAggregator { List ranges, boolean keyed, AggregationContext aggregationContext, - Aggregator parent, List reducers, + Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); @@ -140,15 +140,15 @@ public class RangeAggregator extends BucketsAggregator { final LeafBucketCollector sub) throws IOException { final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { - values.setDocument(doc); - final int valuesCount = values.count(); - for (int i = 0, lo = 0; i < valuesCount; ++i) { - final double value = values.valueAt(i); + values.setDocument(doc); + final int valuesCount = values.count(); + for (int i = 0, lo = 0; i < valuesCount; ++i) { + final double value = values.valueAt(i); lo = collect(doc, value, bucket, lo); - } - } + } + } private int collect(int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException { int lo = lowBound, hi = ranges.length - 1; // all candidates are between these indexes @@ -267,7 +267,7 @@ public class RangeAggregator extends BucketsAggregator { ValueFormat format, AggregationContext context, Aggregator parent, - InternalRange.Factory factory, List reducers, + InternalRange.Factory factory, List reducers, Map metaData) throws IOException { super(name, context, parent, reducers, metaData); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java index 7e16dc29073..492167f1735 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java @@ -50,7 +50,7 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri public GlobalOrdinalsSignificantTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, - IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, + IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, List reducers, Map metaData) throws IOException { super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent, diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java index f67c533956c..329f5f566f5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java @@ -46,10 +46,9 @@ import java.util.Map; public class SignificantLongTermsAggregator extends LongTermsAggregator { public SignificantLongTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @Nullable ValueFormat format, - BucketCountThresholds bucketCountThresholds, - AggregationContext aggregationContext, - Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, IncludeExclude.LongFilter includeExclude, - List reducers, Map metaData) throws IOException { + BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, + Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, IncludeExclude.LongFilter includeExclude, + List reducers, Map metaData) throws IOException { super(name, factories, valuesSource, format, null, bucketCountThresholds, aggregationContext, parent, SubAggCollectionMode.DEPTH_FIRST, false, includeExclude, reducers, metaData); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java index 2423f228451..a49f18734ee 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java @@ -50,7 +50,7 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator { public SignificantStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, - SignificantTermsAggregatorFactory termsAggFactory, List reducers, Map metaData) + SignificantTermsAggregatorFactory termsAggFactory, List reducers, Map metaData) throws IOException { super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent, diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java index ea98734b94e..9250495524e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java @@ -43,7 +43,7 @@ import java.util.Map; public class DoubleTermsAggregator extends LongTermsAggregator { public DoubleTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @Nullable ValueFormat format, - Terms.Order order, BucketCountThresholds bucketCountThresholds, + Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, List reducers, Map metaData) throws IOException { super(name, factories, valuesSource, format, order, bucketCountThresholds, aggregationContext, parent, collectionMode, diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java index ef1150f1d7e..ea32e388fe6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java @@ -65,7 +65,7 @@ public class LongTermsAggregator extends TermsAggregator { this.longFilter = longFilter; bucketOrds = new LongHash(1, aggregationContext.bigArrays()); } - + @Override public boolean needsScores() { return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); @@ -80,30 +80,30 @@ public class LongTermsAggregator extends TermsAggregator { final LeafBucketCollector sub) throws IOException { final SortedNumericDocValues values = getValues(valuesSource, ctx); return new LeafBucketCollectorBase(sub, values) { - @Override - public void collect(int doc, long owningBucketOrdinal) throws IOException { - assert owningBucketOrdinal == 0; - values.setDocument(doc); - final int valuesCount = values.count(); + @Override + public void collect(int doc, long owningBucketOrdinal) throws IOException { + assert owningBucketOrdinal == 0; + values.setDocument(doc); + final int valuesCount = values.count(); - long previous = Long.MAX_VALUE; - for (int i = 0; i < valuesCount; ++i) { - final long val = values.valueAt(i); - if (previous != val || i == 0) { - if ((longFilter == null) || (longFilter.accept(val))) { - long bucketOrdinal = bucketOrds.add(val); - if (bucketOrdinal < 0) { // already seen - bucketOrdinal = - 1 - bucketOrdinal; + long previous = Long.MAX_VALUE; + for (int i = 0; i < valuesCount; ++i) { + final long val = values.valueAt(i); + if (previous != val || i == 0) { + if ((longFilter == null) || (longFilter.accept(val))) { + long bucketOrdinal = bucketOrds.add(val); + if (bucketOrdinal < 0) { // already seen + bucketOrdinal = - 1 - bucketOrdinal; collectExistingBucket(sub, doc, bucketOrdinal); - } else { + } else { collectBucket(sub, doc, bucketOrdinal); - } + } + } + + previous = val; + } } - - previous = val; } - } - } }; } @@ -152,7 +152,7 @@ public class LongTermsAggregator extends TermsAggregator { list[i] = bucket; otherDocCount -= bucket.docCount; } - + runDeferredCollections(survivingBucketOrds); //Now build the aggs @@ -160,13 +160,12 @@ public class LongTermsAggregator extends TermsAggregator { list[i].aggregations = bucketAggregations(list[i].bucketOrd); list[i].docCountError = 0; } - + return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, reducers(), metaData()); } - - + @Override public InternalAggregation buildEmptyAggregation() { return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java index f0bbcbef924..6f80142da27 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java @@ -51,7 +51,7 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator { public StringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource, Terms.Order order, BucketCountThresholds bucketCountThresholds, - IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext, + IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List reducers, Map metaData) throws IOException { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 664211bc74c..e12e4227fdf 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -243,8 +243,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory reducers, Map metaData) throws IOException { + AggregationContext context, Aggregator parent, List reducers, Map metaData) throws IOException { super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; @@ -75,22 +74,22 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { counts = bigArrays.grow(counts, bucket + 1); sums = bigArrays.grow(sums, bucket + 1); - values.setDocument(doc); - final int valueCount = values.count(); + values.setDocument(doc); + final int valueCount = values.count(); counts.increment(bucket, valueCount); - double sum = 0; - for (int i = 0; i < valueCount; i++) { - sum += values.valueAt(i); - } + double sum = 0; + for (int i = 0; i < valueCount; i++) { + sum += values.valueAt(i); + } sums.increment(bucket, sum); } }; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java index 53e5c534094..464d0a339a8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java @@ -51,10 +51,9 @@ public final class GeoBoundsAggregator extends MetricsAggregator { DoubleArray negLefts; DoubleArray negRights; - protected GeoBoundsAggregator(String name, AggregationContext aggregationContext, - Aggregator parent, - ValuesSource.GeoPoint valuesSource, boolean wrapLongitude, List reducers, Map metaData) - throws IOException { + protected GeoBoundsAggregator(String name, AggregationContext aggregationContext, Aggregator parent, + ValuesSource.GeoPoint valuesSource, boolean wrapLongitude, List reducers, + Map metaData) throws IOException { super(name, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.wrapLongitude = wrapLongitude; @@ -184,8 +183,7 @@ public final class GeoBoundsAggregator extends MetricsAggregator { @Override protected Aggregator doCreateInternal(ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, - Aggregator parent, - boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + Aggregator parent, boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { return new GeoBoundsAggregator(name, aggregationContext, parent, valuesSource, wrapLongitude, reducers, metaData); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java index d0dbebf7a8e..fcf92009752 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java @@ -57,8 +57,8 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo } InternalGeoBounds(String name, double top, double bottom, double posLeft, double posRight, - double negLeft, double negRight, - boolean wrapLongitude, List reducers, Map metaData) { + double negLeft, double negRight, boolean wrapLongitude, + List reducers, Map metaData) { super(name, reducers, metaData); this.top = top; this.bottom = bottom; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java index 0c97ba38ac3..7ade492660e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java @@ -53,8 +53,8 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray maxes; public MaxAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, - Aggregator parent, List reducers, Map metaData) throws IOException { + AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; @@ -80,16 +80,16 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { final NumericDoubleValues values = MultiValueMode.MAX.select(allValues, Double.NEGATIVE_INFINITY); return new LeafBucketCollectorBase(sub, allValues) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { if (bucket >= maxes.size()) { - long from = maxes.size(); + long from = maxes.size(); maxes = bigArrays.grow(maxes, bucket + 1); - maxes.fill(from, maxes.size(), Double.NEGATIVE_INFINITY); - } - final double value = values.get(doc); + maxes.fill(from, maxes.size(), Double.NEGATIVE_INFINITY); + } + final double value = values.get(doc); double max = maxes.get(bucket); - max = Math.max(max, value); + max = Math.max(max, value); maxes.set(bucket, max); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java index c80b7b8f064..cf832cabe1f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java @@ -53,8 +53,8 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray mins; public MinAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, - Aggregator parent, List reducers, Map metaData) throws IOException { + AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; if (valuesSource != null) { @@ -74,22 +74,22 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); final NumericDoubleValues values = MultiValueMode.MIN.select(allValues, Double.POSITIVE_INFINITY); return new LeafBucketCollectorBase(sub, allValues) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { if (bucket >= mins.size()) { - long from = mins.size(); + long from = mins.size(); mins = bigArrays.grow(mins, bucket + 1); - mins.fill(from, mins.size(), Double.POSITIVE_INFINITY); - } - final double value = values.get(doc); + mins.fill(from, mins.size(), Double.POSITIVE_INFINITY); + } + final double value = values.get(doc); double min = mins.get(bucket); - min = Math.min(min, value); + min = Math.min(min, value); mins.set(bucket, min); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java index 8dd75b59110..a73639a3d7f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java @@ -55,8 +55,8 @@ public abstract class AbstractPercentilesAggregator extends NumericMetricsAggreg public AbstractPercentilesAggregator(String name, ValuesSource.Numeric valuesSource, AggregationContext context, Aggregator parent, double[] keys, double compression, boolean keyed, - @Nullable ValueFormatter formatter, List reducers, - Map metaData) throws IOException { + @Nullable ValueFormatter formatter, List reducers, + Map metaData) throws IOException { super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.keyed = keyed; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java index 9d14e3b70c3..380482b8ab3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java @@ -96,8 +96,7 @@ public class PercentileRanksAggregator extends AbstractPercentilesAggregator { protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { return new PercentileRanksAggregator(name, valuesSource, aggregationContext, parent, values, compression, - keyed, - config.formatter(), reducers, metaData); + keyed, config.formatter(), reducers, metaData); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java index 1a9a839bb75..2a42dc94620 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java @@ -40,7 +40,7 @@ import java.util.Map; public class PercentilesAggregator extends AbstractPercentilesAggregator { public PercentilesAggregator(String name, Numeric valuesSource, AggregationContext context, - Aggregator parent, double[] percents, + Aggregator parent, double[] percents, double compression, boolean keyed, @Nullable ValueFormatter formatter, List reducers, Map metaData) throws IOException { super(name, valuesSource, context, parent, percents, compression, keyed, formatter, reducers, metaData); @@ -97,8 +97,7 @@ public class PercentilesAggregator extends AbstractPercentilesAggregator { protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { return new PercentilesAggregator(name, valuesSource, aggregationContext, parent, percents, compression, - keyed, - config.formatter(), reducers, metaData); + keyed, config.formatter(), reducers, metaData); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index edcfb811660..c67e8f3853d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -106,7 +106,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement aggregation = aggregationObjects; } return new InternalScriptedMetric(firstAggregation.getName(), aggregation, firstAggregation.scriptLang, firstAggregation.scriptType, - firstAggregation.reduceScript, firstAggregation.reduceParams, reducers(), getMetaData()); + firstAggregation.reduceScript, firstAggregation.reduceParams, reducers(), getMetaData()); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java index 8a454b6cb73..7b1f6c84a2d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java @@ -57,8 +57,8 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue { public StatsAggegator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, - Aggregator parent, List reducers, Map metaData) throws IOException { + AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; if (valuesSource != null) { @@ -83,35 +83,35 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { if (bucket >= counts.size()) { - final long from = counts.size(); + final long from = counts.size(); final long overSize = BigArrays.overSize(bucket + 1); - counts = bigArrays.resize(counts, overSize); - sums = bigArrays.resize(sums, overSize); - mins = bigArrays.resize(mins, overSize); - maxes = bigArrays.resize(maxes, overSize); - mins.fill(from, overSize, Double.POSITIVE_INFINITY); - maxes.fill(from, overSize, Double.NEGATIVE_INFINITY); - } + counts = bigArrays.resize(counts, overSize); + sums = bigArrays.resize(sums, overSize); + mins = bigArrays.resize(mins, overSize); + maxes = bigArrays.resize(maxes, overSize); + mins.fill(from, overSize, Double.POSITIVE_INFINITY); + maxes.fill(from, overSize, Double.NEGATIVE_INFINITY); + } - values.setDocument(doc); - final int valuesCount = values.count(); + values.setDocument(doc); + final int valuesCount = values.count(); counts.increment(bucket, valuesCount); - double sum = 0; + double sum = 0; double min = mins.get(bucket); double max = maxes.get(bucket); - for (int i = 0; i < valuesCount; i++) { - double value = values.valueAt(i); - sum += value; - min = Math.min(min, value); - max = Math.max(max, value); - } + for (int i = 0; i < valuesCount; i++) { + double value = values.valueAt(i); + sum += value; + min = Math.min(min, value); + max = Math.max(max, value); + } sums.increment(bucket, sum); mins.set(bucket, min); maxes.set(bucket, max); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java index 6785d6f35eb..7fac72d7b05 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java @@ -69,8 +69,7 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStat InternalExtendedStats() {} // for serialization public InternalExtendedStats(String name, long count, double sum, double min, double max, double sumOfSqrs, - double sigma, - @Nullable ValueFormatter formatter, List reducers, Map metaData) { + double sigma, @Nullable ValueFormatter formatter, List reducers, Map metaData) { super(name, count, sum, min, max, formatter, reducers, metaData); this.sumOfSqrs = sumOfSqrs; this.sigma = sigma; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java index af834af7f7b..4c7981422f3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java @@ -51,8 +51,8 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray sums; public SumAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, - Aggregator parent, List reducers, Map metaData) throws IOException { + AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; @@ -71,19 +71,19 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { sums = bigArrays.grow(sums, bucket + 1); - values.setDocument(doc); - final int valuesCount = values.count(); - double sum = 0; - for (int i = 0; i < valuesCount; i++) { - sum += values.valueAt(i); - } + values.setDocument(doc); + final int valuesCount = values.count(); + double sum = 0; + for (int i = 0; i < valuesCount; i++) { + sum += values.valueAt(i); + } sums.increment(bucket, sum); } }; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java index 2bd7b505135..fedd7e09a2b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java @@ -70,17 +70,17 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedBinaryDocValues values = valuesSource.bytesValues(ctx); return new LeafBucketCollectorBase(sub, values) { - @Override + @Override public void collect(int doc, long bucket) throws IOException { counts = bigArrays.grow(counts, bucket + 1); values.setDocument(doc); counts.increment(bucket, values.count()); - } + } }; } From 7aa4c7e256ed2083fa5a10caaf06c578969d118e Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Wed, 29 Apr 2015 15:12:31 +0200 Subject: [PATCH 195/236] Docs: Removed a reference to index_name from the array mapping page --- docs/reference/mapping/types/array-type.asciidoc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/reference/mapping/types/array-type.asciidoc b/docs/reference/mapping/types/array-type.asciidoc index 3f3d832d66f..f2dc40ed094 100644 --- a/docs/reference/mapping/types/array-type.asciidoc +++ b/docs/reference/mapping/types/array-type.asciidoc @@ -67,8 +67,3 @@ the fact that the following JSON document is perfectly fine: } -------------------------------------------------- -Note also, that thanks to the fact that we used the `index_name` to use -the non plural form (`tag` instead of `tags`), we can actually refer to -the field using the `index_name` as well. For example, we can execute a -query using `tweet.tags:wow` or `tweet.tag:wow`. We could, of course, -name the field as `tag` and skip the `index_name` all together). From ccca0386ef3bc446ea1f63dcfdd7811f0567e0df Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 29 Apr 2015 15:14:23 +0200 Subject: [PATCH 196/236] Other indentation fixes --- .../search/aggregations/bucket/nested/NestedAggregator.java | 2 +- .../aggregations/bucket/nested/ReverseNestedAggregator.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 3356c089667..79da93d7301 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -66,7 +66,7 @@ public class NestedAggregator extends SingleBucketAggregator { public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { // Reset parentFilter, so we resolve the parentDocs for each new segment being searched this.parentFilter = null; - // In ES if parent is deleted, then also the children are deleted. Therefore acceptedDocs can also null here. + // In ES if parent is deleted, then also the children are deleted. Therefore acceptedDocs can also null here. DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, null); if (DocIdSets.isEmpty(childDocIdSet)) { childDocs = null; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 5644c6acf1f..9869c6d6a0a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -68,8 +68,8 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { @Override protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - // In ES if parent is deleted, then also the children are deleted, so the child docs this agg receives - // must belong to parent docs that is alive. For this reason acceptedDocs can be null here. + // In ES if parent is deleted, then also the children are deleted, so the child docs this agg receives + // must belong to parent docs that is alive. For this reason acceptedDocs can be null here. BitDocIdSet docIdSet = parentFilter.getDocIdSet(ctx); final BitSet parentDocs; if (DocIdSets.isEmpty(docIdSet)) { From 4088dd38cbff19462e610db853ba1e54ee9785e4 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Mon, 2 Mar 2015 10:51:01 +0100 Subject: [PATCH 197/236] Write state also on data nodes if not master eligible When a node was a data node only then the index state was not written. In case this node connected to a master that did not have the index in the cluster state, for example because a master was restarted and the data folder was lost, then the indices were not imported as dangling but instead deleted. This commit makes sure that index state for data nodes is also written if they have at least one shard of this index allocated. closes #8823 closes #9952 --- .../gateway/GatewayMetaState.java | 176 +++++++-- .../gateway/GatewayMetaStateTests.java | 249 ++++++++++++ .../gateway/MetaDataWriteDataNodesTests.java | 354 ++++++++++++++++++ 3 files changed, 743 insertions(+), 36 deletions(-) create mode 100644 src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java create mode 100644 src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java diff --git a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 158a3df5d91..ca8edebc571 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -27,9 +28,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.DjbHashFunction; -import org.elasticsearch.cluster.routing.HashFunction; -import org.elasticsearch.cluster.routing.SimpleHashFunction; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -43,6 +42,7 @@ import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.*; /** * @@ -57,7 +57,9 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL private final DanglingIndicesState danglingIndicesState; @Nullable - private volatile MetaData currentMetaData; + private volatile MetaData previousMetaData; + + private volatile ImmutableSet previouslyWrittenIndices = ImmutableSet.of(); @Inject public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, @@ -76,7 +78,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) { nodeEnv.ensureAtomicMoveSupported(); } - if (DiscoveryNode.masterNode(settings)) { + if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) { try { ensureNoPre019State(); pre20Upgrade(); @@ -96,10 +98,12 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL @Override public void clusterChanged(ClusterChangedEvent event) { + Set relevantIndices = new HashSet<>(); final ClusterState state = event.state(); if (state.blocks().disableStatePersistence()) { // reset the current metadata, we need to start fresh... - this.currentMetaData = null; + this.previousMetaData = null; + previouslyWrittenIndices= ImmutableSet.of(); return; } @@ -107,44 +111,47 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL // we don't check if metaData changed, since we might be called several times and we need to check dangling... boolean success = true; - // only applied to master node, writing the global and index level states - if (state.nodes().localNode().masterNode()) { + // write the state if this node is a master eligible node or if it is a data node and has shards allocated on it + if (state.nodes().localNode().masterNode() || state.nodes().localNode().dataNode()) { // check if the global state changed? - if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) { + if (previousMetaData == null || !MetaData.isGlobalStateEquals(previousMetaData, newMetaData)) { try { metaStateService.writeGlobalState("changed", newMetaData); + // we determine if or if not we write meta data on data only nodes by looking at the shard routing + // and only write if a shard of this index is allocated on this node + // however, closed indices do not appear in the shard routing. if the meta data for a closed index is + // updated it will therefore not be written in case the list of previouslyWrittenIndices is empty (because state + // persistence was disabled or the node was restarted), see getRelevantIndicesOnDataOnlyNode(). + // we therefore have to check here if we have shards on disk and add their indices to the previouslyWrittenIndices list + if (isDataOnlyNode(state)) { + ImmutableSet.Builder previouslyWrittenIndicesBuilder = ImmutableSet.builder(); + for (IndexMetaData indexMetaData : newMetaData) { + IndexMetaData indexMetaDataOnDisk = null; + if (indexMetaData.state().equals(IndexMetaData.State.CLOSE)) { + try { + indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.index()); + } catch (IOException ex) { + throw new ElasticsearchException("failed to load index state", ex); + } + } + if (indexMetaDataOnDisk != null) { + previouslyWrittenIndicesBuilder.add(indexMetaDataOnDisk.index()); + } + } + previouslyWrittenIndices = previouslyWrittenIndicesBuilder.addAll(previouslyWrittenIndices).build(); + } } catch (Throwable e) { success = false; } } + Iterable writeInfo; + relevantIndices = getRelevantIndices(event.state(), previouslyWrittenIndices); + writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData()); // check and write changes in indices - for (IndexMetaData indexMetaData : newMetaData) { - String writeReason = null; - IndexMetaData currentIndexMetaData; - if (currentMetaData == null) { - // a new event..., check from the state stored - try { - currentIndexMetaData = metaStateService.loadIndexState(indexMetaData.index()); - } catch (IOException ex) { - throw new ElasticsearchException("failed to load index state", ex); - } - } else { - currentIndexMetaData = currentMetaData.index(indexMetaData.index()); - } - if (currentIndexMetaData == null) { - writeReason = "freshly created"; - } else if (currentIndexMetaData.version() != indexMetaData.version()) { - writeReason = "version changed from [" + currentIndexMetaData.version() + "] to [" + indexMetaData.version() + "]"; - } - - // we update the writeReason only if we really need to write it - if (writeReason == null) { - continue; - } - + for (IndexMetaWriteInfo indexMetaWrite : writeInfo) { try { - metaStateService.writeIndex(writeReason, indexMetaData, currentIndexMetaData); + metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData, indexMetaWrite.previousMetaData); } catch (Throwable e) { success = false; } @@ -154,10 +161,29 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL danglingIndicesState.processDanglingIndices(newMetaData); if (success) { - currentMetaData = newMetaData; + previousMetaData = newMetaData; + ImmutableSet.Builder builder= ImmutableSet.builder(); + previouslyWrittenIndices = builder.addAll(relevantIndices).build(); } } + public static Set getRelevantIndices(ClusterState state, ImmutableSet previouslyWrittenIndices) { + Set relevantIndices; + if (isDataOnlyNode(state)) { + relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previouslyWrittenIndices); + } else if (state.nodes().localNode().masterNode() == true) { + relevantIndices = getRelevantIndicesForMasterEligibleNode(state); + } else { + relevantIndices = Collections.emptySet(); + } + return relevantIndices; + } + + + protected static boolean isDataOnlyNode(ClusterState state) { + return ((state.nodes().localNode().masterNode() == false) && state.nodes().localNode().dataNode()); + } + /** * Throws an IAE if a pre 0.19 state is detected */ @@ -229,7 +255,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } } - if (hasCustomPre20HashFunction|| pre20UseType != null) { + if (hasCustomPre20HashFunction || pre20UseType != null) { logger.warn("Settings [{}] and [{}] are deprecated. Index settings from your old indices have been updated to record the fact that they " + "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE); } @@ -251,4 +277,82 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } } + + /** + * Loads the current meta state for each index in the new cluster state and checks if it has to be persisted. + * Each index state that should be written to disk will be returned. This is only run for data only nodes. + * It will return only the states for indices that actually have a shard allocated on the current node. + * + * @param previouslyWrittenIndices A list of indices for which the state was already written before + * @param potentiallyUnwrittenIndices The list of indices for which state should potentially be written + * @param previousMetaData The last meta data we know of. meta data for all indices in previouslyWrittenIndices list is persisted now + * @param newMetaData The new metadata + * @return iterable over all indices states that should be written to disk + */ + public static Iterable resolveStatesToBeWritten(ImmutableSet previouslyWrittenIndices, Set potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) { + List indicesToWrite = new ArrayList<>(); + for (String index : potentiallyUnwrittenIndices) { + IndexMetaData newIndexMetaData = newMetaData.index(index); + IndexMetaData previousIndexMetaData = previousMetaData == null ? null : previousMetaData.index(index); + String writeReason = null; + if (previouslyWrittenIndices.contains(index) == false || previousIndexMetaData == null) { + writeReason = "freshly created"; + } else if (previousIndexMetaData.version() != newIndexMetaData.version()) { + writeReason = "version changed from [" + previousIndexMetaData.version() + "] to [" + newIndexMetaData.version() + "]"; + } + if (writeReason != null) { + indicesToWrite.add(new GatewayMetaState.IndexMetaWriteInfo(newIndexMetaData, previousIndexMetaData, writeReason)); + } + } + return indicesToWrite; + } + + public static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, ImmutableSet previouslyWrittenIndices) { + RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().localNodeId()); + if (newRoutingNode == null) { + throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state"); + } + Set indices = new HashSet<>(); + for (MutableShardRouting routing : newRoutingNode) { + indices.add(routing.index()); + } + // we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously + for (IndexMetaData indexMetaData : state.metaData()) { + if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && state.metaData().getIndices().get(indexMetaData.getIndex()).state().equals(IndexMetaData.State.CLOSE)) { + indices.add(indexMetaData.getIndex()); + } + } + return indices; + } + + public static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) { + Set relevantIndices; + relevantIndices = new HashSet<>(); + // we have to iterate over the metadata to make sure we also capture closed indices + for (IndexMetaData indexMetaData : state.metaData()) { + relevantIndices.add(indexMetaData.getIndex()); + } + return relevantIndices; + } + + + public static class IndexMetaWriteInfo { + final IndexMetaData newMetaData; + final String reason; + final IndexMetaData previousMetaData; + + public IndexMetaWriteInfo(IndexMetaData newMetaData, IndexMetaData previousMetaData, String reason) { + this.newMetaData = newMetaData; + this.reason = reason; + this.previousMetaData = previousMetaData; + } + + public IndexMetaData getNewMetaData() { + return newMetaData; + } + + public String getReason() { + return reason; + } + } } diff --git a/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java new file mode 100644 index 00000000000..06b958d47aa --- /dev/null +++ b/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway; + +import com.google.common.collect.ImmutableSet; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.test.ElasticsearchAllocationTestCase; +import org.junit.Test; + +import java.util.*; + +import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; +import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; +import static org.hamcrest.Matchers.equalTo; + +/** + * Test IndexMetaState for master and data only nodes return correct list of indices to write + * There are many parameters: + * - meta state is not in memory + * - meta state is in memory with old version/ new version + * - meta state is in memory with new version + * - version changed in cluster state event/ no change + * - node is data only node + * - node is master eligible + * for data only nodes: shard initializing on shard + */ +public class GatewayMetaStateTests extends ElasticsearchAllocationTestCase { + + ClusterChangedEvent generateEvent(boolean initializing, boolean versionChanged, boolean masterEligible) { + //ridiculous settings to make sure we don't run into uninitialized because fo default + AllocationService strategy = createAllocationService(settingsBuilder() + .put("cluster.routing.allocation.concurrent_recoveries", 100) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) + .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) + .build()); + ClusterState newClusterState, previousClusterState; + MetaData metaDataOldClusterState = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2)) + .build(); + + RoutingTable routingTableOldClusterState = RoutingTable.builder() + .addAsNew(metaDataOldClusterState.index("test")) + .build(); + + // assign all shards + ClusterState init = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaDataOldClusterState) + .routingTable(routingTableOldClusterState) + .nodes(generateDiscoveryNodes(masterEligible)) + .build(); + // new cluster state will have initializing shards on node 1 + RoutingTable routingTableNewClusterState = strategy.reroute(init).routingTable(); + if (initializing == false) { + // pretend all initialized, nothing happened + ClusterState temp = ClusterState.builder(init).routingTable(routingTableNewClusterState).metaData(metaDataOldClusterState).build(); + routingTableNewClusterState = strategy.applyStartedShards(temp, temp.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); + routingTableOldClusterState = routingTableNewClusterState; + + } else { + // nothing to do, we have one routing table with unassigned and one with initializing + } + + // create new meta data either with version changed or not + MetaData metaDataNewClusterState = MetaData.builder() + .put(init.metaData().index("test"), versionChanged) + .build(); + + + // create the cluster states with meta data and routing tables as computed before + previousClusterState = ClusterState.builder(init) + .metaData(metaDataOldClusterState) + .routingTable(routingTableOldClusterState) + .nodes(generateDiscoveryNodes(masterEligible)) + .build(); + newClusterState = ClusterState.builder(previousClusterState).routingTable(routingTableNewClusterState).metaData(metaDataNewClusterState).version(previousClusterState.getVersion() + 1).build(); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newClusterState, previousClusterState); + assertThat(event.state().version(), equalTo(event.previousState().version() + 1)); + return event; + } + + ClusterChangedEvent generateCloseEvent(boolean masterEligible) { + //ridiculous settings to make sure we don't run into uninitialized because fo default + AllocationService strategy = createAllocationService(settingsBuilder() + .put("cluster.routing.allocation.concurrent_recoveries", 100) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) + .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) + .build()); + ClusterState newClusterState, previousClusterState; + MetaData metaDataIndexCreated = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2)) + .build(); + + RoutingTable routingTableIndexCreated = RoutingTable.builder() + .addAsNew(metaDataIndexCreated.index("test")) + .build(); + + // assign all shards + ClusterState init = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaDataIndexCreated) + .routingTable(routingTableIndexCreated) + .nodes(generateDiscoveryNodes(masterEligible)) + .build(); + RoutingTable routingTableInitializing = strategy.reroute(init).routingTable(); + ClusterState temp = ClusterState.builder(init).routingTable(routingTableInitializing).build(); + RoutingTable routingTableStarted = strategy.applyStartedShards(temp, temp.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); + + // create new meta data either with version changed or not + MetaData metaDataStarted = MetaData.builder() + .put(init.metaData().index("test"), true) + .build(); + + // create the cluster states with meta data and routing tables as computed before + MetaData metaDataClosed = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).state(IndexMetaData.State.CLOSE).numberOfShards(5).numberOfReplicas(2)).version(metaDataStarted.version() + 1) + .build(); + previousClusterState = ClusterState.builder(init) + .metaData(metaDataStarted) + .routingTable(routingTableStarted) + .nodes(generateDiscoveryNodes(masterEligible)) + .build(); + newClusterState = ClusterState.builder(previousClusterState) + .routingTable(routingTableIndexCreated) + .metaData(metaDataClosed) + .version(previousClusterState.getVersion() + 1).build(); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newClusterState, previousClusterState); + assertThat(event.state().version(), equalTo(event.previousState().version() + 1)); + return event; + } + + private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) { + Map masterNodeAttributes = new HashMap<>(); + masterNodeAttributes.put("master", "true"); + masterNodeAttributes.put("data", "true"); + Map dataNodeAttributes = new HashMap<>(); + dataNodeAttributes.put("master", "false"); + dataNodeAttributes.put("data", "true"); + return DiscoveryNodes.builder().put(newNode("node1", masterEligible ? masterNodeAttributes : dataNodeAttributes)).put(newNode("master_node", masterNodeAttributes)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node"); + } + + public void assertState(ClusterChangedEvent event, + boolean stateInMemory, + boolean expectMetaData) throws Exception { + MetaData inMemoryMetaData = null; + ImmutableSet oldIndicesList = ImmutableSet.of(); + if (stateInMemory) { + inMemoryMetaData = event.previousState().metaData(); + ImmutableSet.Builder relevantIndices = ImmutableSet.builder(); + oldIndicesList = relevantIndices.addAll(GatewayMetaState.getRelevantIndices(event.previousState(), oldIndicesList)).build(); + } + Set newIndicesList = GatewayMetaState.getRelevantIndices(event.state(), oldIndicesList); + // third, get the actual write info + Iterator indices = GatewayMetaState.resolveStatesToBeWritten(oldIndicesList, newIndicesList, inMemoryMetaData, event.state().metaData()).iterator(); + + if (expectMetaData) { + assertThat(indices.hasNext(), equalTo(true)); + assertThat(indices.next().getNewMetaData().index(), equalTo("test")); + assertThat(indices.hasNext(), equalTo(false)); + } else { + assertThat(indices.hasNext(), equalTo(false)); + } + } + + @Test + public void testVersionChangeIsAlwaysWritten() throws Exception { + // test that version changes are always written + boolean initializing = randomBoolean(); + boolean versionChanged = true; + boolean stateInMemory = randomBoolean(); + boolean masterEligible = randomBoolean(); + boolean expectMetaData = true; + ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); + assertState(event, stateInMemory, expectMetaData); + } + + @Test + public void testNewShardsAlwaysWritten() throws Exception { + // make sure new shards on data only node always written + boolean initializing = true; + boolean versionChanged = randomBoolean(); + boolean stateInMemory = randomBoolean(); + boolean masterEligible = false; + boolean expectMetaData = true; + ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); + assertState(event, stateInMemory, expectMetaData); + } + + @Test + public void testAllUpToDateNothingWritten() throws Exception { + // make sure state is not written again if we wrote already + boolean initializing = false; + boolean versionChanged = false; + boolean stateInMemory = true; + boolean masterEligible = randomBoolean(); + boolean expectMetaData = false; + ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); + assertState(event, stateInMemory, expectMetaData); + } + + @Test + public void testNoWriteIfNothingChanged() throws Exception { + boolean initializing = false; + boolean versionChanged = false; + boolean stateInMemory = true; + boolean masterEligible = randomBoolean(); + boolean expectMetaData = false; + ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); + ClusterChangedEvent newEventWithNothingChanged = new ClusterChangedEvent("test cluster state", event.state(), event.state()); + assertState(newEventWithNothingChanged, stateInMemory, expectMetaData); + } + + @Test + public void testWriteClosedIndex() throws Exception { + // test that the closing of an index is written also on data only node + boolean masterEligible = randomBoolean(); + boolean expectMetaData = true; + boolean stateInMemory = true; + ClusterChangedEvent event = generateCloseEvent(masterEligible); + assertState(event, stateInMemory, expectMetaData); + } +} diff --git a/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java b/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java new file mode 100644 index 00000000000..7947a6698c7 --- /dev/null +++ b/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java @@ -0,0 +1,354 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.base.Predicate; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.elasticsearch.test.InternalTestCluster; +import org.junit.Test; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.Map; + +import static org.elasticsearch.client.Requests.clusterHealthRequest; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +/** + * + */ +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) +public class MetaDataWriteDataNodesTests extends ElasticsearchIntegrationTest { + + @Test + public void testMetaWrittenAlsoOnDataNode() throws Exception { + // this test checks that index state is written on data only nodes + String masterNodeName = startMasterNode(); + String redNode = startDataNode("red"); + assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0))); + index("test", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + waitForConcreteMappingsOnAll("test", "doc", "text"); + ensureGreen("test"); + assertIndexInMetaState(redNode, "test"); + assertIndexInMetaState(masterNodeName, "test"); + //stop master node and start again with an empty data folder + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + String newMasterNode = startMasterNode(); + ensureGreen("test"); + // wait for mapping also on master becasue then we can be sure the state was written + waitForConcreteMappingsOnAll("test", "doc", "text"); + // check for meta data + assertIndexInMetaState(redNode, "test"); + assertIndexInMetaState(newMasterNode, "test"); + // check if index and doc is still there + ensureGreen("test"); + assertTrue(client().prepareGet("test", "doc", "1").get().isExists()); + } + + @Test + public void testMetaWrittenOnlyForIndicesOnNodesThatHaveAShard() throws Exception { + // this test checks that the index state is only written to a data only node if they have a shard of that index allocated on the node + String masterNode = startMasterNode(); + String blueNode = startDataNode("blue"); + String redNode = startDataNode("red"); + + assertAcked(prepareCreate("blue_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue"))); + index("blue_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); + index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + ensureGreen(); + waitForConcreteMappingsOnAll("blue_index", "doc", "text"); + waitForConcreteMappingsOnAll("red_index", "doc", "text"); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexNotInMetaState(redNode, "blue_index"); + assertIndexInMetaState(blueNode, "blue_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + assertIndexInMetaState(masterNode, "blue_index"); + + // not the index state for blue_index should only be written on blue_node and the for red_index only on red_node + // we restart red node and master but with empty data folders + stopNode(redNode); + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + redNode = startDataNode("red"); + + ensureGreen(); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(blueNode, "blue_index"); + assertIndexNotInMetaState(redNode, "red_index"); + assertIndexNotInMetaState(redNode, "blue_index"); + assertIndexNotInMetaState(masterNode, "red_index"); + assertIndexInMetaState(masterNode, "blue_index"); + // check that blue index is still there + assertFalse(client().admin().indices().prepareExists("red_index").get().isExists()); + assertTrue(client().prepareGet("blue_index", "doc", "1").get().isExists()); + // red index should be gone + // if the blue node had stored the index state then cluster health would be red and red_index would exist + assertFalse(client().admin().indices().prepareExists("red_index").get().isExists()); + + } + + @Test + public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception { + // this test checks that the index state is removed from a data only node once all shards have been allocated away from it + String masterNode = startMasterNode(); + String blueNode = startDataNode("blue"); + String redNode = startDataNode("red"); + + // create blue_index on blue_node and same for red + client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3")).get(); + assertAcked(prepareCreate("blue_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue"))); + index("blue_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); + index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + + ensureGreen(); + assertIndexNotInMetaState(redNode, "blue_index"); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(blueNode, "blue_index"); + assertIndexInMetaState(masterNode, "red_index"); + assertIndexInMetaState(masterNode, "blue_index"); + + // now relocate blue_index to red_node and red_index to blue_node + logger.debug("relocating indices..."); + client().admin().indices().prepareUpdateSettings("blue_index").setSettings(ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")).get(); + client().admin().indices().prepareUpdateSettings("red_index").setSettings(ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")).get(); + client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).get(); + ensureGreen(); + assertIndexNotInMetaState(redNode, "red_index"); + assertIndexNotInMetaState(blueNode, "blue_index"); + assertIndexInMetaState(redNode, "blue_index"); + assertIndexInMetaState(blueNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + assertIndexInMetaState(masterNode, "blue_index"); + waitForConcreteMappingsOnAll("blue_index", "doc", "text"); + waitForConcreteMappingsOnAll("red_index", "doc", "text"); + + //at this point the blue_index is on red node and the red_index on blue node + // now, when we start red and master node again but without data folder, the red index should be gone but the blue index should initialize fine + stopNode(redNode); + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + redNode = startDataNode("red"); + ensureGreen(); + assertIndexNotInMetaState(redNode, "blue_index"); + assertIndexNotInMetaState(blueNode, "blue_index"); + assertIndexNotInMetaState(redNode, "red_index"); + assertIndexInMetaState(blueNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + assertIndexNotInMetaState(masterNode, "blue_index"); + assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists()); + // if the red_node had stored the index state then cluster health would be red and blue_index would exist + assertFalse(client().admin().indices().prepareExists("blue_index").get().isExists()); + } + + @Test + public void testMetaWrittenWhenIndexIsClosed() throws Exception { + String masterNode = startMasterNode(); + String redNodeDataPath = createTempDir().toString(); + String redNode = startDataNode("red", redNodeDataPath); + String blueNode = startDataNode("blue"); + // create red_index on red_node and same for red + client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3")).get(); + assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); + index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + + ensureGreen(); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + + waitForConcreteMappingsOnAll("red_index", "doc", "text"); + client().admin().indices().prepareClose("red_index").get(); + // close the index + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); + + // restart master with empty data folder and maybe red node + boolean restartRedNode = randomBoolean(); + //at this point the red_index on red node + if (restartRedNode) { + stopNode(redNode); + } + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + if (restartRedNode) { + redNode = startDataNode("red", redNodeDataPath); + } + + ensureGreen("red_index"); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); + + // open the index again + client().admin().indices().prepareOpen("red_index").get(); + clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.OPEN.name())); + // restart again + ensureGreen(); + if (restartRedNode) { + stopNode(redNode); + } + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + if (restartRedNode) { + redNode = startDataNode("red", redNodeDataPath); + } + ensureGreen("red_index"); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.OPEN.name())); + assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists()); + } + @Test + public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { + String masterNode = startMasterNode(); + String redNodeDataPath = createTempDir().toString(); + String redNode = startDataNode("red", redNodeDataPath); + // create red_index on red_node and same for red + client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2")).get(); + assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); + index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + + logger.info("--> wait for green red_index"); + ensureGreen(); + logger.info("--> wait for meta state written for red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + + waitForConcreteMappingsOnAll("red_index", "doc", "text"); + + logger.info("--> close red_index"); + client().admin().indices().prepareClose("red_index").get(); + // close the index + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); + + logger.info("--> restart red node"); + stopNode(redNode); + redNode = startDataNode("red", redNodeDataPath); + client().admin().indices().preparePutMapping("red_index").setType("doc").setSource(jsonBuilder().startObject() + .startObject("properties") + .startObject("integer_field") + .field("type", "integer") + .endObject() + .endObject() + .endObject()).get(); + + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get(); + assertNotNull(((LinkedHashMap)(getMappingsResponse.getMappings().get("red_index").get("doc").getSourceAsMap().get("properties"))).get("integer_field")); + // restart master with empty data folder and maybe red node + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + + ensureGreen("red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); + getMappingsResponse = client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get(); + assertNotNull(((LinkedHashMap)(getMappingsResponse.getMappings().get("red_index").get("doc").getSourceAsMap().get("properties"))).get("integer_field")); + + } + + private String startDataNode(String color) { + return startDataNode(color, createTempDir().toString()); + } + + private String startDataNode(String color, String newDataPath) { + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder() + .put("node.data", true) + .put("node.master", false) + .put("node.color", color) + .put("path.data", newDataPath); + return internalCluster().startNode(settingsBuilder.build()); + } + + private String startMasterNode() { + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder() + .put("node.data", false) + .put("node.master", true) + .put("path.data", createTempDir().toString()); + return internalCluster().startNode(settingsBuilder.build()); + } + + private void stopNode(String name) throws IOException { + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(name)); + } + + protected void assertIndexNotInMetaState(String nodeName, String indexName) throws Exception { + assertMetaState(nodeName, indexName, false); + } + + protected void assertIndexInMetaState(String nodeName, String indexName) throws Exception { + assertMetaState(nodeName, indexName, true); + } + + private void assertMetaState(final String nodeName, final String indexName, final boolean shouldBe) throws Exception { + awaitBusy(new Predicate() { + @Override + public boolean apply(Object o) { + logger.info("checking if meta state exists..."); + return shouldBe == metaStateExists(nodeName, indexName); + } + }); + boolean inMetaSate = metaStateExists(nodeName, indexName); + if (shouldBe) { + assertTrue("expected " + indexName + " in meta state of node " + nodeName, inMetaSate); + } else { + assertFalse("expected " + indexName + " to not be in meta state of node " + nodeName, inMetaSate); + } + } + + private boolean metaStateExists(String nodeName, String indexName) { + GatewayMetaState redNodeMetaState = ((InternalTestCluster) cluster()).getInstance(GatewayMetaState.class, nodeName); + MetaData redNodeMetaData = null; + try { + redNodeMetaData = redNodeMetaState.loadMetaState(); + } catch (Exception e) { + fail("failed to load meta state"); + } + ImmutableOpenMap indices = redNodeMetaData.getIndices(); + boolean inMetaSate = false; + for (ObjectObjectCursor index : indices) { + inMetaSate = inMetaSate || index.key.equals(indexName); + } + return inMetaSate; + } +} From 1d4df4b6283699bab1dcd4d53cfc06461abcdaf1 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 29 Apr 2015 15:26:10 +0200 Subject: [PATCH 198/236] [TEST] remove source parameter validation from REST tests runner source parameter is implicitly supported and doesn't need to be declared in rest spec. It is tested though, as every api that supports get with body can also get requests using POST with body or get with source query_string parameter. --- .../elasticsearch/test/rest/ElasticsearchRestTestCase.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java index dd6ae14612d..b7b207a6b11 100644 --- a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java +++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java @@ -20,8 +20,6 @@ package org.elasticsearch.test.rest; import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import com.google.common.collect.Lists; @@ -221,9 +219,6 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration if (!restApi.getMethods().contains("POST")) { errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support POST"); } - if (!restApi.getParams().contains("source")) { - errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support the source query string parameter"); - } } } if (errorMessage.length() > 0) { From 3bb8ff2a925e69017826a5f71dca2ee1cdafcaac Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 29 Apr 2015 14:54:05 +0100 Subject: [PATCH 199/236] fixed issue with eggs in percolation request for 1 shard --- .../percolator/PercolatorService.java | 7 +- .../PercolatorFacetsAndAggregationsTests.java | 92 ++++++++++++++++--- 2 files changed, 78 insertions(+), 21 deletions(-) diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 23732af5d89..4480de38051 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -847,16 +847,11 @@ public class PercolatorService extends AbstractComponent { return null; } - InternalAggregations aggregations; - if (shardResults.size() == 1) { - aggregations = shardResults.get(0).aggregations(); - } else { List aggregationsList = new ArrayList<>(shardResults.size()); for (PercolateShardResponse shardResult : shardResults) { aggregationsList.add(shardResult.aggregations()); } - aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService)); - } + InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService)); if (aggregations != null) { List reducers = shardResults.get(0).reducers(); if (reducers != null) { diff --git a/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java index 9f04e4a37b0..4540cc75a06 100644 --- a/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java +++ b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java @@ -20,12 +20,14 @@ package org.elasticsearch.percolator; import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateResponse; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; import org.elasticsearch.search.aggregations.reducers.ReducerBuilders; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.test.ElasticsearchIntegrationTest; @@ -70,20 +72,18 @@ public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrati expectedCount[i % numUniqueQueries]++; QueryBuilder queryBuilder = matchQuery("field1", value); client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()) - .execute().actionGet(); + .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() + .actionGet(); } client().admin().indices().prepareRefresh("test").execute().actionGet(); for (int i = 0; i < numQueries; i++) { String value = values[i % numUniqueQueries]; - PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate() - .setIndices("test").setDocumentType("type") + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2") - .collectMode(aggCollectionMode )); + percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode)); if (randomBoolean()) { percolateRequestBuilder.setPercolateQuery(matchAllQuery()); @@ -135,20 +135,18 @@ public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrati expectedCount[i % numUniqueQueries]++; QueryBuilder queryBuilder = matchQuery("field1", value); client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()) - .execute().actionGet(); + .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() + .actionGet(); } client().admin().indices().prepareRefresh("test").execute().actionGet(); for (int i = 0; i < numQueries; i++) { String value = values[i % numUniqueQueries]; - PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate() - .setIndices("test").setDocumentType("type") + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2") - .collectMode(aggCollectionMode )); + percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode)); if (randomBoolean()) { percolateRequestBuilder.setPercolateQuery(matchAllQuery()); @@ -186,7 +184,7 @@ public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrati assertThat(maxA, notNullValue()); assertThat(maxA.getName(), equalTo("max_a")); assertThat(maxA.value(), equalTo((double) expectedCount[i % values.length])); - assertThat(maxA.keys(), equalTo(new String[] {"b"})); + assertThat(maxA.keys(), equalTo(new String[] { "b" })); } } @@ -194,12 +192,76 @@ public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrati public void testSignificantAggs() throws Exception { client().admin().indices().prepareCreate("test").execute().actionGet(); ensureGreen(); - PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate() - .setIndices("test").setDocumentType("type") + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value").endObject())) .addAggregation(AggregationBuilders.significantTerms("a").field("field2")); PercolateResponse response = percolateRequestBuilder.get(); assertNoFailures(response); } + @Test + public void testSingleShardAggregations() throws Exception { + assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put(indexSettings()).put("SETTING_NUMBER_OF_SHARDS", 1)) + .addMapping("type", "field1", "type=string", "field2", "type=string")); + ensureGreen(); + + int numQueries = scaledRandomIntBetween(250, 500); + + logger.info("--> registering {} queries", numQueries); + for (int i = 0; i < numQueries; i++) { + String value = "value0"; + QueryBuilder queryBuilder = matchQuery("field1", value); + client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) + .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", i % 3 == 0 ? "b" : "a").endObject()) + .execute() + .actionGet(); + } + client().admin().indices().prepareRefresh("test").execute().actionGet(); + + for (int i = 0; i < numQueries; i++) { + String value = "value0"; + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type") + .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); + + SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); + percolateRequestBuilder.addAggregation(AggregationBuilders.terms("terms").field("field2").collectMode(aggCollectionMode) + .order(Order.term(true)).shardSize(2).size(1)); + + if (randomBoolean()) { + percolateRequestBuilder.setPercolateQuery(matchAllQuery()); + } + if (randomBoolean()) { + percolateRequestBuilder.setScore(true); + } else { + percolateRequestBuilder.setSortByScore(true).setSize(numQueries); + } + + boolean countOnly = randomBoolean(); + if (countOnly) { + percolateRequestBuilder.setOnlyCount(countOnly); + } + + percolateRequestBuilder.addAggregation(ReducerBuilders.maxBucket("max_terms").setBucketsPaths("terms>_count")); + + PercolateResponse response = percolateRequestBuilder.execute().actionGet(); + assertMatchCount(response, numQueries); + if (!countOnly) { + assertThat(response.getMatches(), arrayWithSize(numQueries)); + } + + Aggregations aggregations = response.getAggregations(); + assertThat(aggregations.asList().size(), equalTo(2)); + Terms terms = aggregations.get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = new ArrayList<>(terms.getBuckets()); + assertThat(buckets.size(), equalTo(1)); + assertThat(buckets.get(0).getKeyAsString(), equalTo("a")); + + InternalBucketMetricValue maxA = aggregations.get("max_terms"); + assertThat(maxA, notNullValue()); + assertThat(maxA.getName(), equalTo("max_terms")); + assertThat(maxA.keys(), equalTo(new String[] { "a" })); + } + } } From 6e076efdb9ccd963ba45dfbe1adc2635f903af08 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 29 Apr 2015 15:59:24 +0200 Subject: [PATCH 200/236] Docs: Add documentation for the `doc_values` setting on the `boolean` field type. Close #10431 --- docs/reference/mapping/types/core-types.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/mapping/types/core-types.asciidoc b/docs/reference/mapping/types/core-types.asciidoc index e650ded89ca..1f6dcc01cb5 100644 --- a/docs/reference/mapping/types/core-types.asciidoc +++ b/docs/reference/mapping/types/core-types.asciidoc @@ -426,6 +426,9 @@ and it can be retrieved from it). in `_source`, have `include_in_all` enabled, or `store` be set to `true` for this to be useful. +|`doc_values` |Set to `true` to store field values in a column-stride fashion. +Automatically set to `true` when the fielddata format is `doc_values`. + |`boost` |The boost value. Defaults to `1.0`. |`null_value` |When there is a (JSON) null value for the field, use the From 528f6481eaadd2a0585dc6731a94d7a024b8ce29 Mon Sep 17 00:00:00 2001 From: markharwood Date: Mon, 27 Apr 2015 14:45:32 +0100 Subject: [PATCH 201/236] Query enhancement: return positions of parse errors found in JSON Extend SearchParseException and QueryParsingException to report position information in query JSON where errors were found. All query DSL parser classes that throw these exception types now pass the underlying position information (line and column number) at the point the error was found. Closes #3303 --- .../common/xcontent/XContentLocation.java | 37 +++++++++++ .../common/xcontent/XContentParser.java | 8 +++ .../xcontent/json/JsonXContentParser.java | 12 ++++ .../percolator/PercolatorQueriesRegistry.java | 2 +- .../index/query/AndFilterParser.java | 4 +- .../index/query/BoolFilterParser.java | 8 +-- .../index/query/BoolQueryParser.java | 6 +- .../index/query/BoostingQueryParser.java | 10 +-- .../index/query/CommonTermsQueryParser.java | 19 +++--- .../index/query/ConstantScoreQueryParser.java | 6 +- .../index/query/DisMaxQueryParser.java | 8 +-- .../index/query/ExistsFilterParser.java | 6 +- .../index/query/FQueryFilterParser.java | 6 +- .../query/FieldMaskingSpanQueryParser.java | 11 ++-- .../index/query/FilteredQueryParser.java | 6 +- .../index/query/FuzzyQueryParser.java | 6 +- .../query/GeoBoundingBoxFilterParser.java | 9 +-- .../index/query/GeoDistanceFilterParser.java | 9 +-- .../query/GeoDistanceRangeFilterParser.java | 4 +- .../index/query/GeoPolygonFilterParser.java | 20 +++--- .../index/query/GeoShapeFilterParser.java | 18 +++--- .../index/query/GeoShapeQueryParser.java | 18 +++--- .../index/query/GeohashCellFilter.java | 9 +-- .../index/query/HasChildFilterParser.java | 17 +++--- .../index/query/HasChildQueryParser.java | 20 +++--- .../index/query/HasParentFilterParser.java | 8 +-- .../index/query/HasParentQueryParser.java | 14 ++--- .../index/query/IdsFilterParser.java | 10 +-- .../index/query/IdsQueryParser.java | 14 ++--- .../index/query/IndexQueryParserService.java | 16 ++--- .../index/query/IndicesFilterParser.java | 16 ++--- .../index/query/IndicesQueryParser.java | 16 ++--- .../index/query/LimitFilterParser.java | 4 +- .../index/query/MatchAllQueryParser.java | 2 +- .../index/query/MatchQueryParser.java | 18 +++--- .../index/query/MissingFilterParser.java | 7 +-- .../index/query/MoreLikeThisQueryParser.java | 15 ++--- .../index/query/MultiMatchQueryParser.java | 17 +++--- .../index/query/NestedFilterParser.java | 4 +- .../index/query/NestedQueryParser.java | 8 +-- .../index/query/NotFilterParser.java | 5 +- .../index/query/OrFilterParser.java | 4 +- .../index/query/PrefixFilterParser.java | 2 +- .../index/query/PrefixQueryParser.java | 6 +- .../index/query/QueryParseContext.java | 21 ++++--- .../index/query/QueryParsingException.java | 61 ++++++++++++++++++- .../index/query/QueryStringQueryParser.java | 20 +++--- .../index/query/RangeFilterParser.java | 24 +++++--- .../index/query/RangeQueryParser.java | 15 +++-- .../index/query/RegexpFilterParser.java | 4 +- .../index/query/RegexpQueryParser.java | 6 +- .../index/query/ScriptFilterParser.java | 6 +- .../index/query/SimpleQueryStringParser.java | 14 ++--- .../index/query/SpanFirstQueryParser.java | 10 +-- .../index/query/SpanMultiTermQueryParser.java | 6 +- .../index/query/SpanNearQueryParser.java | 12 ++-- .../index/query/SpanNotQueryParser.java | 14 ++--- .../index/query/SpanOrQueryParser.java | 8 +-- .../index/query/SpanTermQueryParser.java | 4 +- .../index/query/TermFilterParser.java | 6 +- .../index/query/TermQueryParser.java | 6 +- .../index/query/TermsFilterParser.java | 17 +++--- .../index/query/TermsQueryParser.java | 10 +-- .../index/query/TopChildrenQueryParser.java | 12 ++-- .../index/query/TypeFilterParser.java | 6 +- .../index/query/WildcardQueryParser.java | 6 +- .../index/query/WrapperFilterParser.java | 4 +- .../index/query/WrapperQueryParser.java | 4 +- .../functionscore/DecayFunctionParser.java | 4 +- .../FunctionScoreQueryParser.java | 12 ++-- .../ScoreFunctionParserMapper.java | 7 ++- .../FieldValueFactorFunctionParser.java | 6 +- .../random/RandomScoreFunctionParser.java | 9 ++- .../script/ScriptScoreFunctionParser.java | 12 ++-- .../support/InnerHitsQueryParserHelper.java | 2 +- .../support/NestedInnerQueryParseSupport.java | 14 ++--- .../search/SearchParseException.java | 46 +++++++++++++- .../elasticsearch/search/SearchService.java | 24 ++++++-- .../aggregations/AggregatorParsers.java | 29 ++++++--- .../bucket/children/ChildrenParser.java | 11 ++-- .../bucket/filters/FiltersParser.java | 9 ++- .../bucket/histogram/DateHistogramParser.java | 27 +++++--- .../bucket/histogram/ExtendedBounds.java | 2 +- .../bucket/histogram/HistogramParser.java | 18 ++++-- .../bucket/missing/MissingParser.java | 3 +- .../bucket/nested/NestedParser.java | 9 ++- .../nested/ReverseNestedAggregator.java | 3 +- .../bucket/nested/ReverseNestedParser.java | 6 +- .../bucket/range/RangeParser.java | 12 ++-- .../bucket/range/date/DateRangeParser.java | 14 +++-- .../range/geodistance/GeoDistanceParser.java | 18 ++++-- .../bucket/range/ipv4/IpRangeParser.java | 15 +++-- .../bucket/sampler/SamplerParser.java | 10 +-- .../SignificantTermsParametersParser.java | 6 +- .../bucket/terms/TermsParametersParser.java | 21 ++++--- ...icValuesSourceMetricsAggregatorParser.java | 3 +- .../cardinality/CardinalityParser.java | 5 +- .../metrics/geobounds/GeoBoundsParser.java | 6 +- .../AbstractPercentilesParser.java | 12 ++-- .../percentiles/PercentileRanksParser.java | 2 +- .../scripted/ScriptedMetricAggregator.java | 8 ++- .../scripted/ScriptedMetricParser.java | 11 ++-- .../stats/extended/ExtendedStatsParser.java | 8 ++- .../metrics/tophits/TopHitsParser.java | 12 ++-- .../metrics/valuecount/ValueCountParser.java | 3 +- .../aggregations/support/GeoPointParser.java | 5 +- .../support/ValuesSourceParser.java | 3 +- .../highlight/HighlighterParseElement.java | 3 +- .../search/query/FromParseElement.java | 3 +- .../search/query/SizeParseElement.java | 3 +- .../search/sort/ScriptSortParser.java | 8 +-- .../search/sort/SortParseElement.java | 4 +- .../ElasticsearchExceptionTests.java | 60 ++++++++++++------ .../query/TestQueryParsingException.java | 37 +++++++++++ .../rest/BytesRestResponseTests.java | 10 +-- 115 files changed, 834 insertions(+), 482 deletions(-) create mode 100644 src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java create mode 100644 src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java b/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java new file mode 100644 index 00000000000..ade2a457797 --- /dev/null +++ b/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +/** + * Simple data structure representing the line and column number of a position + * in some XContent e.g. JSON. Locations are typically used to communicate the + * position of a parsing error to end users and consequently have line and + * column numbers starting from 1. + */ +public class XContentLocation { + public final int lineNumber; + public final int columnNumber; + + public XContentLocation(int lineNumber, int columnNumber) { + super(); + this.lineNumber = lineNumber; + this.columnNumber = columnNumber; + } +} diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index 0aab32c4ba3..738fd9f6e72 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -241,4 +241,12 @@ public interface XContentParser extends Releasable { * */ byte[] binaryValue() throws IOException; + + /** + * Used for error reporting to highlight where syntax errors occur in + * content being parsed. + * + * @return last token's location or null if cannot be determined + */ + XContentLocation getTokenLocation(); } diff --git a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java index 08174e30a3e..5d3a3f99f4e 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java +++ b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java @@ -19,10 +19,13 @@ package org.elasticsearch.common.xcontent.json; +import com.fasterxml.jackson.core.JsonLocation; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; + import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.AbstractXContentParser; @@ -187,6 +190,15 @@ public class JsonXContentParser extends AbstractXContentParser { return parser.getBinaryValue(); } + @Override + public XContentLocation getTokenLocation() { + JsonLocation loc = parser.getTokenLocation(); + if (loc == null) { + return null; + } + return new XContentLocation(loc.getLineNr(), loc.getColumnNr()); + } + @Override public void close() { IOUtils.closeWhileHandlingException(parser); diff --git a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java index 486101f741f..fd4cce1c763 100644 --- a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java @@ -223,7 +223,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString ? true : false); return queryParserService.parseInnerQuery(context); } catch (IOException e) { - throw new QueryParsingException(queryParserService.index(), "Failed to parse", e); + throw new QueryParsingException(context, "Failed to parse", e); } finally { if (type != null) { QueryParseContext.setTypes(previousTypes); diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java index 176a8c6dd7b..02322db9a0b 100644 --- a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java @@ -100,14 +100,14 @@ public class AndFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[and] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[and] filter does not support [" + currentFieldName + "]"); } } } } if (!filtersFound) { - throw new QueryParsingException(parseContext.index(), "[and] filter requires 'filters' to be set on it'"); + throw new QueryParsingException(parseContext, "[and] filter requires 'filters' to be set on it'"); } if (filters.isEmpty()) { diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java index fcd2e68c8b4..71f8b8248f7 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java @@ -85,7 +85,7 @@ public class BoolFilterParser implements FilterParser { boolFilter.add(new BooleanClause(filter, BooleanClause.Occur.SHOULD)); } } else { - throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("must".equals(currentFieldName)) { @@ -114,7 +114,7 @@ public class BoolFilterParser implements FilterParser { } } } else { - throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("_cache".equals(currentFieldName)) { @@ -124,13 +124,13 @@ public class BoolFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } } } if (!hasAnyFilter) { - throw new QueryParsingException(parseContext.index(), "[bool] filter has no inner should/must/must_not elements"); + throw new QueryParsingException(parseContext, "[bool] filter has no inner should/must/must_not elements"); } if (boolFilter.clauses().isEmpty()) { diff --git a/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java b/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java index 29d4ba2edd5..b7c31647c94 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java @@ -85,7 +85,7 @@ public class BoolQueryParser implements QueryParser { clauses.add(new BooleanClause(query, BooleanClause.Occur.SHOULD)); } } else { - throw new QueryParsingException(parseContext.index(), "[bool] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("must".equals(currentFieldName)) { @@ -110,7 +110,7 @@ public class BoolQueryParser implements QueryParser { } } } else { - throw new QueryParsingException(parseContext.index(), "bool query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "bool query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) { @@ -126,7 +126,7 @@ public class BoolQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[bool] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] query does not support [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java b/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java index a117256ece1..c160b2f9a4a 100644 --- a/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java @@ -66,7 +66,7 @@ public class BoostingQueryParser implements QueryParser { negativeQuery = parseContext.parseInnerQuery(); negativeQueryFound = true; } else { - throw new QueryParsingException(parseContext.index(), "[boosting] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[boosting] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("negative_boost".equals(currentFieldName) || "negativeBoost".equals(currentFieldName)) { @@ -74,19 +74,19 @@ public class BoostingQueryParser implements QueryParser { } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new QueryParsingException(parseContext.index(), "[boosting] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[boosting] query does not support [" + currentFieldName + "]"); } } } if (positiveQuery == null && !positiveQueryFound) { - throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'positive' query to be set'"); + throw new QueryParsingException(parseContext, "[boosting] query requires 'positive' query to be set'"); } if (negativeQuery == null && !negativeQueryFound) { - throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'negative' query to be set'"); + throw new QueryParsingException(parseContext, "[boosting] query requires 'negative' query to be set'"); } if (negativeBoost == -1) { - throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'negative_boost' to be set'"); + throw new QueryParsingException(parseContext, "[boosting] query requires 'negative_boost' to be set'"); } // parsers returned null diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java index 683b8dfd9ba..29945de5686 100644 --- a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java @@ -65,7 +65,7 @@ public class CommonTermsQueryParser implements QueryParser { XContentParser parser = parseContext.parser(); XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[common] query malformed, no field"); + throw new QueryParsingException(parseContext, "[common] query malformed, no field"); } String fieldName = parser.currentName(); Object value = null; @@ -96,12 +96,13 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("high_freq".equals(innerFieldName) || "highFreq".equals(innerFieldName)) { highFreqMinimumShouldMatch = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + innerFieldName + "] for [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[common] query does not support [" + innerFieldName + + "] for [" + currentFieldName + "]"); } } } } else { - throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[common] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("query".equals(currentFieldName)) { @@ -109,7 +110,7 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("analyzer".equals(currentFieldName)) { String analyzer = parser.text(); if (parseContext.analysisService().analyzer(analyzer) == null) { - throw new QueryParsingException(parseContext.index(), "[common] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[common] analyzer [" + parser.text() + "] not found"); } queryAnalyzer = analyzer; } else if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) { @@ -123,7 +124,7 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { highFreqOccur = BooleanClause.Occur.MUST; } else { - throw new QueryParsingException(parseContext.index(), + throw new QueryParsingException(parseContext, "[common] query requires operator to be either 'and' or 'or', not [" + op + "]"); } } else if ("low_freq_operator".equals(currentFieldName) || "lowFreqOperator".equals(currentFieldName)) { @@ -133,7 +134,7 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { lowFreqOccur = BooleanClause.Occur.MUST; } else { - throw new QueryParsingException(parseContext.index(), + throw new QueryParsingException(parseContext, "[common] query requires operator to be either 'and' or 'or', not [" + op + "]"); } } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) { @@ -143,7 +144,7 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[common] query does not support [" + currentFieldName + "]"); } } } @@ -154,13 +155,13 @@ public class CommonTermsQueryParser implements QueryParser { token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { throw new QueryParsingException( - parseContext.index(), + parseContext, "[common] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); } } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No text specified for text query"); + throw new QueryParsingException(parseContext, "No text specified for text query"); } FieldMapper mapper = null; String field; diff --git a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java index 78c5879b63f..d89ff05b7fa 100644 --- a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java @@ -71,7 +71,7 @@ public class ConstantScoreQueryParser implements QueryParser { query = parseContext.parseInnerQuery(); queryFound = true; } else { - throw new QueryParsingException(parseContext.index(), "[constant_score] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[constant_score] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("boost".equals(currentFieldName)) { @@ -81,12 +81,12 @@ public class ConstantScoreQueryParser implements QueryParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[constant_score] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[constant_score] query does not support [" + currentFieldName + "]"); } } } if (!filterFound && !queryFound) { - throw new QueryParsingException(parseContext.index(), "[constant_score] requires either 'filter' or 'query' element"); + throw new QueryParsingException(parseContext, "[constant_score] requires either 'filter' or 'query' element"); } if (query == null && filter == null) { diff --git a/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java b/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java index 82feb9854a5..2747387fbd7 100644 --- a/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java @@ -70,7 +70,7 @@ public class DisMaxQueryParser implements QueryParser { queries.add(query); } } else { - throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[dis_max] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("queries".equals(currentFieldName)) { @@ -83,7 +83,7 @@ public class DisMaxQueryParser implements QueryParser { token = parser.nextToken(); } } else { - throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[dis_max] query does not support [" + currentFieldName + "]"); } } else { if ("boost".equals(currentFieldName)) { @@ -93,13 +93,13 @@ public class DisMaxQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[dis_max] query does not support [" + currentFieldName + "]"); } } } if (!queriesFound) { - throw new QueryParsingException(parseContext.index(), "[dis_max] requires 'queries' field"); + throw new QueryParsingException(parseContext, "[dis_max] requires 'queries' field"); } if (queries.isEmpty()) { diff --git a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java index eb03586adf2..008f554a57f 100644 --- a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java @@ -23,8 +23,6 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryWrapperFilter; -import org.apache.lucene.search.TermRangeFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; @@ -71,13 +69,13 @@ public class ExistsFilterParser implements FilterParser { } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[exists] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[exists] filter does not support [" + currentFieldName + "]"); } } } if (fieldPattern == null) { - throw new QueryParsingException(parseContext.index(), "exists must be provided with a [field]"); + throw new QueryParsingException(parseContext, "exists must be provided with a [field]"); } return newFilter(parseContext, fieldPattern, filterName); diff --git a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java index cb821912ca9..d31e2f1a943 100644 --- a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java @@ -66,7 +66,7 @@ public class FQueryFilterParser implements FilterParser { queryFound = true; query = parseContext.parseInnerQuery(); } else { - throw new QueryParsingException(parseContext.index(), "[fquery] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[fquery] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { @@ -76,12 +76,12 @@ public class FQueryFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[fquery] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[fquery] filter does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[fquery] requires 'query' element"); + throw new QueryParsingException(parseContext, "[fquery] requires 'query' element"); } if (query == null) { return null; diff --git a/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java b/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java index 2b69cf61561..1e8fd7cfa03 100644 --- a/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java @@ -64,11 +64,12 @@ public class FieldMaskingSpanQueryParser implements QueryParser { if ("query".equals(currentFieldName)) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "[field_masking_span] query] must be of type span query"); + throw new QueryParsingException(parseContext, "[field_masking_span] query] must be of type span query"); } inner = (SpanQuery) query; } else { - throw new QueryParsingException(parseContext.index(), "[field_masking_span] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[field_masking_span] query does not support [" + + currentFieldName + "]"); } } else { if ("boost".equals(currentFieldName)) { @@ -78,15 +79,15 @@ public class FieldMaskingSpanQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[field_masking_span] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[field_masking_span] query does not support [" + currentFieldName + "]"); } } } if (inner == null) { - throw new QueryParsingException(parseContext.index(), "field_masking_span must have [query] span query clause"); + throw new QueryParsingException(parseContext, "field_masking_span must have [query] span query clause"); } if (field == null) { - throw new QueryParsingException(parseContext.index(), "field_masking_span must have [field] set for it"); + throw new QueryParsingException(parseContext, "field_masking_span must have [field] set for it"); } FieldMapper mapper = parseContext.fieldMapper(field); diff --git a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java index e1e27eec64b..f6ec14313b1 100644 --- a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java @@ -73,7 +73,7 @@ public class FilteredQueryParser implements QueryParser { filterFound = true; filter = parseContext.parseInnerFilter(); } else { - throw new QueryParsingException(parseContext.index(), "[filtered] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[filtered] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("strategy".equals(currentFieldName)) { @@ -93,7 +93,7 @@ public class FilteredQueryParser implements QueryParser { } else if ("leap_frog_filter_first".equals(value) || "leapFrogFilterFirst".equals(value)) { filterStrategy = FilteredQuery.LEAP_FROG_FILTER_FIRST_STRATEGY; } else { - throw new QueryParsingException(parseContext.index(), "[filtered] strategy value not supported [" + value + "]"); + throw new QueryParsingException(parseContext, "[filtered] strategy value not supported [" + value + "]"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); @@ -104,7 +104,7 @@ public class FilteredQueryParser implements QueryParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[filtered] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[filtered] query does not support [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java b/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java index 243f86534cd..229fcc95c72 100644 --- a/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java @@ -57,7 +57,7 @@ public class FuzzyQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[fuzzy] query malformed, no field"); + throw new QueryParsingException(parseContext, "[fuzzy] query malformed, no field"); } String fieldName = parser.currentName(); @@ -95,7 +95,7 @@ public class FuzzyQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[fuzzy] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[fuzzy] query does not support [" + currentFieldName + "]"); } } } @@ -107,7 +107,7 @@ public class FuzzyQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for fuzzy query"); + throw new QueryParsingException(parseContext, "No value specified for fuzzy query"); } Query query = null; diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java index 8f68dbea074..107e3a507dd 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java @@ -147,7 +147,7 @@ public class GeoBoundingBoxFilterParser implements FilterParser { } else if ("type".equals(currentFieldName)) { type = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[geo_bbox] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_bbox] filter does not support [" + currentFieldName + "]"); } } } @@ -169,11 +169,11 @@ public class GeoBoundingBoxFilterParser implements FilterParser { MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.mapper(); if (!(mapper instanceof GeoPointFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field"); + throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field"); } GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper); @@ -184,7 +184,8 @@ public class GeoBoundingBoxFilterParser implements FilterParser { IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); filter = new InMemoryGeoBoundingBoxFilter(topLeft, bottomRight, indexFieldData); } else { - throw new QueryParsingException(parseContext.index(), "geo bounding box type [" + type + "] not supported, either 'indexed' or 'memory' are allowed"); + throw new QueryParsingException(parseContext, "geo bounding box type [" + type + + "] not supported, either 'indexed' or 'memory' are allowed"); } if (cache != null) { diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java index 252afdf25cf..a7859977388 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java @@ -98,7 +98,8 @@ public class GeoDistanceFilterParser implements FilterParser { } else if (currentName.equals(GeoPointFieldMapper.Names.GEOHASH)) { GeoHashUtils.decode(parser.text(), point); } else { - throw new QueryParsingException(parseContext.index(), "[geo_distance] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_distance] filter does not support [" + currentFieldName + + "]"); } } } @@ -141,7 +142,7 @@ public class GeoDistanceFilterParser implements FilterParser { } if (vDistance == null) { - throw new QueryParsingException(parseContext.index(), "geo_distance requires 'distance' to be specified"); + throw new QueryParsingException(parseContext, "geo_distance requires 'distance' to be specified"); } else if (vDistance instanceof Number) { distance = DistanceUnit.DEFAULT.convert(((Number) vDistance).doubleValue(), unit); } else { @@ -155,11 +156,11 @@ public class GeoDistanceFilterParser implements FilterParser { MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.mapper(); if (!(mapper instanceof GeoPointFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field"); + throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field"); } GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper); diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java index b7452bec0f1..113c59d2c83 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java @@ -196,11 +196,11 @@ public class GeoDistanceRangeFilterParser implements FilterParser { MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.mapper(); if (!(mapper instanceof GeoPointFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field"); + throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field"); } GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper); diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java index fefa37c07e3..e63c6012ede 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java @@ -96,10 +96,12 @@ public class GeoPolygonFilterParser implements FilterParser { shell.add(GeoUtils.parseGeoPoint(parser)); } } else { - throw new QueryParsingException(parseContext.index(), "[geo_polygon] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_polygon] filter does not support [" + currentFieldName + + "]"); } } else { - throw new QueryParsingException(parseContext.index(), "[geo_polygon] filter does not support token type [" + token.name() + "] under [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_polygon] filter does not support token type [" + token.name() + + "] under [" + currentFieldName + "]"); } } } else if (token.isValue()) { @@ -113,25 +115,25 @@ public class GeoPolygonFilterParser implements FilterParser { normalizeLat = parser.booleanValue(); normalizeLon = parser.booleanValue(); } else { - throw new QueryParsingException(parseContext.index(), "[geo_polygon] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_polygon] filter does not support [" + currentFieldName + "]"); } } else { - throw new QueryParsingException(parseContext.index(), "[geo_polygon] unexpected token type [" + token.name() + "]"); + throw new QueryParsingException(parseContext, "[geo_polygon] unexpected token type [" + token.name() + "]"); } } if (shell.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "no points defined for geo_polygon filter"); + throw new QueryParsingException(parseContext, "no points defined for geo_polygon filter"); } else { if (shell.size() < 3) { - throw new QueryParsingException(parseContext.index(), "too few points defined for geo_polygon filter"); + throw new QueryParsingException(parseContext, "too few points defined for geo_polygon filter"); } GeoPoint start = shell.get(0); if (!start.equals(shell.get(shell.size() - 1))) { shell.add(start); } if (shell.size() < 4) { - throw new QueryParsingException(parseContext.index(), "too few points defined for geo_polygon filter"); + throw new QueryParsingException(parseContext, "too few points defined for geo_polygon filter"); } } @@ -143,11 +145,11 @@ public class GeoPolygonFilterParser implements FilterParser { MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.mapper(); if (!(mapper instanceof GeoPointFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field"); + throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field"); } IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java index 72eba62854e..5a5e45736cd 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java @@ -113,7 +113,7 @@ public class GeoShapeFilterParser implements FilterParser { } else if ("relation".equals(currentFieldName)) { shapeRelation = ShapeRelation.getRelationByName(parser.text()); if (shapeRelation == null) { - throw new QueryParsingException(parseContext.index(), "Unknown shape operation [" + parser.text() + "]"); + throw new QueryParsingException(parseContext, "Unknown shape operation [" + parser.text() + "]"); } } else if ("strategy".equals(currentFieldName)) { strategyName = parser.text(); @@ -134,13 +134,13 @@ public class GeoShapeFilterParser implements FilterParser { } } if (id == null) { - throw new QueryParsingException(parseContext.index(), "ID for indexed shape not provided"); + throw new QueryParsingException(parseContext, "ID for indexed shape not provided"); } else if (type == null) { - throw new QueryParsingException(parseContext.index(), "Type for indexed shape not provided"); + throw new QueryParsingException(parseContext, "Type for indexed shape not provided"); } shape = fetchService.fetch(id, type, index, shapePath); } else { - throw new QueryParsingException(parseContext.index(), "[geo_shape] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_shape] filter does not support [" + currentFieldName + "]"); } } } @@ -152,26 +152,26 @@ public class GeoShapeFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[geo_shape] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_shape] filter does not support [" + currentFieldName + "]"); } } } if (shape == null) { - throw new QueryParsingException(parseContext.index(), "No Shape defined"); + throw new QueryParsingException(parseContext, "No Shape defined"); } else if (shapeRelation == null) { - throw new QueryParsingException(parseContext.index(), "No Shape Relation defined"); + throw new QueryParsingException(parseContext, "No Shape Relation defined"); } MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName); if (smartNameFieldMappers == null || !smartNameFieldMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "Failed to find geo_shape field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "Failed to find geo_shape field [" + fieldName + "]"); } FieldMapper fieldMapper = smartNameFieldMappers.mapper(); // TODO: This isn't the nicest way to check this if (!(fieldMapper instanceof GeoShapeFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "Field [" + fieldName + "] is not a geo_shape"); + throw new QueryParsingException(parseContext, "Field [" + fieldName + "] is not a geo_shape"); } GeoShapeFieldMapper shapeFieldMapper = (GeoShapeFieldMapper) fieldMapper; diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java index eeed9d270f0..ac3d4f59f92 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java @@ -93,7 +93,7 @@ public class GeoShapeQueryParser implements QueryParser { } else if ("relation".equals(currentFieldName)) { shapeRelation = ShapeRelation.getRelationByName(parser.text()); if (shapeRelation == null) { - throw new QueryParsingException(parseContext.index(), "Unknown shape operation [" + parser.text() + " ]"); + throw new QueryParsingException(parseContext, "Unknown shape operation [" + parser.text() + " ]"); } } else if ("indexed_shape".equals(currentFieldName) || "indexedShape".equals(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -112,13 +112,13 @@ public class GeoShapeQueryParser implements QueryParser { } } if (id == null) { - throw new QueryParsingException(parseContext.index(), "ID for indexed shape not provided"); + throw new QueryParsingException(parseContext, "ID for indexed shape not provided"); } else if (type == null) { - throw new QueryParsingException(parseContext.index(), "Type for indexed shape not provided"); + throw new QueryParsingException(parseContext, "Type for indexed shape not provided"); } shape = fetchService.fetch(id, type, index, shapePath); } else { - throw new QueryParsingException(parseContext.index(), "[geo_shape] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_shape] query does not support [" + currentFieldName + "]"); } } } @@ -128,26 +128,26 @@ public class GeoShapeQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[geo_shape] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_shape] query does not support [" + currentFieldName + "]"); } } } if (shape == null) { - throw new QueryParsingException(parseContext.index(), "No Shape defined"); + throw new QueryParsingException(parseContext, "No Shape defined"); } else if (shapeRelation == null) { - throw new QueryParsingException(parseContext.index(), "No Shape Relation defined"); + throw new QueryParsingException(parseContext, "No Shape Relation defined"); } MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName); if (smartNameFieldMappers == null || !smartNameFieldMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "Failed to find geo_shape field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "Failed to find geo_shape field [" + fieldName + "]"); } FieldMapper fieldMapper = smartNameFieldMappers.mapper(); // TODO: This isn't the nicest way to check this if (!(fieldMapper instanceof GeoShapeFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "Field [" + fieldName + "] is not a geo_shape"); + throw new QueryParsingException(parseContext, "Field [" + fieldName + "] is not a geo_shape"); } GeoShapeFieldMapper shapeFieldMapper = (GeoShapeFieldMapper) fieldMapper; diff --git a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java index f93cc2681b8..63ca22db644 100644 --- a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java +++ b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java @@ -265,22 +265,23 @@ public class GeohashCellFilter { } if (geohash == null) { - throw new QueryParsingException(parseContext.index(), "no geohash value provided to geohash_cell filter"); + throw new QueryParsingException(parseContext, "no geohash value provided to geohash_cell filter"); } MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.mapper(); if (!(mapper instanceof GeoPointFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field"); + throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field"); } GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper); if (!geoMapper.isEnableGeohashPrefix()) { - throw new QueryParsingException(parseContext.index(), "can't execute geohash_cell on field [" + fieldName + "], geohash_prefix is not enabled"); + throw new QueryParsingException(parseContext, "can't execute geohash_cell on field [" + fieldName + + "], geohash_prefix is not enabled"); } if(levels > 0) { diff --git a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java index c04e48b8e1e..d22a05f6a11 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java @@ -94,7 +94,7 @@ public class HasChildFilterParser implements FilterParser { } else if ("inner_hits".equals(currentFieldName)) { innerHits = innerHitsQueryParserHelper.parse(parseContext); } else { - throw new QueryParsingException(parseContext.index(), "[has_child] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_child] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) { @@ -112,15 +112,15 @@ public class HasChildFilterParser implements FilterParser { } else if ("max_children".equals(currentFieldName) || "maxChildren".equals(currentFieldName)) { maxChildren = parser.intValue(true); } else { - throw new QueryParsingException(parseContext.index(), "[has_child] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_child] filter does not support [" + currentFieldName + "]"); } } } if (!queryFound && !filterFound) { - throw new QueryParsingException(parseContext.index(), "[has_child] filter requires 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[has_child] filter requires 'query' or 'filter' field"); } if (childType == null) { - throw new QueryParsingException(parseContext.index(), "[has_child] filter requires 'type' field"); + throw new QueryParsingException(parseContext, "[has_child] filter requires 'type' field"); } Query query; @@ -136,7 +136,7 @@ public class HasChildFilterParser implements FilterParser { DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType); if (childDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]"); + throw new QueryParsingException(parseContext, "No mapping for for type [" + childType + "]"); } if (innerHits != null) { InnerHitsContext.ParentChildInnerHits parentChildInnerHits = new InnerHitsContext.ParentChildInnerHits(innerHits.v2(), query, null, childDocMapper); @@ -145,7 +145,7 @@ public class HasChildFilterParser implements FilterParser { } ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper(); if (!parentFieldMapper.active()) { - throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping"); + throw new QueryParsingException(parseContext, "Type [" + childType + "] does not have parent mapping"); } String parentType = parentFieldMapper.type(); @@ -154,11 +154,12 @@ public class HasChildFilterParser implements FilterParser { DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] points to a non existent parent type [" + parentType + "]"); + throw new QueryParsingException(parseContext, "[has_child] Type [" + childType + "] points to a non existent parent type [" + + parentType + "]"); } if (maxChildren > 0 && maxChildren < minChildren) { - throw new QueryParsingException(parseContext.index(), "[has_child] 'max_children' is less than 'min_children'"); + throw new QueryParsingException(parseContext, "[has_child] 'max_children' is less than 'min_children'"); } BitDocIdSetFilter nonNestedDocsFilter = null; diff --git a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java index 058f47d5eb7..e088b58a51a 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java @@ -92,7 +92,7 @@ public class HasChildQueryParser implements QueryParser { } else if ("inner_hits".equals(currentFieldName)) { innerHits = innerHitsQueryParserHelper.parse(parseContext); } else { - throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_child] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) { @@ -112,15 +112,15 @@ public class HasChildQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_child] query does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[has_child] requires 'query' field"); + throw new QueryParsingException(parseContext, "[has_child] requires 'query' field"); } if (childType == null) { - throw new QueryParsingException(parseContext.index(), "[has_child] requires 'type' field"); + throw new QueryParsingException(parseContext, "[has_child] requires 'type' field"); } Query innerQuery = iq.asQuery(childType); @@ -132,10 +132,10 @@ public class HasChildQueryParser implements QueryParser { DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType); if (childDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "[has_child] No mapping for for type [" + childType + "]"); + throw new QueryParsingException(parseContext, "[has_child] No mapping for for type [" + childType + "]"); } if (!childDocMapper.parentFieldMapper().active()) { - throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] does not have parent mapping"); + throw new QueryParsingException(parseContext, "[has_child] Type [" + childType + "] does not have parent mapping"); } if (innerHits != null) { @@ -146,18 +146,18 @@ public class HasChildQueryParser implements QueryParser { ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper(); if (!parentFieldMapper.active()) { - throw new QueryParsingException(parseContext.index(), "[has_child] _parent field not configured"); + throw new QueryParsingException(parseContext, "[has_child] _parent field not configured"); } String parentType = parentFieldMapper.type(); DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType - + "] points to a non existent parent type [" + parentType + "]"); + throw new QueryParsingException(parseContext, "[has_child] Type [" + childType + "] points to a non existent parent type [" + + parentType + "]"); } if (maxChildren > 0 && maxChildren < minChildren) { - throw new QueryParsingException(parseContext.index(), "[has_child] 'max_children' is less than 'min_children'"); + throw new QueryParsingException(parseContext, "[has_child] 'max_children' is less than 'min_children'"); } BitDocIdSetFilter nonNestedDocsFilter = null; diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java index fd3335202f3..388f24d4ab0 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java @@ -83,7 +83,7 @@ public class HasParentFilterParser implements FilterParser { } else if ("inner_hits".equals(currentFieldName)) { innerHits = innerHitsQueryParserHelper.parse(parseContext); } else { - throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_parent] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) { @@ -95,15 +95,15 @@ public class HasParentFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { // noop to be backwards compatible } else { - throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_parent] filter does not support [" + currentFieldName + "]"); } } } if (!queryFound && !filterFound) { - throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[has_parent] filter requires 'query' or 'filter' field"); } if (parentType == null) { - throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'parent_type' field"); + throw new QueryParsingException(parseContext, "[has_parent] filter requires 'parent_type' field"); } Query innerQuery; diff --git a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java index 9525064647b..d7d57b6ddd6 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -88,7 +87,7 @@ public class HasParentQueryParser implements QueryParser { } else if ("inner_hits".equals(currentFieldName)) { innerHits = innerHitsQueryParserHelper.parse(parseContext); } else { - throw new QueryParsingException(parseContext.index(), "[has_parent] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_parent] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) { @@ -112,15 +111,15 @@ public class HasParentQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[has_parent] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_parent] query does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[has_parent] query requires 'query' field"); + throw new QueryParsingException(parseContext, "[has_parent] query requires 'query' field"); } if (parentType == null) { - throw new QueryParsingException(parseContext.index(), "[has_parent] query requires 'parent_type' field"); + throw new QueryParsingException(parseContext, "[has_parent] query requires 'parent_type' field"); } Query innerQuery = iq.asQuery(parentType); @@ -145,7 +144,8 @@ public class HasParentQueryParser implements QueryParser { static Query createParentQuery(Query innerQuery, String parentType, boolean score, QueryParseContext parseContext, Tuple innerHits) { DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "[has_parent] query configured 'parent_type' [" + parentType + "] is not a valid type"); + throw new QueryParsingException(parseContext, "[has_parent] query configured 'parent_type' [" + parentType + + "] is not a valid type"); } if (innerHits != null) { @@ -169,7 +169,7 @@ public class HasParentQueryParser implements QueryParser { } } if (parentChildIndexFieldData == null) { - throw new QueryParsingException(parseContext.index(), "[has_parent] no _parent field configured"); + throw new QueryParsingException(parseContext, "[has_parent] no _parent field configured"); } Filter parentFilter = null; diff --git a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java index d0402aabf95..138557cd79a 100644 --- a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java @@ -68,7 +68,7 @@ public class IdsFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { BytesRef value = parser.utf8BytesOrNull(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for term filter"); + throw new QueryParsingException(parseContext, "No value specified for term filter"); } ids.add(value); } @@ -77,12 +77,12 @@ public class IdsFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String value = parser.textOrNull(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "No type specified for term filter"); + throw new QueryParsingException(parseContext, "No type specified for term filter"); } types.add(value); } } else { - throw new QueryParsingException(parseContext.index(), "[ids] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[ids] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) { @@ -90,13 +90,13 @@ public class IdsFilterParser implements FilterParser { } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[ids] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[ids] filter does not support [" + currentFieldName + "]"); } } } if (!idsProvided) { - throw new QueryParsingException(parseContext.index(), "[ids] filter requires providing a values element"); + throw new QueryParsingException(parseContext, "[ids] filter requires providing a values element"); } if (ids.isEmpty()) { diff --git a/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java b/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java index d0345944c66..3789b3039c0 100644 --- a/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java @@ -74,12 +74,12 @@ public class IdsQueryParser implements QueryParser { (token == XContentParser.Token.VALUE_NUMBER)) { BytesRef value = parser.utf8BytesOrNull(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for term filter"); + throw new QueryParsingException(parseContext, "No value specified for term filter"); } ids.add(value); } else { - throw new QueryParsingException(parseContext.index(), - "Illegal value for id, expecting a string or number, got: " + token); + throw new QueryParsingException(parseContext, "Illegal value for id, expecting a string or number, got: " + + token); } } } else if ("types".equals(currentFieldName) || "type".equals(currentFieldName)) { @@ -87,12 +87,12 @@ public class IdsQueryParser implements QueryParser { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { String value = parser.textOrNull(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "No type specified for term filter"); + throw new QueryParsingException(parseContext, "No type specified for term filter"); } types.add(value); } } else { - throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[ids] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) { @@ -102,13 +102,13 @@ public class IdsQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[ids] query does not support [" + currentFieldName + "]"); } } } if (!idsProvided) { - throw new QueryParsingException(parseContext.index(), "[ids] query, no ids values provided"); + throw new QueryParsingException(parseContext, "[ids] query, no ids values provided"); } if (ids.isEmpty()) { diff --git a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java index 93c94ee98da..e2bcd353e11 100644 --- a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java +++ b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java @@ -210,7 +210,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Exception e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(getParseContext(), "Failed to parse", e); } finally { if (parser != null) { parser.close(); @@ -230,7 +230,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Exception e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(getParseContext(), "Failed to parse", e); } finally { if (parser != null) { parser.close(); @@ -250,7 +250,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Exception e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(context, "Failed to parse", e); } finally { if (parser != null) { parser.close(); @@ -266,7 +266,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Exception e) { - throw new QueryParsingException(index, "Failed to parse [" + source + "]", e); + throw new QueryParsingException(getParseContext(), "Failed to parse [" + source + "]", e); } finally { if (parser != null) { parser.close(); @@ -282,7 +282,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { try { return innerParse(context, parser); } catch (IOException e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(context, "Failed to parse", e); } } @@ -359,7 +359,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource); parsedQuery = parse(qSourceParser); } else { - throw new QueryParsingException(index(), "request does not support [" + fieldName + "]"); + throw new QueryParsingException(getParseContext(), "request does not support [" + fieldName + "]"); } } } @@ -369,10 +369,10 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Throwable e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(getParseContext(), "Failed to parse", e); } - throw new QueryParsingException(index(), "Required query is missing"); + throw new QueryParsingException(getParseContext(), "Required query is missing"); } private ParsedQuery innerParse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException { diff --git a/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java b/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java index c1f5b804f94..7bd39dad947 100644 --- a/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java @@ -83,30 +83,30 @@ public class IndicesFilterParser implements FilterParser { noMatchFilter = parseContext.parseInnerFilter(); } } else { - throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] filter does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("indices".equals(currentFieldName)) { if (indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified"); + throw new QueryParsingException(parseContext, "[indices] indices or index already specified"); } indicesFound = true; Collection indices = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { String value = parser.textOrNull(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "[indices] no value specified for 'indices' entry"); + throw new QueryParsingException(parseContext, "[indices] no value specified for 'indices' entry"); } indices.add(value); } currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), indices.toArray(new String[indices.size()])); } else { - throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("index".equals(currentFieldName)) { if (indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified"); + throw new QueryParsingException(parseContext, "[indices] indices or index already specified"); } indicesFound = true; currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), parser.text()); @@ -120,15 +120,15 @@ public class IndicesFilterParser implements FilterParser { } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] filter does not support [" + currentFieldName + "]"); } } } if (!filterFound) { - throw new QueryParsingException(parseContext.index(), "[indices] requires 'filter' element"); + throw new QueryParsingException(parseContext, "[indices] requires 'filter' element"); } if (!indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] requires 'indices' or 'index' element"); + throw new QueryParsingException(parseContext, "[indices] requires 'indices' or 'index' element"); } Filter chosenFilter; diff --git a/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java b/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java index d5b5cefa149..a45fe9f88f6 100644 --- a/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java @@ -76,30 +76,30 @@ public class IndicesQueryParser implements QueryParser { } else if ("no_match_query".equals(currentFieldName)) { innerNoMatchQuery = new XContentStructure.InnerQuery(parseContext, null); } else { - throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("indices".equals(currentFieldName)) { if (indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified"); + throw new QueryParsingException(parseContext, "[indices] indices or index already specified"); } indicesFound = true; Collection indices = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { String value = parser.textOrNull(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "[indices] no value specified for 'indices' entry"); + throw new QueryParsingException(parseContext, "[indices] no value specified for 'indices' entry"); } indices.add(value); } currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), indices.toArray(new String[indices.size()])); } else { - throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("index".equals(currentFieldName)) { if (indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified"); + throw new QueryParsingException(parseContext, "[indices] indices or index already specified"); } indicesFound = true; currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), parser.text()); @@ -113,15 +113,15 @@ public class IndicesQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] query does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[indices] requires 'query' element"); + throw new QueryParsingException(parseContext, "[indices] requires 'query' element"); } if (!indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] requires 'indices' or 'index' element"); + throw new QueryParsingException(parseContext, "[indices] requires 'indices' or 'index' element"); } Query chosenQuery; diff --git a/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java b/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java index 858b23c6693..f4f8fde7427 100644 --- a/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java @@ -53,13 +53,13 @@ public class LimitFilterParser implements FilterParser { if ("value".equals(currentFieldName)) { limit = parser.intValue(); } else { - throw new QueryParsingException(parseContext.index(), "[limit] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[limit] filter does not support [" + currentFieldName + "]"); } } } if (limit == -1) { - throw new QueryParsingException(parseContext.index(), "No value specified for limit filter"); + throw new QueryParsingException(parseContext, "No value specified for limit filter"); } // this filter is deprecated and parses to a filter that matches everything diff --git a/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java b/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java index 2017b940921..933d3d35631 100644 --- a/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java @@ -59,7 +59,7 @@ public class MatchAllQueryParser implements QueryParser { if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new QueryParsingException(parseContext.index(), "[match_all] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[match_all] query does not support [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java b/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java index a0f595a6626..8dd35c84b4d 100644 --- a/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java @@ -65,7 +65,7 @@ public class MatchQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[match] query malformed, no field"); + throw new QueryParsingException(parseContext, "[match] query malformed, no field"); } String fieldName = parser.currentName(); @@ -93,12 +93,12 @@ public class MatchQueryParser implements QueryParser { } else if ("phrase_prefix".equals(tStr) || "phrasePrefix".equals(currentFieldName)) { type = MatchQuery.Type.PHRASE_PREFIX; } else { - throw new QueryParsingException(parseContext.index(), "[match] query does not support type " + tStr); + throw new QueryParsingException(parseContext, "[match] query does not support type " + tStr); } } else if ("analyzer".equals(currentFieldName)) { String analyzer = parser.text(); if (parseContext.analysisService().analyzer(analyzer) == null) { - throw new QueryParsingException(parseContext.index(), "[match] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[match] analyzer [" + parser.text() + "] not found"); } matchQuery.setAnalyzer(analyzer); } else if ("boost".equals(currentFieldName)) { @@ -118,7 +118,8 @@ public class MatchQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { matchQuery.setOccur(BooleanClause.Occur.MUST); } else { - throw new QueryParsingException(parseContext.index(), "text query requires operator to be either 'and' or 'or', not [" + op + "]"); + throw new QueryParsingException(parseContext, "text query requires operator to be either 'and' or 'or', not [" + + op + "]"); } } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) { minimumShouldMatch = parser.textOrNull(); @@ -139,12 +140,12 @@ public class MatchQueryParser implements QueryParser { } else if ("all".equalsIgnoreCase(zeroTermsDocs)) { matchQuery.setZeroTermsQuery(MatchQuery.ZeroTermsQuery.ALL); } else { - throw new QueryParsingException(parseContext.index(), "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); + throw new QueryParsingException(parseContext, "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[match] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[match] query does not support [" + currentFieldName + "]"); } } } @@ -154,12 +155,13 @@ public class MatchQueryParser implements QueryParser { // move to the next token token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { - throw new QueryParsingException(parseContext.index(), "[match] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); + throw new QueryParsingException(parseContext, + "[match] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); } } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No text specified for text query"); + throw new QueryParsingException(parseContext, "No text specified for text query"); } Query query = matchQuery.parse(type, fieldName, value); diff --git a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java index 10f0405b832..3f394ff735e 100644 --- a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; @@ -78,13 +77,13 @@ public class MissingFilterParser implements FilterParser { } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[missing] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[missing] filter does not support [" + currentFieldName + "]"); } } } if (fieldPattern == null) { - throw new QueryParsingException(parseContext.index(), "missing must be provided with a [field]"); + throw new QueryParsingException(parseContext, "missing must be provided with a [field]"); } return newFilter(parseContext, fieldPattern, existence, nullValue, filterName); @@ -92,7 +91,7 @@ public class MissingFilterParser implements FilterParser { public static Filter newFilter(QueryParseContext parseContext, String fieldPattern, boolean existence, boolean nullValue, String filterName) { if (!existence && !nullValue) { - throw new QueryParsingException(parseContext.index(), "missing must have either existence, or null_value, or both set to true"); + throw new QueryParsingException(parseContext, "missing must have either existence, or null_value, or both set to true"); } final FieldMappers fieldNamesMappers = parseContext.mapperService().fullName(FieldNamesFieldMapper.NAME); diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java index 0050b7199a1..b726d4f0159 100644 --- a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java @@ -155,7 +155,7 @@ public class MoreLikeThisQueryParser implements QueryParser { } else if (Fields.INCLUDE.match(currentFieldName, parseContext.parseFlags())) { include = parser.booleanValue(); } else { - throw new QueryParsingException(parseContext.index(), "[mlt] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[mlt] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if (Fields.STOP_WORDS.match(currentFieldName, parseContext.parseFlags())) { @@ -192,7 +192,7 @@ public class MoreLikeThisQueryParser implements QueryParser { parseLikeField(parser, ignoreTexts, ignoreItems); } } else { - throw new QueryParsingException(parseContext.index(), "[mlt] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[mlt] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_OBJECT) { if (Fields.LIKE.match(currentFieldName, parseContext.parseFlags())) { @@ -201,16 +201,16 @@ public class MoreLikeThisQueryParser implements QueryParser { else if (Fields.IGNORE_LIKE.match(currentFieldName, parseContext.parseFlags())) { parseLikeField(parser, ignoreTexts, ignoreItems); } else { - throw new QueryParsingException(parseContext.index(), "[mlt] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[mlt] query does not support [" + currentFieldName + "]"); } } } if (likeTexts.isEmpty() && likeItems.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "more_like_this requires 'like' to be specified"); + throw new QueryParsingException(parseContext, "more_like_this requires 'like' to be specified"); } if (moreLikeFields != null && moreLikeFields.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "more_like_this requires 'fields' to be non-empty"); + throw new QueryParsingException(parseContext, "more_like_this requires 'fields' to be non-empty"); } // set analyzer @@ -258,8 +258,9 @@ public class MoreLikeThisQueryParser implements QueryParser { } if (item.type() == null) { if (parseContext.queryTypes().size() > 1) { - throw new QueryParsingException(parseContext.index(), - "ambiguous type for item with id: " + item.id() + " and index: " + item.index()); + throw new QueryParsingException(parseContext, + "ambiguous type for item with id: " + item.id() + + " and index: " + item.index()); } else { item.type(parseContext.queryTypes().iterator().next()); } diff --git a/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java b/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java index 3fbd43651de..976dd15dc7b 100644 --- a/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import com.google.common.collect.Maps; + import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.elasticsearch.common.inject.Inject; @@ -77,8 +78,7 @@ public class MultiMatchQueryParser implements QueryParser { } else if (token.isValue()) { extractFieldAndBoost(parseContext, parser, fieldNameWithBoosts); } else { - throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query does not support [" + currentFieldName - + "]"); + throw new QueryParsingException(parseContext, "[" + NAME + "] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("query".equals(currentFieldName)) { @@ -88,7 +88,7 @@ public class MultiMatchQueryParser implements QueryParser { } else if ("analyzer".equals(currentFieldName)) { String analyzer = parser.text(); if (parseContext.analysisService().analyzer(analyzer) == null) { - throw new QueryParsingException(parseContext.index(), "["+ NAME +"] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[" + NAME + "] analyzer [" + parser.text() + "] not found"); } multiMatchQuery.setAnalyzer(analyzer); } else if ("boost".equals(currentFieldName)) { @@ -108,7 +108,8 @@ public class MultiMatchQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { multiMatchQuery.setOccur(BooleanClause.Occur.MUST); } else { - throw new QueryParsingException(parseContext.index(), "text query requires operator to be either 'and' or 'or', not [" + op + "]"); + throw new QueryParsingException(parseContext, "text query requires operator to be either 'and' or 'or', not [" + op + + "]"); } } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) { minimumShouldMatch = parser.textOrNull(); @@ -131,22 +132,22 @@ public class MultiMatchQueryParser implements QueryParser { } else if ("all".equalsIgnoreCase(zeroTermsDocs)) { multiMatchQuery.setZeroTermsQuery(MatchQuery.ZeroTermsQuery.ALL); } else { - throw new QueryParsingException(parseContext.index(), "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); + throw new QueryParsingException(parseContext, "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[match] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[match] query does not support [" + currentFieldName + "]"); } } } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No text specified for multi_match query"); + throw new QueryParsingException(parseContext, "No text specified for multi_match query"); } if (fieldNameWithBoosts.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "No fields specified for multi_match query"); + throw new QueryParsingException(parseContext, "No fields specified for multi_match query"); } if (type == null) { type = MultiMatchQueryBuilder.Type.BEST_FIELDS; diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java index f6cad0a57e0..fc2237d6630 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java @@ -70,7 +70,7 @@ public class NestedFilterParser implements FilterParser { } else if ("inner_hits".equals(currentFieldName)) { builder.setInnerHits(innerHitsQueryParserHelper.parse(parseContext)); } else { - throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[nested] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("path".equals(currentFieldName)) { @@ -84,7 +84,7 @@ public class NestedFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[nested] filter does not support [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java b/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java index 989388b79d4..ba9bcf07d46 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java @@ -75,7 +75,7 @@ public class NestedQueryParser implements QueryParser { } else if ("inner_hits".equals(currentFieldName)) { builder.setInnerHits(innerHitsQueryParserHelper.parse(parseContext)); } else { - throw new QueryParsingException(parseContext.index(), "[nested] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[nested] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("path".equals(currentFieldName)) { @@ -93,12 +93,12 @@ public class NestedQueryParser implements QueryParser { } else if ("none".equals(sScoreMode)) { scoreMode = ScoreMode.None; } else { - throw new QueryParsingException(parseContext.index(), "illegal score_mode for nested query [" + sScoreMode + "]"); + throw new QueryParsingException(parseContext, "illegal score_mode for nested query [" + sScoreMode + "]"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[nested] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[nested] query does not support [" + currentFieldName + "]"); } } } @@ -144,7 +144,7 @@ public class NestedQueryParser implements QueryParser { innerQuery = null; } } else { - throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[nested] requires either 'query' or 'filter' field"); } if (innerHits != null) { diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java index db8adccc5dd..38bff1997bb 100644 --- a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; @@ -80,13 +79,13 @@ public class NotFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[not] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[not] filter does not support [" + currentFieldName + "]"); } } } if (!filterFound) { - throw new QueryParsingException(parseContext.index(), "filter is required when using `not` filter"); + throw new QueryParsingException(parseContext, "filter is required when using `not` filter"); } if (filter == null) { diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java index 9c3ad615105..22932ac8290 100644 --- a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java @@ -100,14 +100,14 @@ public class OrFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[or] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[or] filter does not support [" + currentFieldName + "]"); } } } } if (!filtersFound) { - throw new QueryParsingException(parseContext.index(), "[or] filter requires 'filters' to be set on it'"); + throw new QueryParsingException(parseContext, "[or] filter requires 'filters' to be set on it'"); } if (filters.isEmpty()) { diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java index e6bc4e3437f..c6bf3fe0a95 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java @@ -78,7 +78,7 @@ public class PrefixFilterParser implements FilterParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for prefix filter"); + throw new QueryParsingException(parseContext, "No value specified for prefix filter"); } Filter filter = null; diff --git a/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java b/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java index 0cecb0aa651..dc59007c461 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java @@ -53,7 +53,7 @@ public class PrefixQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[prefix] query malformed, no field"); + throw new QueryParsingException(parseContext, "[prefix] query malformed, no field"); } String fieldName = parser.currentName(); String rewriteMethod = null; @@ -80,7 +80,7 @@ public class PrefixQueryParser implements QueryParser { queryName = parser.text(); } } else { - throw new QueryParsingException(parseContext.index(), "[prefix] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[prefix] query does not support [" + currentFieldName + "]"); } } parser.nextToken(); @@ -90,7 +90,7 @@ public class PrefixQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for prefix query"); + throw new QueryParsingException(parseContext, "No value specified for prefix query"); } MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewriteMethod, null); diff --git a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index 2f43985444c..39c0543759b 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -292,23 +292,23 @@ public class QueryParseContext { if (parser.currentToken() != XContentParser.Token.START_OBJECT) { token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(index, "[_na] query malformed, must start with start_object"); + throw new QueryParsingException(this, "[_na] query malformed, must start with start_object"); } } token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(index, "[_na] query malformed, no field after start_object"); + throw new QueryParsingException(this, "[_na] query malformed, no field after start_object"); } String queryName = parser.currentName(); // move to the next START_OBJECT token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) { - throw new QueryParsingException(index, "[_na] query malformed, no field after start_object"); + throw new QueryParsingException(this, "[_na] query malformed, no field after start_object"); } QueryParser queryParser = indexQueryParser.queryParser(queryName); if (queryParser == null) { - throw new QueryParsingException(index, "No query registered for [" + queryName + "]"); + throw new QueryParsingException(this, "No query registered for [" + queryName + "]"); } Query result = queryParser.parse(this); if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) { @@ -335,7 +335,7 @@ public class QueryParseContext { if (parser.currentToken() != XContentParser.Token.START_OBJECT) { token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(index, "[_na] filter malformed, must start with start_object"); + throw new QueryParsingException(this, "[_na] filter malformed, must start with start_object"); } } token = parser.nextToken(); @@ -344,18 +344,18 @@ public class QueryParseContext { if (token == XContentParser.Token.END_OBJECT || token == XContentParser.Token.VALUE_NULL) { return null; } - throw new QueryParsingException(index, "[_na] filter malformed, no field after start_object"); + throw new QueryParsingException(this, "[_na] filter malformed, no field after start_object"); } String filterName = parser.currentName(); // move to the next START_OBJECT or START_ARRAY token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) { - throw new QueryParsingException(index, "[_na] filter malformed, no field after start_object"); + throw new QueryParsingException(this, "[_na] filter malformed, no field after start_object"); } FilterParser filterParser = indexQueryParser.filterParser(filterName); if (filterParser == null) { - throw new QueryParsingException(index, "No filter registered for [" + filterName + "]"); + throw new QueryParsingException(this, "No filter registered for [" + filterName + "]"); } Filter result = executeFilterParser(filterParser); if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) { @@ -368,7 +368,7 @@ public class QueryParseContext { public Filter parseInnerFilter(String filterName) throws IOException, QueryParsingException { FilterParser filterParser = indexQueryParser.filterParser(filterName); if (filterParser == null) { - throw new QueryParsingException(index, "No filter registered for [" + filterName + "]"); + throw new QueryParsingException(this, "No filter registered for [" + filterName + "]"); } return executeFilterParser(filterParser); } @@ -432,7 +432,8 @@ public class QueryParseContext { } else { Version indexCreatedVersion = indexQueryParser.getIndexCreatedVersion(); if (fieldMapping == null && indexCreatedVersion.onOrAfter(Version.V_1_4_0_Beta1)) { - throw new QueryParsingException(index, "Strict field resolution and no field mapping can be found for the field with name [" + name + "]"); + throw new QueryParsingException(this, "Strict field resolution and no field mapping can be found for the field with name [" + + name + "]"); } else { return fieldMapping; } diff --git a/src/main/java/org/elasticsearch/index/query/QueryParsingException.java b/src/main/java/org/elasticsearch/index/query/QueryParsingException.java index 5bf1407a107..b9b0381e90e 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryParsingException.java +++ b/src/main/java/org/elasticsearch/index/query/QueryParsingException.java @@ -19,21 +19,67 @@ package org.elasticsearch.index.query; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; + /** * */ public class QueryParsingException extends IndexException { - public QueryParsingException(Index index, String msg) { - super(index, msg); + static final int UNKNOWN_POSITION = -1; + private int lineNumber = UNKNOWN_POSITION; + private int columnNumber = UNKNOWN_POSITION; + + public QueryParsingException(QueryParseContext parseContext, String msg) { + this(parseContext, msg, null); } - public QueryParsingException(Index index, String msg, Throwable cause) { + public QueryParsingException(QueryParseContext parseContext, String msg, Throwable cause) { + super(parseContext.index(), msg, cause); + + XContentParser parser = parseContext.parser(); + if (parser != null) { + XContentLocation location = parser.getTokenLocation(); + if (location != null) { + lineNumber = location.lineNumber; + columnNumber = location.columnNumber; + } + } + } + + /** + * This constructor is provided for use in unit tests where a + * {@link QueryParseContext} may not be available + */ + QueryParsingException(Index index, int line, int col, String msg, Throwable cause) { super(index, msg, cause); + this.lineNumber = line; + this.columnNumber = col; + } + + /** + * Line number of the location of the error + * + * @return the line number or -1 if unknown + */ + public int getLineNumber() { + return lineNumber; + } + + /** + * Column number of the location of the error + * + * @return the column number or -1 if unknown + */ + public int getColumnNumber() { + return columnNumber; } @Override @@ -41,4 +87,13 @@ public class QueryParsingException extends IndexException { return RestStatus.BAD_REQUEST; } + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (lineNumber != UNKNOWN_POSITION) { + builder.field("line", lineNumber); + builder.field("col", columnNumber); + } + super.innerToXContent(builder, params); + } + } diff --git a/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java b/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java index d0b07941888..402080789f4 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java @@ -126,7 +126,8 @@ public class QueryStringQueryParser implements QueryParser { } } } else { - throw new QueryParsingException(parseContext.index(), "[query_string] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[query_string] query does not support [" + currentFieldName + + "]"); } } else if (token.isValue()) { if ("query".equals(currentFieldName)) { @@ -140,18 +141,19 @@ public class QueryStringQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { qpSettings.defaultOperator(org.apache.lucene.queryparser.classic.QueryParser.Operator.AND); } else { - throw new QueryParsingException(parseContext.index(), "Query default operator [" + op + "] is not allowed"); + throw new QueryParsingException(parseContext, "Query default operator [" + op + "] is not allowed"); } } else if ("analyzer".equals(currentFieldName)) { NamedAnalyzer analyzer = parseContext.analysisService().analyzer(parser.text()); if (analyzer == null) { - throw new QueryParsingException(parseContext.index(), "[query_string] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[query_string] analyzer [" + parser.text() + "] not found"); } qpSettings.forcedAnalyzer(analyzer); } else if ("quote_analyzer".equals(currentFieldName) || "quoteAnalyzer".equals(currentFieldName)) { NamedAnalyzer analyzer = parseContext.analysisService().analyzer(parser.text()); if (analyzer == null) { - throw new QueryParsingException(parseContext.index(), "[query_string] quote_analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[query_string] quote_analyzer [" + parser.text() + + "] not found"); } qpSettings.forcedQuoteAnalyzer(analyzer); } else if ("allow_leading_wildcard".equals(currentFieldName) || "allowLeadingWildcard".equals(currentFieldName)) { @@ -199,17 +201,19 @@ public class QueryStringQueryParser implements QueryParser { try { qpSettings.timeZone(DateTimeZone.forID(parser.text())); } catch (IllegalArgumentException e) { - throw new QueryParsingException(parseContext.index(), "[query_string] time_zone [" + parser.text() + "] is unknown"); + throw new QueryParsingException(parseContext, + "[query_string] time_zone [" + parser.text() + "] is unknown"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[query_string] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[query_string] query does not support [" + currentFieldName + + "]"); } } } if (qpSettings.queryString() == null) { - throw new QueryParsingException(parseContext.index(), "query_string must be provided with a [query]"); + throw new QueryParsingException(parseContext, "query_string must be provided with a [query]"); } qpSettings.defaultAnalyzer(parseContext.mapperService().searchAnalyzer()); qpSettings.defaultQuoteAnalyzer(parseContext.mapperService().searchQuoteAnalyzer()); @@ -239,7 +243,7 @@ public class QueryStringQueryParser implements QueryParser { } return query; } catch (org.apache.lucene.queryparser.classic.ParseException e) { - throw new QueryParsingException(parseContext.index(), "Failed to parse query [" + qpSettings.queryString() + "]", e); + throw new QueryParsingException(parseContext, "Failed to parse query [" + qpSettings.queryString() + "]", e); } } } diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java index 300ed66e6d8..8b5f557d0ba 100644 --- a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java @@ -105,7 +105,7 @@ public class RangeFilterParser implements FilterParser { } else if ("format".equals(currentFieldName)) { forcedDateParser = new DateMathParser(Joda.forPattern(parser.text()), DateFieldMapper.Defaults.TIME_UNIT); } else { - throw new QueryParsingException(parseContext.index(), "[range] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[range] filter does not support [" + currentFieldName + "]"); } } } @@ -119,13 +119,13 @@ public class RangeFilterParser implements FilterParser { } else if ("execution".equals(currentFieldName)) { execution = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[range] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[range] filter does not support [" + currentFieldName + "]"); } } } if (fieldName == null) { - throw new QueryParsingException(parseContext.index(), "[range] filter no field specified for range filter"); + throw new QueryParsingException(parseContext, "[range] filter no field specified for range filter"); } Filter filter = null; @@ -136,33 +136,39 @@ public class RangeFilterParser implements FilterParser { FieldMapper mapper = smartNameFieldMappers.mapper(); if (mapper instanceof DateFieldMapper) { if ((from instanceof Number || to instanceof Number) && timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + fieldName + "]"); + throw new QueryParsingException(parseContext, + "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + + fieldName + "]"); } filter = ((DateFieldMapper) mapper).rangeFilter(from, to, includeLower, includeUpper, timeZone, forcedDateParser, parseContext); } else { if (timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone can not be applied to non date field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "[range] time_zone can not be applied to non date field [" + + fieldName + "]"); } filter = mapper.rangeFilter(from, to, includeLower, includeUpper, parseContext); } } else if ("fielddata".equals(execution)) { FieldMapper mapper = smartNameFieldMappers.mapper(); if (!(mapper instanceof NumberFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "[range] filter field [" + fieldName + "] is not a numeric type"); + throw new QueryParsingException(parseContext, "[range] filter field [" + fieldName + "] is not a numeric type"); } if (mapper instanceof DateFieldMapper) { if ((from instanceof Number || to instanceof Number) && timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + fieldName + "]"); + throw new QueryParsingException(parseContext, + "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + + fieldName + "]"); } filter = ((DateFieldMapper) mapper).rangeFilter(parseContext, from, to, includeLower, includeUpper, timeZone, forcedDateParser, parseContext); } else { if (timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone can not be applied to non date field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "[range] time_zone can not be applied to non date field [" + + fieldName + "]"); } filter = ((NumberFieldMapper) mapper).rangeFilter(parseContext, from, to, includeLower, includeUpper, parseContext); } } else { - throw new QueryParsingException(parseContext.index(), "[range] filter doesn't support [" + execution + "] execution"); + throw new QueryParsingException(parseContext, "[range] filter doesn't support [" + execution + "] execution"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java b/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java index cfc600832ec..354da1df704 100644 --- a/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java @@ -55,12 +55,12 @@ public class RangeQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[range] query malformed, no field to indicate field name"); + throw new QueryParsingException(parseContext, "[range] query malformed, no field to indicate field name"); } String fieldName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(parseContext.index(), "[range] query malformed, after field missing start object"); + throw new QueryParsingException(parseContext, "[range] query malformed, after field missing start object"); } Object from = null; @@ -106,7 +106,7 @@ public class RangeQueryParser implements QueryParser { } else if ("format".equals(currentFieldName)) { forcedDateParser = new DateMathParser(Joda.forPattern(parser.text()), DateFieldMapper.Defaults.TIME_UNIT); } else { - throw new QueryParsingException(parseContext.index(), "[range] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[range] query does not support [" + currentFieldName + "]"); } } } @@ -114,7 +114,7 @@ public class RangeQueryParser implements QueryParser { // move to the next end object, to close the field name token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { - throw new QueryParsingException(parseContext.index(), "[range] query malformed, does not end with an object"); + throw new QueryParsingException(parseContext, "[range] query malformed, does not end with an object"); } Query query = null; @@ -124,12 +124,15 @@ public class RangeQueryParser implements QueryParser { FieldMapper mapper = smartNameFieldMappers.mapper(); if (mapper instanceof DateFieldMapper) { if ((from instanceof Number || to instanceof Number) && timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + fieldName + "]"); + throw new QueryParsingException(parseContext, + "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + fieldName + + "]"); } query = ((DateFieldMapper) mapper).rangeQuery(from, to, includeLower, includeUpper, timeZone, forcedDateParser, parseContext); } else { if (timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone can not be applied to non date field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "[range] time_zone can not be applied to non date field [" + + fieldName + "]"); } //LUCENE 4 UPGRADE Mapper#rangeQuery should use bytesref as well? query = mapper.rangeQuery(from, to, includeLower, includeUpper, parseContext); diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java index 76db069af17..5f1d9174fc7 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java @@ -84,7 +84,7 @@ public class RegexpFilterParser implements FilterParser { } else if ("flags_value".equals(currentFieldName)) { flagsValue = parser.intValue(); } else { - throw new QueryParsingException(parseContext.index(), "[regexp] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[regexp] filter does not support [" + currentFieldName + "]"); } } } @@ -108,7 +108,7 @@ public class RegexpFilterParser implements FilterParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for regexp filter"); + throw new QueryParsingException(parseContext, "No value specified for regexp filter"); } Filter filter = null; diff --git a/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java b/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java index 41d53316a57..a1ec2996332 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java @@ -55,7 +55,7 @@ public class RegexpQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[regexp] query malformed, no field"); + throw new QueryParsingException(parseContext, "[regexp] query malformed, no field"); } String fieldName = parser.currentName(); String rewriteMethod = null; @@ -92,7 +92,7 @@ public class RegexpQueryParser implements QueryParser { queryName = parser.text(); } } else { - throw new QueryParsingException(parseContext.index(), "[regexp] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[regexp] query does not support [" + currentFieldName + "]"); } } parser.nextToken(); @@ -102,7 +102,7 @@ public class RegexpQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for regexp query"); + throw new QueryParsingException(parseContext, "No value specified for regexp query"); } MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewriteMethod, null); diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java index a619b3d63ba..54dbe6cc1db 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java @@ -85,7 +85,7 @@ public class ScriptFilterParser implements FilterParser { if ("params".equals(currentFieldName)) { params = parser.map(); } else { - throw new QueryParsingException(parseContext.index(), "[script] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[script] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { @@ -95,7 +95,7 @@ public class ScriptFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else if (!scriptParameterParser.token(currentFieldName, token, parser)){ - throw new QueryParsingException(parseContext.index(), "[script] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[script] filter does not support [" + currentFieldName + "]"); } } } @@ -108,7 +108,7 @@ public class ScriptFilterParser implements FilterParser { scriptLang = scriptParameterParser.lang(); if (script == null) { - throw new QueryParsingException(parseContext.index(), "script must be provided with a [script] filter"); + throw new QueryParsingException(parseContext, "script must be provided with a [script] filter"); } if (params == null) { params = newHashMap(); diff --git a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java index 43e64ce0280..446dbc95b57 100644 --- a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java +++ b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java @@ -139,8 +139,9 @@ public class SimpleQueryStringParser implements QueryParser { } } } else { - throw new QueryParsingException(parseContext.index(), - "[" + NAME + "] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, + "[" + NAME + "] query does not support [" + currentFieldName + + "]"); } } else if (token.isValue()) { if ("query".equals(currentFieldName)) { @@ -148,7 +149,7 @@ public class SimpleQueryStringParser implements QueryParser { } else if ("analyzer".equals(currentFieldName)) { analyzer = parseContext.analysisService().analyzer(parser.text()); if (analyzer == null) { - throw new QueryParsingException(parseContext.index(), "[" + NAME + "] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[" + NAME + "] analyzer [" + parser.text() + "] not found"); } } else if ("field".equals(currentFieldName)) { field = parser.text(); @@ -159,8 +160,7 @@ public class SimpleQueryStringParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { defaultOperator = BooleanClause.Occur.MUST; } else { - throw new QueryParsingException(parseContext.index(), - "[" + NAME + "] default operator [" + op + "] is not allowed"); + throw new QueryParsingException(parseContext, "[" + NAME + "] default operator [" + op + "] is not allowed"); } } else if ("flags".equals(currentFieldName)) { if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { @@ -188,14 +188,14 @@ public class SimpleQueryStringParser implements QueryParser { } else if ("minimum_should_match".equals(currentFieldName)) { minimumShouldMatch = parser.textOrNull(); } else { - throw new QueryParsingException(parseContext.index(), "[" + NAME + "] unsupported field [" + parser.currentName() + "]"); + throw new QueryParsingException(parseContext, "[" + NAME + "] unsupported field [" + parser.currentName() + "]"); } } } // Query text is required if (queryBody == null) { - throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query text missing"); + throw new QueryParsingException(parseContext, "[" + NAME + "] query text missing"); } // Support specifying only a field instead of a map diff --git a/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java index ea8ff3d3923..5a302eb17d7 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java @@ -63,11 +63,11 @@ public class SpanFirstQueryParser implements QueryParser { if ("match".equals(currentFieldName)) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanFirst [match] must be of type span query"); + throw new QueryParsingException(parseContext, "spanFirst [match] must be of type span query"); } match = (SpanQuery) query; } else { - throw new QueryParsingException(parseContext.index(), "[span_first] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_first] query does not support [" + currentFieldName + "]"); } } else { if ("boost".equals(currentFieldName)) { @@ -77,15 +77,15 @@ public class SpanFirstQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_first] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_first] query does not support [" + currentFieldName + "]"); } } } if (match == null) { - throw new QueryParsingException(parseContext.index(), "spanFirst must have [match] span query clause"); + throw new QueryParsingException(parseContext, "spanFirst must have [match] span query clause"); } if (end == -1) { - throw new QueryParsingException(parseContext.index(), "spanFirst must have [end] set for it"); + throw new QueryParsingException(parseContext, "spanFirst must have [end] set for it"); } SpanFirstQuery query = new SpanFirstQuery(match, end); diff --git a/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java index 7c9b2a67277..a44580a5176 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java @@ -51,17 +51,17 @@ public class SpanMultiTermQueryParser implements QueryParser { Token token = parser.nextToken(); if (!MATCH_NAME.equals(parser.currentName()) || token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause"); + throw new QueryParsingException(parseContext, "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause"); } token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(parseContext.index(), "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause"); + throw new QueryParsingException(parseContext, "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause"); } Query subQuery = parseContext.parseInnerQuery(); if (!(subQuery instanceof MultiTermQuery)) { - throw new QueryParsingException(parseContext.index(), "spanMultiTerm [" + MATCH_NAME + "] must be of type multi term query"); + throw new QueryParsingException(parseContext, "spanMultiTerm [" + MATCH_NAME + "] must be of type multi term query"); } parser.nextToken(); diff --git a/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java index 84283fce1a4..6ecf1b70bea 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java @@ -69,12 +69,12 @@ public class SpanNearQueryParser implements QueryParser { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanNear [clauses] must be of type span query"); + throw new QueryParsingException(parseContext, "spanNear [clauses] must be of type span query"); } clauses.add((SpanQuery) query); } } else { - throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_near] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("in_order".equals(currentFieldName) || "inOrder".equals(currentFieldName)) { @@ -88,17 +88,17 @@ public class SpanNearQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_near] query does not support [" + currentFieldName + "]"); } } else { - throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_near] query does not support [" + currentFieldName + "]"); } } if (clauses.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "span_near must include [clauses]"); + throw new QueryParsingException(parseContext, "span_near must include [clauses]"); } if (slop == null) { - throw new QueryParsingException(parseContext.index(), "span_near must include [slop]"); + throw new QueryParsingException(parseContext, "span_near must include [slop]"); } SpanNearQuery query = new SpanNearQuery(clauses.toArray(new SpanQuery[clauses.size()]), slop.intValue(), inOrder, collectPayloads); diff --git a/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java index afadf4c68ef..bcb62e7a224 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java @@ -68,17 +68,17 @@ public class SpanNotQueryParser implements QueryParser { if ("include".equals(currentFieldName)) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanNot [include] must be of type span query"); + throw new QueryParsingException(parseContext, "spanNot [include] must be of type span query"); } include = (SpanQuery) query; } else if ("exclude".equals(currentFieldName)) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanNot [exclude] must be of type span query"); + throw new QueryParsingException(parseContext, "spanNot [exclude] must be of type span query"); } exclude = (SpanQuery) query; } else { - throw new QueryParsingException(parseContext.index(), "[span_not] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_not] query does not support [" + currentFieldName + "]"); } } else { if ("dist".equals(currentFieldName)) { @@ -92,18 +92,18 @@ public class SpanNotQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_not] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_not] query does not support [" + currentFieldName + "]"); } } } if (include == null) { - throw new QueryParsingException(parseContext.index(), "spanNot must have [include] span query clause"); + throw new QueryParsingException(parseContext, "spanNot must have [include] span query clause"); } if (exclude == null) { - throw new QueryParsingException(parseContext.index(), "spanNot must have [exclude] span query clause"); + throw new QueryParsingException(parseContext, "spanNot must have [exclude] span query clause"); } if (dist != null && (pre != null || post != null)) { - throw new QueryParsingException(parseContext.index(), "spanNot can either use [dist] or [pre] & [post] (or none)"); + throw new QueryParsingException(parseContext, "spanNot can either use [dist] or [pre] & [post] (or none)"); } // set appropriate defaults diff --git a/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java index a9d12f6d941..db58d4cca82 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java @@ -66,12 +66,12 @@ public class SpanOrQueryParser implements QueryParser { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanOr [clauses] must be of type span query"); + throw new QueryParsingException(parseContext, "spanOr [clauses] must be of type span query"); } clauses.add((SpanQuery) query); } } else { - throw new QueryParsingException(parseContext.index(), "[span_or] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_or] query does not support [" + currentFieldName + "]"); } } else { if ("boost".equals(currentFieldName)) { @@ -79,12 +79,12 @@ public class SpanOrQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_or] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_or] query does not support [" + currentFieldName + "]"); } } } if (clauses.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "spanOr must include [clauses]"); + throw new QueryParsingException(parseContext, "spanOr must include [clauses]"); } SpanOrQuery query = new SpanOrQuery(clauses.toArray(new SpanQuery[clauses.size()])); diff --git a/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java index 0203bb26051..535b626306a 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java @@ -77,7 +77,7 @@ public class SpanTermQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_term] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_term] query does not support [" + currentFieldName + "]"); } } } @@ -89,7 +89,7 @@ public class SpanTermQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for term query"); + throw new QueryParsingException(parseContext, "No value specified for term query"); } BytesRef valueBytes = null; diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java index f03a8a43cae..ca077b91ee3 100644 --- a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java @@ -81,7 +81,7 @@ public class TermFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[term] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[term] filter does not support [" + currentFieldName + "]"); } } } @@ -100,11 +100,11 @@ public class TermFilterParser implements FilterParser { } if (fieldName == null) { - throw new QueryParsingException(parseContext.index(), "No field specified for term filter"); + throw new QueryParsingException(parseContext, "No field specified for term filter"); } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for term filter"); + throw new QueryParsingException(parseContext, "No value specified for term filter"); } Filter filter = null; diff --git a/src/main/java/org/elasticsearch/index/query/TermQueryParser.java b/src/main/java/org/elasticsearch/index/query/TermQueryParser.java index 2c016973b6e..3a913fc21ad 100644 --- a/src/main/java/org/elasticsearch/index/query/TermQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermQueryParser.java @@ -51,7 +51,7 @@ public class TermQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[term] query malformed, no field"); + throw new QueryParsingException(parseContext, "[term] query malformed, no field"); } String fieldName = parser.currentName(); @@ -74,7 +74,7 @@ public class TermQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[term] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[term] query does not support [" + currentFieldName + "]"); } } } @@ -86,7 +86,7 @@ public class TermQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for term query"); + throw new QueryParsingException(parseContext, "No value specified for term query"); } Query query = null; diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java index 3c5ecd15106..46c52b80f64 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java @@ -90,14 +90,14 @@ public class TermsFilterParser implements FilterParser { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { if (fieldName != null) { - throw new QueryParsingException(parseContext.index(), "[terms] filter does not support multiple fields"); + throw new QueryParsingException(parseContext, "[terms] filter does not support multiple fields"); } fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { Object value = parser.objectBytes(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for terms filter"); + throw new QueryParsingException(parseContext, "No value specified for terms filter"); } terms.add(value); } @@ -118,18 +118,19 @@ public class TermsFilterParser implements FilterParser { } else if ("routing".equals(currentFieldName)) { lookupRouting = parser.textOrNull(); } else { - throw new QueryParsingException(parseContext.index(), "[terms] filter does not support [" + currentFieldName + "] within lookup element"); + throw new QueryParsingException(parseContext, "[terms] filter does not support [" + currentFieldName + + "] within lookup element"); } } } if (lookupType == null) { - throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the type"); + throw new QueryParsingException(parseContext, "[terms] filter lookup element requires specifying the type"); } if (lookupId == null) { - throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the id"); + throw new QueryParsingException(parseContext, "[terms] filter lookup element requires specifying the id"); } if (lookupPath == null) { - throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the path"); + throw new QueryParsingException(parseContext, "[terms] filter lookup element requires specifying the path"); } } else if (token.isValue()) { if (EXECUTION_KEY.equals(currentFieldName)) { @@ -141,13 +142,13 @@ public class TermsFilterParser implements FilterParser { } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[terms] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[terms] filter does not support [" + currentFieldName + "]"); } } } if (fieldName == null) { - throw new QueryParsingException(parseContext.index(), "terms filter requires a field name, followed by array of terms"); + throw new QueryParsingException(parseContext, "terms filter requires a field name, followed by array of terms"); } FieldMapper fieldMapper = null; diff --git a/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java b/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java index 15c9f18388e..dcf078d19b1 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java @@ -75,13 +75,13 @@ public class TermsQueryParser implements QueryParser { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { if (fieldName != null) { - throw new QueryParsingException(parseContext.index(), "[terms] query does not support multiple fields"); + throw new QueryParsingException(parseContext, "[terms] query does not support multiple fields"); } fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { Object value = parser.objectBytes(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for terms query"); + throw new QueryParsingException(parseContext, "No value specified for terms query"); } values.add(value); } @@ -97,15 +97,15 @@ public class TermsQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[terms] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[terms] query does not support [" + currentFieldName + "]"); } } else { - throw new QueryParsingException(parseContext.index(), "[terms] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[terms] query does not support [" + currentFieldName + "]"); } } if (fieldName == null) { - throw new QueryParsingException(parseContext.index(), "No field specified for terms query"); + throw new QueryParsingException(parseContext, "No field specified for terms query"); } FieldMapper mapper = null; diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java index a44239e863e..095a849b792 100644 --- a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java @@ -78,7 +78,7 @@ public class TopChildrenQueryParser implements QueryParser { iq = new XContentStructure.InnerQuery(parseContext, childType == null ? null : new String[] {childType}); queryFound = true; } else { - throw new QueryParsingException(parseContext.index(), "[top_children] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[top_children] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName)) { @@ -96,15 +96,15 @@ public class TopChildrenQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[top_children] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[top_children] query does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[top_children] requires 'query' field"); + throw new QueryParsingException(parseContext, "[top_children] requires 'query' field"); } if (childType == null) { - throw new QueryParsingException(parseContext.index(), "[top_children] requires 'type' field"); + throw new QueryParsingException(parseContext, "[top_children] requires 'type' field"); } Query innerQuery = iq.asQuery(childType); @@ -115,11 +115,11 @@ public class TopChildrenQueryParser implements QueryParser { DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType); if (childDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]"); + throw new QueryParsingException(parseContext, "No mapping for for type [" + childType + "]"); } ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper(); if (!parentFieldMapper.active()) { - throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping"); + throw new QueryParsingException(parseContext, "Type [" + childType + "] does not have parent mapping"); } String parentType = childDocMapper.parentFieldMapper().type(); diff --git a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java index e4ae0b957e0..a6248a4e228 100644 --- a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java @@ -50,15 +50,15 @@ public class TypeFilterParser implements FilterParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[type] filter should have a value field, and the type name"); + throw new QueryParsingException(parseContext, "[type] filter should have a value field, and the type name"); } String fieldName = parser.currentName(); if (!fieldName.equals("value")) { - throw new QueryParsingException(parseContext.index(), "[type] filter should have a value field, and the type name"); + throw new QueryParsingException(parseContext, "[type] filter should have a value field, and the type name"); } token = parser.nextToken(); if (token != XContentParser.Token.VALUE_STRING) { - throw new QueryParsingException(parseContext.index(), "[type] filter should have a value field, and the type name"); + throw new QueryParsingException(parseContext, "[type] filter should have a value field, and the type name"); } BytesRef type = parser.utf8Bytes(); // move to the next token diff --git a/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java b/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java index 6a641e96219..a661c185878 100644 --- a/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java @@ -52,7 +52,7 @@ public class WildcardQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[wildcard] query malformed, no field"); + throw new QueryParsingException(parseContext, "[wildcard] query malformed, no field"); } String fieldName = parser.currentName(); String rewriteMethod = null; @@ -78,7 +78,7 @@ public class WildcardQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[wildcard] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[wildcard] query does not support [" + currentFieldName + "]"); } } } @@ -89,7 +89,7 @@ public class WildcardQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for prefix query"); + throw new QueryParsingException(parseContext, "No value specified for prefix query"); } BytesRef valueBytes; diff --git a/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java b/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java index 2346d65943d..35ca8724453 100644 --- a/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java @@ -48,11 +48,11 @@ public class WrapperFilterParser implements FilterParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[wrapper] filter malformed"); + throw new QueryParsingException(parseContext, "[wrapper] filter malformed"); } String fieldName = parser.currentName(); if (!fieldName.equals("filter")) { - throw new QueryParsingException(parseContext.index(), "[wrapper] filter malformed"); + throw new QueryParsingException(parseContext, "[wrapper] filter malformed"); } parser.nextToken(); diff --git a/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java b/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java index 3fc16d7af74..f7b98ad3dd5 100644 --- a/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java @@ -48,11 +48,11 @@ public class WrapperQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[wrapper] query malformed"); + throw new QueryParsingException(parseContext, "[wrapper] query malformed"); } String fieldName = parser.currentName(); if (!fieldName.equals("query")) { - throw new QueryParsingException(parseContext.index(), "[wrapper] query malformed"); + throw new QueryParsingException(parseContext, "[wrapper] query malformed"); } parser.nextToken(); diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java index 6268f4a5c74..001bdf05854 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java @@ -154,7 +154,7 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser { // the doc later MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "Unknown field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "Unknown field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.fieldMappers().mapper(); @@ -167,7 +167,7 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser { } else if (mapper instanceof NumberFieldMapper) { return parseNumberVariable(fieldName, parser, parseContext, (NumberFieldMapper) mapper, mode); } else { - throw new QueryParsingException(parseContext.index(), "Field " + fieldName + " is of type " + mapper.fieldType() + throw new QueryParsingException(parseContext, "Field " + fieldName + " is of type " + mapper.fieldType() + ", but only numeric types are supported."); } } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java index 10d4c7f3d55..734ab2f7759 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java @@ -134,7 +134,7 @@ public class FunctionScoreQueryParser implements QueryParser { // we try to parse a score function. If there is no score // function for the current field name, // functionParserMapper.get() will throw an Exception. - scoreFunction = functionParserMapper.get(parseContext.index(), currentFieldName).parse(parseContext, parser); + scoreFunction = functionParserMapper.get(parseContext, currentFieldName).parse(parseContext, parser); } if (functionArrayFound) { String errorString = "Found \"functions\": [...] already, now encountering \"" + currentFieldName + "\"."; @@ -202,8 +202,8 @@ public class FunctionScoreQueryParser implements QueryParser { ScoreFunction scoreFunction = null; Float functionWeight = null; if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(parseContext.index(), NAME + ": malformed query, expected a " - + XContentParser.Token.START_OBJECT + " while parsing functions but got a " + token); + throw new QueryParsingException(parseContext, NAME + ": malformed query, expected a " + XContentParser.Token.START_OBJECT + + " while parsing functions but got a " + token); } else { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -217,7 +217,7 @@ public class FunctionScoreQueryParser implements QueryParser { // do not need to check null here, // functionParserMapper throws exception if parser // non-existent - ScoreFunctionParser functionParser = functionParserMapper.get(parseContext.index(), currentFieldName); + ScoreFunctionParser functionParser = functionParserMapper.get(parseContext, currentFieldName); scoreFunction = functionParser.parse(parseContext, parser); } } @@ -253,7 +253,7 @@ public class FunctionScoreQueryParser implements QueryParser { } else if ("first".equals(scoreMode)) { return FiltersFunctionScoreQuery.ScoreMode.First; } else { - throw new QueryParsingException(parseContext.index(), NAME + " illegal score_mode [" + scoreMode + "]"); + throw new QueryParsingException(parseContext, NAME + " illegal score_mode [" + scoreMode + "]"); } } @@ -261,7 +261,7 @@ public class FunctionScoreQueryParser implements QueryParser { String boostMode = parser.text(); CombineFunction cf = combineFunctionsMap.get(boostMode); if (cf == null) { - throw new QueryParsingException(parseContext.index(), NAME + " illegal boost_mode [" + boostMode + "]"); + throw new QueryParsingException(parseContext, NAME + " illegal boost_mode [" + boostMode + "]"); } return cf; } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java index 4f7d9de390b..abe8b5c4e35 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java @@ -20,9 +20,10 @@ package org.elasticsearch.index.query.functionscore; import com.google.common.collect.ImmutableMap; + import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParsingException; import java.util.Set; @@ -42,10 +43,10 @@ public class ScoreFunctionParserMapper { this.functionParsers = builder.immutableMap(); } - public ScoreFunctionParser get(Index index, String parserName) { + public ScoreFunctionParser get(QueryParseContext parseContext, String parserName) { ScoreFunctionParser functionParser = get(parserName); if (functionParser == null) { - throw new QueryParsingException(index, "No function with the name [" + parserName + "] is registered."); + throw new QueryParsingException(parseContext, "No function with the name [" + parserName + "] is registered.", null); } return functionParser; } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java index c5f454ef40a..90c4b953bed 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java @@ -72,15 +72,15 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { } else if ("missing".equals(currentFieldName)) { missing = parser.doubleValue(); } else { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, NAMES[0] + " query does not support [" + currentFieldName + "]"); } } else if("factor".equals(currentFieldName) && (token == XContentParser.Token.START_ARRAY || token == XContentParser.Token.START_OBJECT)) { - throw new QueryParsingException(parseContext.index(), "[" + NAMES[0] + "] field 'factor' does not support lists or objects"); + throw new QueryParsingException(parseContext, "[" + NAMES[0] + "] field 'factor' does not support lists or objects"); } } if (field == null) { - throw new QueryParsingException(parseContext.index(), "[" + NAMES[0] + "] required field 'field' missing"); + throw new QueryParsingException(parseContext, "[" + NAMES[0] + "] required field 'field' missing"); } SearchContext searchContext = SearchContext.current(); diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java index 8bdc3074f3f..e4b26822d66 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.query.functionscore.random; import com.google.common.primitives.Longs; + import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; import org.elasticsearch.common.lucene.search.function.ScoreFunction; @@ -66,15 +67,17 @@ public class RandomScoreFunctionParser implements ScoreFunctionParser { } else if (parser.numberType() == XContentParser.NumberType.LONG) { seed = Longs.hashCode(parser.longValue()); } else { - throw new QueryParsingException(parseContext.index(), "random_score seed must be an int, long or string, not '" + token.toString() + "'"); + throw new QueryParsingException(parseContext, "random_score seed must be an int, long or string, not '" + + token.toString() + "'"); } } else if (token == XContentParser.Token.VALUE_STRING) { seed = parser.text().hashCode(); } else { - throw new QueryParsingException(parseContext.index(), "random_score seed must be an int/long or string, not '" + token.toString() + "'"); + throw new QueryParsingException(parseContext, "random_score seed must be an int/long or string, not '" + + token.toString() + "'"); } } else { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, NAMES[0] + " query does not support [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java index aaa9bec3fac..b01eaee3615 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java @@ -28,7 +28,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParsingException; import org.elasticsearch.index.query.functionscore.ScoreFunctionParser; -import org.elasticsearch.script.*; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.SearchScript; @@ -67,11 +69,11 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser { if ("params".equals(currentFieldName)) { vars = parser.map(); } else { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, NAMES[0] + " query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if (!scriptParameterParser.token(currentFieldName, token, parser)) { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, NAMES[0] + " query does not support [" + currentFieldName + "]"); } } } @@ -82,7 +84,7 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser { scriptType = scriptValue.scriptType(); } if (script == null) { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " requires 'script' field"); + throw new QueryParsingException(parseContext, NAMES[0] + " requires 'script' field"); } SearchScript searchScript; @@ -90,7 +92,7 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser { searchScript = parseContext.scriptService().search(parseContext.lookup(), new Script(scriptParameterParser.lang(), script, scriptType, vars), ScriptContext.Standard.SEARCH); return new ScriptScoreFunction(script, vars, searchScript); } catch (Exception e) { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " the script could not be loaded", e); + throw new QueryParsingException(parseContext, NAMES[0] + " the script could not be loaded", e); } } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java b/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java index 149b47eadf9..ae839c41d1c 100644 --- a/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java +++ b/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java @@ -72,7 +72,7 @@ public class InnerHitsQueryParserHelper { } } } catch (Exception e) { - throw new QueryParsingException(parserContext.index(), "Failed to parse [_inner_hits]", e); + throw new QueryParsingException(parserContext, "Failed to parse [_inner_hits]", e); } return new Tuple<>(innerHitName, subSearchContext); } diff --git a/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java b/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java index 17eb059e1d0..c96fdb7e103 100644 --- a/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java +++ b/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java @@ -106,10 +106,10 @@ public class NestedInnerQueryParseSupport { return innerQuery; } else { if (path == null) { - throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field"); + throw new QueryParsingException(parseContext, "[nested] requires 'path' field"); } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[nested] requires either 'query' or 'filter' field"); } XContentParser old = parseContext.parser(); @@ -135,10 +135,10 @@ public class NestedInnerQueryParseSupport { return innerFilter; } else { if (path == null) { - throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field"); + throw new QueryParsingException(parseContext, "[nested] requires 'path' field"); } if (!filterFound) { - throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[nested] requires either 'query' or 'filter' field"); } setPathLevel(); @@ -160,15 +160,15 @@ public class NestedInnerQueryParseSupport { this.path = path; MapperService.SmartNameObjectMapper smart = parseContext.smartObjectMapper(path); if (smart == null) { - throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]"); + throw new QueryParsingException(parseContext, "[nested] failed to find nested object under path [" + path + "]"); } childDocumentMapper = smart.docMapper(); nestedObjectMapper = smart.mapper(); if (nestedObjectMapper == null) { - throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]"); + throw new QueryParsingException(parseContext, "[nested] failed to find nested object under path [" + path + "]"); } if (!nestedObjectMapper.nested().isNested()) { - throw new QueryParsingException(parseContext.index(), "[nested] nested object under path [" + path + "] is not of nested type"); + throw new QueryParsingException(parseContext, "[nested] nested object under path [" + path + "] is not of nested type"); } } diff --git a/src/main/java/org/elasticsearch/search/SearchParseException.java b/src/main/java/org/elasticsearch/search/SearchParseException.java index 923532373a5..15c6bfd6f90 100644 --- a/src/main/java/org/elasticsearch/search/SearchParseException.java +++ b/src/main/java/org/elasticsearch/search/SearchParseException.java @@ -19,24 +19,64 @@ package org.elasticsearch.search; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.internal.SearchContext; +import java.io.IOException; + /** * */ public class SearchParseException extends SearchContextException { - public SearchParseException(SearchContext context, String msg) { - super(context, msg); + public static final int UNKNOWN_POSITION = -1; + private int lineNumber = UNKNOWN_POSITION; + private int columnNumber = UNKNOWN_POSITION; + + public SearchParseException(SearchContext context, String msg, @Nullable XContentLocation location) { + this(context, msg, location, null); } - public SearchParseException(SearchContext context, String msg, Throwable cause) { + public SearchParseException(SearchContext context, String msg, @Nullable XContentLocation location, Throwable cause) { super(context, msg, cause); + if (location != null) { + lineNumber = location.lineNumber; + columnNumber = location.columnNumber; + } } @Override public RestStatus status() { return RestStatus.BAD_REQUEST; } + + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (lineNumber != UNKNOWN_POSITION) { + builder.field("line", lineNumber); + builder.field("col", columnNumber); + } + super.innerToXContent(builder, params); + } + + /** + * Line number of the location of the error + * + * @return the line number or -1 if unknown + */ + public int getLineNumber() { + return lineNumber; + } + + /** + * Column number of the location of the error + * + * @return the column number or -1 if unknown + */ + public int getColumnNumber() { + return columnNumber; + } } diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java index 369c2cb499b..38f4e03a0f1 100644 --- a/src/main/java/org/elasticsearch/search/SearchService.java +++ b/src/main/java/org/elasticsearch/search/SearchService.java @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.ObjectSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableMap; + import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -77,10 +78,23 @@ import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.search.dfs.CachedDfSource; import org.elasticsearch.search.dfs.DfsPhase; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.*; -import org.elasticsearch.search.internal.*; +import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult; +import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.internal.DefaultSearchContext; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; -import org.elasticsearch.search.query.*; +import org.elasticsearch.search.internal.ShardSearchLocalRequest; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.query.QueryPhaseExecutionException; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.search.query.ScrollQuerySearchResult; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; @@ -718,7 +732,7 @@ public class SearchService extends AbstractLifecycleComponent { parser.nextToken(); SearchParseElement element = elementParsers.get(fieldName); if (element == null) { - throw new SearchParseException(context, "No parser for element [" + fieldName + "]"); + throw new SearchParseException(context, "No parser for element [" + fieldName + "]", parser.getTokenLocation()); } element.parse(parser, context); } else { @@ -736,7 +750,7 @@ public class SearchService extends AbstractLifecycleComponent { } catch (Throwable e1) { // ignore } - throw new SearchParseException(context, "Failed to parse source [" + sSource + "]", e); + throw new SearchParseException(context, "Failed to parse source [" + sSource + "]", parser.getTokenLocation(), e); } finally { if (parser != null) { parser.close(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index b55f6a4f022..dbf2d948a36 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.ImmutableMap; + import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; @@ -86,16 +87,19 @@ public class AggregatorParsers { XContentParser.Token token = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token != XContentParser.Token.FIELD_NAME) { - throw new SearchParseException(context, "Unexpected token " + token + " in [aggs]: aggregations definitions must start with the name of the aggregation."); + throw new SearchParseException(context, "Unexpected token " + token + + " in [aggs]: aggregations definitions must start with the name of the aggregation.", parser.getTokenLocation()); } final String aggregationName = parser.currentName(); if (!validAggMatcher.reset(aggregationName).matches()) { - throw new SearchParseException(context, "Invalid aggregation name [" + aggregationName + "]. Aggregation names must be alpha-numeric and can only contain '_' and '-'"); + throw new SearchParseException(context, "Invalid aggregation name [" + aggregationName + + "]. Aggregation names must be alpha-numeric and can only contain '_' and '-'", parser.getTokenLocation()); } token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new SearchParseException(context, "Aggregation definition for [" + aggregationName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); + throw new SearchParseException(context, "Aggregation definition for [" + aggregationName + " starts with a [" + token + + "], expected a [" + XContentParser.Token.START_OBJECT + "].", parser.getTokenLocation()); } AggregatorFactory factory = null; @@ -105,13 +109,16 @@ public class AggregatorParsers { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token != XContentParser.Token.FIELD_NAME) { - throw new SearchParseException(context, "Expected [" + XContentParser.Token.FIELD_NAME + "] under a [" + XContentParser.Token.START_OBJECT + "], but got a [" + token + "] in [" + aggregationName + "]"); + throw new SearchParseException(context, "Expected [" + XContentParser.Token.FIELD_NAME + "] under a [" + + XContentParser.Token.START_OBJECT + "], but got a [" + token + "] in [" + aggregationName + "]", + parser.getTokenLocation()); } final String fieldName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new SearchParseException(context, "Expected [" + XContentParser.Token.START_OBJECT + "] under [" + fieldName + "], but got a [" + token + "] in [" + aggregationName + "]"); + throw new SearchParseException(context, "Expected [" + XContentParser.Token.START_OBJECT + "] under [" + fieldName + + "], but got a [" + token + "] in [" + aggregationName + "]", parser.getTokenLocation()); } switch (fieldName) { @@ -121,24 +128,28 @@ public class AggregatorParsers { case "aggregations": case "aggs": if (subFactories != null) { - throw new SearchParseException(context, "Found two sub aggregation definitions under [" + aggregationName + "]"); + throw new SearchParseException(context, "Found two sub aggregation definitions under [" + aggregationName + "]", + parser.getTokenLocation()); } subFactories = parseAggregators(parser, context, level+1); break; default: if (factory != null) { - throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + factory.type + "] and [" + fieldName + "]"); + throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + + factory.type + "] and [" + fieldName + "]", parser.getTokenLocation()); } Aggregator.Parser aggregatorParser = parser(fieldName); if (aggregatorParser == null) { - throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + aggregationName + "]"); + throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + aggregationName + + "]", parser.getTokenLocation()); } factory = aggregatorParser.parse(aggregationName, parser, context); } } if (factory == null) { - throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]", + parser.getTokenLocation()); } if (metaData != null) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java index 4834774053b..aacd76b0b5b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java @@ -56,15 +56,18 @@ public class ChildrenParser implements Aggregator.Parser { if ("type".equals(currentFieldName)) { childType = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (childType == null) { - throw new SearchParseException(context, "Missing [child_type] field for children aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [child_type] field for children aggregation [" + aggregationName + "]", + parser.getTokenLocation()); } ValuesSourceConfig config = new ValuesSourceConfig<>(ValuesSource.Bytes.WithOrdinals.ParentChild.class); @@ -76,7 +79,7 @@ public class ChildrenParser implements Aggregator.Parser { if (childDocMapper != null) { ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper(); if (!parentFieldMapper.active()) { - throw new SearchParseException(context, "[children] _parent field not configured"); + throw new SearchParseException(context, "[children] _parent field not configured", parser.getTokenLocation()); } parentType = parentFieldMapper.type(); DocumentMapper parentDocMapper = context.mapperService().documentMapper(parentType); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersParser.java index 49f43eafc36..e30fcc8a3a4 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersParser.java @@ -65,7 +65,8 @@ public class FiltersParser implements Aggregator.Parser { } } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_ARRAY) { if ("filters".equals(currentFieldName)) { @@ -78,10 +79,12 @@ public class FiltersParser implements Aggregator.Parser { idx++; } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java index 9d08d2ce81a..6f316d901db 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java @@ -108,13 +108,15 @@ public class DateHistogramParser implements Aggregator.Parser { } else if (INTERVAL.match(currentFieldName)) { interval = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_NUMBER) { if ("min_doc_count".equals(currentFieldName) || "minDocCount".equals(currentFieldName)) { @@ -122,7 +124,8 @@ public class DateHistogramParser implements Aggregator.Parser { } else if ("time_zone".equals(currentFieldName) || "timeZone".equals(currentFieldName)) { timeZone = DateTimeZone.forOffsetHours(parser.intValue()); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { if ("order".equals(currentFieldName)) { @@ -147,7 +150,8 @@ public class DateHistogramParser implements Aggregator.Parser { } else if ("max".equals(currentFieldName)) { extendedBounds.maxAsStr = parser.text(); } else { - throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_NUMBER) { if ("min".equals(currentFieldName)) { @@ -155,23 +159,28 @@ public class DateHistogramParser implements Aggregator.Parser { } else if ("max".equals(currentFieldName)) { extendedBounds.max = parser.longValue(); } else { - throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (interval == null) { - throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, + "Missing required field [interval] for histogram aggregation [" + aggregationName + "]", parser.getTokenLocation()); } TimeZoneRounding.Builder tzRoundingBuilder; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java index b041ef34fdb..c703058b699 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java @@ -56,7 +56,7 @@ public class ExtendedBounds { } if (min != null && max != null && min.compareTo(max) > 0) { throw new SearchParseException(context, "[extended_bounds.min][" + min + "] cannot be greater than " + - "[extended_bounds.max][" + max + "] for histogram aggregation [" + aggName + "]"); + "[extended_bounds.max][" + max + "] for histogram aggregation [" + aggName + "]", null); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java index f316237d734..c9c885be3f5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java @@ -75,7 +75,8 @@ public class HistogramParser implements Aggregator.Parser { } else if ("offset".equals(currentFieldName)) { offset = parser.longValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { if ("order".equals(currentFieldName)) { @@ -86,7 +87,8 @@ public class HistogramParser implements Aggregator.Parser { String dir = parser.text(); boolean asc = "asc".equals(dir); if (!asc && !"desc".equals(dir)) { - throw new SearchParseException(context, "Unknown order direction [" + dir + "] in aggregation [" + aggregationName + "]. Should be either [asc] or [desc]"); + throw new SearchParseException(context, "Unknown order direction [" + dir + "] in aggregation [" + + aggregationName + "]. Should be either [asc] or [desc]", parser.getTokenLocation()); } order = resolveOrder(currentFieldName, asc); } @@ -102,21 +104,25 @@ public class HistogramParser implements Aggregator.Parser { } else if ("max".equals(currentFieldName)) { extendedBounds.max = parser.longValue(true); } else { - throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation()); } } } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in aggregation [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in aggregation [" + aggregationName + "].", + parser.getTokenLocation()); } } if (interval < 1) { - throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, + "Missing required field [interval] for histogram aggregation [" + aggregationName + "]", parser.getTokenLocation()); } Rounding rounding = new Rounding.Interval(interval); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java index b37de4c743c..6ecdc129dd0 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java @@ -52,7 +52,8 @@ public class MissingParser implements Aggregator.Parser { } else if (vsParser.token(currentFieldName, token, parser)) { continue; } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java index 61044fb4a28..56da7f51b17 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java @@ -49,16 +49,19 @@ public class NestedParser implements Aggregator.Parser { if ("path".equals(currentFieldName)) { path = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (path == null) { // "field" doesn't exist, so we fall back to the context of the ancestors - throw new SearchParseException(context, "Missing [path] field for nested aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [path] field for nested aggregation [" + aggregationName + "]", + parser.getTokenLocation()); } return new NestedAggregator.Factory(aggregationName, path, context.queryParserService().autoFilterCachePolicy()); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 7466bec3b5b..78f997a752b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -131,7 +131,8 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { // Early validation NestedAggregator closestNestedAggregator = findClosestNestedAggregator(parent); if (closestNestedAggregator == null) { - throw new SearchParseException(context.searchContext(), "Reverse nested aggregation [" + name + "] can only be used inside a [nested] aggregation"); + throw new SearchParseException(context.searchContext(), "Reverse nested aggregation [" + name + + "] can only be used inside a [nested] aggregation", null); } final ObjectMapper objectMapper; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedParser.java index 0ab7cefc9e3..80ab9f5eebd 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedParser.java @@ -49,10 +49,12 @@ public class ReverseNestedParser implements Aggregator.Parser { if ("path".equals(currentFieldName)) { path = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java index dbe05df0998..e30b84bf1de 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java @@ -89,21 +89,25 @@ public class RangeParser implements Aggregator.Parser { ranges.add(new RangeAggregator.Range(key, from, fromAsStr, to, toAsStr)); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (ranges == null) { - throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]", + parser.getTokenLocation()); } return new RangeAggregator.Factory(aggregationName, vsParser.config(), InternalRange.FACTORY, ranges, keyed); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java index 06dcba53b95..940e20a79a8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java @@ -79,7 +79,8 @@ public class DateRangeParser implements Aggregator.Parser { } else if ("to".equals(toOrFromOrKey)) { to = parser.doubleValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + + "]: [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_STRING) { if ("from".equals(toOrFromOrKey)) { @@ -89,7 +90,7 @@ public class DateRangeParser implements Aggregator.Parser { } else if ("key".equals(toOrFromOrKey)) { key = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation()); } } } @@ -100,15 +101,18 @@ public class DateRangeParser implements Aggregator.Parser { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (ranges == null) { - throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]", + parser.getTokenLocation()); } return new RangeAggregator.Factory(aggregationName, vsParser.config(), InternalDateRange.FACTORY, ranges, keyed); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java index 713b94595f5..77d19e3f5ac 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java @@ -98,13 +98,15 @@ public class GeoDistanceParser implements Aggregator.Parser { } else if ("distance_type".equals(currentFieldName) || "distanceType".equals(currentFieldName)) { distanceType = GeoDistance.fromString(parser.text()); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_ARRAY) { if ("ranges".equals(currentFieldName)) { @@ -138,20 +140,24 @@ public class GeoDistanceParser implements Aggregator.Parser { ranges.add(new RangeAggregator.Range(key(key, from, to), from, fromAsStr, to, toAsStr)); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (ranges == null) { - throw new SearchParseException(context, "Missing [ranges] in geo_distance aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [ranges] in geo_distance aggregator [" + aggregationName + "]", + parser.getTokenLocation()); } GeoPoint origin = geoPointParser.geoPoint(); if (origin == null) { - throw new SearchParseException(context, "Missing [origin] in geo_distance aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [origin] in geo_distance aggregator [" + aggregationName + "]", + parser.getTokenLocation()); } return new GeoDistanceFactory(aggregationName, vsParser.config(), InternalGeoDistance.FACTORY, origin, unit, distanceType, ranges, keyed); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java index 49c9c90b16e..37891f6f239 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java @@ -99,21 +99,25 @@ public class IpRangeParser implements Aggregator.Parser { ranges.add(range); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (ranges == null) { - throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]", + parser.getTokenLocation()); } return new RangeAggregator.Factory(aggregationName, vsParser.config(), InternalIPv4Range.FACTORY, ranges, keyed); @@ -122,7 +126,8 @@ public class IpRangeParser implements Aggregator.Parser { private static void parseMaskRange(String cidr, RangeAggregator.Range range, String aggregationName, SearchContext ctx) { long[] fromTo = IPv4RangeBuilder.cidrMaskToMinMax(cidr); if (fromTo == null) { - throw new SearchParseException(ctx, "invalid CIDR mask [" + cidr + "] in aggregation [" + aggregationName + "]"); + throw new SearchParseException(ctx, "invalid CIDR mask [" + cidr + "] in aggregation [" + aggregationName + "]", + null); } range.from = fromTo[0] < 0 ? Double.NEGATIVE_INFINITY : fromTo[0]; range.to = fromTo[1] < 0 ? Double.POSITIVE_INFINITY : fromTo[1]; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerParser.java index 35a2963187e..d82dd2c6721 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerParser.java @@ -73,17 +73,18 @@ public class SamplerParser implements Aggregator.Parser { maxDocsPerValue = parser.intValue(); } else { throw new SearchParseException(context, "Unsupported property \"" + currentFieldName + "\" for aggregation \"" - + aggregationName); + + aggregationName, parser.getTokenLocation()); } } else if (!vsParser.token(currentFieldName, token, parser)) { if (EXECUTION_HINT_FIELD.match(currentFieldName)) { executionHint = parser.text(); } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } else { throw new SearchParseException(context, "Unsupported property \"" + currentFieldName + "\" for aggregation \"" - + aggregationName); + + aggregationName, parser.getTokenLocation()); } } @@ -93,7 +94,8 @@ public class SamplerParser implements Aggregator.Parser { } else { if (diversityChoiceMade) { throw new SearchParseException(context, "Sampler aggregation has " + MAX_DOCS_PER_VALUE_FIELD.getPreferredName() - + " setting but no \"field\" or \"script\" setting to provide values for aggregation \"" + aggregationName + "\""); + + " setting but no \"field\" or \"script\" setting to provide values for aggregation \"" + aggregationName + "\"", + parser.getTokenLocation()); } return new SamplerAggregator.Factory(aggregationName, shardSize); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java index e9288528363..87a60d43967 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java @@ -68,10 +68,12 @@ public class SignificantTermsParametersParser extends AbstractTermsParametersPar } else if (BACKGROUND_FILTER.match(currentFieldName)) { filter = context.queryParserService().parseInnerFilter(parser).filter(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + + "].", parser.getTokenLocation()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParametersParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParametersParser.java index 6ae88f63c57..63166bca78c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParametersParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParametersParser.java @@ -56,7 +56,8 @@ public class TermsParametersParser extends AbstractTermsParametersParser { if ("order".equals(currentFieldName)) { this.orderElements = Collections.singletonList(parseOrderParam(aggregationName, parser, context)); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_ARRAY) { if ("order".equals(currentFieldName)) { @@ -66,18 +67,21 @@ public class TermsParametersParser extends AbstractTermsParametersParser { OrderElement orderParam = parseOrderParam(aggregationName, parser, context); orderElements.add(orderParam); } else { - throw new SearchParseException(context, "Order elements must be of type object in [" + aggregationName + "]."); + throw new SearchParseException(context, "Order elements must be of type object in [" + aggregationName + "].", + parser.getTokenLocation()); } } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if (SHOW_TERM_DOC_COUNT_ERROR.match(currentFieldName)) { showTermDocCountError = parser.booleanValue(); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + + "].", parser.getTokenLocation()); } } @@ -96,14 +100,17 @@ public class TermsParametersParser extends AbstractTermsParametersParser { } else if ("desc".equalsIgnoreCase(dir)) { orderAsc = false; } else { - throw new SearchParseException(context, "Unknown terms order direction [" + dir + "] in terms aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, "Unknown terms order direction [" + dir + "] in terms aggregation [" + + aggregationName + "]", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " for [order] in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " for [order] in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (orderKey == null) { - throw new SearchParseException(context, "Must specify at least one field for [order] in [" + aggregationName + "]."); + throw new SearchParseException(context, "Must specify at least one field for [order] in [" + aggregationName + "].", + parser.getTokenLocation()); } else { orderParam = new OrderElement(orderKey, orderAsc); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericValuesSourceMetricsAggregatorParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericValuesSourceMetricsAggregatorParser.java index ae9e6844e2f..6847a9a5b3d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericValuesSourceMetricsAggregatorParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericValuesSourceMetricsAggregatorParser.java @@ -58,7 +58,8 @@ public abstract class NumericValuesSourceMetricsAggregatorParser valuesSourceConfig, double[] keys, double compression, boolean keyed) { if (keys == null) { - throw new SearchParseException(context, "Missing token values in [" + aggregationName + "]."); + throw new SearchParseException(context, "Missing token values in [" + aggregationName + "].", null); } return new PercentileRanksAggregator.Factory(aggregationName, valuesSourceConfig, keys, compression, keyed); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index 9d52242b7bf..83787a737cf 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -22,15 +22,17 @@ package org.elasticsearch.search.aggregations.metrics.scripted; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.LeafSearchScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.*; import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext; @@ -190,7 +192,7 @@ public class ScriptedMetricAggregator extends MetricsAggregator { clone = original; } else { throw new SearchParseException(context, "Can only clone primitives, String, ArrayList, and HashMap. Found: " - + original.getClass().getCanonicalName()); + + original.getClass().getCanonicalName(), null); } return clone; } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java index 1b0b5aa3290..c37d0aaccf8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java @@ -72,14 +72,17 @@ public class ScriptedMetricParser implements Aggregator.Parser { } else if (REDUCE_PARAMS_FIELD.match(currentFieldName)) { reduceParams = parser.map(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token.isValue()) { if (!scriptParameterParser.token(currentFieldName, token, parser)) { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } @@ -114,7 +117,7 @@ public class ScriptedMetricParser implements Aggregator.Parser { scriptLang = scriptParameterParser.lang(); if (mapScript == null) { - throw new SearchParseException(context, "map_script field is required in [" + aggregationName + "]."); + throw new SearchParseException(context, "map_script field is required in [" + aggregationName + "].", parser.getTokenLocation()); } return new ScriptedMetricAggregator.Factory(aggregationName, scriptLang, initScriptType, initScript, mapScriptType, mapScript, combineScriptType, combineScript, reduceScriptType, reduceScript, params, reduceParams); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java index 18ca93495c3..ea48e4b11f8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java @@ -65,15 +65,17 @@ public class ExtendedStatsParser implements Aggregator.Parser { if (SIGMA.match(currentFieldName)) { sigma = parser.doubleValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (sigma < 0) { - throw new SearchParseException(context, "[sigma] must not be negative. Value provided was" + sigma ); + throw new SearchParseException(context, "[sigma] must not be negative. Value provided was" + sigma, parser.getTokenLocation()); } return createFactory(aggregationName, vsParser.config(), sigma); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java index 6300374663b..206587ac6a4 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java @@ -94,7 +94,8 @@ public class TopHitsParser implements Aggregator.Parser { subSearchContext.explain(parser.booleanValue()); break; default: - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_OBJECT) { switch (currentFieldName) { @@ -106,7 +107,8 @@ public class TopHitsParser implements Aggregator.Parser { scriptFieldsParseElement.parse(parser, subSearchContext); break; default: - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_ARRAY) { switch (currentFieldName) { @@ -115,10 +117,12 @@ public class TopHitsParser implements Aggregator.Parser { fieldDataFieldsParseElement.parse(parser, subSearchContext); break; default: - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } } catch (Exception e) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java index fb1d31f41f0..764f6ce9384 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java @@ -49,7 +49,8 @@ public class ValueCountParser implements Aggregator.Parser { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (!vsParser.token(currentFieldName, token, parser)) { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java b/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java index 35c381ec2a1..b423dd2f755 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java @@ -66,7 +66,8 @@ public class GeoPointParser { lat = parser.doubleValue(); } else { throw new SearchParseException(context, "malformed [" + currentFieldName + "] geo point array in [" + - aggName + "] " + aggType + " aggregation. a geo point array must be of the form [lon, lat]"); + aggName + "] " + aggType + " aggregation. a geo point array must be of the form [lon, lat]", + parser.getTokenLocation()); } } point = new GeoPoint(lat, lon); @@ -88,7 +89,7 @@ public class GeoPointParser { } if (Double.isNaN(lat) || Double.isNaN(lon)) { throw new SearchParseException(context, "malformed [" + currentFieldName + "] geo point object. either [lat] or [lon] (or both) are " + - "missing in [" + aggName + "] " + aggType + " aggregation"); + "missing in [" + aggName + "] " + aggType + " aggregation", parser.getTokenLocation()); } point = new GeoPoint(lat, lon); return true; diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java index 37182685761..88c3f64b089 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java @@ -101,7 +101,8 @@ public class ValuesSourceParser { if (targetValueType != null && input.valueType.isNotA(targetValueType)) { throw new SearchParseException(context, aggType.name() + " aggregation [" + aggName + "] was configured with an incompatible value type [" + input.valueType + "]. [" + aggType + - "] aggregation can only work on value of type [" + targetValueType + "]"); + "] aggregation can only work on value of type [" + targetValueType + "]", + parser.getTokenLocation()); } } else if (!scriptParameterParser.token(currentFieldName, token, parser)) { return false; diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java index c4d8aa80ef5..3613327c679 100644 --- a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java +++ b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.highlight; import com.google.common.collect.Lists; import com.google.common.collect.Sets; + import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentParser; @@ -70,7 +71,7 @@ public class HighlighterParseElement implements SearchParseElement { try { context.highlight(parse(parser, context.queryParserService())); } catch (IllegalArgumentException ex) { - throw new SearchParseException(context, "Error while trying to parse Highlighter element in request"); + throw new SearchParseException(context, "Error while trying to parse Highlighter element in request", parser.getTokenLocation()); } } diff --git a/src/main/java/org/elasticsearch/search/query/FromParseElement.java b/src/main/java/org/elasticsearch/search/query/FromParseElement.java index 13e58caa471..21063a93d35 100644 --- a/src/main/java/org/elasticsearch/search/query/FromParseElement.java +++ b/src/main/java/org/elasticsearch/search/query/FromParseElement.java @@ -35,7 +35,8 @@ public class FromParseElement implements SearchParseElement { if (token.isValue()) { int from = parser.intValue(); if (from < 0) { - throw new SearchParseException(context, "from is set to [" + from + "] and is expected to be higher or equal to 0"); + throw new SearchParseException(context, "from is set to [" + from + "] and is expected to be higher or equal to 0", + parser.getTokenLocation()); } context.from(from); } diff --git a/src/main/java/org/elasticsearch/search/query/SizeParseElement.java b/src/main/java/org/elasticsearch/search/query/SizeParseElement.java index b729ea4cdb2..5560ec939c4 100644 --- a/src/main/java/org/elasticsearch/search/query/SizeParseElement.java +++ b/src/main/java/org/elasticsearch/search/query/SizeParseElement.java @@ -35,7 +35,8 @@ public class SizeParseElement implements SearchParseElement { if (token.isValue()) { int size = parser.intValue(); if (size < 0) { - throw new SearchParseException(context, "size is set to [" + size + "] and is expected to be higher or equal to 0"); + throw new SearchParseException(context, "size is set to [" + size + "] and is expected to be higher or equal to 0", + parser.getTokenLocation()); } context.size(size); } diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index d0bfebe81f4..7caf89e9c08 100644 --- a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -118,15 +118,15 @@ public class ScriptSortParser implements SortParser { } if (script == null) { - throw new SearchParseException(context, "_script sorting requires setting the script to sort by"); + throw new SearchParseException(context, "_script sorting requires setting the script to sort by", parser.getTokenLocation()); } if (type == null) { - throw new SearchParseException(context, "_script sorting requires setting the type of the script"); + throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation()); } final SearchScript searchScript = context.scriptService().search(context.lookup(), new Script(scriptLang, script, scriptType, params), ScriptContext.Standard.SEARCH); if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { - throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]"); + throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation()); } if (sortMode == null) { @@ -196,7 +196,7 @@ public class ScriptSortParser implements SortParser { }; break; default: - throw new SearchParseException(context, "custom script sort type [" + type + "] not supported"); + throw new SearchParseException(context, "custom script sort type [" + type + "] not supported", parser.getTokenLocation()); } return new SortField("_script", fieldComparatorSource, reverse); diff --git a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index 4723f427dbb..aa2f1315960 100644 --- a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -212,12 +212,12 @@ public class SortParseElement implements SearchParseElement { if (unmappedType != null) { fieldMapper = context.mapperService().unmappedFieldMapper(unmappedType); } else { - throw new SearchParseException(context, "No mapping found for [" + fieldName + "] in order to sort on"); + throw new SearchParseException(context, "No mapping found for [" + fieldName + "] in order to sort on", null); } } if (!fieldMapper.isSortable()) { - throw new SearchParseException(context, "Sorting not supported for field[" + fieldName + "]"); + throw new SearchParseException(context, "Sorting not supported for field[" + fieldName + "]", null); } // Enable when we also know how to detect fields that do tokenize, but only emit one token diff --git a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index e88489c7e98..16914ab6eef 100644 --- a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexException; import org.elasticsearch.index.query.QueryParsingException; -import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.index.query.TestQueryParsingException; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchShardTarget; @@ -73,15 +73,17 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { assertEquals(rootCauses.length, 1); assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "index_exception"); assertEquals(rootCauses[0].getMessage(), "index is closed"); - ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); - ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 2)); + ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 2)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1}); if (randomBoolean()) { rootCauses = (randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex).guessRootCauses(); } else { rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex); } - assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "query_parsing_exception"); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "test_query_parsing_exception"); assertEquals(rootCauses[0].getMessage(), "foobar"); ElasticsearchException oneLevel = new ElasticsearchException("foo", new RuntimeException("foobar")); @@ -90,18 +92,23 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { assertEquals(rootCauses[0].getMessage(), "foo"); } { - ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); - ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 1)); - ShardSearchFailure failure2 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 2)); + ShardSearchFailure failure = new ShardSearchFailure( + new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), 1, 2, "foobar", null), + new SearchShardTarget("node_1", "foo1", 1)); + ShardSearchFailure failure2 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), 1, 2, "foobar", null), + new SearchShardTarget("node_1", "foo1", 2)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); final ElasticsearchException[] rootCauses = ex.guessRootCauses(); assertEquals(rootCauses.length, 2); - assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "query_parsing_exception"); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "test_query_parsing_exception"); assertEquals(rootCauses[0].getMessage(), "foobar"); assertEquals(((QueryParsingException)rootCauses[0]).index().name(), "foo"); - assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), "query_parsing_exception"); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), "test_query_parsing_exception"); assertEquals(rootCauses[1].getMessage(), "foobar"); - assertEquals(((QueryParsingException)rootCauses[1]).index().name(), "foo1"); + assertEquals(((QueryParsingException) rootCauses[1]).getLineNumber(), 1); + assertEquals(((QueryParsingException) rootCauses[1]).getColumnNumber(), 2); } @@ -118,26 +125,31 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { public void testDeduplicate() throws IOException { { - ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); - ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 2)); + ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 2)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1}); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); ex.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]}"; + String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]}"; assertEquals(expected, builder.string()); } { - ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); - ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 1)); - ShardSearchFailure failure2 = new ShardSearchFailure(new QueryParsingException(new Index("foo1"), "foobar"), new SearchShardTarget("node_1", "foo1", 2)); + ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), "foobar", null), + new SearchShardTarget("node_1", "foo1", 1)); + ShardSearchFailure failure2 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), "foobar", null), + new SearchShardTarget("node_1", "foo1", 2)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); ex.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}},{\"shard\":1,\"index\":\"foo1\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo1\"}}]}"; + String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}},{\"shard\":1,\"index\":\"foo1\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo1\"}}]}"; assertEquals(expected, builder.string()); } } @@ -182,6 +194,16 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { assertEquals(expected, builder.string()); } + { + QueryParsingException ex = new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex); + builder.endObject(); + String expected = "{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"line\":1,\"col\":2,\"index\":\"foo\"}"; + assertEquals(expected, builder.string()); + } + { // test equivalence ElasticsearchException ex = new RemoteTransportException("foobar", new FileNotFoundException("foo not found")); XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -200,13 +222,15 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { public void testSerializeElasticsearchException() throws IOException { BytesStreamOutput out = new BytesStreamOutput(); - QueryParsingException ex = new QueryParsingException(new Index("foo"), "foobar"); + QueryParsingException ex = new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null); out.writeThrowable(ex); BytesStreamInput in = new BytesStreamInput(out.bytes()); QueryParsingException e = in.readThrowable(); assertEquals(ex.index(), e.index()); assertEquals(ex.getMessage(), e.getMessage()); + assertEquals(ex.getLineNumber(), e.getLineNumber()); + assertEquals(ex.getColumnNumber(), e.getColumnNumber()); } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java b/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java new file mode 100644 index 00000000000..951b31e59a6 --- /dev/null +++ b/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.index.Index; + +/** + * Class used to avoid dragging QueryContext into unit testing framework for + * basic exception handling + */ +public class TestQueryParsingException extends QueryParsingException { + + public TestQueryParsingException(Index index, int line, int col, String msg, Throwable cause) { + super(index, line, col, msg, cause); + } + + public TestQueryParsingException(Index index, String msg, Throwable cause) { + super(index, UNKNOWN_POSITION, UNKNOWN_POSITION, msg, cause); + } +} diff --git a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index e110e6ca70d..579408366e9 100644 --- a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.index.Index; -import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.query.TestQueryParsingException; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.rest.FakeRestRequest; @@ -141,12 +141,14 @@ public class BytesRestResponseTests extends ElasticsearchTestCase { public void testConvert() throws IOException { RestRequest request = new FakeRestRequest(); RestChannel channel = new DetailedExceptionRestChannel(request); - ShardSearchFailure failure = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 1)); - ShardSearchFailure failure1 = new ShardSearchFailure(new QueryParsingException(new Index("foo"), "foobar"), new SearchShardTarget("node_1", "foo", 2)); + ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 2)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] {failure, failure1}); BytesRestResponse response = new BytesRestResponse(channel, new RemoteTransportException("foo", ex)); String text = response.content().toUtf8(); - String expected = "{\"error\":{\"root_cause\":[{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}],\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]},\"status\":400}"; + String expected = "{\"error\":{\"root_cause\":[{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}],\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]},\"status\":400}"; assertEquals(expected.trim(), text.trim()); } From 9b76be92b3f1f9ea735ee9009661379a24065294 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Wed, 29 Apr 2015 10:53:16 -0400 Subject: [PATCH 202/236] Docs: add notes about using close and awaitClose with bulk processor Closes #10839 --- docs/java-api/bulk.asciidoc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/java-api/bulk.asciidoc b/docs/java-api/bulk.asciidoc index 9ac61f47f30..96b0b2eb6dc 100644 --- a/docs/java-api/bulk.asciidoc +++ b/docs/java-api/bulk.asciidoc @@ -99,3 +99,22 @@ By default, `BulkProcessor`: * does not set flushInterval * sets concurrentRequests to 1 +When all documents are loaded to the `BulkProcessor` it can be closed by using `awaitClose` or `close` methods: + +[source,java] +-------------------------------------------------- +bulkProcessor.awaitClose(10, TimeUnit.MINUTES); +-------------------------------------------------- + +or + +[source,java] +-------------------------------------------------- +bulkProcessor.close(); +-------------------------------------------------- + +Both methods flush any remaining documents and disable all other scheduled flushes if they were scheduled by setting +`flushInterval`. If concurrent requests were enabled the `awaitClose` method waits for up to the specified timeout for +all bulk requests to complete then returns `true`, if the specified waiting time elapses before all bulk requests complete, +`false` is returned. The `close` method doesn't wait for any remaining bulk requests to complete and exists immediately. + From a33e77ff9604afb1ad5314a445ffa1bb3b3f2b2b Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 29 Apr 2015 16:04:29 +0100 Subject: [PATCH 203/236] Muted intermittently failing tests To reproduce the failures use `-Dtests.seed=D9EF60095522804F` --- .../aggregations/reducers/moving/avg/MovAvgTests.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index ae0f89ae868..069f9904a3f 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -43,7 +43,12 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; import org.junit.Test; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; @@ -303,6 +308,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { * test simple moving average on single value field */ @Test + @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") public void simpleSingleValuedField() { SearchResponse response = client() @@ -355,6 +361,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") public void linearSingleValuedField() { SearchResponse response = client() @@ -407,6 +414,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") public void singleSingleValuedField() { SearchResponse response = client() @@ -459,6 +467,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") public void doubleSingleValuedField() { SearchResponse response = client() From a202c2a43489aa83c10934e68f1981265ebbb3c6 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Wed, 29 Apr 2015 17:06:43 +0200 Subject: [PATCH 204/236] Revert "Write state also on data nodes if not master eligible" This reverts commit 4088dd38cbff19462e610db853ba1e54ee9785e4. --- .../gateway/GatewayMetaState.java | 176 ++------- .../gateway/GatewayMetaStateTests.java | 249 ------------ .../gateway/MetaDataWriteDataNodesTests.java | 354 ------------------ 3 files changed, 36 insertions(+), 743 deletions(-) delete mode 100644 src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java delete mode 100644 src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java diff --git a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index ca8edebc571..158a3df5d91 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -19,7 +19,6 @@ package org.elasticsearch.gateway; -import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -28,7 +27,9 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.cluster.routing.DjbHashFunction; +import org.elasticsearch.cluster.routing.HashFunction; +import org.elasticsearch.cluster.routing.SimpleHashFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -42,7 +43,6 @@ import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import java.util.*; /** * @@ -57,9 +57,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL private final DanglingIndicesState danglingIndicesState; @Nullable - private volatile MetaData previousMetaData; - - private volatile ImmutableSet previouslyWrittenIndices = ImmutableSet.of(); + private volatile MetaData currentMetaData; @Inject public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, @@ -78,7 +76,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) { nodeEnv.ensureAtomicMoveSupported(); } - if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) { + if (DiscoveryNode.masterNode(settings)) { try { ensureNoPre019State(); pre20Upgrade(); @@ -98,12 +96,10 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL @Override public void clusterChanged(ClusterChangedEvent event) { - Set relevantIndices = new HashSet<>(); final ClusterState state = event.state(); if (state.blocks().disableStatePersistence()) { // reset the current metadata, we need to start fresh... - this.previousMetaData = null; - previouslyWrittenIndices= ImmutableSet.of(); + this.currentMetaData = null; return; } @@ -111,47 +107,44 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL // we don't check if metaData changed, since we might be called several times and we need to check dangling... boolean success = true; - // write the state if this node is a master eligible node or if it is a data node and has shards allocated on it - if (state.nodes().localNode().masterNode() || state.nodes().localNode().dataNode()) { + // only applied to master node, writing the global and index level states + if (state.nodes().localNode().masterNode()) { // check if the global state changed? - if (previousMetaData == null || !MetaData.isGlobalStateEquals(previousMetaData, newMetaData)) { + if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) { try { metaStateService.writeGlobalState("changed", newMetaData); - // we determine if or if not we write meta data on data only nodes by looking at the shard routing - // and only write if a shard of this index is allocated on this node - // however, closed indices do not appear in the shard routing. if the meta data for a closed index is - // updated it will therefore not be written in case the list of previouslyWrittenIndices is empty (because state - // persistence was disabled or the node was restarted), see getRelevantIndicesOnDataOnlyNode(). - // we therefore have to check here if we have shards on disk and add their indices to the previouslyWrittenIndices list - if (isDataOnlyNode(state)) { - ImmutableSet.Builder previouslyWrittenIndicesBuilder = ImmutableSet.builder(); - for (IndexMetaData indexMetaData : newMetaData) { - IndexMetaData indexMetaDataOnDisk = null; - if (indexMetaData.state().equals(IndexMetaData.State.CLOSE)) { - try { - indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.index()); - } catch (IOException ex) { - throw new ElasticsearchException("failed to load index state", ex); - } - } - if (indexMetaDataOnDisk != null) { - previouslyWrittenIndicesBuilder.add(indexMetaDataOnDisk.index()); - } - } - previouslyWrittenIndices = previouslyWrittenIndicesBuilder.addAll(previouslyWrittenIndices).build(); - } } catch (Throwable e) { success = false; } } - Iterable writeInfo; - relevantIndices = getRelevantIndices(event.state(), previouslyWrittenIndices); - writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData()); // check and write changes in indices - for (IndexMetaWriteInfo indexMetaWrite : writeInfo) { + for (IndexMetaData indexMetaData : newMetaData) { + String writeReason = null; + IndexMetaData currentIndexMetaData; + if (currentMetaData == null) { + // a new event..., check from the state stored + try { + currentIndexMetaData = metaStateService.loadIndexState(indexMetaData.index()); + } catch (IOException ex) { + throw new ElasticsearchException("failed to load index state", ex); + } + } else { + currentIndexMetaData = currentMetaData.index(indexMetaData.index()); + } + if (currentIndexMetaData == null) { + writeReason = "freshly created"; + } else if (currentIndexMetaData.version() != indexMetaData.version()) { + writeReason = "version changed from [" + currentIndexMetaData.version() + "] to [" + indexMetaData.version() + "]"; + } + + // we update the writeReason only if we really need to write it + if (writeReason == null) { + continue; + } + try { - metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData, indexMetaWrite.previousMetaData); + metaStateService.writeIndex(writeReason, indexMetaData, currentIndexMetaData); } catch (Throwable e) { success = false; } @@ -161,29 +154,10 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL danglingIndicesState.processDanglingIndices(newMetaData); if (success) { - previousMetaData = newMetaData; - ImmutableSet.Builder builder= ImmutableSet.builder(); - previouslyWrittenIndices = builder.addAll(relevantIndices).build(); + currentMetaData = newMetaData; } } - public static Set getRelevantIndices(ClusterState state, ImmutableSet previouslyWrittenIndices) { - Set relevantIndices; - if (isDataOnlyNode(state)) { - relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previouslyWrittenIndices); - } else if (state.nodes().localNode().masterNode() == true) { - relevantIndices = getRelevantIndicesForMasterEligibleNode(state); - } else { - relevantIndices = Collections.emptySet(); - } - return relevantIndices; - } - - - protected static boolean isDataOnlyNode(ClusterState state) { - return ((state.nodes().localNode().masterNode() == false) && state.nodes().localNode().dataNode()); - } - /** * Throws an IAE if a pre 0.19 state is detected */ @@ -255,7 +229,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } } - if (hasCustomPre20HashFunction || pre20UseType != null) { + if (hasCustomPre20HashFunction|| pre20UseType != null) { logger.warn("Settings [{}] and [{}] are deprecated. Index settings from your old indices have been updated to record the fact that they " + "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE); } @@ -277,82 +251,4 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } } - - /** - * Loads the current meta state for each index in the new cluster state and checks if it has to be persisted. - * Each index state that should be written to disk will be returned. This is only run for data only nodes. - * It will return only the states for indices that actually have a shard allocated on the current node. - * - * @param previouslyWrittenIndices A list of indices for which the state was already written before - * @param potentiallyUnwrittenIndices The list of indices for which state should potentially be written - * @param previousMetaData The last meta data we know of. meta data for all indices in previouslyWrittenIndices list is persisted now - * @param newMetaData The new metadata - * @return iterable over all indices states that should be written to disk - */ - public static Iterable resolveStatesToBeWritten(ImmutableSet previouslyWrittenIndices, Set potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) { - List indicesToWrite = new ArrayList<>(); - for (String index : potentiallyUnwrittenIndices) { - IndexMetaData newIndexMetaData = newMetaData.index(index); - IndexMetaData previousIndexMetaData = previousMetaData == null ? null : previousMetaData.index(index); - String writeReason = null; - if (previouslyWrittenIndices.contains(index) == false || previousIndexMetaData == null) { - writeReason = "freshly created"; - } else if (previousIndexMetaData.version() != newIndexMetaData.version()) { - writeReason = "version changed from [" + previousIndexMetaData.version() + "] to [" + newIndexMetaData.version() + "]"; - } - if (writeReason != null) { - indicesToWrite.add(new GatewayMetaState.IndexMetaWriteInfo(newIndexMetaData, previousIndexMetaData, writeReason)); - } - } - return indicesToWrite; - } - - public static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, ImmutableSet previouslyWrittenIndices) { - RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().localNodeId()); - if (newRoutingNode == null) { - throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state"); - } - Set indices = new HashSet<>(); - for (MutableShardRouting routing : newRoutingNode) { - indices.add(routing.index()); - } - // we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously - for (IndexMetaData indexMetaData : state.metaData()) { - if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && state.metaData().getIndices().get(indexMetaData.getIndex()).state().equals(IndexMetaData.State.CLOSE)) { - indices.add(indexMetaData.getIndex()); - } - } - return indices; - } - - public static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) { - Set relevantIndices; - relevantIndices = new HashSet<>(); - // we have to iterate over the metadata to make sure we also capture closed indices - for (IndexMetaData indexMetaData : state.metaData()) { - relevantIndices.add(indexMetaData.getIndex()); - } - return relevantIndices; - } - - - public static class IndexMetaWriteInfo { - final IndexMetaData newMetaData; - final String reason; - final IndexMetaData previousMetaData; - - public IndexMetaWriteInfo(IndexMetaData newMetaData, IndexMetaData previousMetaData, String reason) { - this.newMetaData = newMetaData; - this.reason = reason; - this.previousMetaData = previousMetaData; - } - - public IndexMetaData getNewMetaData() { - return newMetaData; - } - - public String getReason() { - return reason; - } - } } diff --git a/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java deleted file mode 100644 index 06b958d47aa..00000000000 --- a/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gateway; - -import com.google.common.collect.ImmutableSet; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.elasticsearch.test.ElasticsearchAllocationTestCase; -import org.junit.Test; - -import java.util.*; - -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; -import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; -import static org.hamcrest.Matchers.equalTo; - -/** - * Test IndexMetaState for master and data only nodes return correct list of indices to write - * There are many parameters: - * - meta state is not in memory - * - meta state is in memory with old version/ new version - * - meta state is in memory with new version - * - version changed in cluster state event/ no change - * - node is data only node - * - node is master eligible - * for data only nodes: shard initializing on shard - */ -public class GatewayMetaStateTests extends ElasticsearchAllocationTestCase { - - ClusterChangedEvent generateEvent(boolean initializing, boolean versionChanged, boolean masterEligible) { - //ridiculous settings to make sure we don't run into uninitialized because fo default - AllocationService strategy = createAllocationService(settingsBuilder() - .put("cluster.routing.allocation.concurrent_recoveries", 100) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) - .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) - .build()); - ClusterState newClusterState, previousClusterState; - MetaData metaDataOldClusterState = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2)) - .build(); - - RoutingTable routingTableOldClusterState = RoutingTable.builder() - .addAsNew(metaDataOldClusterState.index("test")) - .build(); - - // assign all shards - ClusterState init = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) - .metaData(metaDataOldClusterState) - .routingTable(routingTableOldClusterState) - .nodes(generateDiscoveryNodes(masterEligible)) - .build(); - // new cluster state will have initializing shards on node 1 - RoutingTable routingTableNewClusterState = strategy.reroute(init).routingTable(); - if (initializing == false) { - // pretend all initialized, nothing happened - ClusterState temp = ClusterState.builder(init).routingTable(routingTableNewClusterState).metaData(metaDataOldClusterState).build(); - routingTableNewClusterState = strategy.applyStartedShards(temp, temp.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); - routingTableOldClusterState = routingTableNewClusterState; - - } else { - // nothing to do, we have one routing table with unassigned and one with initializing - } - - // create new meta data either with version changed or not - MetaData metaDataNewClusterState = MetaData.builder() - .put(init.metaData().index("test"), versionChanged) - .build(); - - - // create the cluster states with meta data and routing tables as computed before - previousClusterState = ClusterState.builder(init) - .metaData(metaDataOldClusterState) - .routingTable(routingTableOldClusterState) - .nodes(generateDiscoveryNodes(masterEligible)) - .build(); - newClusterState = ClusterState.builder(previousClusterState).routingTable(routingTableNewClusterState).metaData(metaDataNewClusterState).version(previousClusterState.getVersion() + 1).build(); - - ClusterChangedEvent event = new ClusterChangedEvent("test", newClusterState, previousClusterState); - assertThat(event.state().version(), equalTo(event.previousState().version() + 1)); - return event; - } - - ClusterChangedEvent generateCloseEvent(boolean masterEligible) { - //ridiculous settings to make sure we don't run into uninitialized because fo default - AllocationService strategy = createAllocationService(settingsBuilder() - .put("cluster.routing.allocation.concurrent_recoveries", 100) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") - .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) - .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) - .build()); - ClusterState newClusterState, previousClusterState; - MetaData metaDataIndexCreated = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2)) - .build(); - - RoutingTable routingTableIndexCreated = RoutingTable.builder() - .addAsNew(metaDataIndexCreated.index("test")) - .build(); - - // assign all shards - ClusterState init = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) - .metaData(metaDataIndexCreated) - .routingTable(routingTableIndexCreated) - .nodes(generateDiscoveryNodes(masterEligible)) - .build(); - RoutingTable routingTableInitializing = strategy.reroute(init).routingTable(); - ClusterState temp = ClusterState.builder(init).routingTable(routingTableInitializing).build(); - RoutingTable routingTableStarted = strategy.applyStartedShards(temp, temp.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); - - // create new meta data either with version changed or not - MetaData metaDataStarted = MetaData.builder() - .put(init.metaData().index("test"), true) - .build(); - - // create the cluster states with meta data and routing tables as computed before - MetaData metaDataClosed = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).state(IndexMetaData.State.CLOSE).numberOfShards(5).numberOfReplicas(2)).version(metaDataStarted.version() + 1) - .build(); - previousClusterState = ClusterState.builder(init) - .metaData(metaDataStarted) - .routingTable(routingTableStarted) - .nodes(generateDiscoveryNodes(masterEligible)) - .build(); - newClusterState = ClusterState.builder(previousClusterState) - .routingTable(routingTableIndexCreated) - .metaData(metaDataClosed) - .version(previousClusterState.getVersion() + 1).build(); - - ClusterChangedEvent event = new ClusterChangedEvent("test", newClusterState, previousClusterState); - assertThat(event.state().version(), equalTo(event.previousState().version() + 1)); - return event; - } - - private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) { - Map masterNodeAttributes = new HashMap<>(); - masterNodeAttributes.put("master", "true"); - masterNodeAttributes.put("data", "true"); - Map dataNodeAttributes = new HashMap<>(); - dataNodeAttributes.put("master", "false"); - dataNodeAttributes.put("data", "true"); - return DiscoveryNodes.builder().put(newNode("node1", masterEligible ? masterNodeAttributes : dataNodeAttributes)).put(newNode("master_node", masterNodeAttributes)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node"); - } - - public void assertState(ClusterChangedEvent event, - boolean stateInMemory, - boolean expectMetaData) throws Exception { - MetaData inMemoryMetaData = null; - ImmutableSet oldIndicesList = ImmutableSet.of(); - if (stateInMemory) { - inMemoryMetaData = event.previousState().metaData(); - ImmutableSet.Builder relevantIndices = ImmutableSet.builder(); - oldIndicesList = relevantIndices.addAll(GatewayMetaState.getRelevantIndices(event.previousState(), oldIndicesList)).build(); - } - Set newIndicesList = GatewayMetaState.getRelevantIndices(event.state(), oldIndicesList); - // third, get the actual write info - Iterator indices = GatewayMetaState.resolveStatesToBeWritten(oldIndicesList, newIndicesList, inMemoryMetaData, event.state().metaData()).iterator(); - - if (expectMetaData) { - assertThat(indices.hasNext(), equalTo(true)); - assertThat(indices.next().getNewMetaData().index(), equalTo("test")); - assertThat(indices.hasNext(), equalTo(false)); - } else { - assertThat(indices.hasNext(), equalTo(false)); - } - } - - @Test - public void testVersionChangeIsAlwaysWritten() throws Exception { - // test that version changes are always written - boolean initializing = randomBoolean(); - boolean versionChanged = true; - boolean stateInMemory = randomBoolean(); - boolean masterEligible = randomBoolean(); - boolean expectMetaData = true; - ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); - assertState(event, stateInMemory, expectMetaData); - } - - @Test - public void testNewShardsAlwaysWritten() throws Exception { - // make sure new shards on data only node always written - boolean initializing = true; - boolean versionChanged = randomBoolean(); - boolean stateInMemory = randomBoolean(); - boolean masterEligible = false; - boolean expectMetaData = true; - ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); - assertState(event, stateInMemory, expectMetaData); - } - - @Test - public void testAllUpToDateNothingWritten() throws Exception { - // make sure state is not written again if we wrote already - boolean initializing = false; - boolean versionChanged = false; - boolean stateInMemory = true; - boolean masterEligible = randomBoolean(); - boolean expectMetaData = false; - ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); - assertState(event, stateInMemory, expectMetaData); - } - - @Test - public void testNoWriteIfNothingChanged() throws Exception { - boolean initializing = false; - boolean versionChanged = false; - boolean stateInMemory = true; - boolean masterEligible = randomBoolean(); - boolean expectMetaData = false; - ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); - ClusterChangedEvent newEventWithNothingChanged = new ClusterChangedEvent("test cluster state", event.state(), event.state()); - assertState(newEventWithNothingChanged, stateInMemory, expectMetaData); - } - - @Test - public void testWriteClosedIndex() throws Exception { - // test that the closing of an index is written also on data only node - boolean masterEligible = randomBoolean(); - boolean expectMetaData = true; - boolean stateInMemory = true; - ClusterChangedEvent event = generateCloseEvent(masterEligible); - assertState(event, stateInMemory, expectMetaData); - } -} diff --git a/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java b/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java deleted file mode 100644 index 7947a6698c7..00000000000 --- a/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gateway; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.base.Predicate; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import org.elasticsearch.test.InternalTestCluster; -import org.junit.Test; - -import java.io.IOException; -import java.util.LinkedHashMap; -import java.util.Map; - -import static org.elasticsearch.client.Requests.clusterHealthRequest; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; - -/** - * - */ -@ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class MetaDataWriteDataNodesTests extends ElasticsearchIntegrationTest { - - @Test - public void testMetaWrittenAlsoOnDataNode() throws Exception { - // this test checks that index state is written on data only nodes - String masterNodeName = startMasterNode(); - String redNode = startDataNode("red"); - assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0))); - index("test", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - waitForConcreteMappingsOnAll("test", "doc", "text"); - ensureGreen("test"); - assertIndexInMetaState(redNode, "test"); - assertIndexInMetaState(masterNodeName, "test"); - //stop master node and start again with an empty data folder - ((InternalTestCluster) cluster()).stopCurrentMasterNode(); - String newMasterNode = startMasterNode(); - ensureGreen("test"); - // wait for mapping also on master becasue then we can be sure the state was written - waitForConcreteMappingsOnAll("test", "doc", "text"); - // check for meta data - assertIndexInMetaState(redNode, "test"); - assertIndexInMetaState(newMasterNode, "test"); - // check if index and doc is still there - ensureGreen("test"); - assertTrue(client().prepareGet("test", "doc", "1").get().isExists()); - } - - @Test - public void testMetaWrittenOnlyForIndicesOnNodesThatHaveAShard() throws Exception { - // this test checks that the index state is only written to a data only node if they have a shard of that index allocated on the node - String masterNode = startMasterNode(); - String blueNode = startDataNode("blue"); - String redNode = startDataNode("red"); - - assertAcked(prepareCreate("blue_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue"))); - index("blue_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); - index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - ensureGreen(); - waitForConcreteMappingsOnAll("blue_index", "doc", "text"); - waitForConcreteMappingsOnAll("red_index", "doc", "text"); - assertIndexNotInMetaState(blueNode, "red_index"); - assertIndexNotInMetaState(redNode, "blue_index"); - assertIndexInMetaState(blueNode, "blue_index"); - assertIndexInMetaState(redNode, "red_index"); - assertIndexInMetaState(masterNode, "red_index"); - assertIndexInMetaState(masterNode, "blue_index"); - - // not the index state for blue_index should only be written on blue_node and the for red_index only on red_node - // we restart red node and master but with empty data folders - stopNode(redNode); - ((InternalTestCluster) cluster()).stopCurrentMasterNode(); - masterNode = startMasterNode(); - redNode = startDataNode("red"); - - ensureGreen(); - assertIndexNotInMetaState(blueNode, "red_index"); - assertIndexInMetaState(blueNode, "blue_index"); - assertIndexNotInMetaState(redNode, "red_index"); - assertIndexNotInMetaState(redNode, "blue_index"); - assertIndexNotInMetaState(masterNode, "red_index"); - assertIndexInMetaState(masterNode, "blue_index"); - // check that blue index is still there - assertFalse(client().admin().indices().prepareExists("red_index").get().isExists()); - assertTrue(client().prepareGet("blue_index", "doc", "1").get().isExists()); - // red index should be gone - // if the blue node had stored the index state then cluster health would be red and red_index would exist - assertFalse(client().admin().indices().prepareExists("red_index").get().isExists()); - - } - - @Test - public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception { - // this test checks that the index state is removed from a data only node once all shards have been allocated away from it - String masterNode = startMasterNode(); - String blueNode = startDataNode("blue"); - String redNode = startDataNode("red"); - - // create blue_index on blue_node and same for red - client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3")).get(); - assertAcked(prepareCreate("blue_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue"))); - index("blue_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); - index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - - ensureGreen(); - assertIndexNotInMetaState(redNode, "blue_index"); - assertIndexNotInMetaState(blueNode, "red_index"); - assertIndexInMetaState(redNode, "red_index"); - assertIndexInMetaState(blueNode, "blue_index"); - assertIndexInMetaState(masterNode, "red_index"); - assertIndexInMetaState(masterNode, "blue_index"); - - // now relocate blue_index to red_node and red_index to blue_node - logger.debug("relocating indices..."); - client().admin().indices().prepareUpdateSettings("blue_index").setSettings(ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")).get(); - client().admin().indices().prepareUpdateSettings("red_index").setSettings(ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")).get(); - client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).get(); - ensureGreen(); - assertIndexNotInMetaState(redNode, "red_index"); - assertIndexNotInMetaState(blueNode, "blue_index"); - assertIndexInMetaState(redNode, "blue_index"); - assertIndexInMetaState(blueNode, "red_index"); - assertIndexInMetaState(masterNode, "red_index"); - assertIndexInMetaState(masterNode, "blue_index"); - waitForConcreteMappingsOnAll("blue_index", "doc", "text"); - waitForConcreteMappingsOnAll("red_index", "doc", "text"); - - //at this point the blue_index is on red node and the red_index on blue node - // now, when we start red and master node again but without data folder, the red index should be gone but the blue index should initialize fine - stopNode(redNode); - ((InternalTestCluster) cluster()).stopCurrentMasterNode(); - masterNode = startMasterNode(); - redNode = startDataNode("red"); - ensureGreen(); - assertIndexNotInMetaState(redNode, "blue_index"); - assertIndexNotInMetaState(blueNode, "blue_index"); - assertIndexNotInMetaState(redNode, "red_index"); - assertIndexInMetaState(blueNode, "red_index"); - assertIndexInMetaState(masterNode, "red_index"); - assertIndexNotInMetaState(masterNode, "blue_index"); - assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists()); - // if the red_node had stored the index state then cluster health would be red and blue_index would exist - assertFalse(client().admin().indices().prepareExists("blue_index").get().isExists()); - } - - @Test - public void testMetaWrittenWhenIndexIsClosed() throws Exception { - String masterNode = startMasterNode(); - String redNodeDataPath = createTempDir().toString(); - String redNode = startDataNode("red", redNodeDataPath); - String blueNode = startDataNode("blue"); - // create red_index on red_node and same for red - client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3")).get(); - assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); - index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - - ensureGreen(); - assertIndexNotInMetaState(blueNode, "red_index"); - assertIndexInMetaState(redNode, "red_index"); - assertIndexInMetaState(masterNode, "red_index"); - - waitForConcreteMappingsOnAll("red_index", "doc", "text"); - client().admin().indices().prepareClose("red_index").get(); - // close the index - ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); - assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); - - // restart master with empty data folder and maybe red node - boolean restartRedNode = randomBoolean(); - //at this point the red_index on red node - if (restartRedNode) { - stopNode(redNode); - } - ((InternalTestCluster) cluster()).stopCurrentMasterNode(); - masterNode = startMasterNode(); - if (restartRedNode) { - redNode = startDataNode("red", redNodeDataPath); - } - - ensureGreen("red_index"); - assertIndexNotInMetaState(blueNode, "red_index"); - assertIndexInMetaState(redNode, "red_index"); - assertIndexInMetaState(masterNode, "red_index"); - clusterStateResponse = client().admin().cluster().prepareState().get(); - assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); - - // open the index again - client().admin().indices().prepareOpen("red_index").get(); - clusterStateResponse = client().admin().cluster().prepareState().get(); - assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.OPEN.name())); - // restart again - ensureGreen(); - if (restartRedNode) { - stopNode(redNode); - } - ((InternalTestCluster) cluster()).stopCurrentMasterNode(); - masterNode = startMasterNode(); - if (restartRedNode) { - redNode = startDataNode("red", redNodeDataPath); - } - ensureGreen("red_index"); - assertIndexNotInMetaState(blueNode, "red_index"); - assertIndexInMetaState(redNode, "red_index"); - assertIndexInMetaState(masterNode, "red_index"); - clusterStateResponse = client().admin().cluster().prepareState().get(); - assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.OPEN.name())); - assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists()); - } - @Test - public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { - String masterNode = startMasterNode(); - String redNodeDataPath = createTempDir().toString(); - String redNode = startDataNode("red", redNodeDataPath); - // create red_index on red_node and same for red - client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2")).get(); - assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); - index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); - - logger.info("--> wait for green red_index"); - ensureGreen(); - logger.info("--> wait for meta state written for red_index"); - assertIndexInMetaState(redNode, "red_index"); - assertIndexInMetaState(masterNode, "red_index"); - - waitForConcreteMappingsOnAll("red_index", "doc", "text"); - - logger.info("--> close red_index"); - client().admin().indices().prepareClose("red_index").get(); - // close the index - ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); - assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); - - logger.info("--> restart red node"); - stopNode(redNode); - redNode = startDataNode("red", redNodeDataPath); - client().admin().indices().preparePutMapping("red_index").setType("doc").setSource(jsonBuilder().startObject() - .startObject("properties") - .startObject("integer_field") - .field("type", "integer") - .endObject() - .endObject() - .endObject()).get(); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get(); - assertNotNull(((LinkedHashMap)(getMappingsResponse.getMappings().get("red_index").get("doc").getSourceAsMap().get("properties"))).get("integer_field")); - // restart master with empty data folder and maybe red node - ((InternalTestCluster) cluster()).stopCurrentMasterNode(); - masterNode = startMasterNode(); - - ensureGreen("red_index"); - assertIndexInMetaState(redNode, "red_index"); - assertIndexInMetaState(masterNode, "red_index"); - clusterStateResponse = client().admin().cluster().prepareState().get(); - assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); - getMappingsResponse = client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get(); - assertNotNull(((LinkedHashMap)(getMappingsResponse.getMappings().get("red_index").get("doc").getSourceAsMap().get("properties"))).get("integer_field")); - - } - - private String startDataNode(String color) { - return startDataNode(color, createTempDir().toString()); - } - - private String startDataNode(String color, String newDataPath) { - ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder() - .put("node.data", true) - .put("node.master", false) - .put("node.color", color) - .put("path.data", newDataPath); - return internalCluster().startNode(settingsBuilder.build()); - } - - private String startMasterNode() { - ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder() - .put("node.data", false) - .put("node.master", true) - .put("path.data", createTempDir().toString()); - return internalCluster().startNode(settingsBuilder.build()); - } - - private void stopNode(String name) throws IOException { - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(name)); - } - - protected void assertIndexNotInMetaState(String nodeName, String indexName) throws Exception { - assertMetaState(nodeName, indexName, false); - } - - protected void assertIndexInMetaState(String nodeName, String indexName) throws Exception { - assertMetaState(nodeName, indexName, true); - } - - private void assertMetaState(final String nodeName, final String indexName, final boolean shouldBe) throws Exception { - awaitBusy(new Predicate() { - @Override - public boolean apply(Object o) { - logger.info("checking if meta state exists..."); - return shouldBe == metaStateExists(nodeName, indexName); - } - }); - boolean inMetaSate = metaStateExists(nodeName, indexName); - if (shouldBe) { - assertTrue("expected " + indexName + " in meta state of node " + nodeName, inMetaSate); - } else { - assertFalse("expected " + indexName + " to not be in meta state of node " + nodeName, inMetaSate); - } - } - - private boolean metaStateExists(String nodeName, String indexName) { - GatewayMetaState redNodeMetaState = ((InternalTestCluster) cluster()).getInstance(GatewayMetaState.class, nodeName); - MetaData redNodeMetaData = null; - try { - redNodeMetaData = redNodeMetaState.loadMetaState(); - } catch (Exception e) { - fail("failed to load meta state"); - } - ImmutableOpenMap indices = redNodeMetaData.getIndices(); - boolean inMetaSate = false; - for (ObjectObjectCursor index : indices) { - inMetaSate = inMetaSate || index.key.equals(indexName); - } - return inMetaSate; - } -} From eb44e950d494d1edb16bf03a84f13f26ef093630 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 29 Apr 2015 17:23:33 +0200 Subject: [PATCH 205/236] Java Api: remove unused private static class PartialField from SearchSourceBuilder Partial fields have been removed from master a while ago, this is a leftover. --- .../search/builder/SearchSourceBuilder.java | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 892cc2085ae..4f54fc1768f 100644 --- a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -885,34 +885,4 @@ public class SearchSourceBuilder implements ToXContent { return params; } } - - private static class PartialField { - private final String name; - private final String[] includes; - private final String[] excludes; - - private PartialField(String name, String[] includes, String[] excludes) { - this.name = name; - this.includes = includes; - this.excludes = excludes; - } - - private PartialField(String name, String include, String exclude) { - this.name = name; - this.includes = include == null ? null : new String[]{include}; - this.excludes = exclude == null ? null : new String[]{exclude}; - } - - public String name() { - return name; - } - - public String[] includes() { - return includes; - } - - public String[] excludes() { - return excludes; - } - } } From d4463602f68f039069d9fe8ceaf16a639fc52c9f Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 29 Apr 2015 17:51:21 +0200 Subject: [PATCH 206/236] [TEST] Use a high shard delete timeout when clusterstates are delayed `IndiceStore#indexCleanup` uses a disruption scheme to delay cluster state processing. Yet, the delay is [1..2] seconds but tests are setting the shard deletion timeout to 1 second to speed up tests. This can cause random not reproducible failures in this test since the timeouts and delays are bascially overlapping. This commit adds a longer timeout for this test to prevent these problems. --- .../indices/store/IndicesStore.java | 97 ++++++++++--------- .../store/IndicesStoreIntegrationTests.java | 15 ++- 2 files changed, 60 insertions(+), 52 deletions(-) diff --git a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 32e695a828c..36c9be862ee 100644 --- a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -332,56 +332,57 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe // make sure shard is really there before register cluster state observer if (indexShard == null) { channel.sendResponse(new ShardActiveResponse(false, clusterService.localNode())); - } - // create observer here. we need to register it here because we need to capture the current cluster state - // which will then be compared to the one that is applied when we call waitForNextChange(). if we create it - // later we might miss an update and wait forever in case no new cluster state comes in. - // in general, using a cluster state observer here is a workaround for the fact that we cannot listen on shard state changes explicitly. - // instead we wait for the cluster state changes because we know any shard state change will trigger or be - // triggered by a cluster state change. - ClusterStateObserver observer = new ClusterStateObserver(clusterService, request.timeout, logger); - // check if shard is active. if so, all is good - boolean shardActive = shardActive(indexShard); - if (shardActive) { - channel.sendResponse(new ShardActiveResponse(true, clusterService.localNode())); } else { - // shard is not active, might be POST_RECOVERY so check if cluster state changed inbetween or wait for next change - observer.waitForNextChange(new ClusterStateObserver.Listener() { - @Override - public void onNewClusterState(ClusterState state) { - sendResult(shardActive(getShard(request))); - } - - @Override - public void onClusterServiceClose() { - sendResult(false); - } - - @Override - public void onTimeout(TimeValue timeout) { - sendResult(shardActive(getShard(request))); - } - - public void sendResult(boolean shardActive) { - try { - channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode())); - } catch (IOException e) { - logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId); - } catch (EsRejectedExecutionException e) { - logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId); + // create observer here. we need to register it here because we need to capture the current cluster state + // which will then be compared to the one that is applied when we call waitForNextChange(). if we create it + // later we might miss an update and wait forever in case no new cluster state comes in. + // in general, using a cluster state observer here is a workaround for the fact that we cannot listen on shard state changes explicitly. + // instead we wait for the cluster state changes because we know any shard state change will trigger or be + // triggered by a cluster state change. + ClusterStateObserver observer = new ClusterStateObserver(clusterService, request.timeout, logger); + // check if shard is active. if so, all is good + boolean shardActive = shardActive(indexShard); + if (shardActive) { + channel.sendResponse(new ShardActiveResponse(true, clusterService.localNode())); + } else { + // shard is not active, might be POST_RECOVERY so check if cluster state changed inbetween or wait for next change + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + sendResult(shardActive(getShard(request))); } - } - }, new ClusterStateObserver.ValidationPredicate() { - @Override - protected boolean validate(ClusterState newState) { - // the shard is not there in which case we want to send back a false (shard is not active), so the cluster state listener must be notified - // or the shard is active in which case we want to send back that the shard is active - // here we could also evaluate the cluster state and get the information from there. we - // don't do it because we would have to write another method for this that would have the same effect - IndexShard indexShard = getShard(request); - return indexShard == null || shardActive(indexShard); - } - }); + + @Override + public void onClusterServiceClose() { + sendResult(false); + } + + @Override + public void onTimeout(TimeValue timeout) { + sendResult(shardActive(getShard(request))); + } + + public void sendResult(boolean shardActive) { + try { + channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode())); + } catch (IOException e) { + logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId); + } catch (EsRejectedExecutionException e) { + logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId); + } + } + }, new ClusterStateObserver.ValidationPredicate() { + @Override + protected boolean validate(ClusterState newState) { + // the shard is not there in which case we want to send back a false (shard is not active), so the cluster state listener must be notified + // or the shard is active in which case we want to send back that the shard is active + // here we could also evaluate the cluster state and get the information from there. we + // don't do it because we would have to write another method for this that would have the same effect + IndexShard indexShard = getShard(request); + return indexShard == null || shardActive(indexShard); + } + }); + } } } diff --git a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java index e1efe59776d..386c778b07e 100644 --- a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -45,6 +46,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -58,7 +60,12 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { @Override protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use a single data path - return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("path.data", "").build(); + return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("path.data", "") + // by default this value is 1 sec in tests (30 sec in practice) but we adding disruption here + // which is between 1 and 2 sec can cause each of the shard deletion requests to timeout. + // to prevent this we are setting the timeout here to something highish ie. the default in practice + .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS)) + .build(); } @Test @@ -97,9 +104,8 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(false)); logger.info("--> move shard from node_1 to node_3, and wait for relocation to finish"); - SlowClusterStateProcessing disruption = null; - if (randomBoolean()) { - disruption = new SlowClusterStateProcessing(node_3, getRandom(), 0, 0, 1000, 2000); + if (randomBoolean()) { // sometimes add cluster-state delay to trigger observers in IndicesStore.ShardActiveRequestHandler + final SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(node_3, getRandom(), 0, 0, 1000, 2000); internalCluster().setDisruptionScheme(disruption); disruption.startDisrupting(); } @@ -116,6 +122,7 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true)); assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true)); assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(true)); + } @Test From 3c3e9b63a7ae59c276cdf098ae96f24a6faaba6f Mon Sep 17 00:00:00 2001 From: David Pilato Date: Wed, 29 Apr 2015 18:10:02 +0200 Subject: [PATCH 207/236] fix: query string time zone not working If you define exactly the same date range query using either `DATE+0200` notation or `DATE` and set `timezone: +0200`, elasticsearch gives back different results: ``` DELETE foo PUT /foo { "mapping": { "tweets": { "properties": { "tweet_date": { "type": "date" } } } } } POST /foo/tweets/1/ { "tweet_date": "2015-04-05T23:00:00+0000" } POST /foo/tweets/2/ { "tweet_date": "2015-04-06T00:00:00+0000" } GET /foo/tweets/_search?pretty { "query": { "query_string": { "query": "tweet_date:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]" } } } GET /foo/tweets/_search?pretty { "query": { "query_string": { "query": "tweet_date:[2015-04-06T00:00:00 TO 2015-04-06T23:00:00]", "time_zone": "+0200" } } } ``` This PR fixes it and will also allow us to add the same feature to simple_query_string as well in another PR. Closes #10477. (cherry picked from commit 880f4a0) --- .../classic/MapperQueryParser.java | 13 +++++-- .../search/query/SearchQueryTests.java | 38 +++++++++++++++++++ 2 files changed, 47 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index c87f9144709..d55374cd5b9 100644 --- a/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.support.QueryParsers; @@ -131,9 +132,6 @@ public class MapperQueryParser extends QueryParser { setFuzzyMinSim(settings.fuzzyMinSim()); setFuzzyPrefixLength(settings.fuzzyPrefixLength()); setLocale(settings.locale()); - if (settings.timeZone() != null) { - setTimeZone(settings.timeZone().toTimeZone()); - } this.analyzeWildcard = settings.analyzeWildcard(); } @@ -377,7 +375,14 @@ public class MapperQueryParser extends QueryParser { } try { - return currentMapper.rangeQuery(part1, part2, startInclusive, endInclusive, parseContext); + Query rangeQuery; + if (currentMapper instanceof DateFieldMapper && settings.timeZone() != null) { + DateFieldMapper dateFieldMapper = (DateFieldMapper) this.currentMapper; + rangeQuery = dateFieldMapper.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null, parseContext); + } else { + rangeQuery = currentMapper.rangeQuery(part1, part2, startInclusive, endInclusive, parseContext); + } + return rangeQuery; } catch (RuntimeException e) { if (settings.lenient()) { return null; diff --git a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java index 6a575fc38c7..8d8e948f769 100644 --- a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java +++ b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java @@ -587,6 +587,44 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { assertHitCount(searchResponse, 1l); } + @Test // https://github.com/elasticsearch/elasticsearch/issues/10477 + public void testDateRangeInQueryStringWithTimeZone_10477() { + //the mapping needs to be provided upfront otherwise we are not sure how many failures we get back + //as with dynamic mappings some shards might be lacking behind and parse a different query + assertAcked(prepareCreate("test").addMapping( + "type", "past", "type=date" + )); + ensureGreen(); + + client().prepareIndex("test", "type", "1").setSource("past", "2015-04-05T23:00:00+0000").get(); + client().prepareIndex("test", "type", "2").setSource("past", "2015-04-06T00:00:00+0000").get(); + refresh(); + + // Timezone set with dates + SearchResponse searchResponse = client().prepareSearch() + .setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")) + .get(); + assertHitCount(searchResponse, 2l); + + // Same timezone set with time_zone + searchResponse = client().prepareSearch() + .setQuery(queryStringQuery("past:[2015-04-06T00:00:00 TO 2015-04-06T23:00:00]").timeZone("+0200")) + .get(); + assertHitCount(searchResponse, 2l); + + // We set a timezone which will give no result + searchResponse = client().prepareSearch() + .setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]")) + .get(); + assertHitCount(searchResponse, 0l); + + // Same timezone set with time_zone but another timezone is set directly within dates which has the precedence + searchResponse = client().prepareSearch() + .setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]").timeZone("+0200")) + .get(); + assertHitCount(searchResponse, 0l); + } + @Test public void typeFilterTypeIndexedTests() throws Exception { typeFilterTests("not_analyzed"); From 6e1c99574180c7798a2d544f0f60dc4509f4f69b Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Wed, 29 Apr 2015 10:40:51 -0600 Subject: [PATCH 208/236] Clarify logging about disk thresholds in DiskThresholdDecider --- .../routing/allocation/decider/DiskThresholdDecider.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 726a588d1bf..a3969dcc232 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -142,19 +142,19 @@ public class DiskThresholdDecider extends AllocationDecider { private void warnAboutDiskIfNeeded(DiskUsage usage) { // Check absolute disk values if (usage.getFreeBytes() < DiskThresholdDecider.this.freeBytesThresholdHigh.bytes()) { - logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node", + logger.warn("high disk watermark [{} free] exceeded on {}, shards will be relocated away from this node", DiskThresholdDecider.this.freeBytesThresholdHigh, usage); } else if (usage.getFreeBytes() < DiskThresholdDecider.this.freeBytesThresholdLow.bytes()) { - logger.info("low disk watermark [{}] exceeded on {}, replicas will not be assigned to this node", + logger.info("low disk watermark [{} free] exceeded on {}, replicas will not be assigned to this node", DiskThresholdDecider.this.freeBytesThresholdLow, usage); } // Check percentage disk values if (usage.getFreeDiskAsPercentage() < DiskThresholdDecider.this.freeDiskThresholdHigh) { - logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node", + logger.warn("high disk watermark [{} free] exceeded on {}, shards will be relocated away from this node", Strings.format1Decimals(DiskThresholdDecider.this.freeDiskThresholdHigh, "%"), usage); } else if (usage.getFreeDiskAsPercentage() < DiskThresholdDecider.this.freeDiskThresholdLow) { - logger.info("low disk watermark [{}] exceeded on {}, replicas will not be assigned to this node", + logger.info("low disk watermark [{} free] exceeded on {}, replicas will not be assigned to this node", Strings.format1Decimals(DiskThresholdDecider.this.freeDiskThresholdLow, "%"), usage); } } From 478c253f8929682675298cd9e491963090b897b8 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Sun, 26 Apr 2015 21:04:41 -0400 Subject: [PATCH 209/236] Add support for cluster state diffs Adds support for calculating and sending diffs instead of full cluster state of the most frequently changing elements - cluster state, meta data and routing table. Closes #6295 --- .../reroute/ClusterRerouteResponse.java | 2 +- .../cluster/state/ClusterStateResponse.java | 2 +- .../state/TransportClusterStateAction.java | 15 +- .../indices/alias/get/GetAliasesResponse.java | 2 +- .../indices/create/CreateIndexRequest.java | 10 +- .../admin/indices/get/GetIndexResponse.java | 6 +- .../mapping/get/GetMappingsResponse.java | 4 +- .../get/GetIndexTemplatesResponse.java | 2 +- .../template/put/PutIndexTemplateRequest.java | 10 +- .../cluster/AbstractDiffable.java | 108 +++ .../elasticsearch/cluster/ClusterState.java | 272 ++++++-- .../java/org/elasticsearch/cluster/Diff.java | 42 ++ .../org/elasticsearch/cluster/Diffable.java | 42 ++ .../elasticsearch/cluster/DiffableUtils.java | 283 ++++++++ ...ompatibleClusterStateVersionException.java | 35 + .../cluster/block/ClusterBlocks.java | 75 ++- .../cluster/metadata/AliasMetaData.java | 85 ++- .../cluster/metadata/IndexMetaData.java | 234 ++++--- .../metadata/IndexTemplateMetaData.java | 105 +-- .../cluster/metadata/MappingMetaData.java | 48 +- .../cluster/metadata/MetaData.java | 266 +++++--- .../metadata/MetaDataCreateIndexService.java | 2 +- .../metadata/RepositoriesMetaData.java | 236 +++---- .../cluster/metadata/RepositoryMetaData.java | 21 + .../cluster/metadata/RestoreMetaData.java | 220 +++--- .../cluster/metadata/SnapshotMetaData.java | 223 ++++--- .../cluster/node/DiscoveryNodes.java | 73 +- .../cluster/routing/IndexRoutingTable.java | 72 +- .../routing/IndexShardRoutingTable.java | 22 + .../cluster/routing/RoutingTable.java | 90 ++- .../service/InternalClusterService.java | 8 +- .../ClusterDynamicSettingsModule.java | 1 + .../common/io/stream/StreamableReader.java | 30 + .../common/io/stream/Writeable.java | 30 + .../elasticsearch/discovery/Discovery.java | 3 +- .../discovery/DiscoveryService.java | 5 +- .../discovery/DiscoverySettings.java | 13 + .../discovery/local/LocalDiscovery.java | 46 +- .../discovery/zen/ZenDiscovery.java | 9 +- .../publish/PublishClusterStateAction.java | 194 ++++-- .../org/elasticsearch/gateway/Gateway.java | 2 +- .../gateway/LocalAllocateDangledIndices.java | 2 +- .../TransportNodesListGatewayMetaState.java | 2 +- .../get/RestGetRepositoriesAction.java | 2 +- .../indices/get/RestGetIndicesAction.java | 2 +- .../warmer/get/RestGetWarmerAction.java | 2 +- .../search/warmer/IndexWarmersMetaData.java | 318 +++++---- .../ClusterStateDiffPublishingTests.java | 625 ++++++++++++++++++ .../cluster/ClusterStateDiffTests.java | 534 +++++++++++++++ .../ClusterSerializationTests.java | 2 +- .../cluster/serialization/DiffableTests.java | 127 ++++ .../common/xcontent/XContentTestUtils.java | 100 +++ .../discovery/ZenUnicastDiscoveryTests.java | 1 + .../discovery/zen/ZenDiscoveryTests.java | 10 +- .../timestamp/TimestampMappingTests.java | 12 +- .../store/IndicesStoreIntegrationTests.java | 7 + .../template/SimpleIndexTemplateTests.java | 1 + .../DedicatedClusterSnapshotRestoreTests.java | 218 +++--- .../test/ElasticsearchIntegrationTest.java | 38 +- .../test/ElasticsearchTestCase.java | 14 + 60 files changed, 3831 insertions(+), 1134 deletions(-) create mode 100644 src/main/java/org/elasticsearch/cluster/AbstractDiffable.java create mode 100644 src/main/java/org/elasticsearch/cluster/Diff.java create mode 100644 src/main/java/org/elasticsearch/cluster/Diffable.java create mode 100644 src/main/java/org/elasticsearch/cluster/DiffableUtils.java create mode 100644 src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java create mode 100644 src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java create mode 100644 src/main/java/org/elasticsearch/common/io/stream/Writeable.java create mode 100644 src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java create mode 100644 src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java create mode 100644 src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java create mode 100644 src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 79b31f620d5..28f9cb1db90 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -68,7 +68,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - ClusterState.Builder.writeTo(state, out); + state.writeTo(out); writeAcknowledged(out); RoutingExplanations.writeTo(explanations, out); } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index 861a84a9e71..e9aa9b723fa 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -62,6 +62,6 @@ public class ClusterStateResponse extends ActionResponse { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); clusterName.writeTo(out); - ClusterState.Builder.writeTo(clusterState, out); + clusterState.writeTo(out); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 7b114c92d43..5c8905fd97b 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.state; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; @@ -29,7 +28,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData.Custom; @@ -39,11 +37,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.List; - -import static com.google.common.collect.Lists.newArrayList; -import static org.elasticsearch.cluster.metadata.MetaData.lookupFactorySafe; - /** * */ @@ -84,6 +77,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); builder.version(currentState.version()); + builder.uuid(currentState.uuid()); if (request.nodes()) { builder.nodes(currentState.nodes()); } @@ -122,10 +116,9 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio } // Filter our metadata that shouldn't be returned by API - for(ObjectCursor type : currentState.metaData().customs().keys()) { - Custom.Factory factory = lookupFactorySafe(type.value); - if(!factory.context().contains(MetaData.XContentContext.API)) { - mdBuilder.removeCustom(type.value); + for(ObjectObjectCursor custom : currentState.metaData().customs()) { + if(!custom.value.context().contains(MetaData.XContentContext.API)) { + mdBuilder.removeCustom(custom.key); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 765a9395afc..106e864a367 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -74,7 +74,7 @@ public class GetAliasesResponse extends ActionResponse { out.writeString(entry.key); out.writeVInt(entry.value.size()); for (AliasMetaData aliasMetaData : entry.value) { - AliasMetaData.Builder.writeTo(aliasMetaData, out); + aliasMetaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index d79c2128611..60a265de785 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -396,11 +396,11 @@ public class CreateIndexRequest extends AcknowledgedRequest aliases((Map) entry.getValue()); } else { // maybe custom? - IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name); - if (factory != null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); + if (proto != null) { found = true; try { - customs.put(name, factory.fromMap((Map) entry.getValue())); + customs.put(name, proto.fromMap((Map) entry.getValue())); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]"); } @@ -448,7 +448,7 @@ public class CreateIndexRequest extends AcknowledgedRequest int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in); + IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); customs.put(type, customIndexMetaData); } int aliasesSize = in.readVInt(); @@ -472,7 +472,7 @@ public class CreateIndexRequest extends AcknowledgedRequest out.writeVInt(customs.size()); for (Map.Entry entry : customs.entrySet()) { out.writeString(entry.getKey()); - IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out); + entry.getValue().writeTo(out); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 35e6cfa4804..7080a694a11 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -134,7 +134,7 @@ public class GetIndexResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder mappingEntryBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - mappingEntryBuilder.put(in.readString(), MappingMetaData.readFrom(in)); + mappingEntryBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); } mappingsMapBuilder.put(key, mappingEntryBuilder.build()); } @@ -181,7 +181,7 @@ public class GetIndexResponse extends ActionResponse { out.writeVInt(indexEntry.value.size()); for (ObjectObjectCursor mappingEntry : indexEntry.value) { out.writeString(mappingEntry.key); - MappingMetaData.writeTo(mappingEntry.value, out); + mappingEntry.value.writeTo(out); } } out.writeVInt(aliases.size()); @@ -189,7 +189,7 @@ public class GetIndexResponse extends ActionResponse { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (AliasMetaData aliasEntry : indexEntry.value) { - AliasMetaData.Builder.writeTo(aliasEntry, out); + aliasEntry.writeTo(out); } } out.writeVInt(settings.size()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index b27577f8da3..30e9e24c493 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -59,7 +59,7 @@ public class GetMappingsResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder typeMapBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - typeMapBuilder.put(in.readString(), MappingMetaData.readFrom(in)); + typeMapBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); } indexMapBuilder.put(key, typeMapBuilder.build()); } @@ -75,7 +75,7 @@ public class GetMappingsResponse extends ActionResponse { out.writeVInt(indexEntry.value.size()); for (ObjectObjectCursor typeEntry : indexEntry.value) { out.writeString(typeEntry.key); - MappingMetaData.writeTo(typeEntry.value, out); + typeEntry.value.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index 56de19872f2..2ce6d8d2c1a 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -60,7 +60,7 @@ public class GetIndexTemplatesResponse extends ActionResponse { super.writeTo(out); out.writeVInt(indexTemplates.size()); for (IndexTemplateMetaData indexTemplate : indexTemplates) { - IndexTemplateMetaData.Builder.writeTo(indexTemplate, out); + indexTemplate.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 41dd9ec2b45..1b752855c20 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -292,10 +292,10 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest) entry.getValue()); } else { // maybe custom? - IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name); - if (factory != null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); + if (proto != null) { try { - customs.put(name, factory.fromMap((Map) entry.getValue())); + customs.put(name, proto.fromMap((Map) entry.getValue())); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]"); } @@ -440,7 +440,7 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest entry : customs.entrySet()) { out.writeString(entry.getKey()); - IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out); + entry.getValue().writeTo(out); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { diff --git a/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java b/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java new file mode 100644 index 00000000000..4e6da2bd569 --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamableReader; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Abstract diffable object with simple diffs implementation that sends the entire object if object has changed or + * nothing is object remained the same. + */ +public abstract class AbstractDiffable> implements Diffable { + + @Override + public Diff diff(T previousState) { + if (this.get().equals(previousState)) { + return new CompleteDiff<>(); + } else { + return new CompleteDiff<>(get()); + } + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new CompleteDiff<>(this, in); + } + + public static > Diff readDiffFrom(StreamableReader reader, StreamInput in) throws IOException { + return new CompleteDiff(reader, in); + } + + private static class CompleteDiff> implements Diff { + + @Nullable + private final T part; + + /** + * Creates simple diff with changes + */ + public CompleteDiff(T part) { + this.part = part; + } + + /** + * Creates simple diff without changes + */ + public CompleteDiff() { + this.part = null; + } + + /** + * Read simple diff from the stream + */ + public CompleteDiff(StreamableReader reader, StreamInput in) throws IOException { + if (in.readBoolean()) { + this.part = reader.readFrom(in); + } else { + this.part = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (part != null) { + out.writeBoolean(true); + part.writeTo(out); + } else { + out.writeBoolean(false); + } + } + + @Override + public T apply(T part) { + if (this.part != null) { + return this.part; + } else { + return part; + } + } + } + + @SuppressWarnings("unchecked") + public T get() { + return (T) this; + } +} + diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java index b90bc0bb2ac..4f63d9e00e3 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableSet; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -55,7 +56,9 @@ import java.util.Map; /** * */ -public class ClusterState implements ToXContent { +public class ClusterState implements ToXContent, Diffable { + + public static final ClusterState PROTO = builder(ClusterName.DEFAULT).build(); public static enum ClusterStateStatus { UNKNOWN((byte) 0), @@ -74,47 +77,43 @@ public class ClusterState implements ToXContent { } } - public interface Custom { + public interface Custom extends Diffable, ToXContent { - interface Factory { - - String type(); - - T readFrom(StreamInput in) throws IOException; - - void writeTo(T customState, StreamOutput out) throws IOException; - - void toXContent(T customState, XContentBuilder builder, ToXContent.Params params); - } + String type(); } - private final static Map customFactories = new HashMap<>(); + private final static Map customPrototypes = new HashMap<>(); /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static Custom.Factory lookupFactory(String type) { - return customFactories.get(type); + public static T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static Custom.Factory lookupFactorySafe(String type) { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new IllegalArgumentException("No custom state factory registered for type [" + type + "]"); + public static T lookupPrototypeSafe(String type) { + @SuppressWarnings("unchecked") + T proto = (T)customPrototypes.get(type); + if (proto == null) { + throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "]"); } - return factory; + return proto; } + public static final String UNKNOWN_UUID = "_na_"; public static final long UNKNOWN_VERSION = -1; private final long version; + private final String uuid; + private final RoutingTable routingTable; private final DiscoveryNodes nodes; @@ -127,17 +126,20 @@ public class ClusterState implements ToXContent { private final ClusterName clusterName; + private final boolean wasReadFromDiff; + // built on demand private volatile RoutingNodes routingNodes; private volatile ClusterStateStatus status; - public ClusterState(long version, ClusterState state) { - this(state.clusterName, version, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs()); + public ClusterState(long version, String uuid, ClusterState state) { + this(state.clusterName, version, uuid, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false); } - public ClusterState(ClusterName clusterName, long version, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs) { + public ClusterState(ClusterName clusterName, long version, String uuid, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap customs, boolean wasReadFromDiff) { this.version = version; + this.uuid = uuid; this.clusterName = clusterName; this.metaData = metaData; this.routingTable = routingTable; @@ -145,6 +147,7 @@ public class ClusterState implements ToXContent { this.blocks = blocks; this.customs = customs; this.status = ClusterStateStatus.UNKNOWN; + this.wasReadFromDiff = wasReadFromDiff; } public ClusterStateStatus status() { @@ -164,6 +167,14 @@ public class ClusterState implements ToXContent { return version(); } + /** + * This uuid is automatically generated for for each version of cluster state. It is used to make sure that + * we are applying diffs to the right previous state. + */ + public String uuid() { + return this.uuid; + } + public DiscoveryNodes nodes() { return this.nodes; } @@ -216,6 +227,11 @@ public class ClusterState implements ToXContent { return this.clusterName; } + // Used for testing and logging to determine how this cluster state was send over the wire + boolean wasReadFromDiff() { + return wasReadFromDiff; + } + /** * Returns a built (on demand) routing nodes view of the routing table. NOTE, the routing nodes * are mutable, use them just for read operations @@ -231,6 +247,8 @@ public class ClusterState implements ToXContent { public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("version: ").append(version).append("\n"); + sb.append("uuid: ").append(uuid).append("\n"); + sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); sb.append("meta data version: ").append(metaData.version()).append("\n"); sb.append(nodes().prettyPrint()); sb.append(routingTable().prettyPrint()); @@ -302,14 +320,13 @@ public class ClusterState implements ToXContent { } } - - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { EnumSet metrics = Metric.parseString(params.param("metric", "_all"), true); if (metrics.contains(Metric.VERSION)) { builder.field("version", version); + builder.field("uuid", uuid); } if (metrics.contains(Metric.MASTER_NODE)) { @@ -434,7 +451,7 @@ public class ClusterState implements ToXContent { for (ObjectObjectCursor cursor : metaData.customs()) { builder.startObject(cursor.key); - MetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -473,7 +490,7 @@ public class ClusterState implements ToXContent { builder.startObject("nodes"); for (RoutingNode routingNode : readOnlyRoutingNodes()) { - builder.startArray(routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE); + builder.startArray(routingNode.nodeId() == null ? "null" : routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE); for (ShardRouting shardRouting : routingNode) { shardRouting.toXContent(builder, params); } @@ -486,7 +503,7 @@ public class ClusterState implements ToXContent { if (metrics.contains(Metric.CUSTOMS)) { for (ObjectObjectCursor cursor : customs) { builder.startObject(cursor.key); - lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } } @@ -506,21 +523,25 @@ public class ClusterState implements ToXContent { private final ClusterName clusterName; private long version = 0; + private String uuid = UNKNOWN_UUID; private MetaData metaData = MetaData.EMPTY_META_DATA; private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE; private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES; private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; private final ImmutableOpenMap.Builder customs; + private boolean fromDiff; public Builder(ClusterState state) { this.clusterName = state.clusterName; this.version = state.version(); + this.uuid = state.uuid(); this.nodes = state.nodes(); this.routingTable = state.routingTable(); this.metaData = state.metaData(); this.blocks = state.blocks(); this.customs = ImmutableOpenMap.builder(state.customs()); + this.fromDiff = false; } public Builder(ClusterName clusterName) { @@ -574,6 +595,17 @@ public class ClusterState implements ToXContent { return this; } + public Builder incrementVersion() { + this.version = version + 1; + this.uuid = UNKNOWN_UUID; + return this; + } + + public Builder uuid(String uuid) { + this.uuid = uuid; + return this; + } + public Custom getCustom(String type) { return customs.get(type); } @@ -588,13 +620,26 @@ public class ClusterState implements ToXContent { return this; } + public Builder customs(ImmutableOpenMap customs) { + this.customs.putAll(customs); + return this; + } + + public Builder fromDiff(boolean fromDiff) { + this.fromDiff = fromDiff; + return this; + } + public ClusterState build() { - return new ClusterState(clusterName, version, metaData, routingTable, nodes, blocks, customs.build()); + if (UNKNOWN_UUID.equals(uuid)) { + uuid = Strings.randomBase64UUID(); + } + return new ClusterState(clusterName, version, uuid, metaData, routingTable, nodes, blocks, customs.build(), fromDiff); } public static byte[] toBytes(ClusterState state) throws IOException { BytesStreamOutput os = new BytesStreamOutput(); - writeTo(state, os); + state.writeTo(os); return os.bytes().toBytes(); } @@ -606,39 +651,152 @@ public class ClusterState implements ToXContent { return readFrom(new BytesStreamInput(data), localNode); } - public static void writeTo(ClusterState state, StreamOutput out) throws IOException { - state.clusterName.writeTo(out); - out.writeLong(state.version()); - MetaData.Builder.writeTo(state.metaData(), out); - RoutingTable.Builder.writeTo(state.routingTable(), out); - DiscoveryNodes.Builder.writeTo(state.nodes(), out); - ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out); - out.writeVInt(state.customs().size()); - for (ObjectObjectCursor cursor : state.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } - } - /** * @param in input stream * @param localNode used to set the local node in the cluster state. can be null. */ public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - ClusterName clusterName = ClusterName.readClusterName(in); + return PROTO.readFrom(in, localNode); + } + + } + + @Override + public Diff diff(ClusterState previousState) { + return new ClusterStateDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new ClusterStateDiff(in, this); + } + + public ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + ClusterName clusterName = ClusterName.readClusterName(in); + Builder builder = new Builder(clusterName); + builder.version = in.readLong(); + builder.uuid = in.readString(); + builder.metaData = MetaData.Builder.readFrom(in); + builder.routingTable = RoutingTable.Builder.readFrom(in); + builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); + builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public ClusterState readFrom(StreamInput in) throws IOException { + return readFrom(in, nodes.localNode()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterName.writeTo(out); + out.writeLong(version); + out.writeString(uuid); + metaData.writeTo(out); + routingTable.writeTo(out); + nodes.writeTo(out); + blocks.writeTo(out); + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + + private static class ClusterStateDiff implements Diff { + + private final long toVersion; + + private final String fromUuid; + + private final String toUuid; + + private final ClusterName clusterName; + + private final Diff routingTable; + + private final Diff nodes; + + private final Diff metaData; + + private final Diff blocks; + + private final Diff> customs; + + public ClusterStateDiff(ClusterState before, ClusterState after) { + fromUuid = before.uuid; + toUuid = after.uuid; + toVersion = after.version; + clusterName = after.clusterName; + routingTable = after.routingTable.diff(before.routingTable); + nodes = after.nodes.diff(before.nodes); + metaData = after.metaData.diff(before.metaData); + blocks = after.blocks.diff(before.blocks); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException { + clusterName = ClusterName.readClusterName(in); + fromUuid = in.readString(); + toUuid = in.readString(); + toVersion = in.readLong(); + routingTable = proto.routingTable.readDiffFrom(in); + nodes = proto.nodes.readDiffFrom(in); + metaData = proto.metaData.readDiffFrom(in); + blocks = proto.blocks.readDiffFrom(in); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterName.writeTo(out); + out.writeString(fromUuid); + out.writeString(toUuid); + out.writeLong(toVersion); + routingTable.writeTo(out); + nodes.writeTo(out); + metaData.writeTo(out); + blocks.writeTo(out); + customs.writeTo(out); + } + + @Override + public ClusterState apply(ClusterState state) { Builder builder = new Builder(clusterName); - builder.version = in.readLong(); - builder.metaData = MetaData.Builder.readFrom(in); - builder.routingTable = RoutingTable.Builder.readFrom(in); - builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); - builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + if (toUuid.equals(state.uuid)) { + // no need to read the rest - cluster state didn't change + return state; } + if (fromUuid.equals(state.uuid) == false) { + throw new IncompatibleClusterStateVersionException(state.version, state.uuid, toVersion, fromUuid); + } + builder.uuid(toUuid); + builder.version(toVersion); + builder.routingTable(routingTable.apply(state.routingTable)); + builder.nodes(nodes.apply(state.nodes)); + builder.metaData(metaData.apply(state.metaData)); + builder.blocks(blocks.apply(state.blocks)); + builder.customs(customs.apply(state.customs)); + builder.fromDiff(true); return builder.build(); } } + } diff --git a/src/main/java/org/elasticsearch/cluster/Diff.java b/src/main/java/org/elasticsearch/cluster/Diff.java new file mode 100644 index 00000000000..2e571f43bca --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/Diff.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Represents difference between states of cluster state parts + */ +public interface Diff { + + /** + * Applies difference to the specified part and retunrs the resulted part + */ + T apply(T part); + + /** + * Writes the differences into the output stream + * @param out + * @throws IOException + */ + void writeTo(StreamOutput out) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/cluster/Diffable.java b/src/main/java/org/elasticsearch/cluster/Diffable.java new file mode 100644 index 00000000000..7ce60047a2b --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/Diffable.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Cluster state part, changes in which can be serialized + */ +public interface Diffable extends Writeable { + + /** + * Returns serializable object representing differences between this and previousState + */ + Diff diff(T previousState); + + /** + * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput + */ + Diff readDiffFrom(StreamInput in) throws IOException; + +} diff --git a/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/src/main/java/org/elasticsearch/cluster/DiffableUtils.java new file mode 100644 index 00000000000..4e912a34f97 --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -0,0 +1,283 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static com.google.common.collect.Lists.newArrayList; +import static com.google.common.collect.Maps.newHashMap; + +public final class DiffableUtils { + private DiffableUtils() { + } + + /** + * Calculates diff between two ImmutableOpenMaps of Diffable objects + */ + public static > Diff> diff(ImmutableOpenMap before, ImmutableOpenMap after) { + assert after != null && before != null; + return new ImmutableOpenMapDiff<>(before, after); + } + + /** + * Calculates diff between two ImmutableMaps of Diffable objects + */ + public static > Diff> diff(ImmutableMap before, ImmutableMap after) { + assert after != null && before != null; + return new ImmutableMapDiff<>(before, after); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps + */ + public static > Diff> readImmutableOpenMapDiff(StreamInput in, KeyedReader keyedReader) throws IOException { + return new ImmutableOpenMapDiff<>(in, keyedReader); + } + + /** + * Loads an object that represents difference between two ImmutableMaps + */ + public static > Diff> readImmutableMapDiff(StreamInput in, KeyedReader keyedReader) throws IOException { + return new ImmutableMapDiff<>(in, keyedReader); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps + */ + public static > Diff> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException { + return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto)); + } + + /** + * Loads an object that represents difference between two ImmutableMaps + */ + public static > Diff> readImmutableMapDiff(StreamInput in, T proto) throws IOException { + return new ImmutableMapDiff<>(in, new PrototypeReader<>(proto)); + } + + /** + * A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's + * used in custom metadata deserialization. + */ + public interface KeyedReader { + + /** + * reads an object of the type T from the stream input + */ + T readFrom(StreamInput in, String key) throws IOException; + + /** + * reads an object that respresents differences between two objects with the type T from the stream input + */ + Diff readDiffFrom(StreamInput in, String key) throws IOException; + } + + /** + * Implementation of the KeyedReader that is using a prototype object for reading operations + * + * Note: this implementation is ignoring the key. + */ + public static class PrototypeReader> implements KeyedReader { + private T proto; + + public PrototypeReader(T proto) { + this.proto = proto; + } + + @Override + public T readFrom(StreamInput in, String key) throws IOException { + return proto.readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return proto.readDiffFrom(in); + } + } + + /** + * Represents differences between two ImmutableMaps of diffable objects + * + * @param the diffable object + */ + private static class ImmutableMapDiff> extends MapDiff> { + + protected ImmutableMapDiff(StreamInput in, KeyedReader reader) throws IOException { + super(in, reader); + } + + public ImmutableMapDiff(ImmutableMap before, ImmutableMap after) { + assert after != null && before != null; + for (String key : before.keySet()) { + if (!after.containsKey(key)) { + deletes.add(key); + } + } + for (ImmutableMap.Entry partIter : after.entrySet()) { + T beforePart = before.get(partIter.getKey()); + if (beforePart == null) { + adds.put(partIter.getKey(), partIter.getValue()); + } else if (partIter.getValue().equals(beforePart) == false) { + diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart)); + } + } + } + + @Override + public ImmutableMap apply(ImmutableMap map) { + HashMap builder = newHashMap(); + builder.putAll(map); + + for (String part : deletes) { + builder.remove(part); + } + + for (Map.Entry> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry additon : adds.entrySet()) { + builder.put(additon.getKey(), additon.getValue()); + } + return ImmutableMap.copyOf(builder); + } + } + + /** + * Represents differences between two ImmutableOpenMap of diffable objects + * + * @param the diffable object + */ + private static class ImmutableOpenMapDiff> extends MapDiff> { + + protected ImmutableOpenMapDiff(StreamInput in, KeyedReader reader) throws IOException { + super(in, reader); + } + + public ImmutableOpenMapDiff(ImmutableOpenMap before, ImmutableOpenMap after) { + assert after != null && before != null; + for (ObjectCursor key : before.keys()) { + if (!after.containsKey(key.value)) { + deletes.add(key.value); + } + } + for (ObjectObjectCursor partIter : after) { + T beforePart = before.get(partIter.key); + if (beforePart == null) { + adds.put(partIter.key, partIter.value); + } else if (partIter.value.equals(beforePart) == false) { + diffs.put(partIter.key, partIter.value.diff(beforePart)); + } + } + } + + @Override + public ImmutableOpenMap apply(ImmutableOpenMap map) { + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); + builder.putAll(map); + + for (String part : deletes) { + builder.remove(part); + } + + for (Map.Entry> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry additon : adds.entrySet()) { + builder.put(additon.getKey(), additon.getValue()); + } + return builder.build(); + } + } + + /** + * Represents differences between two maps of diffable objects + * + * This class is used as base class for different map implementations + * + * @param the diffable object + */ + private static abstract class MapDiff, M> implements Diff { + + protected final List deletes; + protected final Map> diffs; + protected final Map adds; + + protected MapDiff() { + deletes = newArrayList(); + diffs = newHashMap(); + adds = newHashMap(); + } + + protected MapDiff(StreamInput in, KeyedReader reader) throws IOException { + deletes = newArrayList(); + diffs = newHashMap(); + adds = newHashMap(); + int deletesCount = in.readVInt(); + for (int i = 0; i < deletesCount; i++) { + deletes.add(in.readString()); + } + + int diffsCount = in.readVInt(); + for (int i = 0; i < diffsCount; i++) { + String key = in.readString(); + Diff diff = reader.readDiffFrom(in, key); + diffs.put(key, diff); + } + + int addsCount = in.readVInt(); + for (int i = 0; i < addsCount; i++) { + String key = in.readString(); + T part = reader.readFrom(in, key); + adds.put(key, part); + } + } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletes.size()); + for (String delete : deletes) { + out.writeString(delete); + } + + out.writeVInt(diffs.size()); + for (Map.Entry> entry : diffs.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + + out.writeVInt(adds.size()); + for (Map.Entry entry : adds.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + } +} diff --git a/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java b/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java new file mode 100644 index 00000000000..92f5897bf2e --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.ElasticsearchException; + +/** + * Thrown by {@link Diffable#readDiffAndApply(org.elasticsearch.common.io.stream.StreamInput)} method + */ +public class IncompatibleClusterStateVersionException extends ElasticsearchException { + public IncompatibleClusterStateVersionException(String msg) { + super(msg); + } + + public IncompatibleClusterStateVersionException(long expectedVersion, String expectedUuid, long receivedVersion, String receivedUuid) { + super("Expected diff for version " + expectedVersion + " with uuid " + expectedUuid + " got version " + receivedVersion + " and uuid " + receivedUuid); + } +} diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index bb7d332de4f..95c0ba7127e 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,10 +37,12 @@ import java.util.Set; /** * Represents current cluster level blocks to block dirty operations done against the cluster. */ -public class ClusterBlocks { +public class ClusterBlocks extends AbstractDiffable { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(ImmutableSet.of(), ImmutableMap.>of()); + public static final ClusterBlocks PROTO = EMPTY_CLUSTER_BLOCK; + private final ImmutableSet global; private final ImmutableMap> indicesBlocks; @@ -203,6 +206,43 @@ public class ClusterBlocks { return new ClusterBlockException(builder.build()); } + @Override + public void writeTo(StreamOutput out) throws IOException { + writeBlockSet(global, out); + out.writeVInt(indicesBlocks.size()); + for (Map.Entry> entry : indicesBlocks.entrySet()) { + out.writeString(entry.getKey()); + writeBlockSet(entry.getValue(), out); + } + } + + private static void writeBlockSet(ImmutableSet blocks, StreamOutput out) throws IOException { + out.writeVInt(blocks.size()); + for (ClusterBlock block : blocks) { + block.writeTo(out); + } + } + + @Override + public ClusterBlocks readFrom(StreamInput in) throws IOException { + ImmutableSet global = readBlockSet(in); + ImmutableMap.Builder> indicesBuilder = ImmutableMap.builder(); + int size = in.readVInt(); + for (int j = 0; j < size; j++) { + indicesBuilder.put(in.readString().intern(), readBlockSet(in)); + } + return new ClusterBlocks(global, indicesBuilder.build()); + } + + private static ImmutableSet readBlockSet(StreamInput in) throws IOException { + ImmutableSet.Builder builder = ImmutableSet.builder(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.add(ClusterBlock.readClusterBlock(in)); + } + return builder.build(); + } + static class ImmutableLevelHolder { static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(ImmutableSet.of(), ImmutableMap.>of()); @@ -313,38 +353,7 @@ public class ClusterBlocks { } public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException { - ImmutableSet global = readBlockSet(in); - ImmutableMap.Builder> indicesBuilder = ImmutableMap.builder(); - int size = in.readVInt(); - for (int j = 0; j < size; j++) { - indicesBuilder.put(in.readString().intern(), readBlockSet(in)); - } - return new ClusterBlocks(global, indicesBuilder.build()); - } - - public static void writeClusterBlocks(ClusterBlocks blocks, StreamOutput out) throws IOException { - writeBlockSet(blocks.global(), out); - out.writeVInt(blocks.indices().size()); - for (Map.Entry> entry : blocks.indices().entrySet()) { - out.writeString(entry.getKey()); - writeBlockSet(entry.getValue(), out); - } - } - - private static void writeBlockSet(ImmutableSet blocks, StreamOutput out) throws IOException { - out.writeVInt(blocks.size()); - for (ClusterBlock block : blocks) { - block.writeTo(out); - } - } - - private static ImmutableSet readBlockSet(StreamInput in) throws IOException { - ImmutableSet.Builder builder = ImmutableSet.builder(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.add(ClusterBlock.readClusterBlock(in)); - } - return builder.build(); + return PROTO.readFrom(in); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java index 008935ec026..0f7e55c8087 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.io.stream.StreamInput; @@ -38,7 +39,9 @@ import java.util.Set; /** * */ -public class AliasMetaData { +public class AliasMetaData extends AbstractDiffable { + + public static final AliasMetaData PROTO = new AliasMetaData("", null, null, null); private final String alias; @@ -146,6 +149,48 @@ public class AliasMetaData { return result; } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(alias()); + if (filter() != null) { + out.writeBoolean(true); + filter.writeTo(out); + } else { + out.writeBoolean(false); + } + if (indexRouting() != null) { + out.writeBoolean(true); + out.writeString(indexRouting()); + } else { + out.writeBoolean(false); + } + if (searchRouting() != null) { + out.writeBoolean(true); + out.writeString(searchRouting()); + } else { + out.writeBoolean(false); + } + + } + + @Override + public AliasMetaData readFrom(StreamInput in) throws IOException { + String alias = in.readString(); + CompressedString filter = null; + if (in.readBoolean()) { + filter = CompressedString.readCompressedString(in); + } + String indexRouting = null; + if (in.readBoolean()) { + indexRouting = in.readString(); + } + String searchRouting = null; + if (in.readBoolean()) { + searchRouting = in.readString(); + } + return new AliasMetaData(alias, filter, indexRouting, searchRouting); + } + public static class Builder { private final String alias; @@ -294,44 +339,12 @@ public class AliasMetaData { return builder.build(); } - public static void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException { - out.writeString(aliasMetaData.alias()); - if (aliasMetaData.filter() != null) { - out.writeBoolean(true); - aliasMetaData.filter.writeTo(out); - } else { - out.writeBoolean(false); - } - if (aliasMetaData.indexRouting() != null) { - out.writeBoolean(true); - out.writeString(aliasMetaData.indexRouting()); - } else { - out.writeBoolean(false); - } - if (aliasMetaData.searchRouting() != null) { - out.writeBoolean(true); - out.writeString(aliasMetaData.searchRouting()); - } else { - out.writeBoolean(false); - } - + public void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException { + aliasMetaData.writeTo(out); } public static AliasMetaData readFrom(StreamInput in) throws IOException { - String alias = in.readString(); - CompressedString filter = null; - if (in.readBoolean()) { - filter = CompressedString.readCompressedString(in); - } - String indexRouting = null; - if (in.readBoolean()) { - indexRouting = in.readString(); - } - String searchRouting = null; - if (in.readBoolean()) { - searchRouting = in.readString(); - } - return new AliasMetaData(alias, filter, indexRouting, searchRouting); + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index d6bcacf1615..fe76d0f3f2b 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -24,6 +24,9 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import org.elasticsearch.Version; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; @@ -59,60 +62,54 @@ import static org.elasticsearch.common.settings.ImmutableSettings.*; /** * */ -public class IndexMetaData { +public class IndexMetaData implements Diffable { + public static final IndexMetaData PROTO = IndexMetaData.builder("") + .settings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); - public interface Custom { + public interface Custom extends Diffable, ToXContent { String type(); - interface Factory { + Custom fromMap(Map map) throws IOException; - String type(); + Custom fromXContent(XContentParser parser) throws IOException; - T readFrom(StreamInput in) throws IOException; - - void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; - - T fromMap(Map map) throws IOException; - - T fromXContent(XContentParser parser) throws IOException; - - void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; - - /** - * Merges from first to second, with first being more important, i.e., if something exists in first and second, - * first will prevail. - */ - T merge(T first, T second); - } + /** + * Merges from this to another, with this being more important, i.e., if something exists in this and another, + * this will prevail. + */ + Custom mergeWith(Custom another); } - public static Map customFactories = new HashMap<>(); + public static Map customPrototypes = new HashMap<>(); static { // register non plugin custom metadata - registerFactory(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.FACTORY); + registerPrototype(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.PROTO); } /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static Custom.Factory lookupFactory(String type) { - return customFactories.get(type); + public static T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static Custom.Factory lookupFactorySafe(String type) { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new IllegalArgumentException("No custom index metadata factoy registered for type [" + type + "]"); + public static T lookupPrototypeSafe(String type) { + //noinspection unchecked + T proto = (T) customPrototypes.get(type); + if (proto == null) { + throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); } - return factory; + return proto; } public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); @@ -451,7 +448,9 @@ public class IndexMetaData { if (state != that.state) { return false; } - + if (!customs.equals(that.customs)) { + return false; + } return true; } @@ -465,6 +464,126 @@ public class IndexMetaData { return result; } + @Override + public Diff diff(IndexMetaData previousState) { + return new IndexMetaDataDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new IndexMetaDataDiff(in); + } + + private static class IndexMetaDataDiff implements Diff { + + private final String index; + private final long version; + private final State state; + private final Settings settings; + private final Diff> mappings; + private final Diff> aliases; + private Diff> customs; + + public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) { + index = after.index; + version = after.version; + state = after.state; + settings = after.settings; + mappings = DiffableUtils.diff(before.mappings, after.mappings); + aliases = DiffableUtils.diff(before.aliases, after.aliases); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public IndexMetaDataDiff(StreamInput in) throws IOException { + index = in.readString(); + version = in.readLong(); + state = State.fromId(in.readByte()); + settings = ImmutableSettings.readSettingsFromStream(in); + mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO); + aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeLong(version); + out.writeByte(state.id); + ImmutableSettings.writeSettingsToStream(settings, out); + mappings.writeTo(out); + aliases.writeTo(out); + customs.writeTo(out); + } + + @Override + public IndexMetaData apply(IndexMetaData part) { + Builder builder = builder(index); + builder.version(version); + builder.state(state); + builder.settings(settings); + builder.mappings.putAll(mappings.apply(part.mappings)); + builder.aliases.putAll(aliases.apply(part.aliases)); + builder.customs.putAll(customs.apply(part.customs)); + return builder.build(); + } + } + + @Override + public IndexMetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(in.readString()); + builder.version(in.readLong()); + builder.state(State.fromId(in.readByte())); + builder.settings(readSettingsFromStream(in)); + int mappingsSize = in.readVInt(); + for (int i = 0; i < mappingsSize; i++) { + MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in); + builder.putMapping(mappingMd); + } + int aliasesSize = in.readVInt(); + for (int i = 0; i < aliasesSize; i++) { + AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); + builder.putAlias(aliasMd); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeLong(version); + out.writeByte(state.id()); + writeSettingsToStream(settings, out); + out.writeVInt(mappings.size()); + for (ObjectCursor cursor : mappings.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(aliases.size()); + for (ObjectCursor cursor : aliases.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static Builder builder(String index) { return new Builder(index); } @@ -660,7 +779,7 @@ public class IndexMetaData { for (ObjectObjectCursor cursor : indexMetaData.customs()) { builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE); - lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -707,12 +826,13 @@ public class IndexMetaData { } } else { // check if its a custom index metadata - Custom.Factory factory = lookupFactory(currentFieldName); - if (factory == null) { + Custom proto = lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), factory.fromXContent(parser)); + Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token == XContentParser.Token.START_ARRAY) { @@ -741,47 +861,7 @@ public class IndexMetaData { } public static IndexMetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(in.readString()); - builder.version(in.readLong()); - builder.state(State.fromId(in.readByte())); - builder.settings(readSettingsFromStream(in)); - int mappingsSize = in.readVInt(); - for (int i = 0; i < mappingsSize; i++) { - MappingMetaData mappingMd = MappingMetaData.readFrom(in); - builder.putMapping(mappingMd); - } - int aliasesSize = in.readVInt(); - for (int i = 0; i < aliasesSize; i++) { - AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); - builder.putAlias(aliasMd); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(IndexMetaData indexMetaData, StreamOutput out) throws IOException { - out.writeString(indexMetaData.index()); - out.writeLong(indexMetaData.version()); - out.writeByte(indexMetaData.state().id()); - writeSettingsToStream(indexMetaData.settings(), out); - out.writeVInt(indexMetaData.mappings().size()); - for (ObjectCursor cursor : indexMetaData.mappings().values()) { - MappingMetaData.writeTo(cursor.value, out); - } - out.writeVInt(indexMetaData.aliases().size()); - for (ObjectCursor cursor : indexMetaData.aliases().values()) { - AliasMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(indexMetaData.customs().size()); - for (ObjectObjectCursor cursor : indexMetaData.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 582e008550d..54150ee6a1e 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.Sets; -import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.compress.CompressedString; @@ -42,7 +42,9 @@ import java.util.Set; /** * */ -public class IndexTemplateMetaData { +public class IndexTemplateMetaData extends AbstractDiffable { + + public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build(); private final String name; @@ -161,11 +163,57 @@ public class IndexTemplateMetaData { return result; } + @Override + public IndexTemplateMetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(in.readString()); + builder.order(in.readInt()); + builder.template(in.readString()); + builder.settings(ImmutableSettings.readSettingsFromStream(in)); + int mappingsSize = in.readVInt(); + for (int i = 0; i < mappingsSize; i++) { + builder.putMapping(in.readString(), CompressedString.readCompressedString(in)); + } + int aliasesSize = in.readVInt(); + for (int i = 0; i < aliasesSize; i++) { + AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); + builder.putAlias(aliasMd); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeInt(order); + out.writeString(template); + ImmutableSettings.writeSettingsToStream(settings, out); + out.writeVInt(mappings.size()); + for (ObjectObjectCursor cursor : mappings) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + out.writeVInt(aliases.size()); + for (ObjectCursor cursor : aliases.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet("template", "order", "mappings", "settings"); static { - VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet()); + VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet()); } private String name; @@ -305,7 +353,7 @@ public class IndexTemplateMetaData { for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE); - IndexMetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -347,12 +395,13 @@ public class IndexTemplateMetaData { } } else { // check if its a custom index metadata - IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(currentFieldName); - if (factory == null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), factory.fromXContent(parser)); + IndexMetaData.Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token == XContentParser.Token.START_ARRAY) { @@ -401,47 +450,7 @@ public class IndexTemplateMetaData { } public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(in.readString()); - builder.order(in.readInt()); - builder.template(in.readString()); - builder.settings(ImmutableSettings.readSettingsFromStream(in)); - int mappingsSize = in.readVInt(); - for (int i = 0; i < mappingsSize; i++) { - builder.putMapping(in.readString(), CompressedString.readCompressedString(in)); - } - int aliasesSize = in.readVInt(); - for (int i = 0; i < aliasesSize; i++) { - AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); - builder.putAlias(aliasMd); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(IndexTemplateMetaData indexTemplateMetaData, StreamOutput out) throws IOException { - out.writeString(indexTemplateMetaData.name()); - out.writeInt(indexTemplateMetaData.order()); - out.writeString(indexTemplateMetaData.template()); - ImmutableSettings.writeSettingsToStream(indexTemplateMetaData.settings(), out); - out.writeVInt(indexTemplateMetaData.mappings().size()); - for (ObjectObjectCursor cursor : indexTemplateMetaData.mappings()) { - out.writeString(cursor.key); - cursor.value.writeTo(out); - } - out.writeVInt(indexTemplateMetaData.aliases().size()); - for (ObjectCursor cursor : indexTemplateMetaData.aliases().values()) { - AliasMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(indexTemplateMetaData.customs().size()); - for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { - out.writeString(cursor.key); - IndexMetaData.lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index f80c6072bfc..7225a43d5ef 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -19,8 +19,10 @@ package org.elasticsearch.cluster.metadata; +import com.google.common.collect.Maps; import org.elasticsearch.Version; import org.elasticsearch.action.TimestampParsingException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedString; @@ -38,14 +40,18 @@ import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.Map; +import static com.google.common.collect.Maps.newHashMap; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** * Mapping configuration for a type. */ -public class MappingMetaData { +public class MappingMetaData extends AbstractDiffable { + + public static final MappingMetaData PROTO = new MappingMetaData(); public static class Id { @@ -317,6 +323,15 @@ public class MappingMetaData { initMappers(withoutType); } + private MappingMetaData() { + this.type = ""; + try { + this.source = new CompressedString(""); + } catch (IOException ex) { + throw new IllegalStateException("Cannot create MappingMetaData prototype", ex); + } + } + private void initMappers(Map withoutType) { if (withoutType.containsKey("_id")) { String path = null; @@ -532,34 +547,35 @@ public class MappingMetaData { } } - public static void writeTo(MappingMetaData mappingMd, StreamOutput out) throws IOException { - out.writeString(mappingMd.type()); - mappingMd.source().writeTo(out); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(type()); + source().writeTo(out); // id - if (mappingMd.id().hasPath()) { + if (id().hasPath()) { out.writeBoolean(true); - out.writeString(mappingMd.id().path()); + out.writeString(id().path()); } else { out.writeBoolean(false); } // routing - out.writeBoolean(mappingMd.routing().required()); - if (mappingMd.routing().hasPath()) { + out.writeBoolean(routing().required()); + if (routing().hasPath()) { out.writeBoolean(true); - out.writeString(mappingMd.routing().path()); + out.writeString(routing().path()); } else { out.writeBoolean(false); } // timestamp - out.writeBoolean(mappingMd.timestamp().enabled()); - out.writeOptionalString(mappingMd.timestamp().path()); - out.writeString(mappingMd.timestamp().format()); - out.writeOptionalString(mappingMd.timestamp().defaultTimestamp()); + out.writeBoolean(timestamp().enabled()); + out.writeOptionalString(timestamp().path()); + out.writeString(timestamp().format()); + out.writeOptionalString(timestamp().defaultTimestamp()); // TODO Remove the test in elasticsearch 2.0.0 if (out.getVersion().onOrAfter(Version.V_1_5_0)) { - out.writeOptionalBoolean(mappingMd.timestamp().ignoreMissing()); + out.writeOptionalBoolean(timestamp().ignoreMissing()); } - out.writeBoolean(mappingMd.hasParentField()); + out.writeBoolean(hasParentField()); } @Override @@ -588,7 +604,7 @@ public class MappingMetaData { return result; } - public static MappingMetaData readFrom(StreamInput in) throws IOException { + public MappingMetaData readFrom(StreamInput in) throws IOException { String type = in.readString(); CompressedString source = CompressedString.readCompressedString(in); // id diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index ea25a6d5256..97a1367d8e8 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -25,7 +25,9 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; import com.google.common.collect.*; +import org.elasticsearch.cluster.*; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.Nullable; @@ -55,7 +57,9 @@ import static org.elasticsearch.common.settings.ImmutableSettings.*; /** * */ -public class MetaData implements Iterable { +public class MetaData implements Iterable, Diffable { + + public static final MetaData PROTO = builder().build(); public static final String ALL = "_all"; @@ -67,60 +71,51 @@ public class MetaData implements Iterable { GATEWAY, /* Custom metadata should be stored as part of a snapshot */ - SNAPSHOT; + SNAPSHOT } public static EnumSet API_ONLY = EnumSet.of(XContentContext.API); public static EnumSet API_AND_GATEWAY = EnumSet.of(XContentContext.API, XContentContext.GATEWAY); public static EnumSet API_AND_SNAPSHOT = EnumSet.of(XContentContext.API, XContentContext.SNAPSHOT); - public interface Custom { + public interface Custom extends Diffable, ToXContent { - abstract class Factory { + String type(); - public abstract String type(); + Custom fromXContent(XContentParser parser) throws IOException; - public abstract T readFrom(StreamInput in) throws IOException; - - public abstract void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; - - public abstract T fromXContent(XContentParser parser) throws IOException; - - public abstract void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; - - public EnumSet context() { - return API_ONLY; - } - } + EnumSet context(); } - public static Map customFactories = new HashMap<>(); + public static Map customPrototypes = new HashMap<>(); static { // register non plugin custom metadata - registerFactory(RepositoriesMetaData.TYPE, RepositoriesMetaData.FACTORY); - registerFactory(SnapshotMetaData.TYPE, SnapshotMetaData.FACTORY); - registerFactory(RestoreMetaData.TYPE, RestoreMetaData.FACTORY); + registerPrototype(RepositoriesMetaData.TYPE, RepositoriesMetaData.PROTO); + registerPrototype(SnapshotMetaData.TYPE, SnapshotMetaData.PROTO); + registerPrototype(RestoreMetaData.TYPE, RestoreMetaData.PROTO); } /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static Custom.Factory lookupFactory(String type) { - return customFactories.get(type); + public static T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static Custom.Factory lookupFactorySafe(String type) { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new IllegalArgumentException("No custom index metadata factory registered for type [" + type + "]"); + public static T lookupPrototypeSafe(String type) { + //noinspection unchecked + T proto = (T) customPrototypes.get(type); + if (proto == null) { + throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); } - return factory; + return proto; } @@ -644,14 +639,22 @@ public class MetaData implements Iterable { /** * Translates the provided indices or aliases, eventually containing wildcard expressions, into actual indices. * - * @param indicesOptions how the aliases or indices need to be resolved to concrete indices + * @param indicesOptions how the aliases or indices need to be resolved to concrete indices * @param aliasesOrIndices the aliases or indices to be resolved to concrete indices * @return the obtained concrete indices +<<<<<<< HEAD * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options * don't allow such a case. * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided * indices options don't allow such a case. +======= + * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options + * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options + * don't allow such a case. + * @throws ElasticsearchIllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * indices options don't allow such a case. +>>>>>>> Add support for cluster state diffs */ public String[] concreteIndices(IndicesOptions indicesOptions, String... aliasesOrIndices) throws IndexMissingException, IllegalArgumentException { if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed()) { @@ -1139,14 +1142,14 @@ public class MetaData implements Iterable { // Check if any persistent metadata needs to be saved int customCount1 = 0; for (ObjectObjectCursor cursor : metaData1.customs) { - if (customFactories.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { if (!cursor.value.equals(metaData2.custom(cursor.key))) return false; customCount1++; } } int customCount2 = 0; for (ObjectObjectCursor cursor : metaData2.customs) { - if (customFactories.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { customCount2++; } } @@ -1154,6 +1157,129 @@ public class MetaData implements Iterable { return true; } + @Override + public Diff diff(MetaData previousState) { + return new MetaDataDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new MetaDataDiff(in); + } + + private static class MetaDataDiff implements Diff { + + private long version; + + private String uuid; + + private Settings transientSettings; + private Settings persistentSettings; + private Diff> indices; + private Diff> templates; + private Diff> customs; + + + public MetaDataDiff(MetaData before, MetaData after) { + uuid = after.uuid; + version = after.version; + transientSettings = after.transientSettings; + persistentSettings = after.persistentSettings; + indices = DiffableUtils.diff(before.indices, after.indices); + templates = DiffableUtils.diff(before.templates, after.templates); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public MetaDataDiff(StreamInput in) throws IOException { + uuid = in.readString(); + version = in.readLong(); + transientSettings = ImmutableSettings.readSettingsFromStream(in); + persistentSettings = ImmutableSettings.readSettingsFromStream(in); + indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO); + templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(uuid); + out.writeLong(version); + ImmutableSettings.writeSettingsToStream(transientSettings, out); + ImmutableSettings.writeSettingsToStream(persistentSettings, out); + indices.writeTo(out); + templates.writeTo(out); + customs.writeTo(out); + } + + @Override + public MetaData apply(MetaData part) { + Builder builder = builder(); + builder.uuid(uuid); + builder.version(version); + builder.transientSettings(transientSettings); + builder.persistentSettings(persistentSettings); + builder.indices(indices.apply(part.indices)); + builder.templates(templates.apply(part.templates)); + builder.customs(customs.apply(part.customs)); + return builder.build(); + } + } + + @Override + public MetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(); + builder.version = in.readLong(); + builder.uuid = in.readString(); + builder.transientSettings(readSettingsFromStream(in)); + builder.persistentSettings(readSettingsFromStream(in)); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.put(IndexMetaData.Builder.readFrom(in), false); + } + size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.put(IndexTemplateMetaData.Builder.readFrom(in)); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + out.writeString(uuid); + writeSettingsToStream(transientSettings, out); + writeSettingsToStream(persistentSettings, out); + out.writeVInt(indices.size()); + for (IndexMetaData indexMetaData : this) { + indexMetaData.writeTo(out); + } + out.writeVInt(templates.size()); + for (ObjectCursor cursor : templates.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static Builder builder() { return new Builder(); } @@ -1225,6 +1351,11 @@ public class MetaData implements Iterable { return this; } + public Builder indices(ImmutableOpenMap indices) { + this.indices.putAll(indices); + return this; + } + public Builder put(IndexTemplateMetaData.Builder template) { return put(template.build()); } @@ -1239,6 +1370,11 @@ public class MetaData implements Iterable { return this; } + public Builder templates(ImmutableOpenMap templates) { + this.templates.putAll(templates); + return this; + } + public Custom getCustom(String type) { return customs.get(type); } @@ -1253,6 +1389,11 @@ public class MetaData implements Iterable { return this; } + public Builder customs(ImmutableOpenMap customs) { + this.customs.putAll(customs); + return this; + } + public Builder updateSettings(Settings settings, String... indices) { if (indices == null || indices.length == 0) { indices = this.indices.keys().toArray(String.class); @@ -1305,6 +1446,11 @@ public class MetaData implements Iterable { return this; } + public Builder uuid(String uuid) { + this.uuid = uuid; + return this; + } + public Builder generateUuidIfNeeded() { if (uuid.equals("_na_")) { uuid = Strings.randomBase64UUID(); @@ -1363,10 +1509,10 @@ public class MetaData implements Iterable { } for (ObjectObjectCursor cursor : metaData.customs()) { - Custom.Factory factory = lookupFactorySafe(cursor.key); - if (factory.context().contains(context)) { + Custom proto = lookupPrototypeSafe(cursor.key); + if (proto.context().contains(context)) { builder.startObject(cursor.key); - factory.toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } } @@ -1410,12 +1556,13 @@ public class MetaData implements Iterable { } } else { // check if its a custom index metadata - Custom.Factory factory = lookupFactory(currentFieldName); - if (factory == null) { + Custom proto = lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), factory.fromXContent(parser)); + Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token.isValue()) { @@ -1430,46 +1577,7 @@ public class MetaData implements Iterable { } public static MetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(); - builder.version = in.readLong(); - builder.uuid = in.readString(); - builder.transientSettings(readSettingsFromStream(in)); - builder.persistentSettings(readSettingsFromStream(in)); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(IndexMetaData.Builder.readFrom(in), false); - } - size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(IndexTemplateMetaData.Builder.readFrom(in)); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(MetaData metaData, StreamOutput out) throws IOException { - out.writeLong(metaData.version); - out.writeString(metaData.uuid); - writeSettingsToStream(metaData.transientSettings(), out); - writeSettingsToStream(metaData.persistentSettings(), out); - out.writeVInt(metaData.indices.size()); - for (IndexMetaData indexMetaData : metaData) { - IndexMetaData.Builder.writeTo(indexMetaData, out); - } - out.writeVInt(metaData.templates.size()); - for (ObjectCursor cursor : metaData.templates.values()) { - IndexTemplateMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(metaData.customs().size()); - for (ObjectObjectCursor cursor : metaData.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 9fcb5182180..732561f66f1 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -272,7 +272,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (existing == null) { customs.put(type, custom); } else { - IndexMetaData.Custom merged = IndexMetaData.lookupFactorySafe(type).merge(existing, custom); + IndexMetaData.Custom merged = existing.mergeWith(custom); customs.put(type, merged); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 81b11fc14b1..51cd5db086b 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; @@ -39,11 +41,11 @@ import java.util.Map; /** * Contains metadata about registered snapshot repositories */ -public class RepositoriesMetaData implements MetaData.Custom { +public class RepositoriesMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "repositories"; - public static final Factory FACTORY = new Factory(); + public static final RepositoriesMetaData PROTO = new RepositoriesMetaData(); private final ImmutableList repositories; @@ -80,122 +82,132 @@ public class RepositoriesMetaData implements MetaData.Custom { return null; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RepositoriesMetaData that = (RepositoriesMetaData) o; + + return repositories.equals(that.repositories); + + } + + @Override + public int hashCode() { + return repositories.hashCode(); + } + /** - * Repository metadata factory + * {@inheritDoc} */ - public static class Factory extends MetaData.Custom.Factory { + @Override + public String type() { + return TYPE; + } - /** - * {@inheritDoc} - */ - @Override - public String type() { - return TYPE; + /** + * {@inheritDoc} + */ + @Override + public Custom readFrom(StreamInput in) throws IOException { + RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; + for (int i = 0; i < repository.length; i++) { + repository[i] = RepositoryMetaData.readFrom(in); } + return new RepositoriesMetaData(repository); + } - /** - * {@inheritDoc} - */ - @Override - public RepositoriesMetaData readFrom(StreamInput in) throws IOException { - RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; - for (int i = 0; i < repository.length; i++) { - repository[i] = RepositoryMetaData.readFrom(in); - } - return new RepositoriesMetaData(repository); - } - - /** - * {@inheritDoc} - */ - @Override - public void writeTo(RepositoriesMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.repositories().size()); - for (RepositoryMetaData repository : repositories.repositories()) { - repository.writeTo(out); - } - } - - /** - * {@inheritDoc} - */ - @Override - public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - List repository = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String name = parser.currentName(); - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); - } - String type = null; - Settings settings = ImmutableSettings.EMPTY; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("type".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); - } - type = parser.text(); - } else if ("settings".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); - } - settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); - } else { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); - } - } else { - throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); - } - } - if (type == null) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); - } - repository.add(new RepositoryMetaData(name, type, settings)); - } else { - throw new ElasticsearchParseException("failed to parse repositories"); - } - } - return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); - } - - /** - * {@inheritDoc} - */ - @Override - public void toXContent(RepositoriesMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - for (RepositoryMetaData repository : customIndexMetaData.repositories()) { - toXContent(repository, builder, params); - } - } - - @Override - public EnumSet context() { - return MetaData.API_AND_GATEWAY; - } - - /** - * Serializes information about a single repository - * - * @param repository repository metadata - * @param builder XContent builder - * @param params serialization parameters - * @throws IOException - */ - public void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("type", repository.type()); - builder.startObject("settings"); - for (Map.Entry settingEntry : repository.settings().getAsMap().entrySet()) { - builder.field(settingEntry.getKey(), settingEntry.getValue()); - } - builder.endObject(); - - builder.endObject(); + /** + * {@inheritDoc} + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(repositories.size()); + for (RepositoryMetaData repository : repositories) { + repository.writeTo(out); } } + /** + * {@inheritDoc} + */ + @Override + public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + List repository = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String name = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); + } + String type = null; + Settings settings = ImmutableSettings.EMPTY; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("type".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); + } + type = parser.text(); + } else if ("settings".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); + } + settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); + } else { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); + } + } else { + throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); + } + } + if (type == null) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); + } + repository.add(new RepositoryMetaData(name, type, settings)); + } else { + throw new ElasticsearchParseException("failed to parse repositories"); + } + } + return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); + } + + /** + * {@inheritDoc} + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + for (RepositoryMetaData repository : repositories) { + toXContent(repository, builder, params); + } + return builder; + } + + @Override + public EnumSet context() { + return MetaData.API_AND_GATEWAY; + } + + /** + * Serializes information about a single repository + * + * @param repository repository metadata + * @param builder XContent builder + * @param params serialization parameters + * @throws IOException + */ + public static void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("type", repository.type()); + builder.startObject("settings"); + for (Map.Entry settingEntry : repository.settings().getAsMap().entrySet()) { + builder.field(settingEntry.getKey(), settingEntry.getValue()); + } + builder.endObject(); + + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java index ea50b30ba88..a283f1f43c1 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java @@ -99,4 +99,25 @@ public class RepositoryMetaData { out.writeString(type); ImmutableSettings.writeSettingsToStream(settings, out); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RepositoryMetaData that = (RepositoryMetaData) o; + + if (!name.equals(that.name)) return false; + if (!type.equals(that.type)) return false; + return settings.equals(that.settings); + + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + type.hashCode(); + result = 31 * result + settings.hashCode(); + return result; + } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java index 642136d7b7e..51fd5e0514a 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -29,16 +30,17 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.EnumSet; import java.util.Map; /** * Meta data about restore processes that are currently executing */ -public class RestoreMetaData implements MetaData.Custom { +public class RestoreMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "restore"; - public static final Factory FACTORY = new Factory(); + public static final RestoreMetaData PROTO = new RestoreMetaData(); private final ImmutableList entries; @@ -394,124 +396,122 @@ public class RestoreMetaData implements MetaData.Custom { } /** - * Restore metadata factory + * {@inheritDoc} */ - public static class Factory extends MetaData.Custom.Factory { + @Override + public String type() { + return TYPE; + } - /** - * {@inheritDoc} - */ - @Override - public String type() { - return TYPE; - } - - /** - * {@inheritDoc} - */ - @Override - public RestoreMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); - State state = State.fromValue(in.readByte()); - int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); - for (int j = 0; j < indices; j++) { - indexBuilder.add(in.readString()); - } - ImmutableMap.Builder builder = ImmutableMap.builder(); - int shards = in.readVInt(); - for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); - ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); - builder.put(shardId, shardState); - } - entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); + /** + * {@inheritDoc} + */ + @Override + public RestoreMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + State state = State.fromValue(in.readByte()); + int indices = in.readVInt(); + ImmutableList.Builder indexBuilder = ImmutableList.builder(); + for (int j = 0; j < indices; j++) { + indexBuilder.add(in.readString()); } - return new RestoreMetaData(entries); - } - - /** - * {@inheritDoc} - */ - @Override - public void writeTo(RestoreMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.entries().size()); - for (Entry entry : repositories.entries()) { - entry.snapshotId().writeTo(out); - out.writeByte(entry.state().value()); - out.writeVInt(entry.indices().size()); - for (String index : entry.indices()) { - out.writeString(index); - } - out.writeVInt(entry.shards().size()); - for (Map.Entry shardEntry : entry.shards().entrySet()) { - shardEntry.getKey().writeTo(out); - shardEntry.getValue().writeTo(out); - } + ImmutableMap.Builder builder = ImmutableMap.builder(); + int shards = in.readVInt(); + for (int j = 0; j < shards; j++) { + ShardId shardId = ShardId.readShardId(in); + ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); + builder.put(shardId, shardState); } + entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); } + return new RestoreMetaData(entries); + } - /** - * {@inheritDoc} - */ - @Override - public RestoreMetaData fromXContent(XContentParser parser) throws IOException { - throw new UnsupportedOperationException(); - } - - /** - * {@inheritDoc} - */ - @Override - public void toXContent(RestoreMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray("snapshots"); - for (Entry entry : customIndexMetaData.entries()) { - toXContent(entry, builder, params); + /** + * {@inheritDoc} + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries.size()); + for (Entry entry : entries) { + entry.snapshotId().writeTo(out); + out.writeByte(entry.state().value()); + out.writeVInt(entry.indices().size()); + for (String index : entry.indices()) { + out.writeString(index); } - builder.endArray(); - } - - /** - * Serializes single restore operation - * - * @param entry restore operation metadata - * @param builder XContent builder - * @param params serialization parameters - * @throws IOException - */ - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field("snapshot", entry.snapshotId().getSnapshot()); - builder.field("repository", entry.snapshotId().getRepository()); - builder.field("state", entry.state()); - builder.startArray("indices"); - { - for (String index : entry.indices()) { - builder.value(index); - } + out.writeVInt(entry.shards().size()); + for (Map.Entry shardEntry : entry.shards().entrySet()) { + shardEntry.getKey().writeTo(out); + shardEntry.getValue().writeTo(out); } - builder.endArray(); - builder.startArray("shards"); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardRestoreStatus status = shardEntry.getValue(); - builder.startObject(); - { - builder.field("index", shardId.getIndex()); - builder.field("shard", shardId.getId()); - builder.field("state", status.state()); - } - builder.endObject(); - } - } - - builder.endArray(); - builder.endObject(); } } + /** + * {@inheritDoc} + */ + @Override + public RestoreMetaData fromXContent(XContentParser parser) throws IOException { + throw new UnsupportedOperationException(); + } + @Override + public EnumSet context() { + return MetaData.API_ONLY; + } + + /** + * {@inheritDoc} + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray("snapshots"); + for (Entry entry : entries) { + toXContent(entry, builder, params); + } + builder.endArray(); + return builder; + } + + /** + * Serializes single restore operation + * + * @param entry restore operation metadata + * @param builder XContent builder + * @param params serialization parameters + * @throws IOException + */ + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field("snapshot", entry.snapshotId().getSnapshot()); + builder.field("repository", entry.snapshotId().getRepository()); + builder.field("state", entry.state()); + builder.startArray("indices"); + { + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.startArray("shards"); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardRestoreStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field("index", shardId.getIndex()); + builder.field("shard", shardId.getId()); + builder.field("state", status.state()); + } + builder.endObject(); + } + } + + builder.endArray(); + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java index b1bcc92b8bd..b23c58710a0 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java @@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -30,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.EnumSet; import java.util.Map; import static com.google.common.collect.Maps.newHashMap; @@ -37,10 +40,10 @@ import static com.google.common.collect.Maps.newHashMap; /** * Meta data about snapshots that are currently executing */ -public class SnapshotMetaData implements MetaData.Custom { +public class SnapshotMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "snapshots"; - public static final Factory FACTORY = new Factory(); + public static final SnapshotMetaData PROTO = new SnapshotMetaData(); @Override public boolean equals(Object o) { @@ -329,123 +332,123 @@ public class SnapshotMetaData implements MetaData.Custom { return null; } + @Override + public String type() { + return TYPE; + } - public static class Factory extends MetaData.Custom.Factory { - - @Override - public String type() { - return TYPE; //To change body of implemented methods use File | Settings | File Templates. - } - - @Override - public SnapshotMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); - boolean includeGlobalState = in.readBoolean(); - State state = State.fromValue(in.readByte()); - int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); - for (int j = 0; j < indices; j++) { - indexBuilder.add(in.readString()); - } - long startTime = in.readLong(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - int shards = in.readVInt(); - for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); - String nodeId = in.readOptionalString(); - State shardState = State.fromValue(in.readByte()); - builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); - } - entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); + @Override + public SnapshotMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + boolean includeGlobalState = in.readBoolean(); + State state = State.fromValue(in.readByte()); + int indices = in.readVInt(); + ImmutableList.Builder indexBuilder = ImmutableList.builder(); + for (int j = 0; j < indices; j++) { + indexBuilder.add(in.readString()); } - return new SnapshotMetaData(entries); - } - - @Override - public void writeTo(SnapshotMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.entries().size()); - for (Entry entry : repositories.entries()) { - entry.snapshotId().writeTo(out); - out.writeBoolean(entry.includeGlobalState()); - out.writeByte(entry.state().value()); - out.writeVInt(entry.indices().size()); - for (String index : entry.indices()) { - out.writeString(index); - } - out.writeLong(entry.startTime()); - out.writeVInt(entry.shards().size()); - for (Map.Entry shardEntry : entry.shards().entrySet()) { - shardEntry.getKey().writeTo(out); - out.writeOptionalString(shardEntry.getValue().nodeId()); - out.writeByte(shardEntry.getValue().state().value()); - } + long startTime = in.readLong(); + ImmutableMap.Builder builder = ImmutableMap.builder(); + int shards = in.readVInt(); + for (int j = 0; j < shards; j++) { + ShardId shardId = ShardId.readShardId(in); + String nodeId = in.readOptionalString(); + State shardState = State.fromValue(in.readByte()); + builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } + entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); } + return new SnapshotMetaData(entries); + } - @Override - public SnapshotMetaData fromXContent(XContentParser parser) throws IOException { - throw new UnsupportedOperationException(); - } - - static final class Fields { - static final XContentBuilderString REPOSITORY = new XContentBuilderString("repository"); - static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); - static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); - static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); - static final XContentBuilderString STATE = new XContentBuilderString("state"); - static final XContentBuilderString INDICES = new XContentBuilderString("indices"); - static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); - static final XContentBuilderString START_TIME = new XContentBuilderString("start_time"); - static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); - static final XContentBuilderString INDEX = new XContentBuilderString("index"); - static final XContentBuilderString SHARD = new XContentBuilderString("shard"); - static final XContentBuilderString NODE = new XContentBuilderString("node"); - } - - @Override - public void toXContent(SnapshotMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray(Fields.SNAPSHOTS); - for (Entry entry : customIndexMetaData.entries()) { - toXContent(entry, builder, params); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries.size()); + for (Entry entry : entries) { + entry.snapshotId().writeTo(out); + out.writeBoolean(entry.includeGlobalState()); + out.writeByte(entry.state().value()); + out.writeVInt(entry.indices().size()); + for (String index : entry.indices()) { + out.writeString(index); } - builder.endArray(); - } - - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); - builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); - builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); - builder.field(Fields.STATE, entry.state()); - builder.startArray(Fields.INDICES); - { - for (String index : entry.indices()) { - builder.value(index); - } + out.writeLong(entry.startTime()); + out.writeVInt(entry.shards().size()); + for (Map.Entry shardEntry : entry.shards().entrySet()) { + shardEntry.getKey().writeTo(out); + out.writeOptionalString(shardEntry.getValue().nodeId()); + out.writeByte(shardEntry.getValue().state().value()); } - builder.endArray(); - builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); - builder.startArray(Fields.SHARDS); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardSnapshotStatus status = shardEntry.getValue(); - builder.startObject(); - { - builder.field(Fields.INDEX, shardId.getIndex()); - builder.field(Fields.SHARD, shardId.getId()); - builder.field(Fields.STATE, status.state()); - builder.field(Fields.NODE, status.nodeId()); - } - builder.endObject(); - } - } - builder.endArray(); - builder.endObject(); } } + @Override + public SnapshotMetaData fromXContent(XContentParser parser) throws IOException { + throw new UnsupportedOperationException(); + } + @Override + public EnumSet context() { + return MetaData.API_ONLY; + } + + static final class Fields { + static final XContentBuilderString REPOSITORY = new XContentBuilderString("repository"); + static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); + static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); + static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); + static final XContentBuilderString STATE = new XContentBuilderString("state"); + static final XContentBuilderString INDICES = new XContentBuilderString("indices"); + static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); + static final XContentBuilderString START_TIME = new XContentBuilderString("start_time"); + static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); + static final XContentBuilderString INDEX = new XContentBuilderString("index"); + static final XContentBuilderString SHARD = new XContentBuilderString("shard"); + static final XContentBuilderString NODE = new XContentBuilderString("node"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray(Fields.SNAPSHOTS); + for (Entry entry : entries) { + toXContent(entry, builder, params); + } + builder.endArray(); + return builder; + } + + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); + builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); + builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); + builder.field(Fields.STATE, entry.state()); + builder.startArray(Fields.INDICES); + { + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); + builder.startArray(Fields.SHARDS); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardSnapshotStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field(Fields.INDEX, shardId.getIndex()); + builder.field(Fields.SHARD, shardId.getId()); + builder.field(Fields.STATE, status.state()); + builder.field(Fields.NODE, status.nodeId()); + } + builder.endObject(); + } + } + builder.endArray(); + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 2831af8183d..8692e5fb006 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -25,6 +25,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.UnmodifiableIterator; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -44,9 +45,10 @@ import static com.google.common.collect.Lists.newArrayList; * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to * access, modify merge / diff discovery nodes. */ -public class DiscoveryNodes implements Iterable { +public class DiscoveryNodes extends AbstractDiffable implements Iterable { public static final DiscoveryNodes EMPTY_NODES = builder().build(); + public static final DiscoveryNodes PROTO = EMPTY_NODES; private final ImmutableOpenMap nodes; private final ImmutableOpenMap dataNodes; @@ -567,6 +569,44 @@ public class DiscoveryNodes implements Iterable { } } + public void writeTo(StreamOutput out) throws IOException { + if (masterNodeId == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeString(masterNodeId); + } + out.writeVInt(nodes.size()); + for (DiscoveryNode node : this) { + node.writeTo(out); + } + } + + public DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + Builder builder = new Builder(); + if (in.readBoolean()) { + builder.masterNodeId(in.readString()); + } + if (localNode != null) { + builder.localNodeId(localNode.id()); + } + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + DiscoveryNode node = DiscoveryNode.readNode(in); + if (localNode != null && node.id().equals(localNode.id())) { + // reuse the same instance of our address and local node id for faster equality + node = localNode; + } + builder.put(node); + } + return builder.build(); + } + + @Override + public DiscoveryNodes readFrom(StreamInput in) throws IOException { + return readFrom(in, localNode()); + } + public static Builder builder() { return new Builder(); } @@ -631,37 +671,8 @@ public class DiscoveryNodes implements Iterable { return new DiscoveryNodes(nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), masterNodeId, localNodeId, minNodeVersion, minNonClientNodeVersion); } - public static void writeTo(DiscoveryNodes nodes, StreamOutput out) throws IOException { - if (nodes.masterNodeId() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeString(nodes.masterNodeId); - } - out.writeVInt(nodes.size()); - for (DiscoveryNode node : nodes) { - node.writeTo(out); - } - } - public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - Builder builder = new Builder(); - if (in.readBoolean()) { - builder.masterNodeId(in.readString()); - } - if (localNode != null) { - builder.localNodeId(localNode.id()); - } - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - DiscoveryNode node = DiscoveryNode.readNode(in); - if (localNode != null && node.id().equals(localNode.id())) { - // reuse the same instance of our address and local node id for faster equality - node = localNode; - } - builder.put(node); - } - return builder.build(); + return PROTO.readFrom(in, localNode); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index de4ed5434e1..6aaa260c4b5 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -25,6 +25,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenIntMap; @@ -55,7 +56,9 @@ import static com.google.common.collect.Lists.newArrayList; * represented as {@link ShardRouting}. *

    */ -public class IndexRoutingTable implements Iterable { +public class IndexRoutingTable extends AbstractDiffable implements Iterable { + + public static final IndexRoutingTable PROTO = builder("").build(); private final String index; private final ShardShuffler shuffler; @@ -314,9 +317,51 @@ public class IndexRoutingTable implements Iterable { return new GroupShardsIterator(set); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexRoutingTable that = (IndexRoutingTable) o; + + if (!index.equals(that.index)) return false; + if (!shards.equals(that.shards)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = index.hashCode(); + result = 31 * result + shards.hashCode(); + return result; + } + public void validate() throws RoutingValidationException { } + @Override + public IndexRoutingTable readFrom(StreamInput in) throws IOException { + String index = in.readString(); + Builder builder = new Builder(index); + + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); + } + + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeVInt(shards.size()); + for (IndexShardRoutingTable indexShard : this) { + IndexShardRoutingTable.Builder.writeToThin(indexShard, out); + } + } + public static Builder builder(String index) { return new Builder(index); } @@ -338,30 +383,7 @@ public class IndexRoutingTable implements Iterable { * @throws IOException if something happens during read */ public static IndexRoutingTable readFrom(StreamInput in) throws IOException { - String index = in.readString(); - Builder builder = new Builder(index); - - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); - } - - return builder.build(); - } - - /** - * Writes an {@link IndexRoutingTable} to a {@link StreamOutput}. - * - * @param index {@link IndexRoutingTable} to write - * @param out {@link StreamOutput} to write to - * @throws IOException if something happens during write - */ - public static void writeTo(IndexRoutingTable index, StreamOutput out) throws IOException { - out.writeString(index.index()); - out.writeVInt(index.shards.size()); - for (IndexShardRoutingTable indexShard : index) { - IndexShardRoutingTable.Builder.writeToThin(indexShard, out); - } + return PROTO.readFrom(in); } /** diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 00e50b76129..2371b96f5b0 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -347,6 +347,28 @@ public class IndexShardRoutingTable implements Iterable { return new PlainShardIterator(shardId, ordered); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexShardRoutingTable that = (IndexShardRoutingTable) o; + + if (primaryAllocatedPostApi != that.primaryAllocatedPostApi) return false; + if (!shardId.equals(that.shardId)) return false; + if (!shards.equals(that.shards)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = shardId.hashCode(); + result = 31 * result + shards.hashCode(); + result = 31 * result + (primaryAllocatedPostApi ? 1 : 0); + return result; + } + /** * Returns true iff all shards in the routing table are started otherwise false */ diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 9f1b5db6c6b..25a8bac2f88 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntSet; import com.google.common.collect.*; -import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.StreamInput; @@ -44,7 +44,9 @@ import static com.google.common.collect.Maps.newHashMap; * * @see IndexRoutingTable */ -public class RoutingTable implements Iterable { +public class RoutingTable implements Iterable, Diffable { + + public static RoutingTable PROTO = builder().build(); public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build(); @@ -254,6 +256,66 @@ public class RoutingTable implements Iterable { return new GroupShardsIterator(set); } + @Override + public Diff diff(RoutingTable previousState) { + return new RoutingTableDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new RoutingTableDiff(in); + } + + @Override + public RoutingTable readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(); + builder.version = in.readLong(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in); + builder.add(index); + } + + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + out.writeVInt(indicesRouting.size()); + for (IndexRoutingTable index : indicesRouting.values()) { + index.writeTo(out); + } + } + + private static class RoutingTableDiff implements Diff { + + private final long version; + + private final Diff> indicesRouting; + + public RoutingTableDiff(RoutingTable before, RoutingTable after) { + version = after.version; + indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting); + } + + public RoutingTableDiff(StreamInput in) throws IOException { + version = in.readLong(); + indicesRouting = DiffableUtils.readImmutableMapDiff(in, IndexRoutingTable.PROTO); + } + + @Override + public RoutingTable apply(RoutingTable part) { + return new RoutingTable(version, indicesRouting.apply(part.indicesRouting)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + indicesRouting.writeTo(out); + } + } + public static Builder builder() { return new Builder(); } @@ -403,6 +465,11 @@ public class RoutingTable implements Iterable { return this; } + public Builder indicesRouting(ImmutableMap indicesRouting) { + this.indicesRouting.putAll(indicesRouting); + return this; + } + public Builder remove(String index) { indicesRouting.remove(index); return this; @@ -422,23 +489,7 @@ public class RoutingTable implements Iterable { } public static RoutingTable readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(); - builder.version = in.readLong(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in); - builder.add(index); - } - - return builder.build(); - } - - public static void writeTo(RoutingTable table, StreamOutput out) throws IOException { - out.writeLong(table.version); - out.writeVInt(table.indicesRouting.size()); - for (IndexRoutingTable index : table.indicesRouting.values()) { - IndexRoutingTable.Builder.writeTo(index, out); - } + return PROTO.readFrom(in); } } @@ -450,5 +501,4 @@ public class RoutingTable implements Iterable { return sb.toString(); } - } diff --git a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index 17350ba6c04..b1823e5d74e 100644 --- a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -401,7 +401,7 @@ public class InternalClusterService extends AbstractLifecycleComponent { + /** + * Reads a copy of an object with the same type form the stream input + * + * The caller object remains unchanged. + */ + T readFrom(StreamInput in) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/src/main/java/org/elasticsearch/common/io/stream/Writeable.java new file mode 100644 index 00000000000..9025315dc43 --- /dev/null +++ b/src/main/java/org/elasticsearch/common/io/stream/Writeable.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.io.stream; + +import java.io.IOException; + +public interface Writeable extends StreamableReader { + + /** + * Writes the current object into the output stream out + */ + void writeTo(StreamOutput out) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/discovery/Discovery.java b/src/main/java/org/elasticsearch/discovery/Discovery.java index dfd51e6348f..36b8e5da6f5 100644 --- a/src/main/java/org/elasticsearch/discovery/Discovery.java +++ b/src/main/java/org/elasticsearch/discovery/Discovery.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -59,7 +60,7 @@ public interface Discovery extends LifecycleComponent { * The {@link AckListener} allows to keep track of the ack received from nodes, and verify whether * they updated their own cluster state or not. */ - void publish(ClusterState clusterState, AckListener ackListener); + void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener); public static interface AckListener { void onNodeAck(DiscoveryNode node, @Nullable Throwable t); diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index 1f7207abd5b..a95c313447b 100644 --- a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -21,6 +21,7 @@ package org.elasticsearch.discovery; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -132,9 +133,9 @@ public class DiscoveryService extends AbstractLifecycleComponent implem private static final ConcurrentMap clusterGroups = ConcurrentCollections.newConcurrentMap(); + private volatile ClusterState lastProcessedClusterState; + @Inject public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService, DiscoveryNodeService discoveryNodeService, Version version, DiscoverySettings discoverySettings) { @@ -273,7 +279,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { + public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { if (!master) { throw new IllegalStateException("Shouldn't publish state when not master"); } @@ -286,7 +292,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } nodesToPublishTo.add(localDiscovery.localNode); } - publish(members, clusterState, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); + publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); } } @@ -299,17 +305,47 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem return members.toArray(new LocalDiscovery[members.size()]); } - private void publish(LocalDiscovery[] members, ClusterState clusterState, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { + private void publish(LocalDiscovery[] members, ClusterChangedEvent clusterChangedEvent, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { try { // we do the marshaling intentionally, to check it works well... - final byte[] clusterStateBytes = Builder.toBytes(clusterState); + byte[] clusterStateBytes = null; + byte[] clusterStateDiffBytes = null; + ClusterState clusterState = clusterChangedEvent.state(); for (final LocalDiscovery discovery : members) { if (discovery.master) { continue; } - final ClusterState nodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); + ClusterState newNodeSpecificClusterState = null; + synchronized (this) { + // we do the marshaling intentionally, to check it works well... + // check if we publsihed cluster state at least once and node was in the cluster when we published cluster state the last time + if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode.id())) { + // both conditions are true - which means we can try sending cluster state as diffs + if (clusterStateDiffBytes == null) { + Diff diff = clusterState.diff(clusterChangedEvent.previousState()); + BytesStreamOutput os = new BytesStreamOutput(); + diff.writeTo(os); + clusterStateDiffBytes = os.bytes().toBytes(); + } + try { + newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(new BytesStreamInput(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState); + logger.debug("sending diff cluster state version with size {} to [{}]", clusterStateDiffBytes.length, discovery.localNode.getName()); + } catch (IncompatibleClusterStateVersionException ex) { + logger.warn("incompatible cluster state version - resending complete cluster state", ex); + } + } + if (newNodeSpecificClusterState == null) { + if (clusterStateBytes == null) { + clusterStateBytes = Builder.toBytes(clusterState); + } + newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); + } + discovery.lastProcessedClusterState = newNodeSpecificClusterState; + } + final ClusterState nodeSpecificClusterState = newNodeSpecificClusterState; + nodeSpecificClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); // ignore cluster state messages that do not include "me", not in the game yet... if (nodeSpecificClusterState.nodes().localNode() != null) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 0defcb7edd5..5bec60abf04 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -22,7 +22,6 @@ package org.elasticsearch.discovery.zen; import com.google.common.base.Objects; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -329,12 +328,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Override - public void publish(ClusterState clusterState, AckListener ackListener) { - if (!clusterState.getNodes().localNodeMaster()) { + public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) { + if (!clusterChangedEvent.state().getNodes().localNodeMaster()) { throw new IllegalStateException("Shouldn't publish state when not master"); } - nodesFD.updateNodesAndPing(clusterState); - publishClusterState.publish(clusterState, ackListener); + nodesFD.updateNodesAndPing(clusterChangedEvent.state()); + publishClusterState.publish(clusterChangedEvent, ackListener); } /** diff --git a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index fd1ba85c25c..c4ad8895e79 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -21,8 +21,12 @@ package org.elasticsearch.discovery.zen.publish; import com.google.common.collect.Maps; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.IncompatibleClusterStateVersionException; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; @@ -40,10 +44,13 @@ import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; +import java.io.IOException; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; /** * @@ -83,73 +90,43 @@ public class PublishClusterStateAction extends AbstractComponent { transportService.removeHandler(ACTION_NAME); } - public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { - Set nodesToPublishTo = new HashSet<>(clusterState.nodes().size()); + public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { + Set nodesToPublishTo = new HashSet<>(clusterChangedEvent.state().nodes().size()); DiscoveryNode localNode = nodesProvider.nodes().localNode(); - for (final DiscoveryNode node : clusterState.nodes()) { + for (final DiscoveryNode node : clusterChangedEvent.state().nodes()) { if (node.equals(localNode)) { continue; } nodesToPublishTo.add(node); } - publish(clusterState, nodesToPublishTo, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); + publish(clusterChangedEvent, nodesToPublishTo, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); } - private void publish(final ClusterState clusterState, final Set nodesToPublishTo, + private void publish(final ClusterChangedEvent clusterChangedEvent, final Set nodesToPublishTo, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { Map serializedStates = Maps.newHashMap(); + Map serializedDiffs = Maps.newHashMap(); + final ClusterState clusterState = clusterChangedEvent.state(); + final ClusterState previousState = clusterChangedEvent.previousState(); final AtomicBoolean timedOutWaitingForNodes = new AtomicBoolean(false); final TimeValue publishTimeout = discoverySettings.getPublishTimeout(); + final boolean sendFullVersion = !discoverySettings.getPublishDiff() || previousState == null; + Diff diff = null; for (final DiscoveryNode node : nodesToPublishTo) { // try and serialize the cluster state once (or per version), so we don't serialize it // per node when we send it over the wire, compress it while we are at it... - BytesReference bytes = serializedStates.get(node.version()); - if (bytes == null) { - try { - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); - stream.setVersion(node.version()); - ClusterState.Builder.writeTo(clusterState, stream); - stream.close(); - bytes = bStream.bytes(); - serializedStates.put(node.version(), bytes); - } catch (Throwable e) { - logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); - publishResponseHandler.onFailure(node, e); - continue; + // we don't send full version if node didn't exist in the previous version of cluster state + if (sendFullVersion || !previousState.nodes().nodeExists(node.id())) { + sendFullClusterState(clusterState, serializedStates, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); + } else { + if (diff == null) { + diff = clusterState.diff(previousState); } - } - try { - TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false); - // no need to put a timeout on the options here, because we want the response to eventually be received - // and not log an error if it arrives after the timeout - transportService.sendRequest(node, ACTION_NAME, - new BytesTransportRequest(bytes, node.version()), - options, // no need to compress, we already compressed the bytes - - new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - - @Override - public void handleResponse(TransportResponse.Empty response) { - if (timedOutWaitingForNodes.get()) { - logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); - } - publishResponseHandler.onResponse(node); - } - - @Override - public void handleException(TransportException exp) { - logger.debug("failed to send cluster state to {}", exp, node); - publishResponseHandler.onFailure(node, exp); - } - }); - } catch (Throwable t) { - logger.debug("error sending cluster state to {}", t, node); - publishResponseHandler.onFailure(node, t); + sendClusterStateDiff(clusterState, diff, serializedDiffs, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); } } @@ -171,7 +148,107 @@ public class PublishClusterStateAction extends AbstractComponent { } } + private void sendFullClusterState(ClusterState clusterState, @Nullable Map serializedStates, + DiscoveryNode node, AtomicBoolean timedOutWaitingForNodes, TimeValue publishTimeout, + BlockingClusterStatePublishResponseHandler publishResponseHandler) { + BytesReference bytes = null; + if (serializedStates != null) { + bytes = serializedStates.get(node.version()); + } + if (bytes == null) { + try { + bytes = serializeFullClusterState(clusterState, node.version()); + if (serializedStates != null) { + serializedStates.put(node.version(), bytes); + } + } catch (Throwable e) { + logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); + publishResponseHandler.onFailure(node, e); + return; + } + } + publishClusterStateToNode(clusterState, bytes, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler, false); + } + + private void sendClusterStateDiff(ClusterState clusterState, Diff diff, Map serializedDiffs, DiscoveryNode node, + AtomicBoolean timedOutWaitingForNodes, TimeValue publishTimeout, + BlockingClusterStatePublishResponseHandler publishResponseHandler) { + BytesReference bytes = serializedDiffs.get(node.version()); + if (bytes == null) { + try { + bytes = serializeDiffClusterState(diff, node.version()); + serializedDiffs.put(node.version(), bytes); + } catch (Throwable e) { + logger.warn("failed to serialize diff of cluster_state before publishing it to node {}", e, node); + publishResponseHandler.onFailure(node, e); + return; + } + } + publishClusterStateToNode(clusterState, bytes, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler, true); + } + + private void publishClusterStateToNode(final ClusterState clusterState, BytesReference bytes, + final DiscoveryNode node, final AtomicBoolean timedOutWaitingForNodes, + final TimeValue publishTimeout, + final BlockingClusterStatePublishResponseHandler publishResponseHandler, + final boolean sendDiffs) { + try { + TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false); + // no need to put a timeout on the options here, because we want the response to eventually be received + // and not log an error if it arrives after the timeout + transportService.sendRequest(node, ACTION_NAME, + new BytesTransportRequest(bytes, node.version()), + options, // no need to compress, we already compressed the bytes + + new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + + @Override + public void handleResponse(TransportResponse.Empty response) { + if (timedOutWaitingForNodes.get()) { + logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); + } + publishResponseHandler.onResponse(node); + } + + @Override + public void handleException(TransportException exp) { + if (sendDiffs && exp.unwrapCause() instanceof IncompatibleClusterStateVersionException) { + logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); + sendFullClusterState(clusterState, null, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); + } else { + logger.debug("failed to send cluster state to {}", exp, node); + publishResponseHandler.onFailure(node, exp); + } + } + }); + } catch (Throwable t) { + logger.warn("error sending cluster state to {}", t, node); + publishResponseHandler.onFailure(node, t); + } + } + + public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException { + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(nodeVersion); + stream.writeBoolean(true); + clusterState.writeTo(stream); + stream.close(); + return bStream.bytes(); + } + + public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException { + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(nodeVersion); + stream.writeBoolean(false); + diff.writeTo(stream); + stream.close(); + return bStream.bytes(); + } + private class PublishClusterStateRequestHandler implements TransportRequestHandler { + private ClusterState lastSeenClusterState; @Override public void messageReceived(BytesTransportRequest request, final TransportChannel channel) throws Exception { @@ -183,11 +260,24 @@ public class PublishClusterStateAction extends AbstractComponent { in = request.bytes().streamInput(); } in.setVersion(request.version()); - ClusterState clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); - clusterState.status(ClusterState.ClusterStateStatus.RECEIVED); - logger.debug("received cluster state version {}", clusterState.version()); + synchronized (this) { + // If true we received full cluster state - otherwise diffs + if (in.readBoolean()) { + lastSeenClusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); + logger.debug("received full cluster state version {} with size {}", lastSeenClusterState.version(), request.bytes().length()); + } else if (lastSeenClusterState != null) { + Diff diff = lastSeenClusterState.readDiffFrom(in); + lastSeenClusterState = diff.apply(lastSeenClusterState); + logger.debug("received diff cluster state version {} with uuid {}, diff size {}", lastSeenClusterState.version(), lastSeenClusterState.uuid(), request.bytes().length()); + } else { + logger.debug("received diff for but don't have any local cluster state - requesting full state"); + throw new IncompatibleClusterStateVersionException("have no local cluster state"); + } + lastSeenClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); + } + try { - listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() { + listener.onNewClusterState(lastSeenClusterState, new NewClusterStateListener.NewStateProcessed() { @Override public void onNewClusterStateProcessed() { try { @@ -207,7 +297,7 @@ public class PublishClusterStateAction extends AbstractComponent { } }); } catch (Exception e) { - logger.warn("unexpected error while processing cluster state version [{}]", e, clusterState.version()); + logger.warn("unexpected error while processing cluster state version [{}]", e, lastSeenClusterState.version()); try { channel.sendResponse(e); } catch (Throwable e1) { diff --git a/src/main/java/org/elasticsearch/gateway/Gateway.java b/src/main/java/org/elasticsearch/gateway/Gateway.java index cd15bccdc4a..139b5763489 100644 --- a/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.indices.IndicesService; + import java.nio.file.Path; diff --git a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 43dec7edb51..5538ef6d043 100644 --- a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -198,7 +198,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { fromNode.writeTo(out); out.writeVInt(indices.length); for (IndexMetaData indexMetaData : indices) { - IndexMetaData.Builder.writeTo(indexMetaData, out); + indexMetaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 36372009f87..900a2e7ffc7 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -221,7 +221,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA out.writeBoolean(false); } else { out.writeBoolean(true); - MetaData.Builder.writeTo(metaData, out); + metaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java index be4e1b4e3f3..85b46925b5f 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java @@ -58,7 +58,7 @@ public class RestGetRepositoriesAction extends BaseRestHandler { public RestResponse buildResponse(GetRepositoriesResponse response, XContentBuilder builder) throws Exception { builder.startObject(); for (RepositoryMetaData repositoryMetaData : response.repositories()) { - RepositoriesMetaData.FACTORY.toXContent(repositoryMetaData, builder, request); + RepositoriesMetaData.toXContent(repositoryMetaData, builder, request); } builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java index 7e4e56710b7..dd1dca34bbc 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java @@ -146,7 +146,7 @@ public class RestGetIndicesAction extends BaseRestHandler { builder.startObject(Fields.WARMERS); if (warmers != null) { for (IndexWarmersMetaData.Entry warmer : warmers) { - IndexWarmersMetaData.FACTORY.toXContent(warmer, builder, params); + IndexWarmersMetaData.toXContent(warmer, builder, params); } } builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java index 7023eecedd4..be83ccbe4b5 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java @@ -72,7 +72,7 @@ public class RestGetWarmerAction extends BaseRestHandler { builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE); builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE); for (IndexWarmersMetaData.Entry warmerEntry : entry.value) { - IndexWarmersMetaData.FACTORY.toXContent(warmerEntry, builder, request); + IndexWarmersMetaData.toXContent(warmerEntry, builder, request); } builder.endObject(); builder.endObject(); diff --git a/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java b/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java index de56f823eac..ef1ef44ffb9 100644 --- a/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java +++ b/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java @@ -22,7 +22,9 @@ package org.elasticsearch.search.warmer; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -33,16 +35,33 @@ import org.elasticsearch.common.xcontent.*; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; /** */ -public class IndexWarmersMetaData implements IndexMetaData.Custom { +public class IndexWarmersMetaData extends AbstractDiffable implements IndexMetaData.Custom { public static final String TYPE = "warmers"; - public static final Factory FACTORY = new Factory(); + public static final IndexWarmersMetaData PROTO = new IndexWarmersMetaData(); + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexWarmersMetaData that = (IndexWarmersMetaData) o; + + return entries.equals(that.entries); + + } + + @Override + public int hashCode() { + return entries.hashCode(); + } public static class Entry { private final String name; @@ -74,6 +93,29 @@ public class IndexWarmersMetaData implements IndexMetaData.Custom { public Boolean queryCache() { return this.queryCache; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Entry entry = (Entry) o; + + if (!name.equals(entry.name)) return false; + if (!Arrays.equals(types, entry.types)) return false; + if (!source.equals(entry.source)) return false; + return !(queryCache != null ? !queryCache.equals(entry.queryCache) : entry.queryCache != null); + + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + Arrays.hashCode(types); + result = 31 * result + source.hashCode(); + result = 31 * result + (queryCache != null ? queryCache.hashCode() : 0); + return result; + } } private final ImmutableList entries; @@ -92,149 +134,143 @@ public class IndexWarmersMetaData implements IndexMetaData.Custom { return TYPE; } - public static class Factory implements IndexMetaData.Custom.Factory { - - @Override - public String type() { - return TYPE; + @Override + public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + String name = in.readString(); + String[] types = in.readStringArray(); + BytesReference source = null; + if (in.readBoolean()) { + source = in.readBytesReference(); + } + Boolean queryCache; + queryCache = in.readOptionalBoolean(); + entries[i] = new Entry(name, types, queryCache, source); } + return new IndexWarmersMetaData(entries); + } - @Override - public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - String name = in.readString(); - String[] types = in.readStringArray(); - BytesReference source = null; - if (in.readBoolean()) { - source = in.readBytesReference(); - } - Boolean queryCache = null; - queryCache = in.readOptionalBoolean(); - entries[i] = new Entry(name, types, queryCache, source); - } - return new IndexWarmersMetaData(entries); - } - - @Override - public void writeTo(IndexWarmersMetaData warmers, StreamOutput out) throws IOException { - out.writeVInt(warmers.entries().size()); - for (Entry entry : warmers.entries()) { - out.writeString(entry.name()); - out.writeStringArray(entry.types()); - if (entry.source() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeBytesReference(entry.source()); - } - out.writeOptionalBoolean(entry.queryCache()); - } - } - - @Override - public IndexWarmersMetaData fromMap(Map map) throws IOException { - // if it starts with the type, remove it - if (map.size() == 1 && map.containsKey(TYPE)) { - map = (Map) map.values().iterator().next(); - } - XContentBuilder builder = XContentFactory.smileBuilder().map(map); - try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { - // move to START_OBJECT - parser.nextToken(); - return fromXContent(parser); - } - } - - @Override - public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { - // we get here after we are at warmers token - String currentFieldName = null; - XContentParser.Token token; - List entries = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - String name = currentFieldName; - List types = new ArrayList<>(2); - BytesReference source = null; - Boolean queryCache = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_ARRAY) { - if ("types".equals(currentFieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - types.add(parser.text()); - } - } - } else if (token == XContentParser.Token.START_OBJECT) { - if ("source".equals(currentFieldName)) { - XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered()); - source = builder.bytes(); - } - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - if ("source".equals(currentFieldName)) { - source = new BytesArray(parser.binaryValue()); - } - } else if (token.isValue()) { - if ("queryCache".equals(currentFieldName) || "query_cache".equals(currentFieldName)) { - queryCache = parser.booleanValue(); - } - } - } - entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); - } - - @Override - public void toXContent(IndexWarmersMetaData warmers, XContentBuilder builder, ToXContent.Params params) throws IOException { - //No need, IndexMetaData already writes it - //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); - for (Entry entry : warmers.entries()) { - toXContent(entry, builder, params); - } - //No need, IndexMetaData already writes it - //builder.endObject(); - } - - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - boolean binary = params.paramAsBoolean("binary", false); - builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("types", entry.types()); - if (entry.queryCache() != null) { - builder.field("queryCache", entry.queryCache()); - } - builder.field("source"); - if (binary) { - builder.value(entry.source()); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries().size()); + for (Entry entry : entries()) { + out.writeString(entry.name()); + out.writeStringArray(entry.types()); + if (entry.source() == null) { + out.writeBoolean(false); } else { - Map mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose(); - builder.map(mapping); + out.writeBoolean(true); + out.writeBytesReference(entry.source()); } - builder.endObject(); - } - - @Override - public IndexWarmersMetaData merge(IndexWarmersMetaData first, IndexWarmersMetaData second) { - List entries = Lists.newArrayList(); - entries.addAll(first.entries()); - for (Entry secondEntry : second.entries()) { - boolean found = false; - for (Entry firstEntry : first.entries()) { - if (firstEntry.name().equals(secondEntry.name())) { - found = true; - break; - } - } - if (!found) { - entries.add(secondEntry); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + out.writeOptionalBoolean(entry.queryCache()); } } + + @Override + public IndexWarmersMetaData fromMap(Map map) throws IOException { + // if it starts with the type, remove it + if (map.size() == 1 && map.containsKey(TYPE)) { + map = (Map) map.values().iterator().next(); + } + XContentBuilder builder = XContentFactory.smileBuilder().map(map); + try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { + // move to START_OBJECT + parser.nextToken(); + return fromXContent(parser); + } + } + + @Override + public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { + // we get here after we are at warmers token + String currentFieldName = null; + XContentParser.Token token; + List entries = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + String name = currentFieldName; + List types = new ArrayList<>(2); + BytesReference source = null; + Boolean queryCache = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_ARRAY) { + if ("types".equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + types.add(parser.text()); + } + } + } else if (token == XContentParser.Token.START_OBJECT) { + if ("source".equals(currentFieldName)) { + XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered()); + source = builder.bytes(); + } + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + if ("source".equals(currentFieldName)) { + source = new BytesArray(parser.binaryValue()); + } + } else if (token.isValue()) { + if ("queryCache".equals(currentFieldName) || "query_cache".equals(currentFieldName)) { + queryCache = parser.booleanValue(); + } + } + } + entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); + } + } + return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + //No need, IndexMetaData already writes it + //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); + for (Entry entry : entries()) { + toXContent(entry, builder, params); + } + //No need, IndexMetaData already writes it + //builder.endObject(); + return builder; + } + + public static void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + boolean binary = params.paramAsBoolean("binary", false); + builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("types", entry.types()); + if (entry.queryCache() != null) { + builder.field("queryCache", entry.queryCache()); + } + builder.field("source"); + if (binary) { + builder.value(entry.source()); + } else { + Map mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose(); + builder.map(mapping); + } + builder.endObject(); + } + + @Override + public IndexMetaData.Custom mergeWith(IndexMetaData.Custom other) { + IndexWarmersMetaData second = (IndexWarmersMetaData) other; + List entries = Lists.newArrayList(); + entries.addAll(entries()); + for (Entry secondEntry : second.entries()) { + boolean found = false; + for (Entry firstEntry : entries()) { + if (firstEntry.name().equals(secondEntry.name())) { + found = true; + break; + } + } + if (!found) { + entries.add(secondEntry); + } + } + return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + } } diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java new file mode 100644 index 00000000000..33008fd63d2 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java @@ -0,0 +1,625 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; +import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; +import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportConnectionListener; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.local.LocalTransport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.google.common.collect.Maps.newHashMap; +import static org.hamcrest.Matchers.*; + +public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase { + + protected ThreadPool threadPool; + protected Map nodes = newHashMap(); + + public static class MockNode { + public final DiscoveryNode discoveryNode; + public final MockTransportService service; + public final PublishClusterStateAction action; + public final MockDiscoveryNodesProvider nodesProvider; + + public MockNode(DiscoveryNode discoveryNode, MockTransportService service, PublishClusterStateAction action, MockDiscoveryNodesProvider nodesProvider) { + this.discoveryNode = discoveryNode; + this.service = service; + this.action = action; + this.nodesProvider = nodesProvider; + } + + public void connectTo(DiscoveryNode node) { + service.connectToNode(node); + nodesProvider.addNode(node); + } + } + + public MockNode createMockNode(final String name, Settings settings, Version version) throws Exception { + return createMockNode(name, settings, version, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.uuid()); + newStateProcessed.onNewClusterStateProcessed(); + } + }); + } + + public MockNode createMockNode(String name, Settings settings, Version version, PublishClusterStateAction.NewClusterStateListener listener) throws Exception { + MockTransportService service = buildTransportService( + ImmutableSettings.builder().put(settings).put("name", name, TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), + version + ); + DiscoveryNode discoveryNode = new DiscoveryNode(name, name, service.boundAddress().publishAddress(), ImmutableMap.of(), version); + MockDiscoveryNodesProvider nodesProvider = new MockDiscoveryNodesProvider(discoveryNode); + PublishClusterStateAction action = buildPublishClusterStateAction(settings, service, nodesProvider, listener); + MockNode node = new MockNode(discoveryNode, service, action, nodesProvider); + nodesProvider.addNode(discoveryNode); + final CountDownLatch latch = new CountDownLatch(nodes.size() * 2 + 1); + TransportConnectionListener waitForConnection = new TransportConnectionListener() { + @Override + public void onNodeConnected(DiscoveryNode node) { + latch.countDown(); + } + + @Override + public void onNodeDisconnected(DiscoveryNode node) { + fail("disconnect should not be called " + node); + } + }; + node.service.addConnectionListener(waitForConnection); + for (MockNode curNode : nodes.values()) { + curNode.service.addConnectionListener(waitForConnection); + curNode.connectTo(node.discoveryNode); + node.connectTo(curNode.discoveryNode); + } + node.connectTo(node.discoveryNode); + assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true)); + for (MockNode curNode : nodes.values()) { + curNode.service.removeConnectionListener(waitForConnection); + } + node.service.removeConnectionListener(waitForConnection); + if (nodes.put(name, node) != null) { + fail("Node with the name " + name + " already exist"); + } + return node; + } + + public MockTransportService service(String name) { + MockNode node = nodes.get(name); + if (node != null) { + return node.service; + } + return null; + } + + public PublishClusterStateAction action(String name) { + MockNode node = nodes.get(name); + if (node != null) { + return node.action; + } + return null; + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = new ThreadPool(getClass().getName()); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + for (MockNode curNode : nodes.values()) { + curNode.action.close(); + curNode.service.close(); + } + terminate(threadPool); + } + + protected MockTransportService buildTransportService(Settings settings, Version version) { + MockTransportService transportService = new MockTransportService(settings, new LocalTransport(settings, threadPool, version), threadPool); + transportService.start(); + return transportService; + } + + protected PublishClusterStateAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, MockDiscoveryNodesProvider nodesProvider, + PublishClusterStateAction.NewClusterStateListener listener) { + DiscoverySettings discoverySettings = new DiscoverySettings(settings, new NodeSettingsService(settings)); + return new PublishClusterStateAction(settings, transportService, nodesProvider, listener, discoverySettings); + } + + + static class MockDiscoveryNodesProvider implements DiscoveryNodesProvider { + + private DiscoveryNodes discoveryNodes = DiscoveryNodes.EMPTY_NODES; + + public MockDiscoveryNodesProvider(DiscoveryNode localNode) { + discoveryNodes = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()).build(); + } + + public void addNode(DiscoveryNode node) { + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(node).build(); + } + + @Override + public DiscoveryNodes nodes() { + return discoveryNodes; + } + + @Override + public NodeService nodeService() { + assert false; + throw new UnsupportedOperationException("Shouldn't be here"); + } + } + + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testSimpleClusterStatePublishing() throws Exception { + MockNewClusterStateListener mockListenerA = new MockNewClusterStateListener(); + MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerA); + + MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); + MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); + + // Initial cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + + // cluster state update - add nodeB + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(1)); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - remove block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // Adding new node - this node should get full cluster state while nodeB should still be getting diffs + + MockNewClusterStateListener mockListenerC = new MockNewClusterStateListener(); + MockNode nodeC = createMockNode("nodeC", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerC); + + // cluster state update 3 - register node C + previousClusterState = clusterState; + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build(); + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }); + mockListenerC.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + // First state + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update 4 - update settings + previousClusterState = clusterState; + MetaData metaData = MetaData.builder(clusterState.metaData()).transientSettings(ImmutableSettings.settingsBuilder().put("foo", "bar").build()).build(); + clusterState = ClusterState.builder(clusterState).metaData(metaData).incrementVersion().build(); + NewClusterStateExpectation expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - skipping one version change - should request full cluster state + previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - skipping one version change - should request full cluster state + previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // node B becomes the master and sends a version of the cluster state that goes back + discoveryNodes = DiscoveryNodes.builder(discoveryNodes) + .put(nodeA.discoveryNode) + .put(nodeB.discoveryNode) + .put(nodeC.discoveryNode) + .build(); + previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerA.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeB.action, clusterState, previousClusterState); + } + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testUnexpectedDiffPublishing() throws Exception { + + MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + fail("Shouldn't send cluster state to myself"); + } + }); + + MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); + MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); + + // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + } + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testDisablingDiffPublishing() throws Exception { + Settings noDiffPublishingSettings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, false).build(); + + MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + fail("Shouldn't send cluster state to myself"); + } + }); + + MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.uuid(), clusterState.wasReadFromDiff()); + assertFalse(clusterState.wasReadFromDiff()); + newStateProcessed.onNewClusterStateProcessed(); + } + }); + + // Initial cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + + // cluster state update - add nodeB + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + } + + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testSimultaneousClusterStatePublishing() throws Exception { + int numberOfNodes = randomIntBetween(2, 10); + int numberOfIterations = randomIntBetween(50, 200); + Settings settings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "100ms").put(DiscoverySettings.PUBLISH_DIFF_ENABLE, true).build(); + MockNode[] nodes = new MockNode[numberOfNodes]; + DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < nodes.length; i++) { + final String name = "node" + i; + nodes[i] = createMockNode(name, settings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public synchronized void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + assertProperMetaDataForVersion(clusterState.metaData(), clusterState.version()); + if (randomInt(10) < 2) { + // Cause timeouts from time to time + try { + Thread.sleep(randomInt(110)); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + newStateProcessed.onNewClusterStateProcessed(); + } + }); + discoveryNodesBuilder.put(nodes[i].discoveryNode); + } + + AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations]; + DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); + MetaData metaData = MetaData.EMPTY_META_DATA; + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(metaData).build(); + ClusterState previousState; + for (int i = 0; i < numberOfIterations; i++) { + previousState = clusterState; + metaData = buildMetaDataForVersion(metaData, i + 1); + clusterState = ClusterState.builder(clusterState).incrementVersion().metaData(metaData).nodes(discoveryNodes).build(); + listeners[i] = publishStateDiff(nodes[0].action, clusterState, previousState); + } + + for (int i = 0; i < numberOfIterations; i++) { + listeners[i].await(1, TimeUnit.SECONDS); + } + } + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testSerializationFailureDuringDiffPublishing() throws Exception { + + MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + fail("Shouldn't send cluster state to myself"); + } + }); + + MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); + MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); + + // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + } + }); + + ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.uuid(), clusterState) { + @Override + public Diff diff(ClusterState previousState) { + return new Diff() { + @Override + public ClusterState apply(ClusterState part) { + fail("this diff shouldn't be applied"); + return part; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IOException("Simulated failure of diff serialization"); + } + }; + } + }; + List> errors = publishStateDiff(nodeA.action, unserializableClusterState, previousClusterState).awaitErrors(1, TimeUnit.SECONDS); + assertThat(errors.size(), equalTo(1)); + assertThat(errors.get(0).v2().getMessage(), containsString("Simulated failure of diff serialization")); + } + + private MetaData buildMetaDataForVersion(MetaData metaData, long version) { + ImmutableOpenMap.Builder indices = ImmutableOpenMap.builder(metaData.indices()); + indices.put("test" + version, IndexMetaData.builder("test" + version).settings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards((int) version).numberOfReplicas(0).build()); + return MetaData.builder(metaData) + .transientSettings(ImmutableSettings.builder().put("test", version).build()) + .indices(indices.build()) + .build(); + } + + private void assertProperMetaDataForVersion(MetaData metaData, long version) { + for (long i = 1; i <= version; i++) { + assertThat(metaData.index("test" + i), notNullValue()); + assertThat(metaData.index("test" + i).numberOfShards(), equalTo((int) i)); + } + assertThat(metaData.index("test" + (version + 1)), nullValue()); + assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version))); + } + + public void publishStateDiffAndWait(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { + publishStateDiff(action, state, previousState).await(1, TimeUnit.SECONDS); + } + + public AssertingAckListener publishStateDiff(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException { + AssertingAckListener assertingAckListener = new AssertingAckListener(state.nodes().getSize() - 1); + ClusterChangedEvent changedEvent = new ClusterChangedEvent("test update", state, previousState); + action.publish(changedEvent, assertingAckListener); + return assertingAckListener; + } + + public static class AssertingAckListener implements Discovery.AckListener { + private final List> errors = new CopyOnWriteArrayList<>(); + private final AtomicBoolean timeoutOccured = new AtomicBoolean(); + private final CountDownLatch countDown; + + public AssertingAckListener(int nodeCount) { + countDown = new CountDownLatch(nodeCount); + } + + @Override + public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { + if (t != null) { + errors.add(new Tuple<>(node, t)); + } + countDown.countDown(); + } + + @Override + public void onTimeout() { + timeoutOccured.set(true); + // Fast forward the counter - no reason to wait here + long currentCount = countDown.getCount(); + for (long i = 0; i < currentCount; i++) { + countDown.countDown(); + } + } + + public void await(long timeout, TimeUnit unit) throws InterruptedException { + assertThat(awaitErrors(timeout, unit), emptyIterable()); + } + + public List> awaitErrors(long timeout, TimeUnit unit) throws InterruptedException { + countDown.await(timeout, unit); + assertFalse(timeoutOccured.get()); + return errors; + } + + } + + public interface NewClusterStateExpectation { + void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed); + } + + public static class MockNewClusterStateListener implements PublishClusterStateAction.NewClusterStateListener { + CopyOnWriteArrayList expectations = new CopyOnWriteArrayList(); + + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + final NewClusterStateExpectation expectation; + try { + expectation = expectations.remove(0); + } catch (ArrayIndexOutOfBoundsException ex) { + fail("Unexpected cluster state update " + clusterState.prettyPrint()); + return; + } + expectation.check(clusterState, newStateProcessed); + newStateProcessed.onNewClusterStateProcessed(); + } + + public void add(NewClusterStateExpectation expectation) { + expectations.add(expectation); + } + } + + public static class DelegatingClusterState extends ClusterState { + + public DelegatingClusterState(ClusterState clusterState) { + super(clusterState.version(), clusterState.uuid(), clusterState); + } + + + } + +} diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java new file mode 100644 index 00000000000..84df1eaf209 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java @@ -0,0 +1,534 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.*; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.query.FilterBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.warmer.IndexWarmersMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.List; + +import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; +import static org.elasticsearch.common.xcontent.XContentTestUtils.convertToMap; +import static org.elasticsearch.common.xcontent.XContentTestUtils.mapsEqualIgnoringArrayOrder; +import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.hamcrest.Matchers.equalTo; + + +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 0, numClientNodes = 0) +public class ClusterStateDiffTests extends ElasticsearchIntegrationTest { + + @Test + public void testClusterStateDiffSerialization() throws Exception { + DiscoveryNode masterNode = new DiscoveryNode("master", new LocalTransportAddress("master"), Version.CURRENT); + DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"), Version.CURRENT); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.id()).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode); + + int iterationCount = randomIntBetween(10, 300); + for (int iteration = 0; iteration < iterationCount; iteration++) { + ClusterState previousClusterState = clusterState; + ClusterState previousClusterStateFromDiffs = clusterStateFromDiffs; + int changesCount = randomIntBetween(1, 4); + ClusterState.Builder builder = null; + for (int i = 0; i < changesCount; i++) { + if (i > 0) { + clusterState = builder.build(); + } + switch (randomInt(4)) { + case 0: + builder = randomNodes(clusterState); + break; + case 1: + builder = randomRoutingTable(clusterState); + break; + case 2: + builder = randomBlocks(clusterState); + break; + case 3: + case 4: + builder = randomMetaDataChanges(clusterState); + break; + default: + throw new IllegalArgumentException("Shouldn't be here"); + } + } + clusterState = builder.incrementVersion().build(); + + if (randomIntBetween(0, 10) < 1) { + // Update cluster state via full serialization from time to time + clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), previousClusterStateFromDiffs.nodes().localNode()); + } else { + // Update cluster states using diffs + Diff diffBeforeSerialization = clusterState.diff(previousClusterState); + BytesStreamOutput os = new BytesStreamOutput(); + diffBeforeSerialization.writeTo(os); + byte[] diffBytes = os.bytes().toBytes(); + Diff diff; + try (BytesStreamInput input = new BytesStreamInput(diffBytes)) { + diff = previousClusterStateFromDiffs.readDiffFrom(input); + clusterStateFromDiffs = diff.apply(previousClusterStateFromDiffs); + } + } + + + try { + // Check non-diffable elements + assertThat(clusterStateFromDiffs.version(), equalTo(clusterState.version())); + assertThat(clusterStateFromDiffs.uuid(), equalTo(clusterState.uuid())); + + // Check nodes + assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes())); + assertThat(clusterStateFromDiffs.nodes().localNodeId(), equalTo(previousClusterStateFromDiffs.nodes().localNodeId())); + assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes())); + for (ObjectCursor node : clusterStateFromDiffs.nodes().nodes().keys()) { + DiscoveryNode node1 = clusterState.nodes().get(node.value); + DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.value); + assertThat(node1.version(), equalTo(node2.version())); + assertThat(node1.address(), equalTo(node2.address())); + assertThat(node1.attributes(), equalTo(node2.attributes())); + } + + // Check routing table + assertThat(clusterStateFromDiffs.routingTable().version(), equalTo(clusterState.routingTable().version())); + assertThat(clusterStateFromDiffs.routingTable().indicesRouting(), equalTo(clusterState.routingTable().indicesRouting())); + + // Check cluster blocks + assertThat(clusterStateFromDiffs.blocks().global(), equalTo(clusterStateFromDiffs.blocks().global())); + assertThat(clusterStateFromDiffs.blocks().indices(), equalTo(clusterStateFromDiffs.blocks().indices())); + assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), equalTo(clusterStateFromDiffs.blocks().disableStatePersistence())); + + // Check metadata + assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version())); + assertThat(clusterStateFromDiffs.metaData().uuid(), equalTo(clusterState.metaData().uuid())); + assertThat(clusterStateFromDiffs.metaData().transientSettings(), equalTo(clusterState.metaData().transientSettings())); + assertThat(clusterStateFromDiffs.metaData().persistentSettings(), equalTo(clusterState.metaData().persistentSettings())); + assertThat(clusterStateFromDiffs.metaData().indices(), equalTo(clusterState.metaData().indices())); + assertThat(clusterStateFromDiffs.metaData().templates(), equalTo(clusterState.metaData().templates())); + assertThat(clusterStateFromDiffs.metaData().customs(), equalTo(clusterState.metaData().customs())); + assertThat(clusterStateFromDiffs.metaData().aliases(), equalTo(clusterState.metaData().aliases())); + + // JSON Serialization test - make sure that both states produce similar JSON + assertThat(mapsEqualIgnoringArrayOrder(convertToMap(clusterStateFromDiffs), convertToMap(clusterState)), equalTo(true)); + + // Smoke test - we cannot compare bytes to bytes because some elements might get serialized in different order + // however, serialized size should remain the same + assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, equalTo(ClusterState.Builder.toBytes(clusterState).length)); + } catch (AssertionError error) { + logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", clusterState.toString(), clusterStateFromDiffs.toString()); + throw error; + } + } + + logger.info("Final cluster state:[{}]", clusterState.toString()); + + } + + private ClusterState.Builder randomNodes(ClusterState clusterState) { + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); + List nodeIds = randomSubsetOf(randomInt(clusterState.nodes().nodes().size() - 1), clusterState.nodes().nodes().keys().toArray(String.class)); + for (String nodeId : nodeIds) { + if (nodeId.startsWith("node-")) { + if (randomBoolean()) { + nodes.remove(nodeId); + } else { + nodes.put(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random()))); + } + } + } + int additionalNodeCount = randomIntBetween(1, 20); + for (int i = 0; i < additionalNodeCount; i++) { + nodes.put(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random()))); + } + return ClusterState.builder(clusterState).nodes(nodes); + } + + private ClusterState.Builder randomRoutingTable(ClusterState clusterState) { + RoutingTable.Builder builder = RoutingTable.builder(clusterState.routingTable()); + int numberOfIndices = clusterState.routingTable().indicesRouting().size(); + if (numberOfIndices > 0) { + List randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), clusterState.routingTable().indicesRouting().keySet().toArray(new String[numberOfIndices])); + for (String index : randomIndices) { + if (randomBoolean()) { + builder.remove(index); + } else { + builder.add(randomIndexRoutingTable(index, clusterState.nodes().nodes().keys().toArray(String.class))); + } + } + } + int additionalIndexCount = randomIntBetween(1, 20); + for (int i = 0; i < additionalIndexCount; i++) { + builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().nodes().keys().toArray(String.class))); + } + return ClusterState.builder(clusterState).routingTable(builder.build()); + } + + private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds) { + IndexRoutingTable.Builder builder = IndexRoutingTable.builder(index); + int shardCount = randomInt(10); + + for (int i = 0; i < shardCount; i++) { + IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(new ShardId(index, i), randomBoolean()); + int replicaCount = randomIntBetween(1, 10); + for (int j = 0; j < replicaCount; j++) { + indexShard.addShard( + new MutableShardRouting(index, i, randomFrom(nodeIds), j == 0, ShardRoutingState.fromValue((byte) randomIntBetween(1, 4)), 1)); + } + builder.addIndexShard(indexShard.build()); + } + return builder.build(); + } + + private ClusterState.Builder randomBlocks(ClusterState clusterState) { + ClusterBlocks.Builder builder = ClusterBlocks.builder().blocks(clusterState.blocks()); + int globalBlocksCount = clusterState.blocks().global().size(); + if (globalBlocksCount > 0) { + List blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount])); + for (ClusterBlock block : blocks) { + builder.removeGlobalBlock(block); + } + } + int additionalGlobalBlocksCount = randomIntBetween(1, 3); + for (int i = 0; i < additionalGlobalBlocksCount; i++) { + builder.addGlobalBlock(randomGlobalBlock()); + } + return ClusterState.builder(clusterState).blocks(builder); + } + + private ClusterBlock randomGlobalBlock() { + switch (randomInt(2)) { + case 0: + return DiscoverySettings.NO_MASTER_BLOCK_ALL; + case 1: + return DiscoverySettings.NO_MASTER_BLOCK_WRITES; + default: + return GatewayService.STATE_NOT_RECOVERED_BLOCK; + } + } + + private ClusterState.Builder randomMetaDataChanges(ClusterState clusterState) { + MetaData metaData = clusterState.metaData(); + int changesCount = randomIntBetween(1, 10); + for (int i = 0; i < changesCount; i++) { + switch (randomInt(3)) { + case 0: + metaData = randomMetaDataSettings(metaData); + break; + case 1: + metaData = randomIndices(metaData); + break; + case 2: + metaData = randomTemplates(metaData); + break; + case 3: + metaData = randomMetaDataCustoms(metaData); + break; + default: + throw new IllegalArgumentException("Shouldn't be here"); + } + } + return ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).version(metaData.version() + 1).build()); + } + + private Settings randomSettings(Settings settings) { + ImmutableSettings.Builder builder = ImmutableSettings.builder(); + if (randomBoolean()) { + builder.put(settings); + } + int settingsCount = randomInt(10); + for (int i = 0; i < settingsCount; i++) { + builder.put(randomAsciiOfLength(10), randomAsciiOfLength(10)); + } + return builder.build(); + + } + + private MetaData randomMetaDataSettings(MetaData metaData) { + if (randomBoolean()) { + return MetaData.builder(metaData).persistentSettings(randomSettings(metaData.persistentSettings())).build(); + } else { + return MetaData.builder(metaData).transientSettings(randomSettings(metaData.transientSettings())).build(); + } + } + + private interface RandomPart { + /** + * Returns list of parts from metadata + */ + ImmutableOpenMap parts(MetaData metaData); + + /** + * Puts the part back into metadata + */ + MetaData.Builder put(MetaData.Builder builder, T part); + + /** + * Remove the part from metadata + */ + MetaData.Builder remove(MetaData.Builder builder, String name); + + /** + * Returns a random part with the specified name + */ + T randomCreate(String name); + + /** + * Makes random modifications to the part + */ + T randomChange(T part); + + } + + private MetaData randomParts(MetaData metaData, String prefix, RandomPart randomPart) { + MetaData.Builder builder = MetaData.builder(metaData); + ImmutableOpenMap parts = randomPart.parts(metaData); + int partCount = parts.size(); + if (partCount > 0) { + List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metaData).keys().toArray(String.class)); + for (String part : randomParts) { + if (randomBoolean()) { + randomPart.remove(builder, part); + } else { + randomPart.put(builder, randomPart.randomChange(parts.get(part))); + } + } + } + int additionalPartCount = randomIntBetween(1, 20); + for (int i = 0; i < additionalPartCount; i++) { + String name = randomName(prefix); + randomPart.put(builder, randomPart.randomCreate(name)); + } + return builder.build(); + } + + private MetaData randomIndices(MetaData metaData) { + return randomParts(metaData, "index", new RandomPart() { + + @Override + public ImmutableOpenMap parts(MetaData metaData) { + return metaData.indices(); + } + + @Override + public MetaData.Builder put(MetaData.Builder builder, IndexMetaData part) { + return builder.put(part, true); + } + + @Override + public MetaData.Builder remove(MetaData.Builder builder, String name) { + return builder.remove(name); + } + + @Override + public IndexMetaData randomCreate(String name) { + IndexMetaData.Builder builder = IndexMetaData.builder(name); + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder(); + setRandomSettings(getRandom(), settingsBuilder); + settingsBuilder.put(randomSettings(ImmutableSettings.EMPTY)).put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion(random())); + builder.settings(settingsBuilder); + builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10)); + int aliasCount = randomInt(10); + if (randomBoolean()) { + builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); + } + for (int i = 0; i < aliasCount; i++) { + builder.putAlias(randomAlias()); + } + return builder.build(); + } + + @Override + public IndexMetaData randomChange(IndexMetaData part) { + IndexMetaData.Builder builder = IndexMetaData.builder(part); + switch (randomIntBetween(0, 3)) { + case 0: + builder.settings(ImmutableSettings.builder().put(part.settings()).put(randomSettings(ImmutableSettings.EMPTY))); + break; + case 1: + if (randomBoolean() && part.aliases().isEmpty() == false) { + builder.removeAlias(randomFrom(part.aliases().keys().toArray(String.class))); + } else { + builder.putAlias(AliasMetaData.builder(randomAsciiOfLength(10))); + } + break; + case 2: + builder.settings(ImmutableSettings.builder().put(part.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID())); + break; + case 3: + builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); + break; + default: + throw new IllegalArgumentException("Shouldn't be here"); + } + return builder.build(); + } + }); + } + + private IndexWarmersMetaData randomWarmers() { + if (randomBoolean()) { + return new IndexWarmersMetaData( + new IndexWarmersMetaData.Entry( + randomName("warm"), + new String[]{randomName("type")}, + randomBoolean(), + new BytesArray(randomAsciiOfLength(1000))) + ); + } else { + return new IndexWarmersMetaData(); + } + } + + private MetaData randomTemplates(MetaData metaData) { + return randomParts(metaData, "template", new RandomPart() { + @Override + public ImmutableOpenMap parts(MetaData metaData) { + return metaData.templates(); + } + + @Override + public MetaData.Builder put(MetaData.Builder builder, IndexTemplateMetaData part) { + return builder.put(part); + } + + @Override + public MetaData.Builder remove(MetaData.Builder builder, String name) { + return builder.removeTemplate(name); + } + + @Override + public IndexTemplateMetaData randomCreate(String name) { + IndexTemplateMetaData.Builder builder = IndexTemplateMetaData.builder(name); + builder.order(randomInt(1000)) + .template(randomName("temp")) + .settings(randomSettings(ImmutableSettings.EMPTY)); + int aliasCount = randomIntBetween(0, 10); + for (int i = 0; i < aliasCount; i++) { + builder.putAlias(randomAlias()); + } + if (randomBoolean()) { + builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers()); + } + return builder.build(); + } + + @Override + public IndexTemplateMetaData randomChange(IndexTemplateMetaData part) { + IndexTemplateMetaData.Builder builder = new IndexTemplateMetaData.Builder(part); + builder.order(randomInt(1000)); + return builder.build(); + } + }); + } + + private AliasMetaData randomAlias() { + AliasMetaData.Builder builder = newAliasMetaDataBuilder(randomName("alias")); + if (randomBoolean()) { + builder.filter(FilterBuilders.termFilter("test", randomRealisticUnicodeOfCodepointLength(10)).toString()); + } + if (randomBoolean()) { + builder.routing(randomAsciiOfLength(10)); + } + return builder.build(); + } + + private MetaData randomMetaDataCustoms(final MetaData metaData) { + return randomParts(metaData, "custom", new RandomPart() { + + @Override + public ImmutableOpenMap parts(MetaData metaData) { + return metaData.customs(); + } + + @Override + public MetaData.Builder put(MetaData.Builder builder, MetaData.Custom part) { + if (part instanceof SnapshotMetaData) { + return builder.putCustom(SnapshotMetaData.TYPE, part); + } else if (part instanceof RepositoriesMetaData) { + return builder.putCustom(RepositoriesMetaData.TYPE, part); + } else if (part instanceof RestoreMetaData) { + return builder.putCustom(RestoreMetaData.TYPE, part); + } + throw new IllegalArgumentException("Unknown custom part " + part); + } + + @Override + public MetaData.Builder remove(MetaData.Builder builder, String name) { + return builder.removeCustom(name); + } + + @Override + public MetaData.Custom randomCreate(String name) { + switch (randomIntBetween(0, 2)) { + case 0: + return new SnapshotMetaData(new SnapshotMetaData.Entry( + new SnapshotId(randomName("repo"), randomName("snap")), + randomBoolean(), + SnapshotMetaData.State.fromValue((byte) randomIntBetween(0, 6)), + ImmutableList.of(), + Math.abs(randomLong()), + ImmutableMap.of())); + case 1: + return new RepositoriesMetaData(); + case 2: + return new RestoreMetaData(new RestoreMetaData.Entry( + new SnapshotId(randomName("repo"), randomName("snap")), + RestoreMetaData.State.fromValue((byte) randomIntBetween(0, 3)), + ImmutableList.of(), + ImmutableMap.of())); + default: + throw new IllegalArgumentException("Shouldn't be here"); + } + } + + @Override + public MetaData.Custom randomChange(MetaData.Custom part) { + return part; + } + }); + } + + private String randomName(String prefix) { + return prefix + Strings.randomBase64UUID(getRandom()); + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index cbbff463f20..83a27850591 100644 --- a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -81,7 +81,7 @@ public class ClusterSerializationTests extends ElasticsearchAllocationTestCase { RoutingTable source = strategy.reroute(clusterState).routingTable(); BytesStreamOutput outStream = new BytesStreamOutput(); - RoutingTable.Builder.writeTo(source, outStream); + source.writeTo(outStream); BytesStreamInput inStream = new BytesStreamInput(outStream.bytes().toBytes()); RoutingTable target = RoutingTable.Builder.readFrom(inStream); diff --git a/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java b/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java new file mode 100644 index 00000000000..d87d900a0e8 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.serialization; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.*; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.io.IOException; +import java.util.Map; + +import static com.google.common.collect.Maps.newHashMap; +import static org.hamcrest.CoreMatchers.equalTo; + +public class DiffableTests extends ElasticsearchTestCase { + + @Test + public void testImmutableMapDiff() throws IOException { + ImmutableMap.Builder builder = ImmutableMap.builder(); + builder.put("foo", new TestDiffable("1")); + builder.put("bar", new TestDiffable("2")); + builder.put("baz", new TestDiffable("3")); + ImmutableMap before = builder.build(); + Map map = newHashMap(); + map.putAll(before); + map.remove("bar"); + map.put("baz", new TestDiffable("4")); + map.put("new", new TestDiffable("5")); + ImmutableMap after = ImmutableMap.copyOf(map); + Diff diff = DiffableUtils.diff(before, after); + BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + BytesStreamInput in = new BytesStreamInput(out.bytes()); + ImmutableMap serialized = DiffableUtils.readImmutableMapDiff(in, TestDiffable.PROTO).apply(before); + assertThat(serialized.size(), equalTo(3)); + assertThat(serialized.get("foo").value(), equalTo("1")); + assertThat(serialized.get("baz").value(), equalTo("4")); + assertThat(serialized.get("new").value(), equalTo("5")); + } + + @Test + public void testImmutableOpenMapDiff() throws IOException { + ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); + builder.put("foo", new TestDiffable("1")); + builder.put("bar", new TestDiffable("2")); + builder.put("baz", new TestDiffable("3")); + ImmutableOpenMap before = builder.build(); + builder = ImmutableOpenMap.builder(before); + builder.remove("bar"); + builder.put("baz", new TestDiffable("4")); + builder.put("new", new TestDiffable("5")); + ImmutableOpenMap after = builder.build(); + Diff diff = DiffableUtils.diff(before, after); + BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + BytesStreamInput in = new BytesStreamInput(out.bytes()); + ImmutableOpenMap serialized = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader() { + @Override + public TestDiffable readFrom(StreamInput in, String key) throws IOException { + return new TestDiffable(in.readString()); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return AbstractDiffable.readDiffFrom(new StreamableReader() { + @Override + public TestDiffable readFrom(StreamInput in) throws IOException { + return new TestDiffable(in.readString()); + } + }, in); + } + }).apply(before); + assertThat(serialized.size(), equalTo(3)); + assertThat(serialized.get("foo").value(), equalTo("1")); + assertThat(serialized.get("baz").value(), equalTo("4")); + assertThat(serialized.get("new").value(), equalTo("5")); + + } + public static class TestDiffable extends AbstractDiffable { + + public static final TestDiffable PROTO = new TestDiffable(""); + + private final String value; + + public TestDiffable(String value) { + this.value = value; + } + + public String value() { + return value; + } + + @Override + public TestDiffable readFrom(StreamInput in) throws IOException { + return new TestDiffable(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + } + +} diff --git a/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java b/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java new file mode 100644 index 00000000000..9ebffe58783 --- /dev/null +++ b/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; + +public final class XContentTestUtils { + private XContentTestUtils() { + + } + + public static Map convertToMap(ToXContent part) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + part.toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return XContentHelper.convertToMap(builder.bytes(), false).v2(); + } + + + /** + * Compares to maps generated from XContentObjects. The order of elements in arrays is ignored + */ + public static boolean mapsEqualIgnoringArrayOrder(Map first, Map second) { + if (first.size() != second.size()) { + return false; + } + + for (String key : first.keySet()) { + if (objectsEqualIgnoringArrayOrder(first.get(key), second.get(key)) == false) { + return false; + } + } + return true; + } + + @SuppressWarnings("unchecked") + private static boolean objectsEqualIgnoringArrayOrder(Object first, Object second) { + if (first == null ) { + return second == null; + } else if (first instanceof List) { + if (second instanceof List) { + List secondList = Lists.newArrayList((List) second); + List firstList = (List) first; + if (firstList.size() == secondList.size()) { + for (Object firstObj : firstList) { + boolean found = false; + for (Object secondObj : secondList) { + if (objectsEqualIgnoringArrayOrder(firstObj, secondObj)) { + secondList.remove(secondObj); + found = true; + break; + } + } + if (found == false) { + return false; + } + } + return secondList.isEmpty(); + } else { + return false; + } + } else { + return false; + } + } else if (first instanceof Map) { + if (second instanceof Map) { + return mapsEqualIgnoringArrayOrder((Map) first, (Map) second); + } else { + return false; + } + } else { + return first.equals(second); + } + } + +} diff --git a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java index f265869ec75..f1e7a249c59 100644 --- a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Before; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java index 58e177b1115..228faa8cf4d 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java @@ -32,9 +32,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -196,12 +193,7 @@ public class ZenDiscoveryTests extends ElasticsearchIntegrationTest { .put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), Version.CURRENT)).masterNodeId("abc"); ClusterState.Builder builder = ClusterState.builder(state); builder.nodes(nodes); - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); - stream.setVersion(node.version()); - ClusterState.Builder.writeTo(builder.build(), stream); - stream.close(); - BytesReference bytes = bStream.bytes(); + BytesReference bytes = PublishClusterStateAction.serializeFullClusterState(builder.build(), node.version()); final CountDownLatch latch = new CountDownLatch(1); final AtomicReference reference = new AtomicReference<>(); diff --git a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index c5adf8cb50e..c97fa5b789d 100644 --- a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -443,11 +443,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -460,11 +460,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -477,11 +477,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } diff --git a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java index 386c778b07e..9e05d915803 100644 --- a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.SlowClusterStateProcessing; import org.junit.Test; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; @@ -68,6 +69,12 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { .build(); } + @Override + protected void ensureClusterStateConsistency() throws IOException { + // testShardActiveElseWhere might change the state of a non-master node + // so we cannot check state consistency of this cluster + } + @Test public void indexCleanup() throws Exception { final String masterNode = internalCluster().startNode(ImmutableSettings.builder().put("node.data", false)); diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index ce96576ce15..bd664694c9f 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -284,6 +284,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/8802") public void testBrokenMapping() throws Exception { // clean all templates setup by the framework. client().admin().indices().prepareDeleteTemplate("*").get(); diff --git a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java index ff8264fdc03..8d569275aea 100644 --- a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java @@ -38,7 +38,9 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.stream.StreamInput; @@ -748,7 +750,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests )); } - public static abstract class TestCustomMetaData implements MetaData.Custom { + public static abstract class TestCustomMetaData extends AbstractDiffable implements MetaData.Custom { private final String data; protected TestCustomMetaData(String data) { @@ -776,194 +778,182 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests return data.hashCode(); } - public static abstract class TestCustomMetaDataFactory extends MetaData.Custom.Factory { + protected abstract TestCustomMetaData newTestCustomMetaData(String data); - protected abstract TestCustomMetaData newTestCustomMetaData(String data); + @Override + public Custom readFrom(StreamInput in) throws IOException { + return newTestCustomMetaData(in.readString()); + } - @Override - public T readFrom(StreamInput in) throws IOException { - return (T) newTestCustomMetaData(in.readString()); - } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(getData()); + } - @Override - public void writeTo(T metadata, StreamOutput out) throws IOException { - out.writeString(metadata.getData()); - } - - @Override - public T fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - String data = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("data".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); - } - data = parser.text(); - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]"); + @Override + public Custom fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + String data = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("data".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); } + data = parser.text(); } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata"); + throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]"); } + } else { + throw new ElasticsearchParseException("failed to parse snapshottable metadata"); } - if (data == null) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); - } - return (T) newTestCustomMetaData(data); } + if (data == null) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); + } + return newTestCustomMetaData(data); + } - @Override - public void toXContent(T metadata, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.field("data", metadata.getData()); - } + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.field("data", getData()); + return builder; } } + static { - MetaData.registerFactory(SnapshottableMetadata.TYPE, SnapshottableMetadata.FACTORY); - MetaData.registerFactory(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.FACTORY); - MetaData.registerFactory(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.FACTORY); - MetaData.registerFactory(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.FACTORY); - MetaData.registerFactory(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.FACTORY); + MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO); + MetaData.registerPrototype(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.PROTO); + MetaData.registerPrototype(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.PROTO); + MetaData.registerPrototype(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.PROTO); + MetaData.registerPrototype(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.PROTO); } public static class SnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable"; - public static final Factory FACTORY = new Factory(); + public static final SnapshottableMetadata PROTO = new SnapshottableMetadata(""); public SnapshottableMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new SnapshottableMetadata(data); + } - @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new SnapshottableMetadata(data); - } - - @Override - public EnumSet context() { - return MetaData.API_AND_SNAPSHOT; - } + @Override + public EnumSet context() { + return MetaData.API_AND_SNAPSHOT; } } public static class NonSnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_non_snapshottable"; - public static final Factory FACTORY = new Factory(); + public static final NonSnapshottableMetadata PROTO = new NonSnapshottableMetadata(""); public NonSnapshottableMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected NonSnapshottableMetadata newTestCustomMetaData(String data) { + return new NonSnapshottableMetadata(data); + } - @Override - protected NonSnapshottableMetadata newTestCustomMetaData(String data) { - return new NonSnapshottableMetadata(data); - } + @Override + public EnumSet context() { + return MetaData.API_ONLY; } } public static class SnapshottableGatewayMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable_gateway"; - public static final Factory FACTORY = new Factory(); + public static final SnapshottableGatewayMetadata PROTO = new SnapshottableGatewayMetadata(""); public SnapshottableGatewayMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new SnapshottableGatewayMetadata(data); + } - @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new SnapshottableGatewayMetadata(data); - } - - @Override - public EnumSet context() { - return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY); - } + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY); } } public static class NonSnapshottableGatewayMetadata extends TestCustomMetaData { public static final String TYPE = "test_non_snapshottable_gateway"; - public static final Factory FACTORY = new Factory(); + public static final NonSnapshottableGatewayMetadata PROTO = new NonSnapshottableGatewayMetadata(""); public NonSnapshottableGatewayMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { - - @Override - public String type() { - return TYPE; - } - - @Override - protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) { - return new NonSnapshottableGatewayMetadata(data); - } - - @Override - public EnumSet context() { - return MetaData.API_AND_GATEWAY; - } - + @Override + public String type() { + return TYPE; } + + @Override + protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) { + return new NonSnapshottableGatewayMetadata(data); + } + + @Override + public EnumSet context() { + return MetaData.API_AND_GATEWAY; + } + } public static class SnapshotableGatewayNoApiMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable_gateway_no_api"; - public static final Factory FACTORY = new Factory(); + public static final SnapshotableGatewayNoApiMetadata PROTO = new SnapshotableGatewayNoApiMetadata(""); public SnapshotableGatewayNoApiMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } - - @Override - protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) { - return new SnapshotableGatewayNoApiMetadata(data); - } - - @Override - public EnumSet context() { - return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); - } + @Override + protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) { + return new SnapshotableGatewayNoApiMetadata(data); + } + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); } } diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 9e68d16caa0..f30a47755ed 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -166,6 +166,8 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; +import static org.elasticsearch.common.xcontent.XContentTestUtils.convertToMap; +import static org.elasticsearch.common.xcontent.XContentTestUtils.mapsEqualIgnoringArrayOrder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -357,7 +359,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase * Creates a randomized index template. This template is used to pass in randomized settings on a * per index basis. Allows to enable/disable the randomization for number of shards and replicas */ - private void randomIndexTemplate() throws IOException { + public void randomIndexTemplate() throws IOException { // TODO move settings for random directory etc here into the index based randomized settings. if (cluster().size() > 0) { @@ -647,6 +649,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase .transientSettings().getAsMap().size(), equalTo(0)); } ensureClusterSizeConsistency(); + ensureClusterStateConsistency(); cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete if (afterClass || currentClusterScope == Scope.TEST) { cluster().close(); @@ -1085,8 +1088,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase */ public void setMinimumMasterNodes(int n) { assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings( - settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) - .get().isAcknowledged()); + settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) + .get().isAcknowledged()); } /** @@ -1133,6 +1136,35 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } } + /** + * Verifies that all nodes that have the same version of the cluster state as master have same cluster state + */ + protected void ensureClusterStateConsistency() throws IOException { + if (cluster() != null) { + ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + Map masterStateMap = convertToMap(masterClusterState); + int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; + for (Client client : cluster()) { + ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); + if (masterClusterState.version() == localClusterState.version()) { + try { + assertThat(masterClusterState.uuid(), equalTo(localClusterState.uuid())); + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can compare serialization sizes - they should be the same + int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; + assertThat(masterClusterStateSize, equalTo(localClusterStateSize)); + + // Compare JSON serialization + assertThat(mapsEqualIgnoringArrayOrder(masterStateMap, convertToMap(localClusterState)), equalTo(true)); + } catch (AssertionError error) { + logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); + throw error; + } + } + } + } + } + /** * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each * shard is available on the cluster. diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 1276089b182..0f71b7239e0 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -71,6 +71,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import static com.google.common.collect.Lists.newArrayList; /** * Base testcase for randomized unit testing with Elasticsearch @@ -595,4 +596,17 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { return threadGroup.getName(); } } + + /** + * Returns size random values + */ + public static List randomSubsetOf(int size, T... values) { + if (size > values.length) { + throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects"); + } + List list = newArrayList(values); + Collections.shuffle(list); + return list.subList(0, size); + } + } From 58eed45ee58bf12b01a1f05719806c82d719040d Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 27 Apr 2015 10:23:18 +0200 Subject: [PATCH 210/236] [TEST] Move XContentTestUtils.java into o.e.test folder Classes referenced by the Test base classes must be under this package otherwise the test jar can't be used in a 3rd party application. --- .../org/elasticsearch/cluster/ClusterStateDiffTests.java | 4 ++-- .../elasticsearch/test/ElasticsearchIntegrationTest.java | 4 ++-- .../{common/xcontent => test}/XContentTestUtils.java | 6 +++++- 3 files changed, 9 insertions(+), 5 deletions(-) rename src/test/java/org/elasticsearch/{common/xcontent => test}/XContentTestUtils.java (93%) diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java index 84df1eaf209..b49b7586dc3 100644 --- a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java +++ b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java @@ -48,8 +48,8 @@ import org.junit.Test; import java.util.List; import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; -import static org.elasticsearch.common.xcontent.XContentTestUtils.convertToMap; -import static org.elasticsearch.common.xcontent.XContentTestUtils.mapsEqualIgnoringArrayOrder; +import static org.elasticsearch.test.XContentTestUtils.convertToMap; +import static org.elasticsearch.test.XContentTestUtils.mapsEqualIgnoringArrayOrder; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.Matchers.equalTo; diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index f30a47755ed..2550bf8a417 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -166,8 +166,8 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; -import static org.elasticsearch.common.xcontent.XContentTestUtils.convertToMap; -import static org.elasticsearch.common.xcontent.XContentTestUtils.mapsEqualIgnoringArrayOrder; +import static org.elasticsearch.test.XContentTestUtils.convertToMap; +import static org.elasticsearch.test.XContentTestUtils.mapsEqualIgnoringArrayOrder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; diff --git a/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java b/src/test/java/org/elasticsearch/test/XContentTestUtils.java similarity index 93% rename from src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java rename to src/test/java/org/elasticsearch/test/XContentTestUtils.java index 9ebffe58783..1f1b8eff710 100644 --- a/src/test/java/org/elasticsearch/common/xcontent/XContentTestUtils.java +++ b/src/test/java/org/elasticsearch/test/XContentTestUtils.java @@ -17,9 +17,13 @@ * under the License. */ -package org.elasticsearch.common.xcontent; +package org.elasticsearch.test; import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import java.io.IOException; import java.util.List; From 9828e955f3537e09955551e7fd23c6b243ec9c7a Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 27 Apr 2015 14:23:42 +0200 Subject: [PATCH 211/236] [TEST] enable host name resolving to gain consistent transport addresses in clusterstate --- src/test/java/org/elasticsearch/test/InternalTestCluster.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 5d2d00c4870..cc487f30258 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -295,6 +295,7 @@ public final class InternalTestCluster extends TestCluster { builder.put("http.port", BASE_PORT+101 + "-" + (BASE_PORT+200)); builder.put("config.ignore_system_properties", true); builder.put("node.mode", NODE_MODE); + builder.put("network.address.serialization.resolve", true); // this makes adresses in the clusterstate consistent builder.put("http.pipelining", enableHttpPipelining); builder.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false); builder.put(NodeEnvironment.SETTING_CUSTOM_DATA_PATH_ENABLED, true); From 38be1e8a1a8fe870538487285cabd5e5f6c88e53 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Mon, 27 Apr 2015 08:47:09 -0400 Subject: [PATCH 212/236] Test: remove reference to the local node before comparing cluster states in ensureClusterStateConsistency --- .../test/ElasticsearchIntegrationTest.java | 12 +++++++++--- .../org/elasticsearch/test/InternalTestCluster.java | 1 - 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 2550bf8a417..0a5208f6763 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -1142,20 +1142,26 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase protected void ensureClusterStateConsistency() throws IOException { if (cluster() != null) { ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); + // remove local node reference + masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null); Map masterStateMap = convertToMap(masterClusterState); int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; for (Client client : cluster()) { ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); + byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); + // remove local node reference + localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null); + Map localStateMap = convertToMap(localClusterState); + int localClusterStateSize = localClusterStateBytes.length; if (masterClusterState.version() == localClusterState.version()) { try { assertThat(masterClusterState.uuid(), equalTo(localClusterState.uuid())); // We cannot compare serialization bytes since serialization order of maps is not guaranteed // but we can compare serialization sizes - they should be the same - int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; assertThat(masterClusterStateSize, equalTo(localClusterStateSize)); - // Compare JSON serialization - assertThat(mapsEqualIgnoringArrayOrder(masterStateMap, convertToMap(localClusterState)), equalTo(true)); + assertThat(mapsEqualIgnoringArrayOrder(masterStateMap, localStateMap), equalTo(true)); } catch (AssertionError error) { logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); throw error; diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index cc487f30258..5d2d00c4870 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -295,7 +295,6 @@ public final class InternalTestCluster extends TestCluster { builder.put("http.port", BASE_PORT+101 + "-" + (BASE_PORT+200)); builder.put("config.ignore_system_properties", true); builder.put("node.mode", NODE_MODE); - builder.put("network.address.serialization.resolve", true); // this makes adresses in the clusterstate consistent builder.put("http.pipelining", enableHttpPipelining); builder.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false); builder.put(NodeEnvironment.SETTING_CUSTOM_DATA_PATH_ENABLED, true); From bac135261cf4c0a04e4d55f1062a94b2179153b3 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Tue, 28 Apr 2015 12:18:43 -0400 Subject: [PATCH 213/236] Test: make sure that tests are not affected by changing in address resolution settings --- .../transport/InetSocketTransportAddress.java | 4 ++ .../test/ElasticsearchIntegrationTest.java | 53 +++++++++++-------- 2 files changed, 34 insertions(+), 23 deletions(-) diff --git a/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java b/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java index 1bc519435de..bfa4233d917 100644 --- a/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java +++ b/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java @@ -38,6 +38,10 @@ public class InetSocketTransportAddress implements TransportAddress { InetSocketTransportAddress.resolveAddress = resolveAddress; } + public static boolean getResolveAddress() { + return resolveAddress; + } + private InetSocketAddress address; InetSocketTransportAddress() { diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 0a5208f6763..f160e4cb653 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -1141,34 +1141,41 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase */ protected void ensureClusterStateConsistency() throws IOException { if (cluster() != null) { - ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); - byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); - // remove local node reference - masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null); - Map masterStateMap = convertToMap(masterClusterState); - int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; - for (Client client : cluster()) { - ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); - byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); + boolean getResolvedAddress = InetSocketTransportAddress.getResolveAddress(); + try { + InetSocketTransportAddress.setResolveAddress(false); + ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); // remove local node reference - localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null); - Map localStateMap = convertToMap(localClusterState); - int localClusterStateSize = localClusterStateBytes.length; - if (masterClusterState.version() == localClusterState.version()) { - try { - assertThat(masterClusterState.uuid(), equalTo(localClusterState.uuid())); - // We cannot compare serialization bytes since serialization order of maps is not guaranteed - // but we can compare serialization sizes - they should be the same - assertThat(masterClusterStateSize, equalTo(localClusterStateSize)); - // Compare JSON serialization - assertThat(mapsEqualIgnoringArrayOrder(masterStateMap, localStateMap), equalTo(true)); - } catch (AssertionError error) { - logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); - throw error; + masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null); + Map masterStateMap = convertToMap(masterClusterState); + int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; + for (Client client : cluster()) { + ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); + byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); + // remove local node reference + localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null); + Map localStateMap = convertToMap(localClusterState); + int localClusterStateSize = localClusterStateBytes.length; + if (masterClusterState.version() == localClusterState.version()) { + try { + assertThat(masterClusterState.uuid(), equalTo(localClusterState.uuid())); + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can compare serialization sizes - they should be the same + assertThat(masterClusterStateSize, equalTo(localClusterStateSize)); + // Compare JSON serialization + assertThat(mapsEqualIgnoringArrayOrder(masterStateMap, localStateMap), equalTo(true)); + } catch (AssertionError error) { + logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); + throw error; + } } } + } finally { + InetSocketTransportAddress.setResolveAddress(getResolvedAddress); } } + } /** From 8e5543dea05124f99552912c1c445add2e122dde Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Tue, 28 Apr 2015 15:59:57 -0400 Subject: [PATCH 214/236] Test: ignore cluster state differences on the nodes that disconnected from the master --- .../org/elasticsearch/test/ElasticsearchIntegrationTest.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index f160e4cb653..187afe1b658 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -1150,6 +1150,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null); Map masterStateMap = convertToMap(masterClusterState); int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; + String masterId = masterClusterState.nodes().masterNodeId(); for (Client client : cluster()) { ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); @@ -1157,7 +1158,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null); Map localStateMap = convertToMap(localClusterState); int localClusterStateSize = localClusterStateBytes.length; - if (masterClusterState.version() == localClusterState.version()) { + // Check that the non-master node has the same version of the cluster state as the master and that this node didn't disconnect from the master + if (masterClusterState.version() == localClusterState.version() && localClusterState.nodes().nodes().containsKey(masterId)) { try { assertThat(masterClusterState.uuid(), equalTo(localClusterState.uuid())); // We cannot compare serialization bytes since serialization order of maps is not guaranteed From 351a4d3315e66989dce9b47f10e3057db4eb72df Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 29 Apr 2015 13:33:54 -0400 Subject: [PATCH 215/236] [DOCS] Fix movavg images and naming --- .../reducers_movavg}/double_0.2beta.png | Bin .../reducers_movavg}/double_0.7beta.png | Bin .../double_prediction_global.png | Bin .../double_prediction_local.png | Bin .../reducers_movavg}/linear_100window.png | Bin .../reducers_movavg}/linear_10window.png | Bin .../reducers_movavg}/movavg_100window.png | Bin .../reducers_movavg}/movavg_10window.png | Bin .../reducers_movavg}/simple_prediction.png | Bin .../reducers_movavg}/single_0.2alpha.png | Bin .../reducers_movavg}/single_0.7alpha.png | Bin .../search/aggregations/reducer.asciidoc | 4 ++-- ...r.asciidoc => movavg-aggregation.asciidoc} | 22 +++++++++--------- 13 files changed, 13 insertions(+), 13 deletions(-) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/double_0.2beta.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/double_0.7beta.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/double_prediction_global.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/double_prediction_local.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/linear_100window.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/linear_10window.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/movavg_100window.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/movavg_10window.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/simple_prediction.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/single_0.2alpha.png (100%) rename docs/reference/{search/aggregations/reducer/images => images/reducers_movavg}/single_0.7alpha.png (100%) rename docs/reference/search/aggregations/reducer/{movavg-reducer.asciidoc => movavg-aggregation.asciidoc} (95%) diff --git a/docs/reference/search/aggregations/reducer/images/double_0.2beta.png b/docs/reference/images/reducers_movavg/double_0.2beta.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/double_0.2beta.png rename to docs/reference/images/reducers_movavg/double_0.2beta.png diff --git a/docs/reference/search/aggregations/reducer/images/double_0.7beta.png b/docs/reference/images/reducers_movavg/double_0.7beta.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/double_0.7beta.png rename to docs/reference/images/reducers_movavg/double_0.7beta.png diff --git a/docs/reference/search/aggregations/reducer/images/double_prediction_global.png b/docs/reference/images/reducers_movavg/double_prediction_global.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/double_prediction_global.png rename to docs/reference/images/reducers_movavg/double_prediction_global.png diff --git a/docs/reference/search/aggregations/reducer/images/double_prediction_local.png b/docs/reference/images/reducers_movavg/double_prediction_local.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/double_prediction_local.png rename to docs/reference/images/reducers_movavg/double_prediction_local.png diff --git a/docs/reference/search/aggregations/reducer/images/linear_100window.png b/docs/reference/images/reducers_movavg/linear_100window.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/linear_100window.png rename to docs/reference/images/reducers_movavg/linear_100window.png diff --git a/docs/reference/search/aggregations/reducer/images/linear_10window.png b/docs/reference/images/reducers_movavg/linear_10window.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/linear_10window.png rename to docs/reference/images/reducers_movavg/linear_10window.png diff --git a/docs/reference/search/aggregations/reducer/images/movavg_100window.png b/docs/reference/images/reducers_movavg/movavg_100window.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/movavg_100window.png rename to docs/reference/images/reducers_movavg/movavg_100window.png diff --git a/docs/reference/search/aggregations/reducer/images/movavg_10window.png b/docs/reference/images/reducers_movavg/movavg_10window.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/movavg_10window.png rename to docs/reference/images/reducers_movavg/movavg_10window.png diff --git a/docs/reference/search/aggregations/reducer/images/simple_prediction.png b/docs/reference/images/reducers_movavg/simple_prediction.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/simple_prediction.png rename to docs/reference/images/reducers_movavg/simple_prediction.png diff --git a/docs/reference/search/aggregations/reducer/images/single_0.2alpha.png b/docs/reference/images/reducers_movavg/single_0.2alpha.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/single_0.2alpha.png rename to docs/reference/images/reducers_movavg/single_0.2alpha.png diff --git a/docs/reference/search/aggregations/reducer/images/single_0.7alpha.png b/docs/reference/images/reducers_movavg/single_0.7alpha.png similarity index 100% rename from docs/reference/search/aggregations/reducer/images/single_0.7alpha.png rename to docs/reference/images/reducers_movavg/single_0.7alpha.png diff --git a/docs/reference/search/aggregations/reducer.asciidoc b/docs/reference/search/aggregations/reducer.asciidoc index d460fd5e450..11b0826e9eb 100644 --- a/docs/reference/search/aggregations/reducer.asciidoc +++ b/docs/reference/search/aggregations/reducer.asciidoc @@ -1,5 +1,5 @@ [[search-aggregations-reducer]] -include::reducer/derivative.asciidoc[] +include::reducer/derivative-aggregation.asciidoc[] include::reducer/max-bucket-aggregation.asciidoc[] -include::reducer/movavg-reducer.asciidoc[] +include::reducer/movavg-aggregation.asciidoc[] diff --git a/docs/reference/search/aggregations/reducer/movavg-reducer.asciidoc b/docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc similarity index 95% rename from docs/reference/search/aggregations/reducer/movavg-reducer.asciidoc rename to docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc index a01141f0fec..9b2f89ca43e 100644 --- a/docs/reference/search/aggregations/reducer/movavg-reducer.asciidoc +++ b/docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc @@ -132,14 +132,14 @@ track the data and only smooth out small scale fluctuations: [[movavg_10window]] .Moving average with window of size 10 -image::images/movavg_10window.png[] +image::images/reducers_movavg/movavg_10window.png[] In contrast, a `simple` moving average with larger window (`"window": 100`) will smooth out all higher-frequency fluctuations, leaving only low-frequency, long term trends. It also tends to "lag" behind the actual data by a substantial amount: [[movavg_100window]] .Moving average with window of size 100 -image::images/movavg_100window.png[] +image::images/reducers_movavg/movavg_100window.png[] ==== Linear @@ -166,7 +166,7 @@ will closely track the data and only smooth out small scale fluctuations: [[linear_10window]] .Linear moving average with window of size 10 -image::images/linear_10window.png[] +image::images/reducers_movavg/linear_10window.png[] In contrast, a `linear` moving average with larger window (`"window": 100`) will smooth out all higher-frequency fluctuations, leaving only low-frequency, long term trends. It also tends to "lag" behind the actual data by a substantial amount, @@ -174,7 +174,7 @@ although typically less than the `simple` model: [[linear_100window]] .Linear moving average with window of size 100 -image::images/linear_100window.png[] +image::images/reducers_movavg/linear_100window.png[] ==== Single Exponential @@ -204,11 +204,11 @@ The default value of `alpha` is `0.5`, and the setting accepts any float from 0- [[single_0.2alpha]] .Single Exponential moving average with window of size 10, alpha = 0.2 -image::images/single_0.2alpha.png[] +image::images/reducers_movavg/single_0.2alpha.png[] [[single_0.7alpha]] .Single Exponential moving average with window of size 10, alpha = 0.7 -image::images/single_0.7alpha.png[] +image::images/reducers_movavg/single_0.7alpha.png[] ==== Double Exponential @@ -247,11 +247,11 @@ values emphasize short-term trends. This will become more apparently when you a [[double_0.2beta]] .Double Exponential moving average with window of size 100, alpha = 0.5, beta = 0.2 -image::images/double_0.2beta.png[] +image::images/reducers_movavg/double_0.2beta.png[] [[double_0.7beta]] .Double Exponential moving average with window of size 100, alpha = 0.5, beta = 0.7 -image::images/double_0.7beta.png[] +image::images/reducers_movavg/double_0.7beta.png[] === Prediction @@ -279,7 +279,7 @@ of the last value in the series, producing a flat: [[simple_prediction]] .Simple moving average with window of size 10, predict = 50 -image::images/simple_prediction.png[] +image::images/reducers_movavg/simple_prediction.png[] In contrast, the `double_exp` model can extrapolate based on local or global constant trends. If we set a high `beta` value, we can extrapolate based on local constant trends (in this case the predictions head down, because the data at the end @@ -287,11 +287,11 @@ of the series was heading in a downward direction): [[double_prediction_local]] .Double Exponential moving average with window of size 100, predict = 20, alpha = 0.5, beta = 0.8 -image::images/double_prediction_local.png[] +image::images/reducers_movavg/double_prediction_local.png[] In contrast, if we choose a small `beta`, the predictions are based on the global constant trend. In this series, the global trend is slightly positive, so the prediction makes a sharp u-turn and begins a positive slope: [[double_prediction_global]] .Double Exponential moving average with window of size 100, predict = 20, alpha = 0.5, beta = 0.1 -image::images/double_prediction_global.png[] \ No newline at end of file +image::images/reducers_movavg/double_prediction_global.png[] \ No newline at end of file From 79a1c38ed624ad8253688ebd064b56f00bf8f425 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 29 Apr 2015 23:12:16 -0700 Subject: [PATCH 216/236] Mappings: Fix _field_names to not have doc values When doc values were turned on a by default, most meta fields had it explicitly disabled. However, _field_names was missed. This change forces doc values to be off always for _field_names and removes the unnecessary support when creating index fields. closes #10892 --- .../mapper/internal/FieldNamesFieldMapper.java | 5 +---- .../internal/FieldNamesFieldMapperTests.java | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 04e715f9942..2026579dc2f 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -139,7 +139,7 @@ public class FieldNamesFieldMapper extends AbstractFieldMapper implement } public FieldNamesFieldMapper(String name, String indexName, float boost, FieldType fieldType, EnabledAttributeMapper enabledState, @Nullable Settings fieldDataSettings, Settings indexSettings) { - super(new Names(name, indexName, indexName, name), boost, fieldType, null, Lucene.KEYWORD_ANALYZER, + super(new Names(name, indexName, indexName, name), boost, fieldType, false, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, null, null, fieldDataSettings, indexSettings); this.defaultFieldType = Defaults.FIELD_TYPE; this.pre13Index = Version.indexCreated(indexSettings).before(Version.V_1_3_0); @@ -240,9 +240,6 @@ public class FieldNamesFieldMapper extends AbstractFieldMapper implement if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { document.add(new Field(names().indexName(), fieldName, fieldType)); } - if (hasDocValues()) { - document.add(new SortedSetDocValuesField(names().indexName(), new BytesRef(fieldName))); - } } } } diff --git a/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java index dfd8705612e..fdf9d00938b 100644 --- a/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper.internal; +import org.apache.lucene.index.IndexOptions; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; @@ -62,6 +63,20 @@ public class FieldNamesFieldMapperTests extends ElasticsearchSingleNodeTest { assertEquals(set("", ".", ".."), extract("..")); } + public void testFieldType() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("_field_names").field("store", "yes").endObject() + .endObject().endObject().string(); + + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + FieldNamesFieldMapper fieldNamesMapper = docMapper.rootMapper(FieldNamesFieldMapper.class); + assertFalse(fieldNamesMapper.hasDocValues()); + assertEquals(IndexOptions.DOCS, fieldNamesMapper.fieldType().indexOptions()); + assertFalse(fieldNamesMapper.fieldType().tokenized()); + assertFalse(fieldNamesMapper.fieldType().stored()); + assertTrue(fieldNamesMapper.fieldType().omitNorms()); + } + public void testInjectIntoDocDuringParsing() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); From 2fd387d3784a40963b337303f9e814762f475aa1 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 29 Apr 2015 23:33:43 -0700 Subject: [PATCH 217/236] fix dumb test copy/paste mistake --- .../index/mapper/internal/FieldNamesFieldMapperTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java index fdf9d00938b..de606371e1e 100644 --- a/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java @@ -65,7 +65,7 @@ public class FieldNamesFieldMapperTests extends ElasticsearchSingleNodeTest { public void testFieldType() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_field_names").field("store", "yes").endObject() + .startObject("_field_names").endObject() .endObject().endObject().string(); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); From 77ac4528fb2cca4b631eaf144de6ae0567fb2201 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 29 Apr 2015 14:29:15 +0200 Subject: [PATCH 218/236] Transport: read/writeGenericValue to support BytesRef Add support for BytesRef to existing StreamInput#readGenericValue and StreamOutput#writeGenericValue Closes #10878 --- .../org/elasticsearch/common/io/stream/StreamInput.java | 3 ++- .../org/elasticsearch/common/io/stream/StreamOutput.java | 3 +++ .../common/io/streams/BytesStreamsTests.java | 9 ++++++--- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index fea34cd94c3..c529bc850ff 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.io.stream; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -413,6 +412,8 @@ public abstract class StreamInput extends InputStream { return readFloatArray(); case 20: return readDoubleArray(); + case 21: + return readBytesRef(); default: throw new IOException("Can't read unknown type [" + type + "]"); } diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 754e38ceadb..ad9af359fad 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -385,6 +385,9 @@ public abstract class StreamOutput extends OutputStream { } else if (type == double[].class) { writeByte((byte) 20); writeDoubleArray((double[]) value); + } else if (value instanceof BytesRef) { + writeByte((byte) 21); + writeBytesRef((BytesRef) value); } else { throw new IOException("Can't write type [" + type + "]"); } diff --git a/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java b/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java index 9ee4c272580..54e2c735ed0 100644 --- a/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java +++ b/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.io.streams; import org.apache.lucene.util.Constants; import org.elasticsearch.common.io.stream.BytesStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Ignore; @@ -263,8 +264,8 @@ public class BytesStreamsTests extends ElasticsearchTestCase { assumeTrue("requires a 64-bit JRE ... ?!", Constants.JRE_IS_64BIT); BytesStreamOutput out = new BytesStreamOutput(); out.writeBoolean(false); - out.writeByte((byte)1); - out.writeShort((short)-1); + out.writeByte((byte) 1); + out.writeShort((short) -1); out.writeInt(-1); out.writeVInt(2); out.writeLong(-3); @@ -281,6 +282,7 @@ public class BytesStreamsTests extends ElasticsearchTestCase { out.writeGenericValue(doubleArray); out.writeString("hello"); out.writeString("goodbye"); + out.writeGenericValue(BytesRefs.toBytesRef("bytesref")); BytesStreamInput in = new BytesStreamInput(out.bytes().toBytes()); assertThat(in.readBoolean(), equalTo(false)); assertThat(in.readByte(), equalTo((byte)1)); @@ -291,12 +293,13 @@ public class BytesStreamsTests extends ElasticsearchTestCase { assertThat(in.readVLong(), equalTo((long)4)); assertThat((double)in.readFloat(), closeTo(1.1, 0.0001)); assertThat(in.readDouble(), closeTo(2.2, 0.0001)); - assertThat(in.readGenericValue(), equalTo((Object)intArray)); + assertThat(in.readGenericValue(), equalTo((Object) intArray)); assertThat(in.readGenericValue(), equalTo((Object)longArray)); assertThat(in.readGenericValue(), equalTo((Object)floatArray)); assertThat(in.readGenericValue(), equalTo((Object)doubleArray)); assertThat(in.readString(), equalTo("hello")); assertThat(in.readString(), equalTo("goodbye")); + assertThat(in.readGenericValue(), equalTo((Object)BytesRefs.toBytesRef("bytesref"))); in.close(); out.close(); } From d16bf992a92a53aba20b51150fc1a1bbd86c393c Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 30 Apr 2015 11:24:36 +0100 Subject: [PATCH 219/236] Aggregations: min_bucket aggregation An aggregation to calculate the minimum value in a set of buckets. Closes #9999 --- .../search/aggregations/reducer.asciidoc | 1 + .../reducer/min-bucket-aggregation.asciidoc | 82 ++++ .../aggregations/AggregationModule.java | 4 +- .../TransportAggregationModule.java | 4 +- .../reducers/ReducerBuilders.java | 7 +- .../{ => max}/MaxBucketBuilder.java | 2 +- .../{ => max}/MaxBucketParser.java | 2 +- .../{ => max}/MaxBucketReducer.java | 3 +- .../bucketmetrics/min/MinBucketBuilder.java | 59 +++ .../bucketmetrics/min/MinBucketParser.java | 97 ++++ .../bucketmetrics/min/MinBucketReducer.java | 151 ++++++ .../aggregations/reducers/MinBucketTests.java | 433 ++++++++++++++++++ 12 files changed, 839 insertions(+), 6 deletions(-) create mode 100644 docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc rename src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/{ => max}/MaxBucketBuilder.java (99%) rename src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/{ => max}/MaxBucketParser.java (99%) rename src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/{ => max}/MaxBucketReducer.java (98%) create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java create mode 100644 src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java create mode 100644 src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java diff --git a/docs/reference/search/aggregations/reducer.asciidoc b/docs/reference/search/aggregations/reducer.asciidoc index 11b0826e9eb..a725bc77e38 100644 --- a/docs/reference/search/aggregations/reducer.asciidoc +++ b/docs/reference/search/aggregations/reducer.asciidoc @@ -2,4 +2,5 @@ include::reducer/derivative-aggregation.asciidoc[] include::reducer/max-bucket-aggregation.asciidoc[] +include::reducer/min-bucket-aggregation.asciidoc[] include::reducer/movavg-aggregation.asciidoc[] diff --git a/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc new file mode 100644 index 00000000000..8eb69f9d683 --- /dev/null +++ b/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc @@ -0,0 +1,82 @@ +[[search-aggregations-reducer-min-bucket-aggregation]] +=== Max Bucket Aggregation + +A sibling reducer aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation +and outputs both the value and the key(s) of the bucket(s). The specified metric must be numeric and the sibling aggregation must +be a multi-bucket aggregation. + +The following snippet calculates the minimum of the total monthly `sales`: + +[source,js] +-------------------------------------------------- +{ + "aggs" : { + "sales_per_month" : { + "date_histogram" : { + "field" : "date", + "interval" : "month" + }, + "aggs": { + "sales": { + "sum": { + "field": "price" + } + } + } + }, + "min_monthly_sales": { + "min_bucket": { + "buckets_paths": "sales_per_month>sales" <1> + } + } + } +} +-------------------------------------------------- + +<1> `bucket_paths` instructs this max_bucket aggregation that we want the minimum value of the `sales` aggregation in the +`sales_per_month` date histogram. + +And the following may be the response: + +[source,js] +-------------------------------------------------- +{ + "aggregations": { + "sales_per_month": { + "buckets": [ + { + "key_as_string": "2015/01/01 00:00:00", + "key": 1420070400000, + "doc_count": 3, + "sales": { + "value": 550 + } + }, + { + "key_as_string": "2015/02/01 00:00:00", + "key": 1422748800000, + "doc_count": 2, + "sales": { + "value": 60 + } + }, + { + "key_as_string": "2015/03/01 00:00:00", + "key": 1425168000000, + "doc_count": 2, + "sales": { + "value": 375 + } + } + ] + }, + "min_monthly_sales": { + "keys": ["2015/02/01 00:00:00"], <1> + "value": 60 + } + } +} +-------------------------------------------------- + +<1> `keys` is an array of strings since the minimum value may be present in multiple buckets + diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index 803b52bc0bf..2f6e929071f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -57,7 +57,8 @@ import org.elasticsearch.search.aggregations.metrics.sum.SumParser; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsParser; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser; import org.elasticsearch.search.aggregations.reducers.Reducer; -import org.elasticsearch.search.aggregations.reducers.bucketmetrics.MaxBucketParser; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketParser; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketParser; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelModule; @@ -107,6 +108,7 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ reducerParsers.add(DerivativeParser.class); reducerParsers.add(MaxBucketParser.class); + reducerParsers.add(MinBucketParser.class); reducerParsers.add(MovAvgParser.class); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java index b0fe986a081..27f5cbcf0aa 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java @@ -61,7 +61,8 @@ import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount; import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.reducers.bucketmetrics.MaxBucketReducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketReducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketReducer; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgReducer; import org.elasticsearch.search.aggregations.reducers.movavg.models.TransportMovAvgModelModule; @@ -118,6 +119,7 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM InternalSimpleValue.registerStreams(); InternalBucketMetricValue.registerStreams(); MaxBucketReducer.registerStreams(); + MinBucketReducer.registerStreams(); MovAvgReducer.registerStreams(); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java index ba6d3ebe7c2..d2632721c64 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java @@ -19,7 +19,8 @@ package org.elasticsearch.search.aggregations.reducers; -import org.elasticsearch.search.aggregations.reducers.bucketmetrics.MaxBucketBuilder; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketBuilder; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketBuilder; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeBuilder; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgBuilder; @@ -36,6 +37,10 @@ public final class ReducerBuilders { return new MaxBucketBuilder(name); } + public static final MinBucketBuilder minBucket(String name) { + return new MinBucketBuilder(name); + } + public static final MovAvgBuilder movingAvg(String name) { return new MovAvgBuilder(name); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java similarity index 99% rename from src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java rename to src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java index 7fbcd54f789..31d588a6497 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.reducers.bucketmetrics; +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java similarity index 99% rename from src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java rename to src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java index 28fe0110238..c8f3bad49f1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.reducers.bucketmetrics; +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java similarity index 98% rename from src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java rename to src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java index 22bc30fd730..1d2d5c8d26c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/MaxBucketReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.reducers.bucketmetrics; +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -32,6 +32,7 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.reducers.BucketHelpers; import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.ReducerStreams; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java new file mode 100644 index 00000000000..b792b7bbac9 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; + +import java.io.IOException; + +public class MinBucketBuilder extends ReducerBuilder { + + private String format; + private GapPolicy gapPolicy; + + public MinBucketBuilder(String name) { + super(name, MinBucketReducer.TYPE.name()); + } + + public MinBucketBuilder format(String format) { + this.format = format; + return this; + } + + public MinBucketBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(MinBucketParser.FORMAT.getPreferredName(), format); + } + if (gapPolicy != null) { + builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java new file mode 100644 index 00000000000..b956bdb6d79 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class MinBucketParser implements Reducer.Parser { + public static final ParseField FORMAT = new ParseField("format"); + + @Override + public String type() { + return MinBucketReducer.TYPE.name(); + } + + @Override + public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { + XContentParser.Token token; + String currentFieldName = null; + String[] bucketsPaths = null; + String format = null; + GapPolicy gapPolicy = GapPolicy.SKIP; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if (FORMAT.match(currentFieldName)) { + format = parser.text(); + } else if (BUCKETS_PATH.match(currentFieldName)) { + bucketsPaths = new String[] { parser.text() }; + } else if (GAP_POLICY.match(currentFieldName)) { + gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (BUCKETS_PATH.match(currentFieldName)) { + List paths = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + paths.add(path); + } + bucketsPaths = paths.toArray(new String[paths.size()]); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else { + throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].", + parser.getTokenLocation()); + } + } + + if (bucketsPaths == null) { + throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + + "] for derivative aggregation [" + reducerName + "]", parser.getTokenLocation()); + } + + ValueFormatter formatter = null; + if (format != null) { + formatter = ValueFormat.Patternable.Number.format(format).formatter(); + } + + return new MinBucketReducer.Factory(reducerName, bucketsPaths, gapPolicy, formatter); + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java new file mode 100644 index 00000000000..7ab257c9fb0 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class MinBucketReducer extends SiblingReducer { + + public final static Type TYPE = new Type("min_bucket"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public MinBucketReducer readResult(StreamInput in) throws IOException { + MinBucketReducer result = new MinBucketReducer(); + result.readFrom(in); + return result; + } + }; + + private ValueFormatter formatter; + private GapPolicy gapPolicy; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private MinBucketReducer() { + } + + protected MinBucketReducer(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter, + Map metaData) { + super(name, bucketsPaths, metaData); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + @Override + public Type type() { + return TYPE; + } + + public InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) { + List minBucketKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + List bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList(); + for (Aggregation aggregation : aggregations) { + if (aggregation.getName().equals(bucketsPath.get(0))) { + bucketsPath = bucketsPath.subList(1, bucketsPath.size()); + InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation; + List buckets = multiBucketsAgg.getBuckets(); + for (int i = 0; i < buckets.size(); i++) { + Bucket bucket = buckets.get(i); + Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy); + if (bucketValue != null) { + if (bucketValue < minValue) { + minBucketKeys.clear(); + minBucketKeys.add(bucket.getKeyAsString()); + minValue = bucketValue; + } else if (bucketValue.equals(minValue)) { + minBucketKeys.add(bucket.getKeyAsString()); + } + } + } + } + } + String[] keys = minBucketKeys.toArray(new String[minBucketKeys.size()]); + return new InternalBucketMetricValue(name(), keys, minValue, formatter, Collections.EMPTY_LIST, metaData()); + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + private final GapPolicy gapPolicy; + + public Factory(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter) { + super(name, TYPE.name(), bucketsPaths); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + @Override + protected Reducer createInternal(Map metaData) throws IOException { + return new MinBucketReducer(name, bucketsPaths, gapPolicy, formatter, metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new IllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + } + + } + +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java new file mode 100644 index 00000000000..b755159526d --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java @@ -0,0 +1,433 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.minBucket; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class MinBucketTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + + static int numDocs; + static int interval; + static int minRandomValue; + static int maxRandomValue; + static int numValueBuckets; + static long[] valueCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + numDocs = randomIntBetween(6, 20); + interval = randomIntBetween(2, 5); + + minRandomValue = 0; + maxRandomValue = 20; + + numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + valueCounts = new long[numValueBuckets]; + + List builders = new ArrayList<>(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + builders.add(client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) + .endObject())); + final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); + valueCounts[bucket]++; + } + + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + } + indexRandom(true, builders); + ensureSearchable(); + } + + @Test + public void testDocCount_topLevel() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .addAggregation(minBucket("min_bucket").setBucketsPaths("histo>_count")).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + if (bucket.getDocCount() < minValue) { + minValue = bucket.getDocCount(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + + @Test + public void testDocCount_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>_count"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() < minValue) { + minValue = bucket.getDocCount(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + } + + @Test + public void testMetric_topLevel() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(minBucket("min_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(interval)); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int i = 0; i < interval; ++i) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); + assertThat(bucket.getDocCount(), greaterThan(0l)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() < minValue) { + minValue = sum.value(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + + @Test + public void testMetric_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>sum"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() != 0) { + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() < minValue) { + minValue = sum.value(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + } + + InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + } + + @Test + public void testMetric_asSubAggWithInsertZeros() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) + .execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() < minValue) { + minValue = sum.value(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + } + + @Test + public void testNoBuckets() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(minBucket("min_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + + InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(minBucketValue.keys(), equalTo(new String[0])); + } + + @Test + public void testNested() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(minBucket("min_histo_bucket").setBucketsPaths("histo>_count"))) + .addAggregation(minBucket("min_terms_bucket").setBucketsPaths("terms>min_histo_bucket")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + List minTermsKeys = new ArrayList<>(); + double minTermsValue = Double.POSITIVE_INFINITY; + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List minHistoKeys = new ArrayList<>(); + double minHistoValue = Double.POSITIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() < minHistoValue) { + minHistoValue = bucket.getDocCount(); + minHistoKeys = new ArrayList<>(); + minHistoKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == minHistoValue) { + minHistoKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_histo_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_histo_bucket")); + assertThat(minBucketValue.value(), equalTo(minHistoValue)); + assertThat(minBucketValue.keys(), equalTo(minHistoKeys.toArray(new String[minHistoKeys.size()]))); + if (minHistoValue < minTermsValue) { + minTermsValue = minHistoValue; + minTermsKeys = new ArrayList<>(); + minTermsKeys.add(termsBucket.getKeyAsString()); + } else if (minHistoValue == minTermsValue) { + minTermsKeys.add(termsBucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_terms_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_terms_bucket")); + assertThat(minBucketValue.value(), equalTo(minTermsValue)); + assertThat(minBucketValue.keys(), equalTo(minTermsKeys.toArray(new String[minTermsKeys.size()]))); + } +} From 969f53e399c4b941bac894f28e152f0488ebc1d7 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 30 Apr 2015 14:41:01 +0100 Subject: [PATCH 220/236] fix typo in Min bucket aggregation docs --- .../search/aggregations/reducer/min-bucket-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc index 8eb69f9d683..558d0c19983 100644 --- a/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc +++ b/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc @@ -1,5 +1,5 @@ [[search-aggregations-reducer-min-bucket-aggregation]] -=== Max Bucket Aggregation +=== Min Bucket Aggregation A sibling reducer aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). The specified metric must be numeric and the sibling aggregation must From e5be85d586a807b438d9e034f836dcf0f3d7fe2c Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 30 Apr 2015 14:55:34 +0200 Subject: [PATCH 221/236] Aggs: Change the default `min_doc_count` to 0 on histograms. The assumption is that gaps in histogram are generally undesirable, for instance if you want to build a visualization from it. Additionally, we are building new aggregations that require that there are no gaps to work correctly (eg. derivatives). --- docs/reference/migration/migrate_2_0.asciidoc | 2 + .../bucket/datehistogram-aggregation.asciidoc | 8 +- .../bucket/histogram-aggregation.asciidoc | 85 +++---------------- .../reducer/derivative-aggregation.asciidoc | 6 +- .../reducer/movavg-aggregation.asciidoc | 17 ++-- .../bucket/histogram/DateHistogramParser.java | 2 +- .../bucket/histogram/HistogramParser.java | 2 +- .../bucket/DateHistogramTests.java | 2 +- .../reducers/DerivativeTests.java | 22 ++--- .../aggregations/reducers/MaxBucketTests.java | 10 +-- .../aggregations/reducers/MinBucketTests.java | 10 +-- .../reducers/moving/avg/MovAvgTests.java | 32 +++---- 12 files changed, 67 insertions(+), 131 deletions(-) diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 58282f5a0f2..ff8befce427 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -139,6 +139,8 @@ equivalent to the former `pre_zone` option. Setting `time_zone` to a value like being applied in the specified time zone but In addition to this, also the `pre_zone_adjust_large_interval` is removed because we now always return dates and bucket keys in UTC. +Both the `histogram` and `date_histogram` aggregations now have a default `min_doc_count` of `0` instead of `1` previously. + `include`/`exclude` filtering on the `terms` aggregation now uses the same syntax as regexp queries instead of the Java syntax. While simple regexps should still work, more complex ones might need some rewriting. Also, the `flags` parameter is not supported anymore. diff --git a/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc index 999a933f91d..256ef62d766 100644 --- a/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -119,7 +119,7 @@ Response: Like with the normal <>, both document level scripts and value level scripts are supported. It is also possible to control the order of the returned buckets using the `order` -settings and filter the returned buckets based on a `min_doc_count` setting (by default all buckets with -`min_doc_count > 0` will be returned). This histogram also supports the `extended_bounds` setting, which enables extending -the bounds of the histogram beyond the data itself (to read more on why you'd want to do that please refer to the -explanation <>). +settings and filter the returned buckets based on a `min_doc_count` setting (by default all buckets between the first +bucket that matches documents and the last one are returned). This histogram also supports the `extended_bounds` +setting, which enables extending the bounds of the histogram beyond the data itself (to read more on why you'd want to +do that please refer to the explanation <>). diff --git a/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc index 545bc24c767..cd1fd06ddaf 100644 --- a/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc @@ -50,6 +50,10 @@ And the following may be the response: "key": 50, "doc_count": 4 }, + { + "key": 100, + "doc_count": 0 + }, { "key": 150, "doc_count": 3 @@ -60,10 +64,11 @@ And the following may be the response: } -------------------------------------------------- -The response above shows that none of the aggregated products has a price that falls within the range of `[100 - 150)`. -By default, the response will only contain those buckets with a `doc_count` greater than 0. It is possible change that -and request buckets with either a higher minimum count or even 0 (in which case elasticsearch will "fill in the gaps" -and create buckets with zero documents). This can be configured using the `min_doc_count` setting: +==== Minimum document count + +The response above show that no documents has a price that falls within the range of `[100 - 150)`. By default the +response will fill gaps in the histogram with empty buckets. It is possible change that and request buckets with +a higher minimum count thanks to the `min_doc_count` setting: [source,js] -------------------------------------------------- @@ -73,7 +78,7 @@ and create buckets with zero documents). This can be configured using the `min_d "histogram" : { "field" : "price", "interval" : 50, - "min_doc_count" : 0 + "min_doc_count" : 1 } } } @@ -96,10 +101,6 @@ Response: "key": 50, "doc_count": 4 }, - { - "key" : 100, - "doc_count" : 0 <1> - }, { "key": 150, "doc_count": 3 @@ -110,13 +111,11 @@ Response: } -------------------------------------------------- -<1> No documents were found that belong in this bucket, yet it is still returned with zero `doc_count`. - [[search-aggregations-bucket-histogram-aggregation-extended-bounds]] By default the date_/histogram returns all the buckets within the range of the data itself, that is, the documents with the smallest values (on which with histogram) will determine the min bucket (the bucket with the smallest key) and the documents with the highest values will determine the max bucket (the bucket with the highest key). Often, when when -requesting empty buckets (`"min_doc_count" : 0`), this causes a confusion, specifically, when the data is also filtered. +requesting empty buckets, this causes a confusion, specifically, when the data is also filtered. To understand why, let's look at an example: @@ -149,7 +148,6 @@ Example: "histogram" : { "field" : "price", "interval" : 50, - "min_doc_count" : 0, "extended_bounds" : { "min" : 0, "max" : 500 @@ -265,67 +263,6 @@ PATH := []*[ "date_histogram":{ "field":"timestamp", - "interval":"day", - "min_doc_count": 0 <2> + "interval":"day" }, "aggs":{ "the_sum":{ - "sum":{ "field": "lemmings" } <3> + "sum":{ "field": "lemmings" } <2> }, "the_movavg":{ - "moving_avg":{ "buckets_path": "the_sum" } <4> + "moving_avg":{ "buckets_path": "the_sum" } <3> } } } } -------------------------------------------------- <1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals -<2> We must specify "min_doc_count: 0" in our date histogram that all buckets are returned, even if they are empty. -<3> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc) -<4> Finally, we specify a `moving_avg` aggregation which uses "the_sum" metric as its input. +<2> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc) +<3> Finally, we specify a `moving_avg` aggregation which uses "the_sum" metric as its input. Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally add normal metrics, such as a `sum`, inside of that histogram. Finally, the `moving_avg` is embedded inside the histogram. @@ -85,8 +83,7 @@ A moving average can also be calculated on the document count of each bucket, in "my_date_histo":{ "date_histogram":{ "field":"timestamp", - "interval":"day", - "min_doc_count": 0 + "interval":"day" }, "aggs":{ "the_movavg":{ @@ -294,4 +291,4 @@ global trend is slightly positive, so the prediction makes a sharp u-turn and be [[double_prediction_global]] .Double Exponential moving average with window of size 100, predict = 20, alpha = 0.5, beta = 0.1 -image::images/reducers_movavg/double_prediction_global.png[] \ No newline at end of file +image::images/reducers_movavg/double_prediction_global.png[] diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java index 6f316d901db..e2ce1cc4b09 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java @@ -86,7 +86,7 @@ public class DateHistogramParser implements Aggregator.Parser { .build(); boolean keyed = false; - long minDocCount = 1; + long minDocCount = 0; ExtendedBounds extendedBounds = null; InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC; String interval = null; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java index c9c885be3f5..ace6e6711c4 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java @@ -52,7 +52,7 @@ public class HistogramParser implements Aggregator.Parser { .build(); boolean keyed = false; - long minDocCount = 1; + long minDocCount = 0; InternalOrder order = (InternalOrder) InternalOrder.KEY_ASC; long interval = -1; ExtendedBounds extendedBounds = null; diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java index b9f7e3e511d..d3114d20283 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java @@ -170,7 +170,7 @@ public class DateHistogramTests extends ElasticsearchIntegrationTest { @Test public void singleValuedField_WithTimeZone() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).timeZone("+01:00")).execute() + .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(1).timeZone("+01:00")).execute() .actionGet(); DateTimeZone tz = DateTimeZone.forID("+01:00"); assertSearchResponse(response); diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 0974d297d46..0135f72b4be 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -167,7 +167,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .subAggregation(derivative("deriv").setBucketsPaths("_count")) .subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv"))).execute().actionGet(); @@ -204,7 +204,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet(); @@ -250,7 +250,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation(derivative("deriv").setBucketsPaths("stats.sum"))).execute().actionGet(); @@ -296,7 +296,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx_unmapped") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); @@ -312,7 +312,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx", "idx_unmapped") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertSearchResponse(response); @@ -342,7 +342,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); @@ -371,7 +371,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .prepareSearch("empty_bucket_idx_rnd") .setQuery(matchAllQuery()) .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) .extendedBounds(0l, (long) numBuckets_empty_rnd - 1) .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(randomFrom(GapPolicy.values())))) .execute().actionGet(); @@ -402,7 +402,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(GapPolicy.INSERT_ZEROS))).execute() .actionGet(); @@ -432,7 +432,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet(); @@ -474,7 +474,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation(derivative("deriv").setBucketsPaths("sum").gapPolicy(GapPolicy.INSERT_ZEROS))).execute() .actionGet(); @@ -514,7 +514,7 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { .prepareSearch("empty_bucket_idx_rnd") .setQuery(matchAllQuery()) .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) .extendedBounds(0l, (long) numBuckets_empty_rnd - 1) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) .subAggregation(derivative("deriv").setBucketsPaths("sum").gapPolicy(gapPolicy))).execute().actionGet(); diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java index 84e559e4970..4b91c92fccf 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java @@ -94,7 +94,7 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { @Test public void testDocCount_topLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue)) .addAggregation(maxBucket("max_bucket").setBucketsPaths("histo>_count")).execute().actionGet(); @@ -138,7 +138,7 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { .field("tag") .order(Order.term(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue)) .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>_count"))).execute().actionGet(); @@ -232,7 +232,7 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { .field("tag") .order(Order.term(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum"))).execute().actionGet(); @@ -291,7 +291,7 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { .field("tag") .order(Order.term(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) @@ -370,7 +370,7 @@ public class MaxBucketTests extends ElasticsearchIntegrationTest { .field("tag") .order(Order.term(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue)) .subAggregation(maxBucket("max_histo_bucket").setBucketsPaths("histo>_count"))) .addAggregation(maxBucket("max_terms_bucket").setBucketsPaths("terms>max_histo_bucket")).execute().actionGet(); diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java index b755159526d..c34ba920da9 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java @@ -94,7 +94,7 @@ public class MinBucketTests extends ElasticsearchIntegrationTest { @Test public void testDocCount_topLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue)) .addAggregation(minBucket("min_bucket").setBucketsPaths("histo>_count")).execute().actionGet(); @@ -138,7 +138,7 @@ public class MinBucketTests extends ElasticsearchIntegrationTest { .field("tag") .order(Order.term(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue)) .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>_count"))).execute().actionGet(); @@ -232,7 +232,7 @@ public class MinBucketTests extends ElasticsearchIntegrationTest { .field("tag") .order(Order.term(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>sum"))).execute().actionGet(); @@ -291,7 +291,7 @@ public class MinBucketTests extends ElasticsearchIntegrationTest { .field("tag") .order(Order.term(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) @@ -370,7 +370,7 @@ public class MinBucketTests extends ElasticsearchIntegrationTest { .field("tag") .order(Order.term(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) .extendedBounds((long) minRandomValue, (long) maxRandomValue)) .subAggregation(minBucket("min_histo_bucket").setBucketsPaths("histo>_count"))) .addAggregation(minBucket("min_terms_bucket").setBucketsPaths("terms>min_histo_bucket")).execute().actionGet(); diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index 069f9904a3f..77b7c8bc208 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -314,7 +314,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") @@ -367,7 +367,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") @@ -420,7 +420,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") @@ -473,7 +473,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") @@ -525,7 +525,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") @@ -568,7 +568,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") @@ -592,7 +592,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field("test").interval(interval).minDocCount(0) + histogram("histo").field("test").interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") @@ -617,7 +617,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field("test").interval(interval).minDocCount(0) + histogram("histo").field("test").interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") @@ -643,7 +643,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") @@ -666,7 +666,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { client() .prepareSearch("idx").setTypes("type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval).minDocCount(0) + histogram("histo").field(INTERVAL_FIELD).interval(interval) .extendedBounds(0L, (long) (interval * (numBuckets - 1))) .subAggregation(randomMetric("the_metric", VALUE_FIELD)) .subAggregation(movingAvg("movavg_counts") @@ -695,7 +695,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx").setTypes("gap_type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) .subAggregation(min("the_metric").field(GAP_FIELD)) .subAggregation(movingAvg("movavg_values") .window(windowSize) @@ -754,7 +754,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { SearchResponse response = client() .prepareSearch("idx").setTypes("gap_type") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) .subAggregation(min("the_metric").field(GAP_FIELD)) .subAggregation(movingAvg("movavg_values") .window(windowSize) @@ -822,7 +822,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .prepareSearch("idx").setTypes("gap_type") .addAggregation( filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).from(1)).subAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) .subAggregation(randomMetric("the_metric", GAP_FIELD)) .subAggregation(movingAvg("movavg_values") .window(windowSize) @@ -865,7 +865,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .prepareSearch("idx").setTypes("gap_type") .addAggregation( filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).from(1)).subAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) .subAggregation(randomMetric("the_metric", GAP_FIELD)) .subAggregation(movingAvg("movavg_values") .window(windowSize) @@ -921,7 +921,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .prepareSearch("idx").setTypes("gap_type") .addAggregation( filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).to(1)).subAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) .subAggregation(randomMetric("the_metric", GAP_FIELD)) .subAggregation(movingAvg("movavg_values") .window(windowSize) @@ -968,7 +968,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .prepareSearch("idx").setTypes("gap_type") .addAggregation( filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).to(1)).subAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(1).minDocCount(0).extendedBounds(0L, 49L) + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) .subAggregation(randomMetric("the_metric", GAP_FIELD)) .subAggregation(movingAvg("movavg_values") .window(windowSize) From 7a6fe809d0e046018659cbccc1ebbe4b042e1671 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Sun, 19 Apr 2015 23:52:29 +0200 Subject: [PATCH 222/236] inner_hits: Don't use bitset cache for children filters. Only parent filters should use bitset filter cache, to avoid memory being wasted. Also in case of object fields inline the field name into the nested object, instead of creating an additional (dummy) nested identity. Closes #10662 Closes #10629 --- .../index/mapper/DocumentMapper.java | 46 +++++++-------- .../search/fetch/FetchPhase.java | 57 ++++++++++++------- .../search/innerhits/InnerHitsTests.java | 44 ++++++++++++-- 3 files changed, 96 insertions(+), 51 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index ceab117683e..b267bf3978c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -24,8 +24,9 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import org.apache.lucene.document.Field; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; -import org.apache.lucene.util.BitDocIdSet; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -41,21 +42,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.mapper.Mapping.SourceTransform; -import org.elasticsearch.index.mapper.internal.AllFieldMapper; -import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; -import org.elasticsearch.index.mapper.internal.IdFieldMapper; -import org.elasticsearch.index.mapper.internal.IndexFieldMapper; -import org.elasticsearch.index.mapper.internal.ParentFieldMapper; -import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; -import org.elasticsearch.index.mapper.internal.SizeFieldMapper; -import org.elasticsearch.index.mapper.internal.SourceFieldMapper; -import org.elasticsearch.index.mapper.internal.TTLFieldMapper; -import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; -import org.elasticsearch.index.mapper.internal.TypeFieldMapper; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.mapper.internal.VersionFieldMapper; +import org.elasticsearch.index.mapper.internal.*; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.script.ExecutableScript; @@ -63,14 +51,10 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; /** @@ -352,15 +336,29 @@ public class DocumentMapper implements ToXContent { /** * Returns the best nested {@link ObjectMapper} instances that is in the scope of the specified nested docId. */ - public ObjectMapper findNestedObjectMapper(int nestedDocId, BitsetFilterCache cache, LeafReaderContext context) throws IOException { + public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, LeafReaderContext context) throws IOException { ObjectMapper nestedObjectMapper = null; for (ObjectMapper objectMapper : objectMappers().values()) { if (!objectMapper.nested().isNested()) { continue; } - BitDocIdSet nestedTypeBitSet = cache.getBitDocIdSetFilter(objectMapper.nestedTypeFilter()).getDocIdSet(context); - if (nestedTypeBitSet != null && nestedTypeBitSet.bits().get(nestedDocId)) { + Filter filter = sc.filterCache().cache(objectMapper.nestedTypeFilter(), null, sc.queryParserService().autoFilterCachePolicy()); + if (filter == null) { + continue; + } + // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and + // therefor is guaranteed to be a live doc. + DocIdSet nestedTypeSet = filter.getDocIdSet(context, null); + if (nestedTypeSet == null) { + continue; + } + DocIdSetIterator iterator = nestedTypeSet.iterator(); + if (iterator == null) { + continue; + } + + if (iterator.advance(nestedDocId) == nestedDocId) { if (nestedObjectMapper == null) { nestedObjectMapper = objectMapper; } else { diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 1e072a4916d..5eef114e5cb 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -21,9 +21,9 @@ package org.elasticsearch.search.fetch; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BitDocIdSet; @@ -67,12 +67,7 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.common.xcontent.XContentFactory.contentBuilder; @@ -288,7 +283,7 @@ public class FetchPhase implements SearchPhase { SourceLookup sourceLookup = context.lookup().source(); sourceLookup.setSegmentAndDocument(subReaderContext, nestedSubDocId); - ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedSubDocId, context.bitsetFilterCache(), subReaderContext); + ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedSubDocId, context, subReaderContext); assert nestedObjectMapper != null; InternalSearchHit.InternalNestedIdentity nestedIdentity = getInternalNestedIdentity(context, nestedSubDocId, subReaderContext, documentMapper, nestedObjectMapper); @@ -375,38 +370,56 @@ public class FetchPhase implements SearchPhase { private InternalSearchHit.InternalNestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, LeafReaderContext subReaderContext, DocumentMapper documentMapper, ObjectMapper nestedObjectMapper) throws IOException { int currentParent = nestedSubDocId; ObjectMapper nestedParentObjectMapper; + StringBuilder field = new StringBuilder(); + ObjectMapper current = nestedObjectMapper; InternalSearchHit.InternalNestedIdentity nestedIdentity = null; do { - String field; Filter parentFilter; - nestedParentObjectMapper = documentMapper.findParentObjectMapper(nestedObjectMapper); + nestedParentObjectMapper = documentMapper.findParentObjectMapper(current); + if (field.length() != 0) { + field.insert(0, '.'); + } + field.insert(0, current.name()); if (nestedParentObjectMapper != null) { - field = nestedObjectMapper.name(); - if (!nestedParentObjectMapper.nested().isNested()) { - nestedObjectMapper = nestedParentObjectMapper; - // all right, the parent is a normal object field, so this is the best identiy we can give for that: - nestedIdentity = new InternalSearchHit.InternalNestedIdentity(field, 0, nestedIdentity); + if (nestedParentObjectMapper.nested().isNested() == false) { + current = nestedParentObjectMapper; continue; } parentFilter = nestedParentObjectMapper.nestedTypeFilter(); } else { - field = nestedObjectMapper.fullPath(); parentFilter = Queries.newNonNestedFilter(); } + Filter childFilter = context.filterCache().cache(nestedObjectMapper.nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + if (childFilter == null) { + current = nestedParentObjectMapper; + continue; + } + // We can pass down 'null' as acceptedDocs, because we're fetching matched docId that matched in the query phase. + DocIdSet childDocSet = childFilter.getDocIdSet(subReaderContext, null); + if (childDocSet == null) { + current = nestedParentObjectMapper; + continue; + } + DocIdSetIterator childIter = childDocSet.iterator(); + if (childIter == null) { + current = nestedParentObjectMapper; + continue; + } + BitDocIdSet parentBitSet = context.bitsetFilterCache().getBitDocIdSetFilter(parentFilter).getDocIdSet(subReaderContext); BitSet parentBits = parentBitSet.bits(); + int offset = 0; - BitDocIdSet nestedDocsBitSet = context.bitsetFilterCache().getBitDocIdSetFilter(nestedObjectMapper.nestedTypeFilter()).getDocIdSet(subReaderContext); - BitSet nestedBits = nestedDocsBitSet.bits(); int nextParent = parentBits.nextSetBit(currentParent); - for (int docId = nestedBits.nextSetBit(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; docId = nestedBits.nextSetBit(docId + 1)) { + for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; docId = childIter.nextDoc()) { offset++; } currentParent = nextParent; - nestedObjectMapper = nestedParentObjectMapper; - nestedIdentity = new InternalSearchHit.InternalNestedIdentity(field, offset, nestedIdentity); - } while (nestedParentObjectMapper != null); + current = nestedObjectMapper = nestedParentObjectMapper; + nestedIdentity = new InternalSearchHit.InternalNestedIdentity(field.toString(), offset, nestedIdentity); + field = new StringBuilder(); + } while (current != null); return nestedIdentity; } diff --git a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java index 428d76880b3..8792b2cb6a8 100644 --- a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java +++ b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java @@ -867,7 +867,12 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { List requests = new ArrayList<>(); requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject() .field("title", "quick brown fox") - .startObject("comments").startObject("messages").field("message", "fox eat quick").endObject().endObject() + .startObject("comments") + .startArray("messages") + .startObject().field("message", "fox eat quick").endObject() + .startObject().field("message", "bear eat quick").endObject() + .endArray() + .endObject() .endObject())); indexRandom(true, requests); @@ -879,11 +884,40 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { assertThat(response.getHits().getAt(0).id(), equalTo("1")); assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l)); assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("messages")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild().getChild(), nullValue()); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue()); + + response = client().prepareSearch("articles") + .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear")).innerHit(new QueryInnerHitBuilder())) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).id(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue()); + + // index the message in an object form instead of an array + requests = new ArrayList<>(); + requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject() + .field("title", "quick brown fox") + .startObject("comments").startObject("messages").field("message", "fox eat quick").endObject().endObject() + .endObject())); + indexRandom(true, requests); + response = client().prepareSearch("articles") + .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox")).innerHit(new QueryInnerHitBuilder())) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).id(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue()); } } From 12c19508fb55e6854b6499de45e4ef9a1fa89b04 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Thu, 30 Apr 2015 19:14:56 +0200 Subject: [PATCH 223/236] Tests: Fix test bug in aggregations' EquivalenceTests due to the change of the default min_doc_count. --- .../elasticsearch/search/aggregations/EquivalenceTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java b/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java index 5079e6730dd..d318ed5ca7b 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -303,7 +302,7 @@ public class EquivalenceTests extends ElasticsearchIntegrationTest { SearchResponse resp = client().prepareSearch("idx") .addAggregation(terms("terms").field("values").collectMode(randomFrom(SubAggCollectionMode.values())).script("floor(_value / interval)").param("interval", interval).size(maxNumTerms)) - .addAggregation(histogram("histo").field("values").interval(interval)) + .addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)) .execute().actionGet(); assertSearchResponse(resp); From a0451a37cc90306be955015b974fd5f8e7cedcf6 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 Apr 2015 13:36:36 -0700 Subject: [PATCH 224/236] Upgrade lucene snapshot to r1677039 --- pom.xml | 2 +- .../elasticsearch/search/child/SimpleChildQuerySearchTests.java | 1 + .../search/scriptfilter/ScriptFilterSearchTests.java | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index fa3d5813d58..a3611822d9a 100644 --- a/pom.xml +++ b/pom.xml @@ -32,7 +32,7 @@ 5.2.0 - 1675927 + 1677039 5.2.0-snapshot-${lucene.snapshot.revision} 2.1.14 auto diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index c8cc0921bee..44b57045965 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -1863,6 +1863,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/10897") public void testValidateThatHasChildAndHasParentFilterAreNeverCached() throws Exception { assertAcked(prepareCreate("test") .setSettings(builder().put(indexSettings()) diff --git a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java index 2e83e4ef3ba..1f071e95bb5 100644 --- a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java +++ b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java @@ -118,6 +118,7 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/10897") public void testCustomScriptCache() throws Exception { assertAcked(prepareCreate("test").setSettings( ImmutableSettings.settingsBuilder() From 4ef9f3ca63264b1056caedfb5c4119ba594d9edb Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 29 Apr 2015 00:29:32 -0700 Subject: [PATCH 225/236] Mappings: Remove file based default mappings Using files that must be specified on each node is an anti-pattern from the API based goal of ES. This change removes the ability to specify the default mapping with a file on each node. closes #10620 --- docs/reference/index-modules/mapper.asciidoc | 9 +- docs/reference/mapping.asciidoc | 2 - docs/reference/mapping/conf-mappings.asciidoc | 19 --- .../mapping/dynamic-mapping.asciidoc | 8 +- docs/reference/migration/migrate_2_0.asciidoc | 8 ++ .../index/mapper/MapperService.java | 120 +++--------------- 6 files changed, 33 insertions(+), 133 deletions(-) delete mode 100644 docs/reference/mapping/conf-mappings.asciidoc diff --git a/docs/reference/index-modules/mapper.asciidoc b/docs/reference/index-modules/mapper.asciidoc index 2bbca6c095d..baca199efae 100644 --- a/docs/reference/index-modules/mapper.asciidoc +++ b/docs/reference/index-modules/mapper.asciidoc @@ -32,7 +32,7 @@ mapping specified in the <> or `_default_` mapping. The default mapping definition is a plain mapping definition that is -embedded within ElasticSearch: +embedded within Elasticsearch: [source,js] -------------------------------------------------- @@ -46,11 +46,8 @@ Pretty short, isn't it? Basically, everything is `_default_`ed, including the dynamic nature of the root object mapping which allows new fields to be added automatically. -The built-in default mapping definition can be overridden in several ways. A -`_default_` mapping can be specified when creating a new index, or the global -`_default_` mapping (for all indices) can be configured by creating a file -called `config/default-mapping.json`. (This location can be changed with -the `index.mapper.default_mapping_location` setting.) +The default mapping can be overridden by specifying the `_default_` type when +creating a new index. Dynamic creation of mappings for unmapped types can be completely disabled by setting `index.mapper.dynamic` to `false`. diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index 945d5a49ffc..7e11fe658a2 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -71,8 +71,6 @@ include::mapping/date-format.asciidoc[] include::mapping/dynamic-mapping.asciidoc[] -include::mapping/conf-mappings.asciidoc[] - include::mapping/meta.asciidoc[] include::mapping/transform.asciidoc[] diff --git a/docs/reference/mapping/conf-mappings.asciidoc b/docs/reference/mapping/conf-mappings.asciidoc deleted file mode 100644 index e9bb3f91f93..00000000000 --- a/docs/reference/mapping/conf-mappings.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[mapping-conf-mappings]] -== Config Mappings - -Creating new mappings can be done using the -<> -API. When a document is indexed with no mapping associated with it in -the specific index, the -<> feature will kick in and automatically create mapping -definition for it. - -Mappings can also be provided on the node level, meaning that each index -created will automatically be started with all the mappings defined -within a certain location. - -Mappings can be defined within files called `[mapping_name].json` and be -placed either under `config/mappings/_default` location, or under -`config/mappings/[index_name]` (for mappings that should be associated -only with a specific index). diff --git a/docs/reference/mapping/dynamic-mapping.asciidoc b/docs/reference/mapping/dynamic-mapping.asciidoc index abcfbc650e1..91ecd6b0c2d 100644 --- a/docs/reference/mapping/dynamic-mapping.asciidoc +++ b/docs/reference/mapping/dynamic-mapping.asciidoc @@ -21,12 +21,8 @@ embedded within the distribution: -------------------------------------------------- Pretty short, isn't it? Basically, everything is defaulted, especially the -dynamic nature of the root object mapping. The default mapping -definition can be overridden in several manners. The simplest manner is -to simply define a file called `default-mapping.json` and to place it -under the `config` directory (which can be configured to exist in a -different location). It can also be explicitly set using the -`index.mapper.default_mapping_location` setting. +dynamic nature of the root object mapping. The default mapping can be +overridden by specifying the `_default_` type when creating a new index. The dynamic creation of mappings for unmapped types can be completely disabled by setting `index.mapper.dynamic` to `false`. diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index ff8befce427..5435f4df2fd 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -312,6 +312,14 @@ They are always stored with doc values, and not indexed. The `_source` field no longer supports `includes` and `excludes` paramters. When `_source` is enabled, the entire original source will be stored. +==== Config based mappings +The ability to specify mappings in configuration files has been removed. To specify +default mappings that apply to multiple indexes, use index templates. + +The following settings are no longer valid: +* `index.mapper.default_mapping_location` +* `index.mapper.default_percolator_mapping_location` + === Codecs It is no longer possible to specify per-field postings and doc values formats diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 58f6bb271d7..7d242c953c0 100755 --- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -20,13 +20,11 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectOpenHashSet; -import com.google.common.base.Charsets; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.index.IndexOptions; @@ -44,14 +42,9 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.FailedToResolveConfigException; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; import org.elasticsearch.index.analysis.AnalysisService; @@ -67,8 +60,6 @@ import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.script.ScriptService; import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -122,7 +113,7 @@ public class MapperService extends AbstractIndexComponent { private volatile ImmutableMap> unmappedFieldMappers = ImmutableMap.of(); @Inject - public MapperService(Index index, @IndexSettings Settings indexSettings, Environment environment, AnalysisService analysisService, IndexFieldDataService fieldDataService, + public MapperService(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService, IndexFieldDataService fieldDataService, SimilarityLookupService similarityLookupService, ScriptService scriptService) { super(index, indexSettings); @@ -134,107 +125,36 @@ public class MapperService extends AbstractIndexComponent { this.searchQuoteAnalyzer = new SmartIndexNameSearchQuoteAnalyzer(analysisService.defaultSearchQuoteAnalyzer()); this.dynamic = indexSettings.getAsBoolean("index.mapper.dynamic", true); - String defaultMappingLocation = indexSettings.get("index.mapper.default_mapping_location"); - final URL defaultMappingUrl; + defaultPercolatorMappingSource = "{\n" + + "\"_default_\":{\n" + + "\"properties\" : {\n" + + "\"query\" : {\n" + + "\"type\" : \"object\",\n" + + "\"enabled\" : false\n" + + "}\n" + + "}\n" + + "}\n" + + "}"; if (index.getName().equals(ScriptService.SCRIPT_INDEX)){ - defaultMappingUrl = getMappingUrl(indexSettings, environment, defaultMappingLocation, "script-mapping.json", "org/elasticsearch/index/mapper/script-mapping.json"); - } else { - defaultMappingUrl = getMappingUrl(indexSettings, environment, defaultMappingLocation, "default-mapping.json", "org/elasticsearch/index/mapper/default-mapping.json"); - } - - if (defaultMappingUrl == null) { - logger.info("failed to find default-mapping.json in the classpath, using the default template"); - if (index.getName().equals(ScriptService.SCRIPT_INDEX)){ - defaultMappingSource = "{" + - "\"_default_\": {" + - "\"properties\": {" + + defaultMappingSource = "{" + + "\"_default_\": {" + + "\"properties\": {" + "\"script\": { \"enabled\": false }," + "\"template\": { \"enabled\": false }" + - "}" + - "}" + - "}"; - } else { - defaultMappingSource = "{\n" + - " \"_default_\":{\n" + - " }\n" + - "}"; - } + "}" + + "}" + + "}"; } else { - try { - defaultMappingSource = Streams.copyToString(FileSystemUtils.newBufferedReader(defaultMappingUrl, Charsets.UTF_8)); - } catch (IOException e) { - throw new MapperException("Failed to load default mapping source from [" + defaultMappingLocation + "]", e); - } - } - - String percolatorMappingLocation = indexSettings.get("index.mapper.default_percolator_mapping_location"); - URL percolatorMappingUrl = null; - if (percolatorMappingLocation != null) { - try { - percolatorMappingUrl = environment.resolveConfig(percolatorMappingLocation); - } catch (FailedToResolveConfigException e) { - // not there, default to the built in one - try { - percolatorMappingUrl = PathUtils.get(percolatorMappingLocation).toUri().toURL(); - } catch (MalformedURLException e1) { - throw new FailedToResolveConfigException("Failed to resolve default percolator mapping location [" + percolatorMappingLocation + "]"); - } - } - } - if (percolatorMappingUrl != null) { - try { - defaultPercolatorMappingSource = Streams.copyToString(FileSystemUtils.newBufferedReader(percolatorMappingUrl, Charsets.UTF_8)); - } catch (IOException e) { - throw new MapperException("Failed to load default percolator mapping source from [" + percolatorMappingUrl + "]", e); - } - } else { - defaultPercolatorMappingSource = "{\n" + - //" \"" + PercolatorService.TYPE_NAME + "\":{\n" + - " \"" + "_default_" + "\":{\n" + - " \"properties\" : {\n" + - " \"query\" : {\n" + - " \"type\" : \"object\",\n" + - " \"enabled\" : false\n" + - " }\n" + - " }\n" + - " }\n" + - "}"; + defaultMappingSource = "{\"_default_\":{}}"; } if (logger.isTraceEnabled()) { - logger.trace("using dynamic[{}], default mapping: default_mapping_location[{}], loaded_from[{}] and source[{}], default percolator mapping: location[{}], loaded_from[{}] and source[{}]", dynamic, defaultMappingLocation, defaultMappingUrl, defaultMappingSource, percolatorMappingLocation, percolatorMappingUrl, defaultPercolatorMappingSource); + logger.trace("using dynamic[{}], default mapping source[{}], default percolator mapping source[{}]", dynamic, defaultMappingSource, defaultPercolatorMappingSource); } else if (logger.isDebugEnabled()) { - logger.debug("using dynamic[{}], default mapping: default_mapping_location[{}], loaded_from[{}], default percolator mapping: location[{}], loaded_from[{}]", dynamic, defaultMappingLocation, defaultMappingUrl, percolatorMappingLocation, percolatorMappingUrl); + logger.debug("using dynamic[{}]", dynamic); } } - private URL getMappingUrl(Settings indexSettings, Environment environment, String mappingLocation, String configString, String resourceLocation) { - URL mappingUrl; - if (mappingLocation == null) { - try { - mappingUrl = environment.resolveConfig(configString); - } catch (FailedToResolveConfigException e) { - // not there, default to the built in one - mappingUrl = indexSettings.getClassLoader().getResource(resourceLocation); - if (mappingUrl == null) { - mappingUrl = MapperService.class.getClassLoader().getResource(resourceLocation); - } - } - } else { - try { - mappingUrl = environment.resolveConfig(mappingLocation); - } catch (FailedToResolveConfigException e) { - // not there, default to the built in one - try { - mappingUrl = PathUtils.get(mappingLocation).toUri().toURL(); - } catch (MalformedURLException e1) { - throw new FailedToResolveConfigException("Failed to resolve dynamic mapping location [" + mappingLocation + "]"); - } - } - } - return mappingUrl; - } - public void close() { for (DocumentMapper documentMapper : mappers.values()) { documentMapper.close(); From d2b12e4fc2c0f4347972784bdde50b05e779a56b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 30 Apr 2015 13:57:55 -0700 Subject: [PATCH 226/236] Mappings: Remove docs for type level analyzer defaults These settings were removed in #9430. --- .../mapping/types/root-object-type.asciidoc | 21 ------------------- 1 file changed, 21 deletions(-) diff --git a/docs/reference/mapping/types/root-object-type.asciidoc b/docs/reference/mapping/types/root-object-type.asciidoc index fe16264b73f..a8e8ea000cd 100644 --- a/docs/reference/mapping/types/root-object-type.asciidoc +++ b/docs/reference/mapping/types/root-object-type.asciidoc @@ -16,27 +16,6 @@ specifying the `tweet` type in the document itself: } -------------------------------------------------- -[float] -==== Index / Search Analyzers - -The root object allows to define type mapping level analyzers for index -and search that will be used with all different fields that do not -explicitly set analyzers on their own. Here is an example: - -[source,js] --------------------------------------------------- -{ - "tweet" : { - "analyzer" : "standard", - "search_analyzer" : "standard_with_synonyms" - } -} --------------------------------------------------- - -The above simply explicitly defines both the `analyzer` and -`search_analyzer` that will be used. If `search_analyzer` is not specified, -it defaults to the value of `analyzer`. - [float] ==== dynamic_date_formats From aa968f6b650d8836831e8f7546a7ba1a987a76cc Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 28 Apr 2015 17:43:02 -0700 Subject: [PATCH 227/236] Scripting: Add Field Methods Added infrastructure to allow basic member methods in the expressions language to be called. The methods must have a signature with no arguments. Also added the following member methods for date fields (and it should be easy to add more) * getYear * getMonth * getDayOfMonth * getHourOfDay * getMinutes * getSeconds Allow fields to be accessed without using the member variable [value]. (Note that both ways can be used to access fields for back-compat.) closes #10890 --- docs/reference/modules/scripting.asciidoc | 14 ++++ .../expression/DateMethodFunctionValues.java | 46 +++++++++++ .../expression/DateMethodValueSource.java | 80 +++++++++++++++++++ .../ExpressionScriptEngineService.java | 63 +++++++++++++-- .../expression/FieldDataValueSource.java | 17 ++-- .../expression/ExpressionScriptTests.java | 64 +++++++++++++-- 6 files changed, 268 insertions(+), 16 deletions(-) create mode 100644 src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java create mode 100644 src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index 5d198520e87..750802c4ec2 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -389,9 +389,23 @@ for details on what operators and functions are available. Variables in `expression` scripts are available to access: * Single valued document fields, e.g. `doc['myfield'].value` +* Single valued document fields can also be accessed without `.value` e.g. `doc['myfield']` * Parameters passed into the script, e.g. `mymodifier` * The current document's score, `_score` (only available when used in a `script_score`) +Variables in `expression` scripts that are of type `date` may use the following member methods: + +* getYear() +* getMonth() +* getDayOfMonth() +* getHourOfDay() +* getMinutes() +* getSeconds() + +The following example shows the difference in years between the `date` fields date0 and date1: + +`doc['date1'].getYear() - doc['date0'].getYear()` + There are a few limitations relative to other script languages: * Only numeric fields may be accessed diff --git a/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java b/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java new file mode 100644 index 00000000000..64eed0741bc --- /dev/null +++ b/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script.expression; + +import java.util.Calendar; +import java.util.Locale; +import java.util.TimeZone; + +import org.apache.lucene.queries.function.ValueSource; +import org.elasticsearch.index.fielddata.AtomicNumericFieldData; + +class DateMethodFunctionValues extends FieldDataFunctionValues { + private final int calendarType; + private final Calendar calendar; + + DateMethodFunctionValues(ValueSource parent, AtomicNumericFieldData data, int calendarType) { + super(parent, data); + + this.calendarType = calendarType; + calendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"), Locale.ROOT); + } + + @Override + public double doubleVal(int docId) { + long millis = (long)dataAccessor.get(docId); + calendar.setTimeInMillis(millis); + return calendar.get(calendarType); + } +} diff --git a/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java b/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java new file mode 100644 index 00000000000..a157790e2bb --- /dev/null +++ b/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script.expression; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.queries.function.FunctionValues; + +import org.elasticsearch.index.fielddata.AtomicFieldData; +import org.elasticsearch.index.fielddata.AtomicNumericFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; + +class DateMethodValueSource extends FieldDataValueSource { + + protected final String methodName; + protected final int calendarType; + + DateMethodValueSource(IndexFieldData indexFieldData, String methodName, int calendarType) { + super(indexFieldData); + + Objects.requireNonNull(methodName); + + this.methodName = methodName; + this.calendarType = calendarType; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + + DateMethodValueSource that = (DateMethodValueSource) o; + + if (calendarType != that.calendarType) return false; + return methodName.equals(that.methodName); + + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + methodName.hashCode(); + result = 31 * result + calendarType; + return result; + } + + @Override + public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException { + AtomicFieldData leafData = fieldData.load(leaf); + assert(leafData instanceof AtomicNumericFieldData); + + return new DateMethodFunctionValues(this, (AtomicNumericFieldData)leafData, calendarType); + } + + @Override + public String description() { + return methodName + ": field(" + fieldData.getFieldNames().toString() + ")"; + } +} diff --git a/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java b/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java index 23841942104..6d6f986432b 100644 --- a/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java @@ -23,6 +23,7 @@ import org.apache.lucene.expressions.Expression; import org.apache.lucene.expressions.SimpleBindings; import org.apache.lucene.expressions.js.JavascriptCompiler; import org.apache.lucene.expressions.js.VariableContext; +import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource; import org.apache.lucene.search.SortField; import org.elasticsearch.common.Nullable; @@ -32,6 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; @@ -40,6 +42,7 @@ import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; import java.text.ParseException; +import java.util.Calendar; import java.util.Map; /** @@ -50,6 +53,13 @@ public class ExpressionScriptEngineService extends AbstractComponent implements public static final String NAME = "expression"; + protected static final String GET_YEAR_METHOD = "getYear"; + protected static final String GET_MONTH_METHOD = "getMonth"; + protected static final String GET_DAY_OF_MONTH_METHOD = "getDayOfMonth"; + protected static final String GET_HOUR_OF_DAY_METHOD = "getHourOfDay"; + protected static final String GET_MINUTES_METHOD = "getMinutes"; + protected static final String GET_SECONDS_METHOD = "getSeconds"; + @Inject public ExpressionScriptEngineService(Settings settings) { super(settings); @@ -112,19 +122,30 @@ public class ExpressionScriptEngineService extends AbstractComponent implements } } else { + String fieldname = null; + String methodname = null; VariableContext[] parts = VariableContext.parse(variable); if (parts[0].text.equals("doc") == false) { throw new ExpressionScriptCompilationException("Unknown variable [" + parts[0].text + "] in expression"); } if (parts.length < 2 || parts[1].type != VariableContext.Type.STR_INDEX) { - throw new ExpressionScriptCompilationException("Variable 'doc' in expression must be used with a specific field like: doc['myfield'].value"); + throw new ExpressionScriptCompilationException("Variable 'doc' in expression must be used with a specific field like: doc['myfield']"); + } else { + fieldname = parts[1].text; } - if (parts.length < 3 || parts[2].type != VariableContext.Type.MEMBER || parts[2].text.equals("value") == false) { - throw new ExpressionScriptCompilationException("Invalid member for field data in expression. Only '.value' is currently supported."); + if (parts.length == 3) { + if (parts[2].type == VariableContext.Type.METHOD) { + methodname = parts[2].text; + } else if (parts[2].type != VariableContext.Type.MEMBER || !"value".equals(parts[2].text)) { + throw new ExpressionScriptCompilationException("Only the member variable [value] or member methods may be accessed on a field when not accessing the field directly"); + } + } + if (parts.length > 3) { + throw new ExpressionScriptCompilationException("Variable [" + variable + "] does not follow an allowed format of either doc['field'] or doc['field'].method()"); } - String fieldname = parts[1].text; FieldMapper field = mapper.smartNameFieldMapper(fieldname); + if (field == null) { throw new ExpressionScriptCompilationException("Field [" + fieldname + "] used in expression does not exist in mappings"); } @@ -132,14 +153,46 @@ public class ExpressionScriptEngineService extends AbstractComponent implements // TODO: more context (which expression?) throw new ExpressionScriptCompilationException("Field [" + fieldname + "] used in expression must be numeric"); } + IndexFieldData fieldData = lookup.doc().fieldDataService().getForField((NumberFieldMapper)field); - bindings.add(variable, new FieldDataValueSource(fieldData)); + if (methodname == null) { + bindings.add(variable, new FieldDataValueSource(fieldData)); + } else { + bindings.add(variable, getMethodValueSource(field, fieldData, fieldname, methodname)); + } } } return new ExpressionScript((Expression)compiledScript, bindings, specialValue); } + protected ValueSource getMethodValueSource(FieldMapper field, IndexFieldData fieldData, String fieldName, String methodName) { + switch (methodName) { + case GET_YEAR_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.YEAR); + case GET_MONTH_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.MONTH); + case GET_DAY_OF_MONTH_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.DAY_OF_MONTH); + case GET_HOUR_OF_DAY_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.HOUR_OF_DAY); + case GET_MINUTES_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.MINUTE); + case GET_SECONDS_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.SECOND); + default: + throw new IllegalArgumentException("Member method [" + methodName + "] does not exist."); + } + } + + protected ValueSource getDateMethodValueSource(FieldMapper field, IndexFieldData fieldData, String fieldName, String methodName, int calendarType) { + if (!(field instanceof DateFieldMapper)) { + throw new IllegalArgumentException("Member method [" + methodName + "] can only be used with a date field type, not the field [" + fieldName + "]."); + } + + return new DateMethodValueSource(fieldData, methodName, calendarType); + } + @Override public ExecutableScript executable(Object compiledScript, @Nullable Map vars) { throw new UnsupportedOperationException("Cannot use expressions for updates"); diff --git a/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java b/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java index 16e3d35bb61..7a97532068a 100644 --- a/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java +++ b/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java @@ -19,7 +19,6 @@ package org.elasticsearch.script.expression; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; @@ -29,15 +28,18 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import java.io.IOException; import java.util.Map; +import java.util.Objects; /** * A {@link ValueSource} wrapper for field data. */ class FieldDataValueSource extends ValueSource { - IndexFieldData fieldData; + protected IndexFieldData fieldData; + + protected FieldDataValueSource(IndexFieldData d) { + Objects.requireNonNull(d); - FieldDataValueSource(IndexFieldData d) { fieldData = d; } @@ -49,8 +51,13 @@ class FieldDataValueSource extends ValueSource { } @Override - public boolean equals(Object other) { - return fieldData.equals(other); + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldDataValueSource that = (FieldDataValueSource) o; + + return fieldData.equals(that.fieldData); } @Override diff --git a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java index 8ee8d1dcbf1..1f04063a42d 100644 --- a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java +++ b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java @@ -61,6 +61,15 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { } public void testBasic() throws Exception { + createIndex("test"); + ensureGreen("test"); + client().prepareIndex("test", "doc", "1").setSource("foo", 4).setRefresh(true).get(); + SearchResponse rsp = buildRequest("doc['foo'] + 1").get(); + assertEquals(1, rsp.getHits().getTotalHits()); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue()); + } + + public void testBasicUsingDotValue() throws Exception { createIndex("test"); ensureGreen("test"); client().prepareIndex("test", "doc", "1").setSource("foo", 4).setRefresh(true).get(); @@ -89,13 +98,56 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { assertEquals("2", hits.getAt(2).getId()); } + public void testDateMethods() throws Exception { + ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "date0", "type=date", "date1", "type=date")); + ensureGreen("test"); + indexRandom(true, + client().prepareIndex("test", "doc", "1").setSource("date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), + client().prepareIndex("test", "doc", "2").setSource("date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z")); + SearchResponse rsp = buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()").get(); + assertEquals(2, rsp.getHits().getTotalHits()); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue()); + assertEquals(-11.0, hits.getAt(1).field("foo").getValue()); + rsp = buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()").get(); + assertEquals(2, rsp.getHits().getTotalHits()); + hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue()); + assertEquals(24.0, hits.getAt(1).field("foo").getValue()); + rsp = buildRequest("doc['date1'].getMonth() + 1").get(); + assertEquals(2, rsp.getHits().getTotalHits()); + hits = rsp.getHits(); + assertEquals(9.0, hits.getAt(0).field("foo").getValue()); + assertEquals(10.0, hits.getAt(1).field("foo").getValue()); + rsp = buildRequest("doc['date1'].getYear()").get(); + assertEquals(2, rsp.getHits().getTotalHits()); + hits = rsp.getHits(); + assertEquals(1985.0, hits.getAt(0).field("foo").getValue()); + assertEquals(1983.0, hits.getAt(1).field("foo").getValue()); + } + + public void testInvalidDateMethodCall() throws Exception { + ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "double", "type=double")); + ensureGreen("test"); + indexRandom(true, client().prepareIndex("test", "doc", "1").setSource("double", "178000000.0")); + try { + buildRequest("doc['double'].getYear()").get(); + fail(); + } catch (SearchPhaseExecutionException e) { + assertThat(e.toString() + "should have contained IllegalArgumentException", + e.toString().contains("IllegalArgumentException"), equalTo(true)); + assertThat(e.toString() + "should have contained can only be used with a date field type", + e.toString().contains("can only be used with a date field type"), equalTo(true)); + } + } + public void testSparseField() throws Exception { ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "x", "type=long", "y", "type=long")); ensureGreen("test"); indexRandom(true, client().prepareIndex("test", "doc", "1").setSource("x", 4), client().prepareIndex("test", "doc", "2").setSource("y", 2)); - SearchResponse rsp = buildRequest("doc['x'].value + 1").get(); + SearchResponse rsp = buildRequest("doc['x'] + 1").get(); ElasticsearchAssertions.assertSearchResponse(rsp); SearchHits hits = rsp.getHits(); assertEquals(2, rsp.getHits().getTotalHits()); @@ -108,7 +160,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { ensureGreen("test"); client().prepareIndex("test", "doc", "1").setSource("x", 4).setRefresh(true).get(); try { - buildRequest("doc['bogus'].value").get(); + buildRequest("doc['bogus']").get(); fail("Expected missing field to cause failure"); } catch (SearchPhaseExecutionException e) { assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", @@ -126,7 +178,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "doc", "2").setSource("x", 3), client().prepareIndex("test", "doc", "3").setSource("x", 5)); // a = int, b = double, c = long - String script = "doc['x'].value * a + b + ((c + doc['x'].value) > 5000000009 ? 1 : 0)"; + String script = "doc['x'] * a + b + ((c + doc['x']) > 5000000009 ? 1 : 0)"; SearchResponse rsp = buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L).get(); SearchHits hits = rsp.getHits(); assertEquals(3, hits.getTotalHits()); @@ -164,7 +216,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { public void testNonNumericField() { client().prepareIndex("test", "doc", "1").setSource("text", "this is not a number").setRefresh(true).get(); try { - buildRequest("doc['text'].value").get(); + buildRequest("doc['text']").get(); fail("Expected text field to cause execution failure"); } catch (SearchPhaseExecutionException e) { assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", @@ -208,8 +260,8 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { } catch (SearchPhaseExecutionException e) { assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(e.toString() + "should have contained field member error", - e.toString().contains("Invalid member for field"), equalTo(true)); + assertThat(e.toString() + "should have contained member variable [value] or member methods may be accessed", + e.toString().contains("member variable [value] or member methods may be accessed"), equalTo(true)); } } From c165afb4d5a0d90ac214307beb16fc10513c8995 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 30 Apr 2015 13:47:19 -0400 Subject: [PATCH 228/236] Logging: Add logging of slow cluster state tasks Closes #10874 --- .../service/InternalClusterService.java | 42 +++- .../ClusterDynamicSettingsModule.java | 2 + .../cluster/ClusterServiceTests.java | 216 +++++++++++++++++- .../elasticsearch/test/MockLogAppender.java | 139 +++++++++++ 4 files changed, 392 insertions(+), 7 deletions(-) create mode 100644 src/test/java/org/elasticsearch/test/MockLogAppender.java diff --git a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index b1823e5d74e..a5b7470bfa1 100644 --- a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.service; import com.google.common.collect.Iterables; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ClusterState.Builder; @@ -59,6 +58,9 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF */ public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { + public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold"; + public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval"; + public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; private final ThreadPool threadPool; @@ -74,6 +76,8 @@ public class InternalClusterService extends AbstractLifecycleComponent slowTaskLoggingThreshold.getMillis()) { + logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold); + } + } + class NotifyTimeout implements Runnable { final TimeoutClusterStateListener listener; final TimeValue timeout; @@ -755,4 +776,13 @@ public class InternalClusterService extends AbstractLifecycleComponent expectations; + + public MockLogAppender() { + expectations = newArrayList(); + } + + public void addExpectation(LoggingExpectation expectation) { + expectations.add(expectation); + } + + @Override + protected void append(LoggingEvent loggingEvent) { + for (LoggingExpectation expectation : expectations) { + expectation.match(loggingEvent); + } + } + + @Override + public void close() { + + } + + @Override + public boolean requiresLayout() { + return false; + } + + public void assertAllExpectationsMatched() { + for (LoggingExpectation expectation : expectations) { + expectation.assertMatched(); + } + } + + public interface LoggingExpectation { + void match(LoggingEvent loggingEvent); + + void assertMatched(); + } + + public static abstract class AbstractEventExpectation implements LoggingExpectation { + protected final String name; + protected final String logger; + protected final Level level; + protected final String message; + protected boolean saw; + + public AbstractEventExpectation(String name, String logger, Level level, String message) { + this.name = name; + this.logger = getLoggerName(logger); + this.level = level; + this.message = message; + this.saw = false; + } + + @Override + public void match(LoggingEvent event) { + if (event.getLevel() == level && event.getLoggerName().equals(logger)) { + if (Regex.isSimpleMatchPattern(message)) { + if (Regex.simpleMatch(message, event.getMessage().toString())) { + saw = true; + } + } else { + if (event.getMessage().toString().contains(message)) { + saw = true; + } + } + } + } + } + + public static class UnseenEventExpectation extends AbstractEventExpectation { + + public UnseenEventExpectation(String name, String logger, Level level, String message) { + super(name, logger, level, message); + } + + @Override + public void assertMatched() { + assertThat(name, saw, equalTo(false)); + } + } + + public static class SeenEventExpectation extends AbstractEventExpectation { + + public SeenEventExpectation(String name, String logger, Level level, String message) { + super(name, logger, level, message); + } + + @Override + public void assertMatched() { + assertThat(name, saw, equalTo(true)); + } + } + + private static String getLoggerName(String name) { + if (name.startsWith("org.elasticsearch.")) { + name = name.substring("org.elasticsearch.".length()); + } + return COMMON_PREFIX + name; + } +} From aade6194b7aaf6744a752bc9f81b9bcd9de0345a Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 30 Apr 2015 23:31:31 -0400 Subject: [PATCH 229/236] Add span within/containing queries. Expose new span queries from https://issues.apache.org/jira/browse/LUCENE-6083 Within returns matches from 'little' that are enclosed inside of a match from 'big'. Containing returns matches from 'big' that enclose matches from 'little'. --- docs/reference/query-dsl/queries.asciidoc | 4 + .../queries/span-containing-query.asciidoc | 29 +++++ .../queries/span-within-query.asciidoc | 29 +++++ .../index/query/QueryBuilders.java | 10 ++ .../query/SpanContainingQueryBuilder.java | 92 ++++++++++++++++ .../query/SpanContainingQueryParser.java | 100 ++++++++++++++++++ .../index/query/SpanWithinQueryBuilder.java | 92 ++++++++++++++++ .../index/query/SpanWithinQueryParser.java | 100 ++++++++++++++++++ .../indices/query/IndicesQueriesModule.java | 2 + .../query/SimpleIndexQueryParserTests.java | 48 +++++++++ .../index/query/spanContaining.json | 14 +++ .../elasticsearch/index/query/spanWithin.json | 14 +++ 12 files changed, 534 insertions(+) create mode 100644 docs/reference/query-dsl/queries/span-containing-query.asciidoc create mode 100644 docs/reference/query-dsl/queries/span-within-query.asciidoc create mode 100644 src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java create mode 100644 src/main/java/org/elasticsearch/index/query/SpanContainingQueryParser.java create mode 100644 src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java create mode 100644 src/main/java/org/elasticsearch/index/query/SpanWithinQueryParser.java create mode 100644 src/test/java/org/elasticsearch/index/query/spanContaining.json create mode 100644 src/test/java/org/elasticsearch/index/query/spanWithin.json diff --git a/docs/reference/query-dsl/queries.asciidoc b/docs/reference/query-dsl/queries.asciidoc index d56d2c719f1..20ce2789f7c 100644 --- a/docs/reference/query-dsl/queries.asciidoc +++ b/docs/reference/query-dsl/queries.asciidoc @@ -52,6 +52,8 @@ include::queries/range-query.asciidoc[] include::queries/regexp-query.asciidoc[] +include::queries/span-containing-query.asciidoc[] + include::queries/span-first-query.asciidoc[] include::queries/span-multi-term-query.asciidoc[] @@ -64,6 +66,8 @@ include::queries/span-or-query.asciidoc[] include::queries/span-term-query.asciidoc[] +include::queries/span-within-query.asciidoc[] + include::queries/term-query.asciidoc[] include::queries/terms-query.asciidoc[] diff --git a/docs/reference/query-dsl/queries/span-containing-query.asciidoc b/docs/reference/query-dsl/queries/span-containing-query.asciidoc new file mode 100644 index 00000000000..3464ff656bf --- /dev/null +++ b/docs/reference/query-dsl/queries/span-containing-query.asciidoc @@ -0,0 +1,29 @@ +[[query-dsl-span-containing-query]] +=== Span Containing Query + +Returns matches which enclose another span query. The span within +query maps to Lucene `SpanContainingQuery`. Here is an example: + +[source,js] +-------------------------------------------------- +{ + "span_containing" : { + "little" : { + "span_term" : { "field1" : "foo" } + }, + "big" : { + "span_near" : { + "clauses" : [ + { "span_term" : { "field1" : "bar" } }, + { "span_term" : { "field1" : "baz" } } + ], + "slop" : 5, + "in_order" : true + } + } + } +} +-------------------------------------------------- + +The `big` and `little` clauses can be any span type query. Matching +spans from `big` that contain matches from `little` are returned. diff --git a/docs/reference/query-dsl/queries/span-within-query.asciidoc b/docs/reference/query-dsl/queries/span-within-query.asciidoc new file mode 100644 index 00000000000..dc5c4bbfdfd --- /dev/null +++ b/docs/reference/query-dsl/queries/span-within-query.asciidoc @@ -0,0 +1,29 @@ +[[query-dsl-span-within-query]] +=== Span Within Query + +Returns matches which are enclosed inside another span query. The span within +query maps to Lucene `SpanWithinQuery`. Here is an example: + +[source,js] +-------------------------------------------------- +{ + "span_within" : { + "little" : { + "span_term" : { "field1" : "foo" } + }, + "big" : { + "span_near" : { + "clauses" : [ + { "span_term" : { "field1" : "bar" } }, + { "span_term" : { "field1" : "baz" } } + ], + "slop" : 5, + "in_order" : true + } + } + } +} +-------------------------------------------------- + +The `big` and `little` clauses can be any span type query. Matching +spans from `little` that are enclosed within `big` are returned. diff --git a/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 415544f3288..f383dc33ac6 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -318,6 +318,16 @@ public abstract class QueryBuilders { return new SpanOrQueryBuilder(); } + /** Creates a new {@code span_within} builder. */ + public static SpanWithinQueryBuilder spanWithinQuery() { + return new SpanWithinQueryBuilder(); + } + + /** Creates a new {@code span_containing} builder. */ + public static SpanContainingQueryBuilder spanContainingQuery() { + return new SpanContainingQueryBuilder(); + } + /** * Creates a {@link SpanQueryBuilder} which allows having a sub query * which implements {@link MultiTermQueryBuilder}. This is useful for diff --git a/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java new file mode 100644 index 00000000000..6fd2dee013a --- /dev/null +++ b/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Builder for {@link SpanContainingQuery}. + */ +public class SpanContainingQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder { + + private SpanQueryBuilder big; + private SpanQueryBuilder little; + private float boost = -1; + private String queryName; + + /** + * Sets the little clause, it must be contained within {@code big} for a match. + */ + public SpanContainingQueryBuilder little(SpanQueryBuilder clause) { + this.little = clause; + return this; + } + + /** + * Sets the big clause, it must enclose {@code little} for a match. + */ + public SpanContainingQueryBuilder big(SpanQueryBuilder clause) { + this.big = clause; + return this; + } + + @Override + public SpanContainingQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + /** + * Sets the query name for the filter that can be used when searching for matched_filters per hit. + */ + public SpanContainingQueryBuilder queryName(String queryName) { + this.queryName = queryName; + return this; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + if (big == null) { + throw new IllegalArgumentException("Must specify big clause when building a span_containing query"); + } + if (little == null) { + throw new IllegalArgumentException("Must specify little clause when building a span_containing query"); + } + builder.startObject(SpanContainingQueryParser.NAME); + + builder.field("big"); + big.toXContent(builder, params); + + builder.field("little"); + little.toXContent(builder, params); + + if (boost != -1) { + builder.field("boost", boost); + } + + if (queryName != null) { + builder.field("_name", queryName); + } + + builder.endObject(); + } +} diff --git a/src/main/java/org/elasticsearch/index/query/SpanContainingQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanContainingQueryParser.java new file mode 100644 index 00000000000..63e312bf384 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/query/SpanContainingQueryParser.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanContainingQuery; +import org.apache.lucene.search.spans.SpanQuery; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Parser for {@link SpanContainingQuery} + */ +public class SpanContainingQueryParser implements QueryParser { + + public static final String NAME = "span_containing"; + + @Inject + public SpanContainingQueryParser() { + } + + @Override + public String[] names() { + return new String[]{NAME, Strings.toCamelCase(NAME)}; + } + + @Override + public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { + XContentParser parser = parseContext.parser(); + + float boost = 1.0f; + String queryName = null; + SpanQuery big = null; + SpanQuery little = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("big".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (!(query instanceof SpanQuery)) { + throw new QueryParsingException(parseContext, "span_containing [big] must be of type span query"); + } + big = (SpanQuery) query; + } else if ("little".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (!(query instanceof SpanQuery)) { + throw new QueryParsingException(parseContext, "span_containing [little] must be of type span query"); + } + little = (SpanQuery) query; + } else { + throw new QueryParsingException(parseContext, "[span_containing] query does not support [" + currentFieldName + "]"); + } + } else if ("boost".equals(currentFieldName)) { + boost = parser.floatValue(); + } else if ("_name".equals(currentFieldName)) { + queryName = parser.text(); + } else { + throw new QueryParsingException(parseContext, "[span_containing] query does not support [" + currentFieldName + "]"); + } + } + + if (big == null) { + throw new QueryParsingException(parseContext, "span_containing must include [big]"); + } + if (little == null) { + throw new QueryParsingException(parseContext, "span_containing must include [little]"); + } + + Query query = new SpanContainingQuery(big, little); + query.setBoost(boost); + if (queryName != null) { + parseContext.addNamedQuery(queryName, query); + } + return query; + } +} diff --git a/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java new file mode 100644 index 00000000000..88e1538bff4 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Builder for {@link SpanWithinQuery}. + */ +public class SpanWithinQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder { + + private SpanQueryBuilder big; + private SpanQueryBuilder little; + private float boost = -1; + private String queryName; + + /** + * Sets the little clause, it must be contained within {@code big} for a match. + */ + public SpanWithinQueryBuilder little(SpanQueryBuilder clause) { + this.little = clause; + return this; + } + + /** + * Sets the big clause, it must enclose {@code little} for a match. + */ + public SpanWithinQueryBuilder big(SpanQueryBuilder clause) { + this.big = clause; + return this; + } + + @Override + public SpanWithinQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + /** + * Sets the query name for the filter that can be used when searching for matched_filters per hit. + */ + public SpanWithinQueryBuilder queryName(String queryName) { + this.queryName = queryName; + return this; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + if (big == null) { + throw new IllegalArgumentException("Must specify big clause when building a span_within query"); + } + if (little == null) { + throw new IllegalArgumentException("Must specify little clause when building a span_within query"); + } + builder.startObject(SpanWithinQueryParser.NAME); + + builder.field("big"); + big.toXContent(builder, params); + + builder.field("little"); + little.toXContent(builder, params); + + if (boost != -1) { + builder.field("boost", boost); + } + + if (queryName != null) { + builder.field("_name", queryName); + } + + builder.endObject(); + } +} diff --git a/src/main/java/org/elasticsearch/index/query/SpanWithinQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanWithinQueryParser.java new file mode 100644 index 00000000000..9194cbd2d0e --- /dev/null +++ b/src/main/java/org/elasticsearch/index/query/SpanWithinQueryParser.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanQuery; +import org.apache.lucene.search.spans.SpanWithinQuery; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Parser for {@link SpanWithinQuery} + */ +public class SpanWithinQueryParser implements QueryParser { + + public static final String NAME = "span_within"; + + @Inject + public SpanWithinQueryParser() { + } + + @Override + public String[] names() { + return new String[]{NAME, Strings.toCamelCase(NAME)}; + } + + @Override + public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { + XContentParser parser = parseContext.parser(); + + float boost = 1.0f; + String queryName = null; + SpanQuery big = null; + SpanQuery little = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("big".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (query instanceof SpanQuery == false) { + throw new QueryParsingException(parseContext, "span_within [big] must be of type span query"); + } + big = (SpanQuery) query; + } else if ("little".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (query instanceof SpanQuery == false) { + throw new QueryParsingException(parseContext, "span_within [little] must be of type span query"); + } + little = (SpanQuery) query; + } else { + throw new QueryParsingException(parseContext, "[span_within] query does not support [" + currentFieldName + "]"); + } + } else if ("boost".equals(currentFieldName)) { + boost = parser.floatValue(); + } else if ("_name".equals(currentFieldName)) { + queryName = parser.text(); + } else { + throw new QueryParsingException(parseContext, "[span_within] query does not support [" + currentFieldName + "]"); + } + } + + if (big == null) { + throw new QueryParsingException(parseContext, "span_within must include [big]"); + } + if (little == null) { + throw new QueryParsingException(parseContext, "span_within must include [little]"); + } + + Query query = new SpanWithinQuery(big, little); + query.setBoost(boost); + if (queryName != null) { + parseContext.addNamedQuery(queryName, query); + } + return query; + } +} diff --git a/src/main/java/org/elasticsearch/indices/query/IndicesQueriesModule.java b/src/main/java/org/elasticsearch/indices/query/IndicesQueriesModule.java index 14bbbc428f2..d70995ffebf 100644 --- a/src/main/java/org/elasticsearch/indices/query/IndicesQueriesModule.java +++ b/src/main/java/org/elasticsearch/indices/query/IndicesQueriesModule.java @@ -89,6 +89,8 @@ public class IndicesQueriesModule extends AbstractModule { qpBinders.addBinding().to(ConstantScoreQueryParser.class).asEagerSingleton(); qpBinders.addBinding().to(SpanTermQueryParser.class).asEagerSingleton(); qpBinders.addBinding().to(SpanNotQueryParser.class).asEagerSingleton(); + qpBinders.addBinding().to(SpanWithinQueryParser.class).asEagerSingleton(); + qpBinders.addBinding().to(SpanContainingQueryParser.class).asEagerSingleton(); qpBinders.addBinding().to(FieldMaskingSpanQueryParser.class).asEagerSingleton(); qpBinders.addBinding().to(SpanFirstQueryParser.class).asEagerSingleton(); qpBinders.addBinding().to(SpanNearQueryParser.class).asEagerSingleton(); diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index d37e5cf31c0..5c29fe57713 100644 --- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -51,12 +51,14 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.search.spans.FieldMaskingSpanQuery; +import org.apache.lucene.search.spans.SpanContainingQuery; import org.apache.lucene.search.spans.SpanFirstQuery; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanNotQuery; import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanTermQuery; +import org.apache.lucene.search.spans.SpanWithinQuery; import org.apache.lucene.spatial.prefix.IntersectsPrefixTreeFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; @@ -131,11 +133,13 @@ import static org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; import static org.elasticsearch.index.query.QueryBuilders.regexpQuery; +import static org.elasticsearch.index.query.QueryBuilders.spanContainingQuery; import static org.elasticsearch.index.query.QueryBuilders.spanFirstQuery; import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery; import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery; import static org.elasticsearch.index.query.QueryBuilders.spanOrQuery; import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery; +import static org.elasticsearch.index.query.QueryBuilders.spanWithinQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.termsQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; @@ -1434,6 +1438,50 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0)))); } + @Test + public void testSpanWithinQueryBuilder() throws IOException { + IndexQueryParserService queryParser = queryParser(); + Query expectedQuery = new SpanWithinQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))), + new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)))); + Query actualQuery = queryParser.parse(spanWithinQuery() + .big(spanTermQuery("age", 34)) + .little(spanTermQuery("age", 35))) + .query(); + assertEquals(expectedQuery, actualQuery); + } + + @Test + public void testSpanWithinQueryParser() throws IOException { + IndexQueryParserService queryParser = queryParser(); + Query expectedQuery = new SpanWithinQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))), + new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)))); + String queryText = copyToStringFromClasspath("/org/elasticsearch/index/query/spanWithin.json"); + Query actualQuery = queryParser.parse(queryText).query(); + assertEquals(expectedQuery, actualQuery); + } + + @Test + public void testSpanContainingQueryBuilder() throws IOException { + IndexQueryParserService queryParser = queryParser(); + Query expectedQuery = new SpanContainingQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))), + new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)))); + Query actualQuery = queryParser.parse(spanContainingQuery() + .big(spanTermQuery("age", 34)) + .little(spanTermQuery("age", 35))) + .query(); + assertEquals(expectedQuery, actualQuery); + } + + @Test + public void testSpanContainingQueryParser() throws IOException { + IndexQueryParserService queryParser = queryParser(); + Query expectedQuery = new SpanContainingQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))), + new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)))); + String queryText = copyToStringFromClasspath("/org/elasticsearch/index/query/spanContaining.json"); + Query actualQuery = queryParser.parse(queryText).query(); + assertEquals(expectedQuery, actualQuery); + } + @Test public void testSpanFirstQueryBuilder() throws IOException { IndexQueryParserService queryParser = queryParser(); diff --git a/src/test/java/org/elasticsearch/index/query/spanContaining.json b/src/test/java/org/elasticsearch/index/query/spanContaining.json new file mode 100644 index 00000000000..13f91d88b44 --- /dev/null +++ b/src/test/java/org/elasticsearch/index/query/spanContaining.json @@ -0,0 +1,14 @@ +{ + span_containing:{ + big:{ + span_term:{ + age:34 + } + }, + little:{ + span_term:{ + age:35 + } + } + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/index/query/spanWithin.json b/src/test/java/org/elasticsearch/index/query/spanWithin.json new file mode 100644 index 00000000000..7cf767cdf12 --- /dev/null +++ b/src/test/java/org/elasticsearch/index/query/spanWithin.json @@ -0,0 +1,14 @@ +{ + span_within:{ + big:{ + span_term:{ + age:34 + } + }, + little:{ + span_term:{ + age:35 + } + } + } +} \ No newline at end of file From dfe1d1463ce9a79af1215252372e1fc00d53d732 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Thu, 30 Apr 2015 23:46:37 -0400 Subject: [PATCH 230/236] fix doc typo --- docs/reference/query-dsl/queries/span-containing-query.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/query-dsl/queries/span-containing-query.asciidoc b/docs/reference/query-dsl/queries/span-containing-query.asciidoc index 3464ff656bf..965bf855b6f 100644 --- a/docs/reference/query-dsl/queries/span-containing-query.asciidoc +++ b/docs/reference/query-dsl/queries/span-containing-query.asciidoc @@ -1,7 +1,7 @@ [[query-dsl-span-containing-query]] === Span Containing Query -Returns matches which enclose another span query. The span within +Returns matches which enclose another span query. The span containing query maps to Lucene `SpanContainingQuery`. Here is an example: [source,js] From 16c7689355ecfe3132acd6a03d8a426d8608fbd6 Mon Sep 17 00:00:00 2001 From: Shay Banon Date: Fri, 1 May 2015 19:03:14 +0200 Subject: [PATCH 231/236] Exclude jackson-databind dependency the jackson yaml data format pulls in the databind dependency, its important that we exclude it so we won't use any of its classes by mistake --- pom.xml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pom.xml b/pom.xml index a3611822d9a..432aa22744e 100644 --- a/pom.xml +++ b/pom.xml @@ -267,6 +267,12 @@ jackson-dataformat-yaml 2.5.1 compile + + + com.fasterxml.jackson.core + jackson-databind + + From c28bf3bb3f9c46ad7e20dcf0d6251db02a9e1697 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Fri, 1 May 2015 20:37:26 +0200 Subject: [PATCH 232/236] Docs: Updated elasticsearch.org links to elastic.co --- docs/community/clients.asciidoc | 10 +- docs/community/index.asciidoc | 2 +- docs/groovy-api/index.asciidoc | 4 +- docs/java-api/index.asciidoc | 2 +- docs/javascript/index.asciidoc | 138 ------------------ docs/reference/getting-started.asciidoc | 8 +- docs/reference/migration/migrate_1_0.asciidoc | 2 +- docs/reference/modules/plugins.asciidoc | 2 +- docs/reference/setup.asciidoc | 2 +- docs/reference/setup/repositories.asciidoc | 10 +- docs/reference/setup/upgrade.asciidoc | 4 +- docs/resiliency/index.asciidoc | 6 +- 12 files changed, 26 insertions(+), 164 deletions(-) delete mode 100644 docs/javascript/index.asciidoc diff --git a/docs/community/clients.asciidoc b/docs/community/clients.asciidoc index 5455440e114..e0205816ca0 100644 --- a/docs/community/clients.asciidoc +++ b/docs/community/clients.asciidoc @@ -50,13 +50,13 @@ See the {client}/ruby-api/current/index.html[official Elasticsearch Ruby client] * https://github.com/ddnexus/flex[Flex]: Ruby Client. - + * https://github.com/printercu/elastics-rb[elastics]: Tiny client with built-in zero-downtime migrations and ActiveRecord integration. - + * https://github.com/toptal/chewy[chewy]: - Chewy is ODM and wrapper for official elasticsearch client - + Chewy is ODM and wrapper for official elasticsearch client + * https://github.com/ankane/searchkick[Searchkick]: Intelligent search made easy @@ -82,7 +82,7 @@ See the {client}/php-api/current/index.html[official Elasticsearch PHP client]. * https://github.com/searchbox-io/Jest[Jest]: Java Rest client. -* There is of course the http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current/index.html[native ES Java client] +* There is of course the {client}/java-api/current/index.html[native ES Java client] [[community-javascript]] === JavaScript diff --git a/docs/community/index.asciidoc b/docs/community/index.asciidoc index 88135d89563..48b2f2ad8c1 100644 --- a/docs/community/index.asciidoc +++ b/docs/community/index.asciidoc @@ -1,6 +1,6 @@ = Community Supported Clients -:client: http://www.elasticsearch.org/guide/en/elasticsearch/client +:client: http://www.elastic.co/guide/en/elasticsearch/client include::clients.asciidoc[] diff --git a/docs/groovy-api/index.asciidoc b/docs/groovy-api/index.asciidoc index 5ab4bf61318..5e06cd1f2f4 100644 --- a/docs/groovy-api/index.asciidoc +++ b/docs/groovy-api/index.asciidoc @@ -1,6 +1,6 @@ = Groovy API -:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current -:java: http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current +:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current +:java: http://www.elastic.co/guide/en/elasticsearch/client/java-api/current [preface] == Preface diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index e626fcad7bd..6145e2918d8 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -1,6 +1,6 @@ [[java-api]] = Java API -:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current +:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current [preface] == Preface diff --git a/docs/javascript/index.asciidoc b/docs/javascript/index.asciidoc deleted file mode 100644 index 67a2a73a2e6..00000000000 --- a/docs/javascript/index.asciidoc +++ /dev/null @@ -1,138 +0,0 @@ -= elasticsearch-js - -== Overview - -Official low-level client for Elasticsearch. Its goal is to provide common -ground for all Elasticsearch-related code in JavaScript; because of this it tries -to be opinion-free and very extendable. - -The full documentation is available at http://elasticsearch.github.io/elasticsearch-js - - -=== Getting the Node.js module - -To install the module into an existing Node.js project use npm: - -[source,sh] ------------------------------------- -npm install elasticsearch ------------------------------------- - -=== Getting the browser client - -For a browser-based projects, builds for modern browsers are available http://elasticsearch.github.io/elasticsearch-js#browser-builds[here]. Download one of the archives and extract it, inside you'll find three files, pick the one that best matches your environment: - - * elasticsearch.jquery.js - for projects that already use jQuery - * elasticsearch.angular.js - for Angular projects - * elasticsearch.js - generic build for all other projects - -Each of the library specific builds tie into the AJAX and Promise creation facilities provided by their respective libraries. This is an example of how Elasticsearch.js can be extended to provide a more opinionated approach when appropriate. - -=== Setting up the client - -Now you are ready to get busy! First thing you'll need to do is create an instance of `elasticsearch.Client`. Here are several examples of configuration parameters you can use when creating that instance. For a full list of configuration options see http://elasticsearch.github.io/elasticsearch-js/index.html#configuration[the configuration docs]. - -[source,javascript] ------------------------------------- -var elasticsearch = require('elasticsearch'); - -// Connect to localhost:9200 and use the default settings -var client = new elasticsearch.Client(); - -// Connect the client to two nodes, requests will be -// load-balanced between them using round-robin -var client = elasticsearch.Client({ - hosts: [ - 'elasticsearch1:9200', - 'elasticsearch2:9200' - ] -}); - -// Connect to the this host's cluster, sniff -// for the rest of the cluster right away, and -// again every 5 minutes -var client = elasticsearch.Client({ - host: 'elasticsearch1:9200', - sniffOnStart: true, - sniffInterval: 300000 -}); - -// Connect to this host using https, basic auth, -// a path prefix, and static query string values -var client = new elasticsearch.Client({ - host: 'https://user:password@elasticsearch1/search?app=blog' -}); ------------------------------------- - - -=== Setting up the client in the browser - -The params accepted by the `Client` constructor are the same in the browser versions of the client, but how you access the Client constructor is different based on the build you are using. Below is an example of instantiating a client in each build. - -[source,javascript] ------------------------------------- -// elasticsearch.js adds the elasticsearch namespace to the window -var client = elasticsearch.Client({ ... }); - -// elasticsearch.jquery.js adds the es namespace to the jQuery object -var client = jQuery.es.Client({ ... }); - -// elasticsearch.angular.js creates an elasticsearch -// module, which provides an esFactory -var app = angular.module('app', ['elasticsearch']); -app.service('es', function (esFactory) { - return esFactory({ ... }); -}); ------------------------------------- - -=== Using the client instance to make API calls. - -Once you create the client, making API calls is simple. - -[source,javascript] ------------------------------------- -// get the current status of the entire cluster. -// Note: params are always optional, you can just send a callback -client.cluster.health(function (err, resp) { - if (err) { - console.error(err.message); - } else { - console.dir(resp); - } -}); - -// index a document -client.index({ - index: 'blog', - type: 'post', - id: 1, - body: { - title: 'JavaScript Everywhere!', - content: 'It all started when...', - date: '2013-12-17' - } -}, function (err, resp) { - // ... -}); - -// search for documents (and also promises!!) -client.search({ - index: 'users', - size: 50, - body: { - query: { - match: { - profile: 'elasticsearch' - } - } - } -}).then(function (resp) { - var hits = resp.body.hits; -}); ------------------------------------- - -== Copyright and License - -This software is Copyright (c) 2013-2015 by Elasticsearch BV. - -This is free software, licensed under The Apache License Version 2.0. diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index d5e9adbbf8b..358c5f09ef8 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -89,7 +89,7 @@ The number of shards and replicas can be defined per index at the time the index By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index. -NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. +NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. You can monitor shard sizes using the <> api. With that out of the way, let's get started with the fun part... @@ -104,13 +104,13 @@ java -version echo $JAVA_HOME -------------------------------------------------- -Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elasticsearch.org/download[`www.elasticsearch.org/download`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file. +Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file. Let's download the Elasticsearch {version} tar as follows (Windows users should download the zip package): ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -curl -L -O https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz +curl -L -O https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz -------------------------------------------------- Then extract it as follows (Windows users should unzip the zip package): @@ -868,7 +868,7 @@ In the previous section, we skipped over a little detail called the document sco All queries in Elasticsearch trigger computation of the relevance scores. In cases where we do not need the relevance scores, Elasticsearch provides another query capability in the form of <. Filters are similar in concept to queries except that they are optimized for much faster execution speeds for two primary reasons: * Filters do not score so they are faster to execute than queries -* Filters can be http://www.elasticsearch.org/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries +* Filters can be http://www.elastic.co/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries To understand filters, let's first introduce the <>, which allows you to combine a query (like `match_all`, `match`, `bool`, etc.) together with a filter. As an example, let's introduce the <>, which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc index aca40b33efb..f6cfd4f92a9 100644 --- a/docs/reference/migration/migrate_1_0.asciidoc +++ b/docs/reference/migration/migrate_1_0.asciidoc @@ -362,7 +362,7 @@ in the query string. === Percolator The percolator has been redesigned and because of this the dedicated `_percolator` index is no longer used by the percolator, -but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elasticsearch.org/blog/percolator-redesign-blog-post/[redesigned percolator] +but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elastic.co/blog/percolator-redesign-blog-post[redesigned percolator] blog post for the reasons why the percolator has been redesigned. Elasticsearch will *not* delete the `_percolator` index when upgrading, only the percolate api will not use the queries diff --git a/docs/reference/modules/plugins.asciidoc b/docs/reference/modules/plugins.asciidoc index 25f01a4715e..c06fc9c6e57 100644 --- a/docs/reference/modules/plugins.asciidoc +++ b/docs/reference/modules/plugins.asciidoc @@ -26,7 +26,7 @@ plugin --install // ----------------------------------- The plugins will be -automatically downloaded in this case from `download.elasticsearch.org`, +automatically downloaded in this case from `download.elastic.co`, and in case they don't exist there, from maven (central and sonatype). Note that when the plugin is located in maven central or sonatype diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index b91d8ea17bb..f0d8fdff4d3 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -4,7 +4,7 @@ [partintro] -- This section includes information on how to setup *elasticsearch* and -get it running. If you haven't already, http://www.elasticsearch.org/download[download] it, and +get it running. If you haven't already, http://www.elastic.co/downloads[download] it, and then check the <> docs. NOTE: Elasticsearch can also be installed from our repositories using `apt` or `yum`. diff --git a/docs/reference/setup/repositories.asciidoc b/docs/reference/setup/repositories.asciidoc index f8fe939604c..3bf693d33ea 100644 --- a/docs/reference/setup/repositories.asciidoc +++ b/docs/reference/setup/repositories.asciidoc @@ -22,14 +22,14 @@ Download and install the Public Signing Key: [source,sh] -------------------------------------------------- -wget -qO - https://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add - +wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add - -------------------------------------------------- Add the repository definition to your `/etc/apt/sources.list` file: ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -echo "deb http://packages.elasticsearch.org/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list +echo "deb http://packages.elastic.co/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list -------------------------------------------------- [WARNING] @@ -65,7 +65,7 @@ Download and install the public signing key: [source,sh] -------------------------------------------------- -rpm --import https://packages.elasticsearch.org/GPG-KEY-elasticsearch +rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch -------------------------------------------------- Add the following in your `/etc/yum.repos.d/` directory @@ -75,9 +75,9 @@ in a file with a `.repo` suffix, for example `elasticsearch.repo` -------------------------------------------------- [elasticsearch-{branch}] name=Elasticsearch repository for {branch}.x packages -baseurl=http://packages.elasticsearch.org/elasticsearch/{branch}/centos +baseurl=http://packages.elastic.co/elasticsearch/{branch}/centos gpgcheck=1 -gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch +gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch enabled=1 -------------------------------------------------- diff --git a/docs/reference/setup/upgrade.asciidoc b/docs/reference/setup/upgrade.asciidoc index 3a87a049563..9f9e745808f 100644 --- a/docs/reference/setup/upgrade.asciidoc +++ b/docs/reference/setup/upgrade.asciidoc @@ -69,7 +69,7 @@ $ curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{ [float] ==== 1.0 and later -To back up a running 1.0 or later system, it is simplest to use the snapshot feature. Complete instructions for backup and restore with snapshots are available http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-snapshots.html[here]. +To back up a running 1.0 or later system, it is simplest to use the snapshot feature. See the complete instructions for <>. [float] [[rolling-upgrades]] @@ -96,7 +96,7 @@ This syntax applies to Elasticsearch 1.0 and later: * Confirm that all shards are correctly reallocated to the remaining running nodes. -* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elasticsearch.org: +* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elastic.co: ** Extract the zip or tarball to a new directory, usually in the same volume as the current Elasticsearch installation. Do not overwrite the existing installation, as the downloaded archive will contain a default elasticsearch.yml file and will overwrite your existing configuration. ** Copy the configuration files from the old Elasticsearch installation's config directory to the new Elasticsearch installation's config directory. Move data files from the old Elasticsesarch installation's data directory if necessary. If data files are not located within the tarball's extraction directory, they will not have to be moved. ** The simplest solution for moving from one version to another is to have a symbolic link for 'elasticsearch' that points to the currently running version. This link can be easily updated and will provide a stable access point to the most recent version. Update this symbolic link if it is being used. diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index 4618a1d94b9..d52e8804392 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -22,10 +22,10 @@ improvements throughout this page to provide the full context. If you’re interested in more on how we approach ensuring resiliency in Elasticsearch, you may be interested in Igor Motov’s recent talk -http://www.elasticsearch.org/videos/improving-elasticsearch-resiliency/[Improving Elasticsearch Resiliency]. +http://www.elastic.co/videos/improving-elasticsearch-resiliency[Improving Elasticsearch Resiliency]. You may also be interested in our blog post -http://www.elasticsearch.org/blog/resiliency-elasticsearch/[Resiliency in Elasticsearch], +http://www.elastic.co/blog/resiliency-elasticsearch[Resiliency in Elasticsearch], which details our thought processes when addressing resiliency in both Elasticsearch and the work our developers do upstream in Apache Lucene. @@ -416,7 +416,7 @@ The Snapshot/Restore API supports a number of different repository types for sto [float] === Circuit Breaker: Fielddata (STATUS: DONE, v1.0.0) -Currently, the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0. +Currently, the https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0. [float] === Use of Paginated Data Structures to Ease Garbage Collection (STATUS: DONE, v1.0.0 & v1.2.0) From df1914cb21d696790875c11c79c521132f424304 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Fri, 1 May 2015 21:30:24 +0200 Subject: [PATCH 233/236] Java API docs: Removed mlt-field --- docs/java-api/query-dsl-queries.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/java-api/query-dsl-queries.asciidoc b/docs/java-api/query-dsl-queries.asciidoc index afded7d9785..92e0982d4e5 100644 --- a/docs/java-api/query-dsl-queries.asciidoc +++ b/docs/java-api/query-dsl-queries.asciidoc @@ -234,7 +234,7 @@ QueryBuilder qb = matchAllQuery(); [[mlt]] -=== More Like This (Field) Query (mlt and mlt_field) +=== More Like This Query (mlt) See: * {ref}/query-dsl-mlt-query.html[More Like This Query] From 9a1f11da6ec2790da14641791a1825f6eb924445 Mon Sep 17 00:00:00 2001 From: Karel Minarik Date: Sat, 2 May 2015 12:54:22 +0200 Subject: [PATCH 234/236] Trimmed the main `elasticsearch.yml` configuration file The main `elasticsearch.yml` file mixed configuration, documentation and advice together. Due to a much improved documentation at , the content has been trimmed, and only the essential settings have been left, to prevent the urge to excessive over-configuration. Related: 8d0f1a7d123f579fc772e82ef6b9aae08f6d13fd --- config/elasticsearch.yml | 375 ++++++--------------------------------- 1 file changed, 53 insertions(+), 322 deletions(-) diff --git a/config/elasticsearch.yml b/config/elasticsearch.yml index 3384a5ee616..b3baf765b3a 100644 --- a/config/elasticsearch.yml +++ b/config/elasticsearch.yml @@ -1,368 +1,99 @@ -##################### Elasticsearch Configuration Example ##################### - -# This file contains an overview of various configuration settings, -# targeted at operations staff. Application developers should -# consult the guide at . +# ======================== Elasticsearch Configuration ========================= # -# The installation procedure is covered at -# . +# NOTE: Elasticsearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what are you trying to accomplish and the consequences. # -# Elasticsearch comes with reasonable defaults for most settings, -# so you can try it out without bothering with configuration. +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. # -# Most of the time, these defaults are just fine for running a production -# cluster. If you're fine-tuning your cluster, or wondering about the -# effect of certain configuration option, please _do ask_ on the -# mailing list or IRC channel [http://elasticsearch.org/community]. - -# Any element in the configuration can be replaced with environment variables -# by placing them in ${...} notation. For example: +# Please see the documentation for further information on configuration options: +# # -#node.rack: ${RACK_ENV_VAR} - -# For information on supported formats and syntax for the config file, see -# - - -################################### Cluster ################################### - -# Cluster name identifies your cluster for auto-discovery. If you're running -# multiple clusters on the same network, make sure you're using unique names. +# ---------------------------------- Cluster ----------------------------------- # -#cluster.name: elasticsearch - - -#################################### Node ##################################### - -# Node names are generated dynamically on startup, so you're relieved -# from configuring them manually. You can tie this node to a specific name: +# Use a descriptive name for your cluster: # -#node.name: "Franz Kafka" - -# Every node can be configured to allow or deny being eligible as the master, -# and to allow or deny to store the data. +# cluster.name: my-application # -# Allow this node to be eligible as a master node (enabled by default): +# ------------------------------------ Node ------------------------------------ # -#node.master: true +# Use a descriptive name for the node: # -# Allow this node to store data (enabled by default): +# node.name: node-1 # -#node.data: true - -# You can exploit these settings to design advanced cluster topologies. +# Add custom attributes to the node: # -# 1. You want this node to never become a master node, only to hold data. -# This will be the "workhorse" of your cluster. +# node.rack: r1 # -#node.master: false -#node.data: true +# ----------------------------------- Paths ------------------------------------ # -# 2. You want this node to only serve as a master: to not store any data and -# to have free resources. This will be the "coordinator" of your cluster. +# Path to directory where to store the data (separate multiple locations by comma): # -#node.master: true -#node.data: false +# path.data: /path/to/data # -# 3. You want this node to be neither master nor data node, but -# to act as a "search load balancer" (fetching data from nodes, -# aggregating results, etc.) -# -#node.master: false -#node.data: false - -# Use the Cluster Health API [http://localhost:9200/_cluster/health], the -# Node Info API [http://localhost:9200/_nodes] or GUI tools -# such as , -# , -# and -# to inspect the cluster state. - -# A node can have generic attributes associated with it, which can later be used -# for customized shard allocation filtering, or allocation awareness. An attribute -# is a simple key value pair, similar to node.key: value, here is an example: -# -#node.rack: rack314 - -# By default, multiple nodes are allowed to start from the same installation location -# to disable it, set the following: -#node.max_local_storage_nodes: 1 - - -#################################### Index #################################### - -# You can set a number of options (such as shard/replica options, mapping -# or analyzer definitions, translog settings, ...) for indices globally, -# in this file. -# -# Note, that it makes more sense to configure index settings specifically for -# a certain index, either when creating it or by using the index templates API. -# -# See and -# -# for more information. - -# Set the number of shards (splits) of an index (5 by default): -# -#index.number_of_shards: 5 - -# Set the number of replicas (additional copies) of an index (1 by default): -# -#index.number_of_replicas: 1 - -# Note, that for development on a local machine, with small indices, it usually -# makes sense to "disable" the distributed features: -# -#index.number_of_shards: 1 -#index.number_of_replicas: 0 - -# These settings directly affect the performance of index and search operations -# in your cluster. Assuming you have enough machines to hold shards and -# replicas, the rule of thumb is: -# -# 1. Having more *shards* enhances the _indexing_ performance and allows to -# _distribute_ a big index across machines. -# 2. Having more *replicas* enhances the _search_ performance and improves the -# cluster _availability_. -# -# The "number_of_shards" is a one-time setting for an index. -# -# The "number_of_replicas" can be increased or decreased anytime, -# by using the Index Update Settings API. -# -# Elasticsearch takes care about load balancing, relocating, gathering the -# results from nodes, etc. Experiment with different settings to fine-tune -# your setup. - -# Use the Index Status API () to inspect -# the index status. - - -#################################### Paths #################################### - -# Path to directory containing configuration (this file and logging.yml): -# -#path.conf: /path/to/conf - -# Path to directory where to store index data allocated for this node. -# -#path.data: /path/to/data -# -# Can optionally include more than one location, causing data to be striped across -# the locations (a la RAID 0) on a file level, favouring locations with most free -# space on creation. For example: -# -#path.data: /path/to/data1,/path/to/data2 - # Path to log files: # -#path.logs: /path/to/logs - -# Path to where plugins are installed: +# path.logs: /path/to/logs # -#path.plugins: /path/to/plugins - - -#################################### Plugin ################################### - -# If a plugin listed here is not installed for current node, the node will not start. +# ----------------------------------- Memory ----------------------------------- # -#plugin.mandatory: mapper-attachments,lang-groovy - - -################################### Memory #################################### - -# Elasticsearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. +# Lock the memory on startup: # -# Set this property to true to lock the memory: +# bootstrap.mlockall: true # -#bootstrap.mlockall: true - -# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set -# to the same value, and that the machine has enough memory to allocate -# for Elasticsearch, leaving enough memory for the operating system itself. +# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory +# available on the system and that the owner of the process is allowed to use this limit. # -# You should also make sure that the Elasticsearch process is allowed to lock -# the memory, eg. by using `ulimit -l unlimited`. - - -############################## Network And HTTP ############################### - -# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens -# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node -# communication. (the range means that if the port is busy, it will automatically -# try the next port). - -# Set the bind address specifically (IPv4 or IPv6): +# Elasticsearch performs poorly when the system is swapping the memory. # -#network.bind_host: 192.168.0.1 - -# Set the address other nodes will use to communicate with this node. If not -# set, it is automatically derived. It must point to an actual IP address. +# ---------------------------------- Network ----------------------------------- # -#network.publish_host: 192.168.0.1 - -# Set both 'bind_host' and 'publish_host': +# Set the bind adress to a specific IP (IPv4 or IPv6): # -#network.host: 192.168.0.1 - -# Set a custom port for the node to node communication (9300 by default): +# network.host: 192.168.0.1 # -#transport.tcp.port: 9300 - -# Enable compression for all communication between nodes (disabled by default): +# Set a custom port for HTTP: # -#transport.tcp.compress: true - -# Set a custom port to listen for HTTP traffic: +# http.port: 9200 # -#http.port: 9200 - -# Set a custom allowed content length: +# For more information, see the documentation at: +# # -#http.max_content_length: 100mb - -# Disable HTTP completely: +# ---------------------------------- Gateway ----------------------------------- # -#http.enabled: false - -################################### Gateway ################################### - -# The gateway allows for persisting the cluster state between full cluster -# restarts. Every change to the state (such as adding an index) will be stored -# in the gateway, and when the cluster starts up for the first time, -# it will read its state from the gateway. - -# For more information, see -# . - -# Settings below control how and when to start the initial recovery process on -# a full cluster restart (to reuse as much local data as possible when using shared -# gateway). - -# Allow recovery process after N nodes in a cluster are up: +# Block initial recovery after a full cluster restart until N nodes are started: # -#gateway.recover_after_nodes: 1 - -# Set the timeout to initiate the recovery process, once the N nodes -# from previous setting are up (accepts time value): +# gateway.recover_after_nodes: 3 # -#gateway.recover_after_time: 5m - -# Set how many nodes are expected in this cluster. Once these N nodes -# are up (and recover_after_nodes is met), begin recovery process immediately -# (without waiting for recover_after_time to expire): +# For more information, see the documentation at: +# # -#gateway.expected_nodes: 2 - - -############################# Recovery Throttling ############################# - -# These settings allow to control the process of shards allocation between -# nodes during initial recovery, replica allocation, rebalancing, -# or when adding and removing nodes. - -# Set the number of concurrent recoveries happening on a node: +# --------------------------------- Discovery ---------------------------------- # -# 1. During the initial recovery +# Elasticsearch nodes will find each other via multicast, by default. # -#cluster.routing.allocation.node_initial_primaries_recoveries: 4 +# To use the unicast discovery, disable the multicast discovery: # -# 2. During adding/removing nodes, rebalancing, etc +# discovery.zen.ping.multicast.enabled: false # -#cluster.routing.allocation.node_concurrent_recoveries: 2 - -# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): +# Pass an initial list of hosts to perform discovery when new node is started: # -#indices.recovery.max_bytes_per_sec: 20mb - -# Set to limit the number of open concurrent streams when -# recovering a shard from a peer: +# discovery.zen.ping.unicast.hosts: ["host1", "host2"] # -#indices.recovery.concurrent_streams: 5 - - -################################## Discovery ################################## - -# Discovery infrastructure ensures nodes can be found within a cluster -# and master node is elected. Multicast discovery is the default. - -# Set to ensure a node sees N other master eligible nodes to be considered -# operational within the cluster. This should be set to a quorum/majority of -# the master-eligible nodes in the cluster. +# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): # -#discovery.zen.minimum_master_nodes: 1 - -# Set the time to wait for ping responses from other nodes when discovering. -# Set this option to a higher value on a slow or congested network -# to minimize discovery failures: +# discovery.zen.minimum_master_nodes: 3 # -#discovery.zen.ping.timeout: 3s - -# For more information, see -# - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. +# For more information, see the documentation at: +# # -# 1. Disable multicast discovery (enabled by default): +# ---------------------------------- Various ----------------------------------- # -#discovery.zen.ping.multicast.enabled: false +# Disable starting multiple nodes on a single system: # -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: +# node.max_local_storage_nodes: 1 # -#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] - -# EC2 discovery allows to use AWS EC2 API in order to perform discovery. +# Require explicit names when deleting indices: # -# You have to install the cloud-aws plugin for enabling the EC2 discovery. -# -# For more information, see -# -# -# See -# for a step-by-step tutorial. - -# GCE discovery allows to use Google Compute Engine API in order to perform discovery. -# -# You have to install the cloud-gce plugin for enabling the GCE discovery. -# -# For more information, see . - -# Azure discovery allows to use Azure API in order to perform discovery. -# -# You have to install the cloud-azure plugin for enabling the Azure discovery. -# -# For more information, see . - -################################## Slow Log ################################## - -# Shard level query and fetch threshold logging. - -#index.search.slowlog.threshold.query.warn: 10s -#index.search.slowlog.threshold.query.info: 5s -#index.search.slowlog.threshold.query.debug: 2s -#index.search.slowlog.threshold.query.trace: 500ms - -#index.search.slowlog.threshold.fetch.warn: 1s -#index.search.slowlog.threshold.fetch.info: 800ms -#index.search.slowlog.threshold.fetch.debug: 500ms -#index.search.slowlog.threshold.fetch.trace: 200ms - -#index.indexing.slowlog.threshold.index.warn: 10s -#index.indexing.slowlog.threshold.index.info: 5s -#index.indexing.slowlog.threshold.index.debug: 2s -#index.indexing.slowlog.threshold.index.trace: 500ms - -################################## GC Logging ################################ - -#monitor.jvm.gc.young.warn: 1000ms -#monitor.jvm.gc.young.info: 700ms -#monitor.jvm.gc.young.debug: 400ms - -#monitor.jvm.gc.old.warn: 10s -#monitor.jvm.gc.old.info: 5s -#monitor.jvm.gc.old.debug: 2s +# action.destructive_requires_name: true From b72f27a410fb8e4b21cdedd7485b317add305597 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 24 Apr 2015 20:59:22 +0200 Subject: [PATCH 235/236] Core: Cut over to the Lucene filter cache. This removes Elasticsearch's filter cache and uses Lucene's instead. It has some implications: - custom cache keys (`_cache_key`) are unsupported - decisions are made internally and can't be overridden by users ('_cache`) - not only filters can be cached but also all queries that do not need scores - parent/child queries can now be cached, however cached entries are only valid for the current top-level reader so in practice it will likely only be used on read-only indices - the cache deduplicates filters, which plays nicer with large keys (eg. `terms`) - better stats: we already had ram usage and evictions, but now also hit count, miss count, lookup count, number of cached doc id sets and current number of doc id sets in the cache - dynamically changing the filter cache size is not supported anymore Internally, an important change is that it removes the NoCacheFilter infrastructure in favour of making Query.rewrite specializing the query for the current reader so that it will only be cached on this reader (look for IndexCacheableQuery). Note that consuming filters with the query API (createWeight/scorer) instead of the filter API (getDocIdSet) is important for parent/child queries because otherwise a QueryWrapperFilter(ParentQuery) would run the wrapped query per segment while relations might be cross segments. --- dev-tools/forbidden/core-signatures.txt | 3 - .../cluster/update-settings.asciidoc | 3 - docs/reference/migration/migrate_2_0.asciidoc | 6 + docs/reference/query-dsl/filters.asciidoc | 85 +--- .../query-dsl/filters/and-filter.asciidoc | 37 -- .../filters/geo-bounding-box-filter.asciidoc | 8 - .../filters/geo-distance-filter.asciidoc | 8 - .../filters/geo-polygon-filter.asciidoc | 8 - .../filters/geo-shape-filter.asciidoc | 9 - .../filters/geohash-cell-filter.asciidoc | 7 - .../filters/has-child-filter.asciidoc | 6 - .../filters/has-parent-filter.asciidoc | 6 - .../query-dsl/filters/nested-filter.asciidoc | 8 +- .../query-dsl/filters/not-filter.asciidoc | 30 -- .../query-dsl/filters/or-filter.asciidoc | 33 -- .../query-dsl/filters/prefix-filter.asciidoc | 19 - .../query-dsl/filters/query-filter.asciidoc | 31 -- .../query-dsl/filters/range-filter.asciidoc | 8 - .../query-dsl/filters/regexp-filter.asciidoc | 4 +- .../query-dsl/filters/script-filter.asciidoc | 8 - .../query-dsl/filters/term-filter.asciidoc | 19 - .../query-dsl/filters/terms-filter.asciidoc | 14 +- rest-api-spec/api/indices.clear_cache.json | 4 - .../CustomPostingsHighlighter.java | 5 +- .../cluster/stats/ClusterStatsIndices.java | 1 + .../cache/clear/ClearIndicesCacheRequest.java | 12 - .../ClearIndicesCacheRequestBuilder.java | 5 - .../clear/ShardClearIndicesCacheRequest.java | 8 - .../TransportClearIndicesCacheAction.java | 4 - .../ClusterDynamicSettingsModule.java | 4 - .../common/lucene/IndexCacheableQuery.java | 74 +++ .../common/lucene/ShardCoreKeyMap.java | 109 +++++ .../common/lucene/docset/DocIdSets.java | 44 +- .../common/lucene/search/CachedFilter.java | 32 -- .../lucene/search/FilteredCollector.java | 8 +- .../common/lucene/search/NoCacheFilter.java | 79 ---- .../common/lucene/search/NoCacheQuery.java | 36 -- .../common/lucene/search/Queries.java | 31 +- .../lucene/search/ResolvableFilter.java | 11 + .../function/FiltersFunctionScoreQuery.java | 18 +- .../org/elasticsearch/index/IndexService.java | 2 +- .../index/aliases/IndexAliasesService.java | 4 +- .../elasticsearch/index/cache/IndexCache.java | 9 +- .../index/cache/bitset/BitsetFilterCache.java | 2 - .../index/cache/filter/FilterCache.java | 18 +- .../index/cache/filter/FilterCacheModule.java | 4 +- .../index/cache/filter/FilterCacheStats.java | 95 +++- .../index/cache/filter/ShardFilterCache.java | 34 +- .../cache/filter/index/IndexFilterCache.java | 63 +++ .../cache/filter/none/NoneFilterCache.java | 29 +- .../filter/weighted/WeightedFilterCache.java | 277 ----------- .../index/cache/query/ShardQueryCache.java | 6 +- .../index/engine/EngineConfig.java | 26 +- .../index/engine/EngineSearcherFactory.java | 4 +- .../index/engine/InternalEngine.java | 8 +- .../index/mapper/DocumentMapper.java | 24 +- .../index/mapper/MapperService.java | 15 +- .../mapper/core/AbstractFieldMapper.java | 12 +- .../index/mapper/core/BooleanFieldMapper.java | 4 +- .../index/mapper/core/ByteFieldMapper.java | 6 +- .../index/mapper/core/DateFieldMapper.java | 11 +- .../index/mapper/core/DoubleFieldMapper.java | 8 +- .../index/mapper/core/FloatFieldMapper.java | 6 +- .../index/mapper/core/IntegerFieldMapper.java | 6 +- .../index/mapper/core/LongFieldMapper.java | 6 +- .../index/mapper/core/ShortFieldMapper.java | 8 +- .../index/mapper/internal/IdFieldMapper.java | 10 +- .../mapper/internal/ParentFieldMapper.java | 9 +- .../mapper/internal/TypeFieldMapper.java | 8 +- .../index/mapper/ip/IpFieldMapper.java | 6 +- .../index/mapper/object/ObjectMapper.java | 4 +- .../percolator/PercolatorQueriesRegistry.java | 12 +- .../index/query/AndFilterBuilder.java | 22 - .../index/query/AndFilterParser.java | 20 +- .../index/query/BoolFilterBuilder.java | 22 - .../index/query/BoolFilterParser.java | 20 +- .../index/query/ConstantScoreQueryParser.java | 15 +- .../index/query/ExistsFilterParser.java | 7 +- .../index/query/FQueryFilterParser.java | 17 +- .../index/query/FilterBuilder.java | 1 - .../index/query/FilteredQueryParser.java | 15 +- .../query/GeoBoundingBoxFilterBuilder.java | 22 - .../query/GeoBoundingBoxFilterParser.java | 15 +- .../index/query/GeoDistanceFilterBuilder.java | 22 - .../index/query/GeoDistanceFilterParser.java | 13 +- .../query/GeoDistanceRangeFilterBuilder.java | 22 - .../query/GeoDistanceRangeFilterParser.java | 13 +- .../index/query/GeoPolygonFilterBuilder.java | 23 +- .../index/query/GeoPolygonFilterParser.java | 13 +- .../index/query/GeoShapeFilterBuilder.java | 31 -- .../index/query/GeoShapeFilterParser.java | 18 +- .../index/query/GeohashCellFilter.java | 41 +- .../index/query/HasChildFilterParser.java | 16 +- .../index/query/HasChildQueryParser.java | 7 +- .../index/query/HasParentFilterBuilder.java | 14 - .../index/query/HasParentFilterParser.java | 12 +- .../index/query/HasParentQueryParser.java | 10 +- .../index/query/IdsFilterParser.java | 3 +- .../index/query/IndexQueryParserService.java | 9 - .../index/query/MissingFilterParser.java | 16 +- .../index/query/NestedFilterBuilder.java | 21 - .../index/query/NestedFilterParser.java | 16 +- .../index/query/NotFilterBuilder.java | 13 - .../index/query/NotFilterParser.java | 17 +- .../index/query/OrFilterBuilder.java | 22 - .../index/query/OrFilterParser.java | 18 +- .../index/query/PrefixFilterBuilder.java | 22 - .../index/query/PrefixFilterParser.java | 15 +- .../index/query/QueryFilterBuilder.java | 15 +- .../index/query/QueryFilterParser.java | 4 +- .../index/query/QueryParseContext.java | 116 +---- .../index/query/RangeFilterBuilder.java | 22 - .../index/query/RangeFilterParser.java | 17 +- .../index/query/RegexpFilterBuilder.java | 21 - .../index/query/RegexpFilterParser.java | 18 +- .../index/query/ScriptFilterBuilder.java | 22 - .../index/query/ScriptFilterParser.java | 21 +- .../index/query/TermFilterBuilder.java | 22 - .../index/query/TermFilterParser.java | 24 +- .../index/query/TermsFilterBuilder.java | 22 - .../index/query/TermsFilterParser.java | 17 +- .../index/query/TermsLookupFilterBuilder.java | 18 - .../index/query/TopChildrenQueryParser.java | 6 +- .../index/query/TypeFilterParser.java | 6 +- .../child/ChildrenConstantScoreQuery.java | 50 +- .../index/search/child/ChildrenQuery.java | 63 +-- .../child/CustomQueryWrappingFilter.java | 136 ------ .../child/ParentConstantScoreQuery.java | 50 +- .../index/search/child/ParentIdsFilter.java | 10 +- .../index/search/child/ParentQuery.java | 113 +++-- .../index/search/child/TopChildrenQuery.java | 65 +-- .../geo/IndexedGeoBoundingBoxFilter.java | 6 +- .../elasticsearch/index/shard/IndexShard.java | 6 +- .../indices/NodeIndicesStats.java | 1 + .../cache/filter/IndicesFilterCache.java | 432 +++++++++++------- .../MultiDocumentPercolatorIndex.java | 4 +- .../percolator/PercolateContext.java | 5 - .../percolator/PercolatorService.java | 4 +- .../clear/RestClearIndicesCacheAction.java | 4 - .../elasticsearch/search/SearchService.java | 9 +- .../search/aggregations/AggregationPhase.java | 11 +- .../bucket/children/ChildrenParser.java | 4 +- .../children/ParentToChildrenAggregator.java | 45 +- .../bucket/filter/FilterAggregator.java | 11 +- .../bucket/filters/FiltersAggregator.java | 32 +- .../bucket/nested/NestedAggregator.java | 11 +- .../bucket/nested/NestedParser.java | 2 +- .../search/dfs/CachedDfSource.java | 5 +- .../search/fetch/FetchPhase.java | 2 +- .../search/fetch/FetchSubPhase.java | 13 - .../fetch/innerhits/InnerHitsContext.java | 27 +- .../search/highlight/PostingsHighlighter.java | 2 +- .../search/internal/ContextIndexSearcher.java | 10 +- .../search/internal/DefaultSearchContext.java | 18 +- .../internal/FilteredSearchContext.java | 10 +- .../search/internal/SearchContext.java | 10 +- .../search/lookup/LeafIndexLookup.java | 24 +- .../search/sort/GeoDistanceSortParser.java | 4 +- .../search/sort/ScriptSortParser.java | 4 +- .../search/sort/SortParseElement.java | 4 +- .../CustomPostingsHighlighterTests.java | 36 +- .../XPostingsHighlighterTests.java | 2 - .../aliases/IndexAliasesTests.java | 4 +- ...TimeDataHistogramAggregationBenchmark.java | 3 +- .../settings/ClusterSettingsTests.java | 3 +- .../lucene/IndexCacheableQueryTests.java | 140 ++++++ .../common/lucene/ShardCoreKeyMapTests.java | 137 ++++++ .../lucene/index/FreqTermsEnumTests.java | 3 +- .../count/query/CountQueryTests.java | 20 - .../aliases/IndexAliasesServiceTests.java | 12 +- .../cache/bitset/BitSetFilterCacheTest.java | 4 +- .../index/engine/InternalEngineTests.java | 12 +- .../index/engine/ShadowEngineTests.java | 3 +- .../query/SimpleIndexQueryParserTests.java | 83 ++-- .../search/child/AbstractChildTests.java | 4 - .../ChildrenConstantScoreQueryTests.java | 5 +- .../search/child/ChildrenQueryTests.java | 3 +- .../child/ParentConstantScoreQueryTests.java | 4 +- .../index/search/child/ParentQueryTests.java | 4 +- .../AbstractNumberNestedSortingTests.java | 9 +- .../nested/DoubleNestedSortingTests.java | 3 +- .../nested/FloatNestedSortingTests.java | 3 +- .../search/nested/NestedSortingTests.java | 11 +- .../cache/query/IndicesQueryCacheTests.java | 1 + .../indices/stats/IndexStatsTests.java | 126 +++-- .../template/SimpleIndexTemplateTests.java | 2 +- .../warmer/SimpleIndicesWarmerTests.java | 1 + .../bucket/nested/NestedAggregatorTest.java | 3 +- .../child/SimpleChildQuerySearchTests.java | 163 +------ .../innerhits/NestedChildrenFilterTest.java | 6 +- .../functionscore/FunctionScoreTests.java | 4 +- .../search/geo/GeoFilterTests.java | 15 - .../search/query/SearchQueryTests.java | 4 +- .../scriptfilter/ScriptFilterSearchTests.java | 63 +-- .../search/sort/SimpleSortTests.java | 6 +- .../test/ElasticsearchIntegrationTest.java | 2 - .../test/ElasticsearchTestCase.java | 15 - .../test/InternalTestCluster.java | 4 +- .../elasticsearch/test/TestSearchContext.java | 9 - .../test/engine/MockEngineSupport.java | 10 +- 200 files changed, 1734 insertions(+), 2970 deletions(-) create mode 100644 src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java create mode 100644 src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java delete mode 100644 src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java delete mode 100644 src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java delete mode 100644 src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java create mode 100644 src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java delete mode 100644 src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java delete mode 100644 src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java create mode 100644 src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java create mode 100644 src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java diff --git a/dev-tools/forbidden/core-signatures.txt b/dev-tools/forbidden/core-signatures.txt index 2a662a60974..acd66985081 100644 --- a/dev-tools/forbidden/core-signatures.txt +++ b/dev-tools/forbidden/core-signatures.txt @@ -39,9 +39,6 @@ org.apache.lucene.index.IndexReader#decRef() org.apache.lucene.index.IndexReader#incRef() org.apache.lucene.index.IndexReader#tryIncRef() -@defaultMessage QueryWrapperFilter is cacheable by default - use Queries#wrap instead -org.apache.lucene.search.QueryWrapperFilter#(org.apache.lucene.search.Query) - @defaultMessage Pass the precision step from the mappings explicitly instead org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean) org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean) diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 438b04d4094..a0f7bbaa976 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -153,9 +153,6 @@ due to forced awareness or allocation filtering. `indices.cache.filter.size`:: See <> -`indices.cache.filter.expire` (time):: - See <> - [float] ==== TTL interval diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 5435f4df2fd..292bb633a29 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -418,6 +418,12 @@ favour or `bool`. The `execution` option of the `terms` filter is now deprecated and ignored if provided. +The `_cache` and `_cache_key` parameters of filters are deprecated in the REST +layer and removed in the Java API. In case they are specified they will be +ignored. Instead filters are always used as their own cache key and elasticsearch +makes decisions by itself about whether it should cache filters based on how +often they are used. + === Snapshot and Restore The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close` are no longer diff --git a/docs/reference/query-dsl/filters.asciidoc b/docs/reference/query-dsl/filters.asciidoc index 0c78dd21934..59a4a06caec 100644 --- a/docs/reference/query-dsl/filters.asciidoc +++ b/docs/reference/query-dsl/filters.asciidoc @@ -10,85 +10,14 @@ As a general rule, filters should be used instead of queries: [[caching]] === Filters and Caching -Filters can be a great candidate for caching. Caching the result of a -filter does not require a lot of memory, and will cause other queries -executing against the same filter (same parameters) to be blazingly -fast. +Filters can be a great candidate for caching. Caching the document set that +a filter matches does not require much memory and can help improve +execution speed of queries. -However the cost of caching is not the same for all filters. For -instance some filters are already fast out of the box while caching could -add significant overhead, and some filters produce results that are already -cacheable so caching them is just a matter of putting the result in the -cache. - -The default caching policy, `_cache: auto`, tracks the 1000 most recently -used filters on a per-index basis and makes decisions based on their -frequency. - -[float] -==== Filters that read directly the index structure - -Some filters can directly read the index structure and potentially jump -over large sequences of documents that are not worth evaluating (for -instance when these documents do not match the query). Caching these -filters introduces overhead given that all documents that the filter -matches need to be consumed in order to be loaded into the cache. - -These filters, which include the <> and -<> filters, are only cached after they -appear 5 times or more in the history of the 1000 most recently used -filters. - -[float] -==== Filters that produce results that are already cacheable - -Some filters produce results that are already cacheable, and the difference -between caching and not caching them is the act of placing the result in -the cache or not. These filters, which include the -<>, -<>, and -<> filters, are by default cached after they -appear twice or more in the history of the most 1000 recently used filters. - -[float] -==== Computational filters - -Some filters need to run some computation in order to figure out whether -a given document matches a filter. These filters, which include the geo and -<> filters, but also the -<> and <> -filters when using the `fielddata` execution mode are never cached by default, -as it would require to evaluate the filter on all documents in your indices -while they can otherwise be only evaluated on documents that match the query. - -[float] -==== Compound filters - -The last type of filters are those working with other filters, and includes -the <>, -<>, -<> and -<> filters. - -There is no general rule about these filters. Depending on the filters that -they wrap, they will sometimes return a filter that dynamically evaluates the -sub filters and sometimes evaluate the sub filters eagerly in order to return -a result that is already cacheable, so depending on the case, these filters -will be cached after they appear 2+ or 5+ times in the history of the most -1000 recently used filters. - -[float] -==== Overriding the default behaviour - -All filters allow to set `_cache` element on them to explicitly control -caching. It accepts 3 values: `true` in order to cache the filter, `false` -to make sure that the filter will not be cached, and `auto`, which is the -default and will decide on whether to cache the filter based on the cost -to cache it and how often it has been used as explained above. - -Filters also allow to set `_cache_key` which will be used as the -caching key for that filter. This can be handy when using very large -filters (like a terms filter with many elements in it). +Elasticsearch decides to cache filters based on how often they are used. For +this reason you might occasionally see better performance by splitting +complex filters into a static part that Elasticsearch will cache and a dynamic +part which is least costly than the original filter. include::filters/and-filter.asciidoc[] diff --git a/docs/reference/query-dsl/filters/and-filter.asciidoc b/docs/reference/query-dsl/filters/and-filter.asciidoc index 043a62e68bf..9484d4bf999 100644 --- a/docs/reference/query-dsl/filters/and-filter.asciidoc +++ b/docs/reference/query-dsl/filters/and-filter.asciidoc @@ -32,40 +32,3 @@ filters. Can be placed within queries that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of -reuse. It is possible to opt-in explicitely for caching by setting `_cache` -to `true`. Since the `_cache` element requires to be set on the `and` filter -itself, the structure then changes a bit to have the filters provided within a -`filters` element: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "and" : { - "filters": [ - { - "range" : { - "postDate" : { - "from" : "2010-03-01", - "to" : "2010-04-01" - } - } - }, - { - "prefix" : { "name.second" : "ba" } - } - ], - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc b/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc index 7f16ec562d9..748756d7857 100644 --- a/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc @@ -230,11 +230,3 @@ are not supported. Here is an example: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same bounding box parameters are used on several (many) other -queries. Note, the process of caching the first execution is higher when -caching (since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc b/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc index 670245a11a3..11ab6ccaa66 100644 --- a/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc @@ -172,11 +172,3 @@ The `geo_distance` filter can work with multiple locations / points per document. Once a single location / point matches the filter, the document will be included in the filter. -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same point and distance parameters are used on several (many) other -queries. Note, the process of caching the first execution is higher when -caching (since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc b/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc index a4212343eff..22bcb3fce31 100644 --- a/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc @@ -116,11 +116,3 @@ The filter *requires* the <> type to be set on the relevant field. -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same points parameters are used on several (many) other queries. -Note, the process of caching the first execution is higher when caching -(since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc b/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc index dfe06932bbd..ca1df1ea995 100644 --- a/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc @@ -110,12 +110,3 @@ shape: } -------------------------------------------------- -[float] -==== Caching - -The result of the Filter is not cached by default. Setting `_cache` to -`true` will mean the results of the Filter will be cached. Since shapes -can contain 10s-100s of coordinates and any one differing means a new -shape, it may make sense to only using caching when you are sure that -the shapes will remain reasonably static. - diff --git a/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc b/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc index cd77803f53f..5f55936c616 100644 --- a/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc @@ -61,10 +61,3 @@ next to the given cell. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The -`_cache` parameter can be set to `true` to turn caching on. -By default the filter uses the resulting geohash cells as a cache key. -This can be changed by using the `_cache_key` option. diff --git a/docs/reference/query-dsl/filters/has-child-filter.asciidoc b/docs/reference/query-dsl/filters/has-child-filter.asciidoc index 2605505a792..4802a5c07fa 100644 --- a/docs/reference/query-dsl/filters/has-child-filter.asciidoc +++ b/docs/reference/query-dsl/filters/has-child-filter.asciidoc @@ -88,9 +88,3 @@ APIS, eg: curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human" -------------------------------------------------- -[float] -==== Caching - -The `has_child` filter cannot be cached in the filter cache. The `_cache` -and `_cache_key` options are a no-op in this filter. Also any filter that -wraps the `has_child` filter either directly or indirectly will not be cached. diff --git a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc index 345e69258bc..dc708cceda3 100644 --- a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc +++ b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc @@ -63,9 +63,3 @@ APIS, eg: curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human" -------------------------------------------------- -[float] -==== Caching - -The `has_parent` filter cannot be cached in the filter cache. The `_cache` -and `_cache_key` options are a no-op in this filter. Also any filter that -wraps the `has_parent` filter either directly or indirectly will not be cached. diff --git a/docs/reference/query-dsl/filters/nested-filter.asciidoc b/docs/reference/query-dsl/filters/nested-filter.asciidoc index 584e26e04f6..41e14cd00c4 100644 --- a/docs/reference/query-dsl/filters/nested-filter.asciidoc +++ b/docs/reference/query-dsl/filters/nested-filter.asciidoc @@ -2,10 +2,7 @@ === Nested Filter A `nested` filter works in a similar fashion to the -<> query, except it's -used as a filter. It follows exactly the same structure, but also allows -to cache the results (set `_cache` to `true`), and have it named (set -the `_name` value). For example: +<> query. For example: [source,js] -------------------------------------------------- @@ -26,8 +23,7 @@ the `_name` value). For example: } ] } - }, - "_cache" : true + } } } } diff --git a/docs/reference/query-dsl/filters/not-filter.asciidoc b/docs/reference/query-dsl/filters/not-filter.asciidoc index 1e2b50fac23..ed533fc6d32 100644 --- a/docs/reference/query-dsl/filters/not-filter.asciidoc +++ b/docs/reference/query-dsl/filters/not-filter.asciidoc @@ -50,33 +50,3 @@ Or, in a longer form with a `filter` element: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached if there is evidence of reuse. -The `_cache` can be set to `true` in order to cache it (though usually -not needed). Here is an example: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "not" : { - "filter" : { - "range" : { - "postDate" : { - "from" : "2010-03-01", - "to" : "2010-04-01" - } - } - }, - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/or-filter.asciidoc b/docs/reference/query-dsl/filters/or-filter.asciidoc index c7c845c33ee..890d30f38e0 100644 --- a/docs/reference/query-dsl/filters/or-filter.asciidoc +++ b/docs/reference/query-dsl/filters/or-filter.asciidoc @@ -27,36 +27,3 @@ filters. Can be placed within queries that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence -of reuse. The `_cache` can be -set to `true` in order to cache it (though usually not needed). Since -the `_cache` element requires to be set on the `or` filter itself, the -structure then changes a bit to have the filters provided within a -`filters` element: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "or" : { - "filters" : [ - { - "term" : { "name.second" : "banon" } - }, - { - "term" : { "name.nick" : "kimchy" } - } - ], - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/prefix-filter.asciidoc b/docs/reference/query-dsl/filters/prefix-filter.asciidoc index 73c13ec8fe1..964d9f42ba2 100644 --- a/docs/reference/query-dsl/filters/prefix-filter.asciidoc +++ b/docs/reference/query-dsl/filters/prefix-filter.asciidoc @@ -16,22 +16,3 @@ a filter. Can be placed within queries that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is cached by default if there is evidence of reuse. -The `_cache` can be set to `true` in order to cache it. Here is an example: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "prefix" : { - "user" : "ki", - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/query-filter.asciidoc b/docs/reference/query-dsl/filters/query-filter.asciidoc index 2c5a7556c9a..8df0f3c3b11 100644 --- a/docs/reference/query-dsl/filters/query-filter.asciidoc +++ b/docs/reference/query-dsl/filters/query-filter.asciidoc @@ -19,34 +19,3 @@ that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. - -The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same query is used on several (many) other queries. Note, the -process of caching the first execution is higher when not caching (since -it needs to satisfy different queries). - -Setting the `_cache` element requires a different format for the -`query`: - -[source,js] --------------------------------------------------- -{ - "constantScore" : { - "filter" : { - "fquery" : { - "query" : { - "query_string" : { - "query" : "this AND that OR thus" - } - }, - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/range-filter.asciidoc b/docs/reference/query-dsl/filters/range-filter.asciidoc index 51d7390f1b1..0c84f91e196 100644 --- a/docs/reference/query-dsl/filters/range-filter.asciidoc +++ b/docs/reference/query-dsl/filters/range-filter.asciidoc @@ -95,11 +95,3 @@ requires more memory, so make sure you have sufficient memory on your nodes in order to use this execution mode. It usually makes sense to use it on fields you're already aggregating or sorting by. -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. The -`_cache` can be set to `false` to turn it off. - -Having the `now` expression used without rounding will make the filter unlikely to be -cached since reuse is very unlikely. diff --git a/docs/reference/query-dsl/filters/regexp-filter.asciidoc b/docs/reference/query-dsl/filters/regexp-filter.asciidoc index 1f11da47565..06a45ae0739 100644 --- a/docs/reference/query-dsl/filters/regexp-filter.asciidoc +++ b/docs/reference/query-dsl/filters/regexp-filter.asciidoc @@ -51,9 +51,7 @@ You have to enable caching explicitly in order to have the "flags" : "INTERSECTION|COMPLEMENT|EMPTY", "max_determinized_states": 20000 }, - "_name":"test", - "_cache" : true, - "_cache_key" : "key" + "_name":"test" } } } diff --git a/docs/reference/query-dsl/filters/script-filter.asciidoc b/docs/reference/query-dsl/filters/script-filter.asciidoc index f9e0cd19cee..2f49422d88a 100644 --- a/docs/reference/query-dsl/filters/script-filter.asciidoc +++ b/docs/reference/query-dsl/filters/script-filter.asciidoc @@ -43,11 +43,3 @@ to use the ability to pass parameters to the script itself, for example: } ---------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same script and parameters are used on several (many) other queries. -Note, the process of caching the first execution is higher when caching -(since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/term-filter.asciidoc b/docs/reference/query-dsl/filters/term-filter.asciidoc index cb249a83604..768fd94ac89 100644 --- a/docs/reference/query-dsl/filters/term-filter.asciidoc +++ b/docs/reference/query-dsl/filters/term-filter.asciidoc @@ -17,22 +17,3 @@ accept a filter, for example: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. -The `_cache` can be set to `false` to turn it off. Here is an example: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "term" : { - "user" : "kimchy", - "_cache" : false - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/terms-filter.asciidoc b/docs/reference/query-dsl/filters/terms-filter.asciidoc index 19e9358a4dd..027fc174db2 100644 --- a/docs/reference/query-dsl/filters/terms-filter.asciidoc +++ b/docs/reference/query-dsl/filters/terms-filter.asciidoc @@ -18,13 +18,6 @@ Filters documents that have fields that match any of the provided terms The `terms` filter is also aliased with `in` as the filter name for simpler usage. -[float] -==== Caching - -The result of the filter is cached if there is evidence of reuse. It is -possible to enable caching explicitely by setting `_cache` to `true` and -to disable caching by setting `_cache` to `false`. - [float] ==== Terms lookup mechanism @@ -93,8 +86,7 @@ curl -XGET localhost:9200/tweets/_search -d '{ "type" : "user", "id" : "2", "path" : "followers" - }, - "_cache_key" : "user_2_friends" + } } } } @@ -102,10 +94,6 @@ curl -XGET localhost:9200/tweets/_search -d '{ }' -------------------------------------------------- -If there are lots of matching values, then `_cache_key` is recommended to be set, -so that the filter cache will not store a reference to the potentially heavy -terms filter. - The structure of the external terms document can also include array of inner objects, for example: diff --git a/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/api/indices.clear_cache.json index 9fd73acbd01..c8e3e84de88 100644 --- a/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/api/indices.clear_cache.json @@ -32,10 +32,6 @@ "type" : "boolean", "description" : "Clear filter caches" }, - "filter_keys": { - "type" : "boolean", - "description" : "A comma-separated list of keys to clear when using the `filter_cache` parameter (default: all)" - }, "id": { "type" : "boolean", "description" : "Clear ID caches for parent/child" diff --git a/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java index 7528206f6ae..936fe490a5d 100644 --- a/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java +++ b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java @@ -18,9 +18,9 @@ package org.apache.lucene.search.postingshighlight; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; @@ -91,8 +91,7 @@ public final class CustomPostingsHighlighter extends XPostingsHighlighter { /* Our own api to highlight a single document field, passing in the query terms, and get back our own Snippet object */ - public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexSearcher searcher, int docId, int maxPassages) throws IOException { - IndexReader reader = searcher.getIndexReader(); + public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexReader reader, int docId, int maxPassages) throws IOException { IndexReaderContext readerContext = reader.getContext(); List leaves = readerContext.leaves(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index 2f0b0e7d4f2..d2395abf5f8 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.stats; import com.carrotsearch.hppc.ObjectObjectOpenHashMap; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java index 59e4f3a8842..3a96c83b3ac 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java @@ -37,7 +37,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest 0) { - clearedAtLeastOne = true; - service.cache().filter().clear("api", request.filterKeys()); - } if (request.fieldDataCache()) { clearedAtLeastOne = true; if (request.fields() == null || request.fields().length == 0) { diff --git a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java index 0bcada3c827..312c5979994 100644 --- a/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java +++ b/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettingsModule.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.ttl.IndicesTTLService; @@ -62,9 +61,6 @@ public class ClusterDynamicSettingsModule extends AbstractModule { clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*"); clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*"); clusterDynamicSettings.addDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*"); - clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_SIZE); - clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_EXPIRE, Validator.TIME); - clusterDynamicSettings.addDynamicSetting(IndicesFilterCache.INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, Validator.POSITIVE_INTEGER); clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_TYPE); clusterDynamicSettings.addDynamicSetting(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); clusterDynamicSettings.addDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME); diff --git a/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java b/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java new file mode 100644 index 00000000000..d31cd3835ec --- /dev/null +++ b/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Objects; + +/** + * Base implementation for a query which is cacheable at the index level but + * not the segment level as usually expected. + */ +public abstract class IndexCacheableQuery extends Query { + + private Object readerCacheKey; + + @Override + public Query rewrite(IndexReader reader) throws IOException { + if (reader.getCoreCacheKey() != this.readerCacheKey) { + IndexCacheableQuery rewritten = (IndexCacheableQuery) clone(); + rewritten.readerCacheKey = reader.getCoreCacheKey(); + return rewritten; + } + return super.rewrite(reader); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) + && readerCacheKey == ((IndexCacheableQuery) obj).readerCacheKey; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hashCode(readerCacheKey); + } + + @Override + public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + if (readerCacheKey == null) { + throw new IllegalStateException("Rewrite first"); + } + if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) { + throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting"); + } + return doCreateWeight(searcher, needsScores); + } + + /** Create a {@link Weight} for this query. + * @see Query#createWeight(IndexSearcher, boolean) + */ + public abstract Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java b/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java new file mode 100644 index 00000000000..0d9270edaff --- /dev/null +++ b/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene; + +import com.google.common.collect.HashMultimap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Multimap; + +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReader.CoreClosedListener; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardUtils; + +import java.io.IOException; +import java.util.IdentityHashMap; +import java.util.Map; +import java.util.Set; + +/** + * A map between segment core cache keys and the shard that these segments + * belong to. This allows to get the shard that a segment belongs to or to get + * the entire set of live core cache keys for a given index. In order to work + * this class needs to be notified about new segments. It modifies the current + * mappings as segments that were not known before are added and prevents the + * structure from growing indefinitely by registering close listeners on these + * segments so that at any time it only tracks live segments. + * + * NOTE: This is heavy. Avoid using this class unless absolutely required. + */ +public final class ShardCoreKeyMap { + + private final Map coreKeyToShard; + private final Multimap indexToCoreKey; + + public ShardCoreKeyMap() { + coreKeyToShard = new IdentityHashMap<>(); + indexToCoreKey = HashMultimap.create(); + } + + /** + * Register a {@link LeafReader}. This is necessary so that the core cache + * key of this reader can be found later using {@link #getCoreCacheKeys(ShardId)}. + */ + public void add(LeafReader reader) { + final ShardId shardId = ShardUtils.extractShardId(reader); + if (shardId == null) { + throw new IllegalArgumentException("Could not extract shard id from " + reader); + } + final Object coreKey = reader.getCoreCacheKey(); + final String index = shardId.getIndex(); + synchronized (this) { + if (coreKeyToShard.put(coreKey, shardId) == null) { + final boolean added = indexToCoreKey.put(index, coreKey); + assert added; + reader.addCoreClosedListener(new CoreClosedListener() { + @Override + public void onClose(Object ownerCoreCacheKey) throws IOException { + assert coreKey == ownerCoreCacheKey; + synchronized (ShardCoreKeyMap.this) { + coreKeyToShard.remove(ownerCoreCacheKey); + indexToCoreKey.remove(index, coreKey); + } + } + }); + } + } + } + + /** + * Return the {@link ShardId} that holds the given segment, or {@code null} + * if this segment is not tracked. + */ + public synchronized ShardId getShardId(Object coreKey) { + return coreKeyToShard.get(coreKey); + } + + /** + * Get the set of core cache keys associated with the given index. + */ + public synchronized Set getCoreKeysForIndex(String index) { + return ImmutableSet.copyOf(indexToCoreKey.get(index)); + } + + /** + * Return the number of tracked segments. + */ + public synchronized int size() { + assert indexToCoreKey.size() == coreKeyToShard.size(); + return coreKeyToShard.size(); + } + +} diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java index e05c11905ec..71cc5d7f9c2 100644 --- a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java +++ b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java @@ -22,6 +22,8 @@ package org.elasticsearch.common.lucene.docset; import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; @@ -104,32 +106,41 @@ public class DocIdSets { } /** - * Given a {@link DocIdSet}, return a {@link Bits} instance that will match + * Given a {@link Scorer}, return a {@link Bits} instance that will match * all documents contained in the set. Note that the returned {@link Bits} - * instance should only be consumed once and in order. + * instance MUST be consumed in order. */ - public static Bits asSequentialAccessBits(final int maxDoc, @Nullable DocIdSet set) throws IOException { - if (set == null) { + public static Bits asSequentialAccessBits(final int maxDoc, @Nullable Scorer scorer) throws IOException { + if (scorer == null) { return new Bits.MatchNoBits(maxDoc); } - Bits bits = set.bits(); - if (bits != null) { - return bits; - } - final DocIdSetIterator iterator = set.iterator(); - if (iterator == null) { - return new Bits.MatchNoBits(maxDoc); + final TwoPhaseIterator twoPhase = scorer.asTwoPhaseIterator(); + final DocIdSetIterator iterator; + if (twoPhase == null) { + iterator = scorer; + } else { + iterator = twoPhase.approximation(); } + return new Bits() { - int previous = 0; + int previous = -1; + boolean previousMatched = false; @Override public boolean get(int index) { + if (index < 0 || index >= maxDoc) { + throw new IndexOutOfBoundsException(index + " is out of bounds: [" + 0 + "-" + maxDoc + "["); + } if (index < previous) { throw new IllegalArgumentException("This Bits instance can only be consumed in order. " + "Got called on [" + index + "] while previously called on [" + previous + "]"); } + if (index == previous) { + // we cache whether it matched because it is illegal to call + // twoPhase.matches() twice + return previousMatched; + } previous = index; int doc = iterator.docID(); @@ -140,7 +151,14 @@ public class DocIdSets { throw new IllegalStateException("Cannot advance iterator", e); } } - return index == doc; + if (index == doc) { + try { + return previousMatched = twoPhase == null || twoPhase.matches(); + } catch (IOException e) { + throw new IllegalStateException("Cannot validate match", e); + } + } + return previousMatched = false; } @Override diff --git a/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java deleted file mode 100644 index 027f794e6f1..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.search.Filter; - -/** - * A marker indicating that this is a cached filter. - */ -public abstract class CachedFilter extends Filter { - - public static boolean isCached(Filter filter) { - return filter instanceof CachedFilter; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java index 7501307264b..770ddac0ce3 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java @@ -31,18 +31,18 @@ import java.io.IOException; public class FilteredCollector implements Collector { private final Collector collector; - private final Filter filter; + private final Weight filter; - public FilteredCollector(Collector collector, Filter filter) { + public FilteredCollector(Collector collector, Weight filter) { this.collector = collector; this.filter = filter; } @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { - final DocIdSet set = filter.getDocIdSet(context, null); + final Scorer filterScorer = filter.scorer(context, null); final LeafCollector in = collector.getLeafCollector(context); - final Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), set); + final Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); return new FilterLeafCollector(in) { @Override diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java deleted file mode 100644 index 73b3ba0590c..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSet; -import org.apache.lucene.search.Filter; -import org.apache.lucene.util.Bits; - -import java.io.IOException; - -/** - * A marker interface for {@link org.apache.lucene.search.Filter} denoting the filter - * as one that should not be cached, ever. - */ -public abstract class NoCacheFilter extends Filter { - - private static final class NoCacheFilterWrapper extends NoCacheFilter { - private final Filter delegate; - private NoCacheFilterWrapper(Filter delegate) { - this.delegate = delegate; - } - - @Override - public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { - return delegate.getDocIdSet(context, acceptDocs); - } - - @Override - public int hashCode() { - return delegate.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj instanceof NoCacheFilterWrapper) { - return delegate.equals(((NoCacheFilterWrapper)obj).delegate); - } - return false; - } - - @Override - public String toString(String field) { - - return "no_cache(" + delegate + ")"; - } - - } - - /** - * Wraps a filter in a NoCacheFilter or returns it if it already is a NoCacheFilter. - */ - public static Filter wrap(Filter filter) { - if (filter instanceof NoCacheFilter) { - return filter; - } - return new NoCacheFilterWrapper(filter); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java deleted file mode 100644 index c5bec8c5d9b..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.search.Query; - -/** - * Queries are never cached directly, but a query can be wrapped in a filter that may end being cached. - * Filters that wrap this query either directly or indirectly will never be cached. - */ -public abstract class NoCacheQuery extends Query { - - @Override - public final String toString(String s) { - return "no_cache(" + innerToString(s) + ")"; - } - - public abstract String innerToString(String s); -} diff --git a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 69ed1f68d64..fe33206b0cc 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -31,10 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; -import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import java.util.List; import java.util.regex.Pattern; @@ -54,19 +51,19 @@ public class Queries { } public static Filter newMatchAllFilter() { - return wrap(newMatchAllQuery()); + return new QueryWrapperFilter(newMatchAllQuery()); } public static Filter newMatchNoDocsFilter() { - return wrap(newMatchNoDocsQuery()); + return new QueryWrapperFilter(newMatchNoDocsQuery()); } public static Filter newNestedFilter() { - return wrap(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")))); + return new QueryWrapperFilter(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")))); } public static Filter newNonNestedFilter() { - return wrap(not(newNestedFilter())); + return new QueryWrapperFilter(not(newNestedFilter())); } /** Return a query that matches all documents but those that match the given query. */ @@ -169,24 +166,4 @@ public class Queries { optionalClauseCount : (result < 0 ? 0 : result)); } - - /** - * Wraps a query in a filter. - * - * If a filter has an anti per segment execution / caching nature then @{@link CustomQueryWrappingFilter} is returned - * otherwise the standard {@link org.apache.lucene.search.QueryWrapperFilter} is returned. - */ - @SuppressForbidden(reason = "QueryWrapperFilter cachability") - public static Filter wrap(Query query, QueryParseContext context) { - if ((context != null && context.requireCustomQueryWrappingFilter()) || CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(query)) { - return new CustomQueryWrappingFilter(query); - } else { - return new QueryWrapperFilter(query); - } - } - - /** Wrap as a {@link Filter}. */ - public static Filter wrap(Query query) { - return wrap(query, null); - } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java index 8ecb6228705..a4c92d78804 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java @@ -19,9 +19,11 @@ package org.elasticsearch.common.lucene.search; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; import org.apache.lucene.util.Bits; import java.io.IOException; @@ -46,4 +48,13 @@ public abstract class ResolvableFilter extends Filter { return null; } } + + @Override + public Query rewrite(IndexReader reader) throws IOException { + final Filter resolved = resolve(); + if (resolved != null) { + return resolved; + } + return super.rewrite(reader); + } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index 03dc0fcfb9e..d1835f57098 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -119,16 +119,22 @@ public class FiltersFunctionScoreQuery extends Query { // TODO: needsScores // if we dont need scores, just return the underlying Weight? Weight subQueryWeight = subQuery.createWeight(searcher, needsScores); - return new CustomBoostFactorWeight(this, subQueryWeight); + Weight[] filterWeights = new Weight[filterFunctions.length]; + for (int i = 0; i < filterFunctions.length; ++i) { + filterWeights[i] = searcher.createNormalizedWeight(filterFunctions[i].filter, false); + } + return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights); } class CustomBoostFactorWeight extends Weight { final Weight subQueryWeight; + final Weight[] filterWeights; - public CustomBoostFactorWeight(Query parent, Weight subQueryWeight) throws IOException { + public CustomBoostFactorWeight(Query parent, Weight subQueryWeight, Weight[] filterWeights) throws IOException { super(parent); this.subQueryWeight = subQueryWeight; + this.filterWeights = filterWeights; } @Override @@ -162,7 +168,8 @@ public class FiltersFunctionScoreQuery extends Query { for (int i = 0; i < filterFunctions.length; i++) { FilterFunction filterFunction = filterFunctions[i]; functions[i] = filterFunction.function.getLeafScoreFunction(context); - docSets[i] = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterFunction.filter.getDocIdSet(context, acceptDocs)); + Scorer filterScorer = filterWeights[i].scorer(context, null); // no need to apply accepted docs + docSets[i] = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); } return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, minScore); } @@ -177,7 +184,8 @@ public class FiltersFunctionScoreQuery extends Query { // First: Gather explanations for all filters List filterExplanations = new ArrayList<>(); float weightSum = 0; - for (FilterFunction filterFunction : filterFunctions) { + for (int i = 0; i < filterFunctions.length; ++i) { + FilterFunction filterFunction = filterFunctions[i]; if (filterFunction.function instanceof WeightFactorFunction) { weightSum += ((WeightFactorFunction) filterFunction.function).getWeight(); @@ -186,7 +194,7 @@ public class FiltersFunctionScoreQuery extends Query { } Bits docSet = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), - filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs())); + filterWeights[i].scorer(context, null)); if (docSet.get(doc)) { Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl); double factor = functionExplanation.getValue(); diff --git a/src/main/java/org/elasticsearch/index/IndexService.java b/src/main/java/org/elasticsearch/index/IndexService.java index 74c0e87f44c..6b192981dca 100644 --- a/src/main/java/org/elasticsearch/index/IndexService.java +++ b/src/main/java/org/elasticsearch/index/IndexService.java @@ -22,6 +22,7 @@ package org.elasticsearch.index; import com.google.common.base.Function; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; + import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -148,7 +149,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone this.indicesLifecycle = (InternalIndicesLifecycle) injector.getInstance(IndicesLifecycle.class); // inject workarounds for cyclic dep - indexCache.filter().setIndexService(this); indexFieldData.setIndexService(this); bitSetFilterCache.setIndexService(this); this.nodeEnv = nodeEnv; diff --git a/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java index 7d142a0c803..940344a627c 100644 --- a/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java +++ b/src/main/java/org/elasticsearch/index/aliases/IndexAliasesService.java @@ -22,10 +22,10 @@ package org.elasticsearch.index.aliases; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentFactory; @@ -109,7 +109,7 @@ public class IndexAliasesService extends AbstractIndexComponent implements Itera return null; } } - return Queries.wrap(combined); + return new QueryWrapperFilter(combined); } } diff --git a/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/src/main/java/org/elasticsearch/index/cache/IndexCache.java index 338b49f0490..3b71f735c2e 100644 --- a/src/main/java/org/elasticsearch/index/cache/IndexCache.java +++ b/src/main/java/org/elasticsearch/index/cache/IndexCache.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.cache; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -37,12 +38,14 @@ import java.io.IOException; public class IndexCache extends AbstractIndexComponent implements Closeable { private final FilterCache filterCache; + private final QueryCachingPolicy filterCachingPolicy; private final BitsetFilterCache bitsetFilterCache; @Inject - public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, BitsetFilterCache bitsetFilterCache) { + public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryCachingPolicy filterCachingPolicy, BitsetFilterCache bitsetFilterCache) { super(index, indexSettings); this.filterCache = filterCache; + this.filterCachingPolicy = filterCachingPolicy; this.bitsetFilterCache = bitsetFilterCache; } @@ -50,6 +53,10 @@ public class IndexCache extends AbstractIndexComponent implements Closeable { return filterCache; } + public QueryCachingPolicy filterPolicy() { + return filterCachingPolicy; + } + /** * Return the {@link BitsetFilterCache} for this index. */ diff --git a/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 13fc57ed1ef..284ecc0ecb7 100644 --- a/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -36,7 +36,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.NoCacheFilter; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -105,7 +104,6 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea public BitDocIdSetFilter getBitDocIdSetFilter(Filter filter) { assert filter != null; - assert !(filter instanceof NoCacheFilter); return new BitDocIdSetFilterWrapper(filter); } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java index a16b5da2bd9..37c45e3adf7 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java @@ -19,19 +19,14 @@ package org.elasticsearch.index.cache.filter; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.index.IndexComponent; -import org.elasticsearch.index.IndexService; import java.io.Closeable; /** * */ -public interface FilterCache extends IndexComponent, Closeable { +public interface FilterCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache { static class EntriesStats { public final long sizeInBytes; @@ -43,16 +38,5 @@ public interface FilterCache extends IndexComponent, Closeable { } } - // we need to "inject" the index service to not create cyclic dep - void setIndexService(IndexService indexService); - - String type(); - - Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy policy); - - void clear(Object reader); - void clear(String reason); - - void clear(String reason, String[] keys); } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java index 551ea4fa279..20496e3266b 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java @@ -24,7 +24,7 @@ import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Scopes; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; /** * @@ -46,7 +46,7 @@ public class FilterCacheModule extends AbstractModule { @Override protected void configure() { bind(FilterCache.class) - .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache")) + .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache")) .in(Scopes.SINGLETON); // the filter cache is a node-level thing, however we want the most popular filters // to be computed on a per-index basis, that is why we don't use the SINGLETON diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java index e56a1145d08..948f7e57702 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java @@ -33,32 +33,79 @@ import java.io.IOException; */ public class FilterCacheStats implements Streamable, ToXContent { - long memorySize; - long evictions; + long ramBytesUsed; + long hitCount; + long missCount; + long cacheCount; + long cacheSize; public FilterCacheStats() { } - public FilterCacheStats(long memorySize, long evictions) { - this.memorySize = memorySize; - this.evictions = evictions; + public FilterCacheStats(long ramBytesUsed, long hitCount, long missCount, long cacheCount, long cacheSize) { + this.ramBytesUsed = ramBytesUsed; + this.hitCount = hitCount; + this.missCount = missCount; + this.cacheCount = cacheCount; + this.cacheSize = cacheSize; } public void add(FilterCacheStats stats) { - this.memorySize += stats.memorySize; - this.evictions += stats.evictions; + ramBytesUsed += stats.ramBytesUsed; + hitCount += stats.hitCount; + missCount += stats.missCount; + cacheCount += stats.cacheCount; + cacheSize += stats.cacheSize; } public long getMemorySizeInBytes() { - return this.memorySize; + return ramBytesUsed; } public ByteSizeValue getMemorySize() { - return new ByteSizeValue(memorySize); + return new ByteSizeValue(ramBytesUsed); } + /** + * The total number of lookups in the cache. + */ + public long getTotalCount() { + return hitCount + missCount; + } + + /** + * The number of successful lookups in the cache. + */ + public long getHitCount() { + return hitCount; + } + + /** + * The number of lookups in the cache that failed to retrieve a {@link DocIdSet}. + */ + public long getMissCount() { + return missCount; + } + + /** + * The number of {@link DocIdSet}s that have been cached. + */ + public long getCacheCount() { + return cacheCount; + } + + /** + * The number of {@link DocIdSet}s that are in the cache. + */ + public long getCacheSize() { + return cacheSize; + } + + /** + * The number of {@link DocIdSet}s that have been evicted from the cache. + */ public long getEvictions() { - return this.evictions; + return cacheCount - cacheSize; } public static FilterCacheStats readFilterCacheStats(StreamInput in) throws IOException { @@ -67,22 +114,34 @@ public class FilterCacheStats implements Streamable, ToXContent { return stats; } + @Override public void readFrom(StreamInput in) throws IOException { - memorySize = in.readVLong(); - evictions = in.readVLong(); + ramBytesUsed = in.readLong(); + hitCount = in.readLong(); + missCount = in.readLong(); + cacheCount = in.readLong(); + cacheSize = in.readLong(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(memorySize); - out.writeVLong(evictions); + out.writeLong(ramBytesUsed); + out.writeLong(hitCount); + out.writeLong(missCount); + out.writeLong(cacheCount); + out.writeLong(cacheSize); } @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(Fields.FILTER_CACHE); - builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize); + builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed); + builder.field(Fields.TOTAL_COUNT, getTotalCount()); + builder.field(Fields.HIT_COUNT, getHitCount()); + builder.field(Fields.MISS_COUNT, getMissCount()); + builder.field(Fields.CACHE_SIZE, getCacheSize()); + builder.field(Fields.CACHE_COUNT, getCacheCount()); builder.field(Fields.EVICTIONS, getEvictions()); builder.endObject(); return builder; @@ -92,6 +151,12 @@ public class FilterCacheStats implements Streamable, ToXContent { static final XContentBuilderString FILTER_CACHE = new XContentBuilderString("filter_cache"); static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size"); static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes"); + static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count"); + static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count"); + static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count"); + static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size"); + static final XContentBuilderString CACHE_COUNT = new XContentBuilderString("cache_count"); static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions"); } + } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java index 67ab084bd07..97f75094580 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java @@ -19,45 +19,35 @@ package org.elasticsearch.index.cache.filter; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import org.apache.lucene.search.DocIdSet; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.docset.DocIdSets; -import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.cache.filter.IndicesFilterCache; + +import java.io.Closeable; +import java.io.IOException; /** */ -public class ShardFilterCache extends AbstractIndexShardComponent implements RemovalListener { +public class ShardFilterCache extends AbstractIndexShardComponent implements Closeable { - final CounterMetric evictionsMetric = new CounterMetric(); - final CounterMetric totalMetric = new CounterMetric(); + final IndicesFilterCache cache; @Inject - public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings) { + public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings, IndicesFilterCache cache) { super(shardId, indexSettings); + this.cache = cache; } public FilterCacheStats stats() { - return new FilterCacheStats(totalMetric.count(), evictionsMetric.count()); - } - - public void onCached(long sizeInBytes) { - totalMetric.inc(sizeInBytes); + return cache.getStats(shardId); } @Override - public void onRemoval(RemovalNotification removalNotification) { - if (removalNotification.wasEvicted()) { - evictionsMetric.inc(); - } - if (removalNotification.getValue() != null) { - totalMetric.dec(DocIdSets.sizeInBytes(removalNotification.getValue())); - } + public void close() throws IOException { + cache.onClose(shardId); } + } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java new file mode 100644 index 00000000000..5dfaf4c7799 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.cache.filter.index; + +import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.Weight; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.indices.cache.filter.IndicesFilterCache; + +/** + * The index-level filter cache. This class mostly delegates to the node-level + * filter cache: {@link IndicesFilterCache}. + */ +public class IndexFilterCache extends AbstractIndexComponent implements FilterCache { + + final IndicesFilterCache indicesFilterCache; + + @Inject + public IndexFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) { + super(index, indexSettings); + this.indicesFilterCache = indicesFilterCache; + } + + @Override + public void close() throws ElasticsearchException { + clear("close"); + } + + @Override + public void clear(String reason) { + logger.debug("full cache clear, reason [{}]", reason); + indicesFilterCache.clearIndex(index.getName()); + } + + @Override + public Weight doCache(Weight weight, QueryCachingPolicy policy) { + return indicesFilterCache.doCache(weight, policy); + } + +} diff --git a/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java index 41a704a9afd..ded3c207a42 100644 --- a/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java +++ b/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java @@ -19,15 +19,12 @@ package org.elasticsearch.index.cache.filter.none; -import org.apache.lucene.search.Filter; import org.apache.lucene.search.QueryCachingPolicy; -import org.elasticsearch.common.Nullable; +import org.apache.lucene.search.Weight; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.filter.FilterCache; import org.elasticsearch.index.settings.IndexSettings; @@ -42,38 +39,18 @@ public class NoneFilterCache extends AbstractIndexComponent implements FilterCac logger.debug("Using no filter cache"); } - @Override - public void setIndexService(IndexService indexService) { - // nothing to do here... - } - - @Override - public String type() { - return "none"; - } - @Override public void close() { // nothing to do here } @Override - public Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy policy) { - return filterToCache; + public Weight doCache(Weight weight, QueryCachingPolicy policy) { + return weight; } @Override public void clear(String reason) { // nothing to do here } - - @Override - public void clear(String reason, String[] keys) { - // nothing to do there - } - - @Override - public void clear(Object reader) { - // nothing to do here - } } diff --git a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java deleted file mode 100644 index 9e7a3772860..00000000000 --- a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.cache.filter.weighted; - -import com.google.common.cache.Cache; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.Weigher; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.search.BitsFilteredDocIdSet; -import org.apache.lucene.search.DocIdSet; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.docset.DocIdSets; -import org.elasticsearch.common.lucene.search.CachedFilter; -import org.elasticsearch.common.lucene.search.NoCacheFilter; -import org.elasticsearch.common.lucene.search.ResolvableFilter; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.index.AbstractIndexComponent; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.cache.filter.FilterCache; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardUtils; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; - -import java.io.IOException; -import java.util.concurrent.ConcurrentMap; - -public class WeightedFilterCache extends AbstractIndexComponent implements FilterCache, SegmentReader.CoreClosedListener, IndexReader.ReaderClosedListener { - - final IndicesFilterCache indicesFilterCache; - IndexService indexService; - - final ConcurrentMap seenReaders = ConcurrentCollections.newConcurrentMap(); - - @Inject - public WeightedFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) { - super(index, indexSettings); - this.indicesFilterCache = indicesFilterCache; - } - - @Override - public void setIndexService(IndexService indexService) { - this.indexService = indexService; - } - - @Override - public String type() { - return "weighted"; - } - - @Override - public void close() { - clear("close"); - } - - @Override - public void onClose(IndexReader reader) { - clear(reader.getCoreCacheKey()); - } - - - @Override - public void clear(String reason) { - logger.debug("full cache clear, reason [{}]", reason); - for (Object readerKey : seenReaders.keySet()) { - Boolean removed = seenReaders.remove(readerKey); - if (removed == null) { - return; - } - indicesFilterCache.addReaderKeyToClean(readerKey); - } - } - - @Override - public void clear(String reason, String[] keys) { - logger.debug("clear keys [], reason [{}]", reason, keys); - for (String key : keys) { - final HashedBytesRef keyBytes = new HashedBytesRef(key); - for (Object readerKey : seenReaders.keySet()) { - indicesFilterCache.cache().invalidate(new FilterCacheKey(readerKey, keyBytes)); - } - } - } - - @Override - public void onClose(Object coreKey) { - clear(coreKey); - } - - @Override - public void clear(Object coreCacheKey) { - // we add the seen reader before we add the first cache entry for this reader - // so, if we don't see it here, its won't be in the cache - Boolean removed = seenReaders.remove(coreCacheKey); - if (removed == null) { - return; - } - indicesFilterCache.addReaderKeyToClean(coreCacheKey); - } - - @Override - public Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy cachePolicy) { - if (filterToCache == null) { - return null; - } - if (filterToCache instanceof NoCacheFilter) { - return filterToCache; - } - if (CachedFilter.isCached(filterToCache)) { - return filterToCache; - } - if (filterToCache instanceof ResolvableFilter) { - throw new IllegalArgumentException("Cannot cache instances of ResolvableFilter: " + filterToCache); - } - return new FilterCacheFilterWrapper(filterToCache, cacheKey, cachePolicy, this); - } - - static class FilterCacheFilterWrapper extends CachedFilter { - - private final Filter filter; - private final Object filterCacheKey; - private final QueryCachingPolicy cachePolicy; - private final WeightedFilterCache cache; - - FilterCacheFilterWrapper(Filter filter, Object cacheKey, QueryCachingPolicy cachePolicy, WeightedFilterCache cache) { - this.filter = filter; - this.filterCacheKey = cacheKey != null ? cacheKey : filter; - this.cachePolicy = cachePolicy; - this.cache = cache; - } - - @Override - public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { - if (context.ord == 0) { - cachePolicy.onUse(filter); - } - FilterCacheKey cacheKey = new FilterCacheKey(context.reader().getCoreCacheKey(), filterCacheKey); - Cache innerCache = cache.indicesFilterCache.cache(); - - DocIdSet cacheValue = innerCache.getIfPresent(cacheKey); - final DocIdSet ret; - if (cacheValue != null) { - ret = cacheValue; - } else { - final DocIdSet uncached = filter.getDocIdSet(context, null); - if (cachePolicy.shouldCache(filter, context)) { - if (!cache.seenReaders.containsKey(context.reader().getCoreCacheKey())) { - Boolean previous = cache.seenReaders.putIfAbsent(context.reader().getCoreCacheKey(), Boolean.TRUE); - if (previous == null) { - // we add a core closed listener only, for non core IndexReaders we rely on clear being called (percolator for example) - context.reader().addCoreClosedListener(cache); - } - } - // we can't pass down acceptedDocs provided, because we are caching the result, and acceptedDocs - // might be specific to a query. We don't pass the live docs either because a cache built for a specific - // generation of a segment might be reused by an older generation which has fewer deleted documents - cacheValue = DocIdSets.toCacheable(context.reader(), uncached); - // we might put the same one concurrently, that's fine, it will be replaced and the removal - // will be called - ShardId shardId = ShardUtils.extractShardId(context.reader()); - if (shardId != null) { - IndexShard shard = cache.indexService.shard(shardId.id()); - if (shard != null) { - cacheKey.removalListener = shard.filterCache(); - shard.filterCache().onCached(DocIdSets.sizeInBytes(cacheValue)); - } - } - innerCache.put(cacheKey, cacheValue); - ret = cacheValue; - } else { - // uncached - ret = uncached; - } - } - - return BitsFilteredDocIdSet.wrap(DocIdSets.isEmpty(ret) ? null : ret, acceptDocs); - } - - @Override - public String toString(String field) { - return "cache(" + filter + ")"; - } - - @Override - public boolean equals(Object o) { - if (super.equals(o) == false) return false; - return this.filter.equals(((FilterCacheFilterWrapper) o).filter); - } - - @Override - public int hashCode() { - return 31 * super.hashCode() + filter.hashCode(); - } - } - - - /** A weigher for the Guava filter cache that uses a minimum entry size */ - public static class FilterCacheValueWeigher implements Weigher { - - private final int minimumEntrySize; - - public FilterCacheValueWeigher(int minimumEntrySize) { - this.minimumEntrySize = minimumEntrySize; - } - - @Override - public int weigh(FilterCacheKey key, DocIdSet value) { - int weight = (int) Math.min(DocIdSets.sizeInBytes(value), Integer.MAX_VALUE); - return Math.max(weight, this.minimumEntrySize); - } - } - - public static class FilterCacheKey { - private final Object readerKey; - private final Object filterKey; - - // if we know, we will try and set the removal listener (for statistics) - // its ok that its not volatile because we make sure we only set it when the object is created before its shared between threads - @Nullable - public RemovalListener removalListener; - - public FilterCacheKey(Object readerKey, Object filterKey) { - this.readerKey = readerKey; - this.filterKey = filterKey; - } - - public Object readerKey() { - return readerKey; - } - - public Object filterKey() { - return filterKey; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; -// if (o == null || getClass() != o.getClass()) return false; - FilterCacheKey that = (FilterCacheKey) o; - return (readerKey().equals(that.readerKey()) && filterKey.equals(that.filterKey)); - } - - @Override - public int hashCode() { - return readerKey().hashCode() + 31 * filterKey.hashCode(); - } - } -} diff --git a/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java b/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java index e7246d0e942..808542fadc4 100644 --- a/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java +++ b/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java @@ -21,14 +21,10 @@ package org.elasticsearch.index.cache.query; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; -import org.apache.lucene.search.DocIdSet; -import org.elasticsearch.common.bytes.BytesReference; + import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.FilterCacheStats; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; diff --git a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index ac072115cc7..9c069139173 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -21,6 +21,8 @@ package org.elasticsearch.index.engine; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.search.QueryCache; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; @@ -74,6 +76,8 @@ public final class EngineConfig { private final Similarity similarity; private final CodecService codecService; private final Engine.FailedEngineListener failedEngineListener; + private final QueryCache filterCache; + private final QueryCachingPolicy filterCachingPolicy; /** * Index setting for index concurrency / number of threadstates in the indexwriter. @@ -130,7 +134,11 @@ public final class EngineConfig { /** * Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */ - public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, Translog translog, MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, TranslogRecoveryPerformer translogRecoveryPerformer) { + public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, + IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, + Translog translog, MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer, + Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, + TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy) { this.shardId = shardId; this.threadPool = threadPool; this.indexingService = indexingService; @@ -155,6 +163,8 @@ public final class EngineConfig { versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE); updateVersionMapSize(); this.translogRecoveryPerformer = translogRecoveryPerformer; + this.filterCache = filterCache; + this.filterCachingPolicy = filterCachingPolicy; } /** updates {@link #versionMapSize} based on current setting and {@link #indexingBufferSize} */ @@ -396,4 +406,18 @@ public final class EngineConfig { public TranslogRecoveryPerformer getTranslogRecoveryPerformer() { return translogRecoveryPerformer; } + + /** + * Return the cache to use for filters. + */ + public QueryCache getFilterCache() { + return filterCache; + } + + /** + * Return the policy to use when caching filters. + */ + public QueryCachingPolicy getFilterCachingPolicy() { + return filterCachingPolicy; + } } diff --git a/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java b/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java index 7255b686bc8..fa8d9a6a5c1 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java @@ -40,7 +40,9 @@ public class EngineSearcherFactory extends SearcherFactory { @Override public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException { - IndexSearcher searcher = new IndexSearcher(reader); + IndexSearcher searcher = super.newSearcher(reader, previousReader); + searcher.setQueryCache(engineConfig.getFilterCache()); + searcher.setQueryCachingPolicy(engineConfig.getFilterCachingPolicy()); searcher.setSimilarity(engineConfig.getSimilarity()); return searcher; } diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 7ea6db76926..1d0bbe21644 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1044,7 +1044,7 @@ public class InternalEngine extends Engine { try { assert isMergedSegment(reader); if (warmer != null) { - final Engine.Searcher searcher = new Searcher("warmer", new IndexSearcher(reader)); + final Engine.Searcher searcher = new Searcher("warmer", searcherFactory.newSearcher(reader, null)); final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher); warmer.warmNewReaders(context); } @@ -1077,8 +1077,7 @@ public class InternalEngine extends Engine { @Override public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException { - IndexSearcher searcher = new IndexSearcher(reader); - searcher.setSimilarity(engineConfig.getSimilarity()); + IndexSearcher searcher = super.newSearcher(reader, previousReader); if (warmer != null) { // we need to pass a custom searcher that does not release anything on Engine.Search Release, // we will release explicitly @@ -1110,7 +1109,8 @@ public class InternalEngine extends Engine { } if (!readers.isEmpty()) { // we don't want to close the inner readers, just increase ref on them - newSearcher = new IndexSearcher(new MultiReader(readers.toArray(new IndexReader[readers.size()]), false)); + IndexReader newReader = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false); + newSearcher = super.newSearcher(newReader, null); closeNewSearcher = true; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index b267bf3978c..fa459d36b13 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; + import org.apache.lucene.document.Field; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; @@ -43,7 +44,19 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.Mapping.SourceTransform; -import org.elasticsearch.index.mapper.internal.*; +import org.elasticsearch.index.mapper.internal.AllFieldMapper; +import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.internal.IdFieldMapper; +import org.elasticsearch.index.mapper.internal.IndexFieldMapper; +import org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; +import org.elasticsearch.index.mapper.internal.SizeFieldMapper; +import org.elasticsearch.index.mapper.internal.SourceFieldMapper; +import org.elasticsearch.index.mapper.internal.TTLFieldMapper; +import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.script.ExecutableScript; @@ -54,7 +67,12 @@ import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; /** @@ -343,7 +361,7 @@ public class DocumentMapper implements ToXContent { continue; } - Filter filter = sc.filterCache().cache(objectMapper.nestedTypeFilter(), null, sc.queryParserService().autoFilterCachePolicy()); + Filter filter = objectMapper.nestedTypeFilter(); if (filter == null) { continue; } diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 7d242c953c0..6fe69c809ff 100755 --- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; @@ -371,11 +372,11 @@ public class MapperService extends AbstractIndexComponent { BooleanQuery bq = new BooleanQuery(); bq.add(percolatorType, Occur.MUST_NOT); bq.add(Queries.newNonNestedFilter(), Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else if (hasNested) { return Queries.newNonNestedFilter(); } else if (filterPercolateType) { - return Queries.wrap(Queries.not(percolatorType)); + return new QueryWrapperFilter(Queries.not(percolatorType)); } else { return null; } @@ -384,12 +385,12 @@ public class MapperService extends AbstractIndexComponent { // since they have different types (starting with __) if (types.length == 1) { DocumentMapper docMapper = documentMapper(types[0]); - Filter filter = docMapper != null ? docMapper.typeFilter() : Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, types[0]))); + Filter filter = docMapper != null ? docMapper.typeFilter() : new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, types[0]))); if (filterPercolateType) { BooleanQuery bq = new BooleanQuery(); bq.add(percolatorType, Occur.MUST_NOT); bq.add(filter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { return filter; } @@ -419,9 +420,9 @@ public class MapperService extends AbstractIndexComponent { BooleanQuery bq = new BooleanQuery(); bq.add(percolatorType, Occur.MUST_NOT); bq.add(termsFilter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { - return Queries.wrap(termsFilter); + return new QueryWrapperFilter(termsFilter); } } else { // Current bool filter requires that at least one should clause matches, even with a must clause. @@ -441,7 +442,7 @@ public class MapperService extends AbstractIndexComponent { bool.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST); } - return Queries.wrap(bool); + return new QueryWrapperFilter(bool); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index 7e97c86b4b6..8d004a31a91 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Objects; import com.google.common.collect.ImmutableList; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -35,6 +36,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; @@ -480,7 +482,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter termFilter(Object value, @Nullable QueryParseContext context) { - return Queries.wrap(new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); } @Override @@ -499,7 +501,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { for (int i = 0; i < bytesRefs.length; i++) { bytesRefs[i] = indexedValueForSearch(values.get(i)); } - return Queries.wrap(new TermsQuery(names.indexName(), bytesRefs)); + return new QueryWrapperFilter(new TermsQuery(names.indexName(), bytesRefs)); } } @@ -529,7 +531,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(new TermRangeQuery(names.indexName(), + return new QueryWrapperFilter(new TermRangeQuery(names.indexName(), lowerTerm == null ? null : indexedValueForSearch(lowerTerm), upperTerm == null ? null : indexedValueForSearch(upperTerm), includeLower, includeUpper)); @@ -551,7 +553,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter prefixFilter(Object value, @Nullable QueryParseContext context) { - return Queries.wrap(new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); + return new QueryWrapperFilter(new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); } @Override @@ -565,7 +567,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter regexpFilter(Object value, int flags, int maxDeterminizedStates, @Nullable QueryParseContext parseContext) { - return Queries.wrap(new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags, maxDeterminizedStates)); + return new QueryWrapperFilter(new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags, maxDeterminizedStates)); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index 7df9d2f179f..d01ff743279 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -24,13 +24,13 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -205,7 +205,7 @@ public class BooleanFieldMapper extends AbstractFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(new TermQuery(names().createIndexNameTerm(nullValue ? Values.TRUE : Values.FALSE))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(nullValue ? Values.TRUE : Values.FALSE))); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index fc91a8ac061..37a6f0a33b5 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -34,7 +35,6 @@ import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -212,7 +212,7 @@ public class ByteFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValueAsInt(lowerTerm), upperTerm == null ? null : parseValueAsInt(upperTerm), includeLower, includeUpper)); @@ -231,7 +231,7 @@ public class ByteFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, nullValue.intValue(), nullValue.intValue(), true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 38309d729c6..5ab0049178b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -39,8 +40,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; -import org.elasticsearch.common.lucene.search.NoCacheQuery; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.ResolvableFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; @@ -392,7 +391,7 @@ public class DateFieldMapper extends NumberFieldMapper { if (fieldData != null) { filter = NumericRangeFieldDataFilter.newLongRange(fieldData, lowerVal,upperVal, includeLower, includeUpper); } else { - filter = Queries.wrap(NumericRangeQuery.newLongRange( + filter = new QueryWrapperFilter(NumericRangeQuery.newLongRange( names.indexName(), precisionStep, lowerVal, upperVal, includeLower, includeUpper )); } @@ -406,7 +405,7 @@ public class DateFieldMapper extends NumberFieldMapper { return null; } long value = parseStringValue(nullValue); - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, value, value, true, true)); @@ -588,7 +587,7 @@ public class DateFieldMapper extends NumberFieldMapper { } } - public final class LateParsingQuery extends NoCacheQuery { + public final class LateParsingQuery extends Query { final Object lowerTerm; final Object upperTerm; @@ -613,7 +612,7 @@ public class DateFieldMapper extends NumberFieldMapper { } @Override - public String innerToString(String s) { + public String toString(String s) { final StringBuilder sb = new StringBuilder(); return sb.append(names.indexName()).append(':') .append(includeLower ? '[' : '{') diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index d3802650074..e7ee19e3dba 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -38,7 +39,6 @@ import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.util.ByteUtils; @@ -202,14 +202,14 @@ public class DoubleFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseDoubleValue(lowerTerm), upperTerm == null ? null : parseDoubleValue(upperTerm), includeLower, includeUpper)); } public Filter rangeFilter(Double lowerTerm, Double upperTerm, boolean includeLower, boolean includeUpper) { - return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper)); + return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper)); } @Override @@ -225,7 +225,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, nullValue, nullValue, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index ad5faa81c4e..968261805d9 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -39,7 +40,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.util.ByteUtils; @@ -212,7 +212,7 @@ public class FloatFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), includeLower, includeUpper)); @@ -231,7 +231,7 @@ public class FloatFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep, nullValue, nullValue, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index 647e7c3b5d1..8fc32539120 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -206,7 +206,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), includeLower, includeUpper)); @@ -225,7 +225,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, nullValue, nullValue, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index a600769749a..bca94be2d34 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -196,7 +196,7 @@ public class LongFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseLongValue(lowerTerm), upperTerm == null ? null : parseLongValue(upperTerm), includeLower, includeUpper)); @@ -215,7 +215,7 @@ public class LongFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, nullValue, nullValue, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index b0d15fac59d..a3a905aed99 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -36,7 +37,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -212,7 +212,7 @@ public class ShortFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValueAsInt(lowerTerm), upperTerm == null ? null : parseValueAsInt(upperTerm), includeLower, includeUpper)); @@ -220,7 +220,7 @@ public class ShortFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(QueryParseContext parseContext, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) parseContext.getForField(this), + return new QueryWrapperFilter(NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) parseContext.getForField(this), lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), includeLower, includeUpper)); @@ -231,7 +231,7 @@ public class ShortFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, nullValue.intValue(), nullValue.intValue(), true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index efa7fe36c0b..c9a38f18f9c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; @@ -41,7 +42,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -200,7 +200,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern if (fieldType.indexOptions() != IndexOptions.NONE || context == null) { return super.termFilter(value, context); } - return Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value))); + return new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value))); } @Override @@ -208,7 +208,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern if (fieldType.indexOptions() != IndexOptions.NONE || context == null) { return super.termsFilter(values, context); } - return Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values))); + return new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values))); } @Override @@ -238,7 +238,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern for (String queryType : queryTypes) { filter.add(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value)))), BooleanClause.Occur.SHOULD); } - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } @Override @@ -277,7 +277,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern filter.add(new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))), flags, maxDeterminizedStates), BooleanClause.Occur.SHOULD); } - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index f1eca621076..d6a14f7be3c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; @@ -275,7 +276,7 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter } BytesRef bValue = BytesRefs.toBytesRef(value); if (Uid.hasDelimiter(bValue)) { - return Queries.wrap(new TermQuery(new Term(names.indexName(), bValue))); + return new QueryWrapperFilter(new TermQuery(new Term(names.indexName(), bValue))); } List types = new ArrayList<>(context.mapperService().types().size()); @@ -288,14 +289,14 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter if (types.isEmpty()) { return Queries.newMatchNoDocsFilter(); } else if (types.size() == 1) { - return Queries.wrap(new TermQuery(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue)))); + return new QueryWrapperFilter(new TermQuery(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue)))); } else { // we use all non child types, cause we don't know if its exact or not... List typesValues = new ArrayList<>(types.size()); for (String type : context.mapperService().types()) { typesValues.add(Uid.createUidAsBytes(type, bValue)); } - return Queries.wrap(new TermsQuery(names.indexName(), typesValues)); + return new QueryWrapperFilter(new TermsQuery(names.indexName(), typesValues)); } } @@ -328,7 +329,7 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter } } } - return Queries.wrap(new TermsQuery(names.indexName(), bValues)); + return new QueryWrapperFilter(new TermsQuery(names.indexName(), bValues)); } /** diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index 83cdbf536b2..b5668761c03 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -28,13 +28,13 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.fielddata.FieldDataType; @@ -133,15 +133,15 @@ public class TypeFieldMapper extends AbstractFieldMapper implements Inte @Override public Query termQuery(Object value, @Nullable QueryParseContext context) { - return new ConstantScoreQuery(context.cacheFilter(termFilter(value, context), null, context.autoFilterCachePolicy())); + return new ConstantScoreQuery(termFilter(value, context)); } @Override public Filter termFilter(Object value, @Nullable QueryParseContext context) { if (fieldType.indexOptions() == IndexOptions.NONE) { - return Queries.wrap(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value))))); + return new QueryWrapperFilter(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value))))); } - return Queries.wrap(new TermQuery(names().createIndexNameTerm(BytesRefs.toBytesRef(value)))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(BytesRefs.toBytesRef(value)))); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 9f69e83edfe..64ba3bace25 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; @@ -35,7 +36,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -253,7 +253,7 @@ public class IpFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? null : parseValue(upperTerm), includeLower, includeUpper)); @@ -273,7 +273,7 @@ public class IpFieldMapper extends NumberFieldMapper { return null; } final long value = ipToLong(nullValue); - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, value, value, true, true)); diff --git a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index 9d591156b99..8004bb38e7c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.object; import com.google.common.collect.Iterables; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; @@ -29,7 +30,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.CopyOnWriteHashMap; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -367,7 +367,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea } this.nestedTypePathAsString = "__" + fullPath; this.nestedTypePathAsBytes = new BytesRef(nestedTypePathAsString); - this.nestedTypeFilter = Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes))); + this.nestedTypeFilter = new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes))); } @Override diff --git a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java index fd4cce1c763..0bde0193768 100644 --- a/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ b/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.percolator; import org.apache.lucene.index.Term; -import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; @@ -28,7 +27,6 @@ import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -93,7 +91,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple private CloseableThreadLocal cache = new CloseableThreadLocal() { @Override protected QueryParseContext initialValue() { - return new QueryParseContext(shardId.index(), queryParserService, true); + return new QueryParseContext(shardId.index(), queryParserService); } }; @@ -280,13 +278,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple shard.refresh("percolator_load_queries"); // Maybe add a mode load? This isn't really a write. We need write b/c state=post_recovery try (Engine.Searcher searcher = shard.acquireSearcher("percolator_load_queries", true)) { - Query query = new ConstantScoreQuery( - indexCache.filter().cache( - Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME))), - null, - queryParserService.autoFilterCachePolicy() - ) - ); + Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME)); QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService); searcher.searcher().search(query, queryCollector); Map queries = queryCollector.queries(); diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java index c69f7c8ef0f..87611a03b73 100644 --- a/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java @@ -34,9 +34,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { private ArrayList filters = Lists.newArrayList(); - private Boolean cache; - private String cacheKey; - private String filterName; public AndFilterBuilder(FilterBuilder... filters) { @@ -53,19 +50,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public AndFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public AndFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the filter name for the filter that can be used when searching for matched_filters per hit. */ @@ -82,12 +66,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { filter.toXContent(builder, params); } builder.endArray(); - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java index 02322db9a0b..f0c8c2724bb 100644 --- a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -56,9 +54,6 @@ public class AndFilterParser implements FilterParser { ArrayList filters = newArrayList(); boolean filtersFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token = parser.currentToken(); @@ -74,6 +69,8 @@ public class AndFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { if ("filters".equals(currentFieldName)) { filtersFound = true; @@ -93,12 +90,8 @@ public class AndFilterParser implements FilterParser { } } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[and] filter does not support [" + currentFieldName + "]"); } @@ -120,10 +113,7 @@ public class AndFilterParser implements FilterParser { for (Filter filter : filters) { boolQuery.add(filter, Occur.MUST); } - Filter filter = Queries.wrap(boolQuery); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(boolQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java index f4982f12f69..330adaf8d08 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java @@ -36,9 +36,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { private ArrayList shouldClauses = new ArrayList<>(); - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -114,19 +111,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public BoolFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public BoolFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("bool"); @@ -137,12 +121,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java index 71f8b8248f7..7d96f1bc3bc 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -52,9 +50,6 @@ public class BoolFilterParser implements FilterParser { BooleanQuery boolFilter = new BooleanQuery(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token; @@ -64,6 +59,8 @@ public class BoolFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("must".equals(currentFieldName)) { hasAnyFilter = true; @@ -117,12 +114,8 @@ public class BoolFilterParser implements FilterParser { throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } @@ -138,10 +131,7 @@ public class BoolFilterParser implements FilterParser { return null; } - Filter filter = Queries.wrap(boolFilter); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(boolFilter); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java index d89ff05b7fa..593643abc52 100644 --- a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -55,14 +53,14 @@ public class ConstantScoreQueryParser implements QueryParser { Query query = null; boolean queryFound = false; float boost = 1.0f; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String currentFieldName = null; XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("filter".equals(currentFieldName)) { filter = parseContext.parseInnerFilter(); @@ -76,10 +74,6 @@ public class ConstantScoreQueryParser implements QueryParser { } else if (token.isValue()) { if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[constant_score] query does not support [" + currentFieldName + "]"); } @@ -94,11 +88,6 @@ public class ConstantScoreQueryParser implements QueryParser { } if (filter != null) { - // cache the filter if possible needed - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - Query query1 = new ConstantScoreQuery(filter); query1.setBoost(boost); return query1; diff --git a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java index 008f554a57f..8dc0a3eb2c1 100644 --- a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; @@ -120,11 +121,7 @@ public class ExistsFilterParser implements FilterParser { boolFilter.add(filter, BooleanClause.Occur.SHOULD); } - Filter filter = Queries.wrap(boolFilter); - // we always cache this one, really does not change... (exists) - // its ok to cache under the fieldName cacheKey, since its per segment and the mapping applies to this data on this segment... - filter = parseContext.cacheFilter(filter, new HashedBytesRef("$exists$" + fieldPattern), parseContext.autoFilterCachePolicy()); - + Filter filter = new QueryWrapperFilter(boolFilter); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java index d31e2f1a943..b349dd3e65c 100644 --- a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java @@ -21,10 +21,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -52,8 +50,6 @@ public class FQueryFilterParser implements FilterParser { Query query = null; boolean queryFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -61,6 +57,8 @@ public class FQueryFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { queryFound = true; @@ -71,10 +69,6 @@ public class FQueryFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.autoFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[fquery] filter does not support [" + currentFieldName + "]"); } @@ -86,10 +80,7 @@ public class FQueryFilterParser implements FilterParser { if (query == null) { return null; } - Filter filter = Queries.wrap(query, parseContext); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(query); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/FilterBuilder.java b/src/main/java/org/elasticsearch/index/query/FilterBuilder.java index 9b3a89976c5..77eb4d136ca 100644 --- a/src/main/java/org/elasticsearch/index/query/FilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/FilterBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentType; diff --git a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java index f6ec14313b1..9e2f8e133b3 100644 --- a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java @@ -23,9 +23,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; @@ -55,8 +53,6 @@ public class FilteredQueryParser implements QueryParser { Filter filter = null; boolean filterFound = false; float boost = 1.0f; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String queryName = null; String currentFieldName = null; @@ -66,6 +62,8 @@ public class FilteredQueryParser implements QueryParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { query = parseContext.parseInnerQuery(); @@ -99,10 +97,6 @@ public class FilteredQueryParser implements QueryParser { queryName = parser.text(); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[filtered] query does not support [" + currentFieldName + "]"); } @@ -129,11 +123,6 @@ public class FilteredQueryParser implements QueryParser { return query; } - // cache if required - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - // if its a match_all query, use constant_score if (Queries.isConstantMatchAllQuery(query)) { Query q = new ConstantScoreQuery(filter); diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java index 5efedc86c1e..40b41c7ffbd 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java @@ -42,9 +42,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { private double[] box = {Double.NaN, Double.NaN, Double.NaN, Double.NaN}; - private Boolean cache; - private String cacheKey; - private String filterName; private String type; @@ -140,19 +137,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public GeoBoundingBoxFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoBoundingBoxFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the type of executing of the geo bounding box. Can be either `memory` or `indexed`. Defaults * to `memory`. @@ -185,12 +169,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (type != null) { builder.field("type", type); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java index 107e3a507dd..6441b8d4b07 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java @@ -20,12 +20,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.mapper.FieldMapper; @@ -72,8 +70,6 @@ public class GeoBoundingBoxFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; double top = Double.NaN; @@ -100,7 +96,9 @@ public class GeoBoundingBoxFilterParser implements FilterParser { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); token = parser.nextToken(); - if (FIELD.equals(currentFieldName)) { + if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip + } else if (FIELD.equals(currentFieldName)) { fieldName = parser.text(); } else if (TOP.equals(currentFieldName)) { top = parser.doubleValue(); @@ -138,10 +136,6 @@ public class GeoBoundingBoxFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("normalize".equals(currentFieldName)) { normalize = parser.booleanValue(); } else if ("type".equals(currentFieldName)) { @@ -188,9 +182,6 @@ public class GeoBoundingBoxFilterParser implements FilterParser { + "] not supported, either 'indexed' or 'memory' are allowed"); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java index 6a07c285c1a..a45aee92c6c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java @@ -45,9 +45,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { private String optimizeBbox; - private Boolean cache; - private String cacheKey; - private String filterName; public GeoDistanceFilterBuilder(String name) { @@ -103,19 +100,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public GeoDistanceFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoDistanceFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(GeoDistanceFilterParser.NAME); @@ -134,12 +118,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java index a7859977388..ff46d591f3c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java @@ -20,13 +20,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -64,8 +62,6 @@ public class GeoDistanceFilterParser implements FilterParser { XContentParser.Token token; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; GeoPoint point = new GeoPoint(); @@ -80,6 +76,8 @@ public class GeoDistanceFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { fieldName = currentFieldName; GeoUtils.parseGeoPoint(parser, point); @@ -125,10 +123,6 @@ public class GeoDistanceFilterParser implements FilterParser { fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length()); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) { optimizeBbox = parser.textOrNull(); } else if ("normalize".equals(currentFieldName)) { @@ -167,9 +161,6 @@ public class GeoDistanceFilterParser implements FilterParser { IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); Filter filter = new GeoDistanceFilter(point.lat(), point.lon(), distance, geoDistance, indexFieldData, geoMapper, optimizeBbox); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java index 343e50e3efb..c21cd3d62d7 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java @@ -45,9 +45,6 @@ public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder { private GeoDistance geoDistance; - private Boolean cache; - private String cacheKey; - private String filterName; private String optimizeBbox; @@ -139,19 +136,6 @@ public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public GeoDistanceRangeFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoDistanceRangeFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(GeoDistanceRangeFilterParser.NAME); @@ -173,12 +157,6 @@ public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java index 113c59d2c83..9322a230c01 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java @@ -20,13 +20,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -64,8 +62,6 @@ public class GeoDistanceRangeFilterParser implements FilterParser { XContentParser.Token token; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; GeoPoint point = new GeoPoint(); @@ -82,6 +78,8 @@ public class GeoDistanceRangeFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { GeoUtils.parseGeoPoint(parser, point); fieldName = currentFieldName; @@ -155,10 +153,6 @@ public class GeoDistanceRangeFilterParser implements FilterParser { fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length()); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) { optimizeBbox = parser.textOrNull(); } else if ("normalize".equals(currentFieldName)) { @@ -206,9 +200,6 @@ public class GeoDistanceRangeFilterParser implements FilterParser { IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); Filter filter = new GeoDistanceRangeFilter(point, from, to, includeLower, includeUpper, geoDistance, geoMapper, indexFieldData, optimizeBbox); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java index e32a1e58e1b..fd0a2f569c4 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import com.google.common.collect.Lists; + import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -38,9 +39,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder { private final List shell = Lists.newArrayList(); - private Boolean cache; - private String cacheKey; - private String filterName; public GeoPolygonFilterBuilder(String name) { @@ -75,19 +73,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public GeoPolygonFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoPolygonFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(GeoPolygonFilterParser.NAME); @@ -103,12 +88,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java index e63c6012ede..f3f41ac7126 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java @@ -22,11 +22,9 @@ package org.elasticsearch.index.query; import com.google.common.collect.Lists; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -68,8 +66,6 @@ public class GeoPolygonFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; List shell = Lists.newArrayList(); @@ -84,6 +80,8 @@ public class GeoPolygonFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; @@ -107,10 +105,6 @@ public class GeoPolygonFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("normalize".equals(currentFieldName)) { normalizeLat = parser.booleanValue(); normalizeLon = parser.booleanValue(); @@ -154,9 +148,6 @@ public class GeoPolygonFilterParser implements FilterParser { IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); Filter filter = new GeoPolygonFilter(indexFieldData, shell.toArray(new GeoPoint[shell.size()])); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java index 4ff26d7aacd..1ac7b14481c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java @@ -37,9 +37,6 @@ public class GeoShapeFilterBuilder extends BaseFilterBuilder { private SpatialStrategy strategy = null; - private Boolean cache; - private String cacheKey; - private String filterName; private final String indexedShapeId; @@ -93,28 +90,6 @@ public class GeoShapeFilterBuilder extends BaseFilterBuilder { this.indexedShapeType = indexedShapeType; } - /** - * Sets whether the filter will be cached. - * - * @param cache Whether filter will be cached - * @return this - */ - public GeoShapeFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - /** - * Sets the key used for the filter if it is cached - * - * @param cacheKey Key for the Filter if cached - * @return this - */ - public GeoShapeFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the name of the filter * @@ -205,12 +180,6 @@ public class GeoShapeFilterBuilder extends BaseFilterBuilder { if (name != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java index 5a5e45736cd..bba22882d27 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java @@ -24,15 +24,13 @@ import com.spatial4j.core.shape.Shape; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.internal.Nullable; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; @@ -85,8 +83,6 @@ public class GeoShapeFilterParser implements FilterParser { ShapeRelation shapeRelation = ShapeRelation.INTERSECTS; String strategyName = null; ShapeBuilder shape = null; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String id = null; @@ -100,6 +96,8 @@ public class GeoShapeFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; @@ -147,10 +145,6 @@ public class GeoShapeFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[geo_shape] filter does not support [" + currentFieldName + "]"); } @@ -189,15 +183,11 @@ public class GeoShapeFilterParser implements FilterParser { Filter intersects = strategy.makeFilter(GeoShapeQueryParser.getArgs(shape, ShapeRelation.INTERSECTS)); bool.add(exists, BooleanClause.Occur.MUST); bool.add(intersects, BooleanClause.Occur.MUST_NOT); - filter = Queries.wrap(bool); + filter = new QueryWrapperFilter(bool); } else { filter = strategy.makeFilter(GeoShapeQueryParser.getArgs(shape, shapeRelation)); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java index 63ca22db644..e390bf53ea7 100644 --- a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java +++ b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -28,7 +27,6 @@ import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -62,8 +60,6 @@ public class GeohashCellFilter { public static final String NAME = "geohash_cell"; public static final String NEIGHBORS = "neighbors"; public static final String PRECISION = "precision"; - public static final String CACHE = "_cache"; - public static final String CACHE_KEY = "_cache_key"; /** * Create a new geohash filter for a given set of geohashes. In general this method @@ -103,8 +99,6 @@ public class GeohashCellFilter { private String geohash; private int levels = -1; private boolean neighbors; - private Boolean cache; - private String cacheKey; public Builder(String field) { @@ -161,19 +155,6 @@ public class GeohashCellFilter { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public Builder cache(boolean cache) { - this.cache = cache; - return this; - } - - public Builder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(NAME); @@ -183,12 +164,6 @@ public class GeohashCellFilter { if(levels > 0) { builder.field(PRECISION, levels); } - if (cache != null) { - builder.field(CACHE, cache); - } - if (cacheKey != null) { - builder.field(CACHE_KEY, cacheKey); - } builder.field(field, geohash); builder.endObject(); @@ -214,8 +189,6 @@ public class GeohashCellFilter { String geohash = null; int levels = -1; boolean neighbors = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; XContentParser.Token token; @@ -227,7 +200,9 @@ public class GeohashCellFilter { if (token == Token.FIELD_NAME) { String field = parser.text(); - if (PRECISION.equals(field)) { + if (parseContext.isDeprecatedSetting(field)) { + // skip + } else if (PRECISION.equals(field)) { token = parser.nextToken(); if(token == Token.VALUE_NUMBER) { levels = parser.intValue(); @@ -238,12 +213,6 @@ public class GeohashCellFilter { } else if (NEIGHBORS.equals(field)) { parser.nextToken(); neighbors = parser.booleanValue(); - } else if (CACHE.equals(field)) { - parser.nextToken(); - cache = parseContext.parseFilterCachePolicy(); - } else if (CACHE_KEY.equals(field)) { - parser.nextToken(); - cacheKey = new HashedBytesRef(parser.text()); } else { fieldName = field; token = parser.nextToken(); @@ -296,10 +265,6 @@ public class GeohashCellFilter { filter = create(parseContext, geoMapper, geohash, null); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - return filter; } } diff --git a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java index d22a05f6a11..8bf761b84a0 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -34,7 +35,6 @@ import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery; import org.elasticsearch.index.search.child.ChildrenQuery; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -80,6 +80,8 @@ public class HasChildFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { // Usually, the query would be parsed here, but the child // type may not have been extracted yet, so use the @@ -101,10 +103,6 @@ public class HasChildFilterParser implements FilterParser { childType = parser.text(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - // noop to be backwards compatible - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - // noop to be backwards compatible } else if ("short_circuit_cutoff".equals(currentFieldName)) { shortCircuitParentDocSet = parser.intValue(); } else if ("min_children".equals(currentFieldName) || "minChildren".equals(currentFieldName)) { @@ -150,7 +148,7 @@ public class HasChildFilterParser implements FilterParser { String parentType = parentFieldMapper.type(); // wrap the query with type query - query = new FilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); + query = new FilteredQuery(query, childDocMapper.typeFilter()); DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { @@ -167,7 +165,7 @@ public class HasChildFilterParser implements FilterParser { nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); } - Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy()); + Filter parentFilter = parentDocMapper.typeFilter(); ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper); Query childrenQuery; @@ -178,9 +176,9 @@ public class HasChildFilterParser implements FilterParser { shortCircuitParentDocSet, nonNestedDocsFilter); } if (filterName != null) { - parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(childrenQuery)); + parseContext.addNamedFilter(filterName, new QueryWrapperFilter(childrenQuery)); } - return new CustomQueryWrappingFilter(childrenQuery); + return new QueryWrapperFilter(childrenQuery); } } diff --git a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java index e088b58a51a..2e55395535c 100644 --- a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery; import org.elasticsearch.index.search.child.ChildrenQuery; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -166,10 +165,10 @@ public class HasChildQueryParser implements QueryParser { } // wrap the query with type query - innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); + innerQuery = new FilteredQuery(innerQuery, childDocMapper.typeFilter()); Query query; - Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy()); + Filter parentFilter = parentDocMapper.typeFilter(); ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper); if (minChildren > 1 || maxChildren > 0 || scoreType != ScoreType.NONE) { query = new ChildrenQuery(parentChildIndexFieldData, parentType, childType, parentFilter, innerQuery, scoreType, minChildren, @@ -179,7 +178,7 @@ public class HasChildQueryParser implements QueryParser { shortCircuitParentDocSet, nonNestedDocsFilter); } if (queryName != null) { - parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query)); + parseContext.addNamedQuery(queryName, query); } query.setBoost(boost); return query; diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java index 62a96debf8c..8f565022c4f 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java @@ -62,20 +62,6 @@ public class HasParentFilterBuilder extends BaseFilterBuilder { return this; } - /** - * This is a noop since has_parent can't be cached. - */ - public HasParentFilterBuilder cache(boolean cache) { - return this; - } - - /** - * This is a noop since has_parent can't be cached. - */ - public HasParentFilterBuilder cacheKey(String cacheKey) { - return this; - } - /** * Sets inner hit definition in the scope of this filter and reusing the defined type and query. */ diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java index 388f24d4ab0..331f575df77 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java @@ -20,13 +20,13 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.search.internal.SubSearchContext; import java.io.IOException; @@ -69,6 +69,8 @@ public class HasParentFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { // Usually, the query would be parsed here, but the child // type may not have been extracted yet, so use the @@ -90,10 +92,6 @@ public class HasParentFilterParser implements FilterParser { parentType = parser.text(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - // noop to be backwards compatible - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - // noop to be backwards compatible } else { throw new QueryParsingException(parseContext, "[has_parent] filter does not support [" + currentFieldName + "]"); } @@ -122,9 +120,9 @@ public class HasParentFilterParser implements FilterParser { return null; } if (filterName != null) { - parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(parentQuery)); + parseContext.addNamedFilter(filterName, new QueryWrapperFilter(parentQuery)); } - return new CustomQueryWrappingFilter(parentQuery); + return new QueryWrapperFilter(parentQuery); } } diff --git a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java index d7d57b6ddd6..0d718efdb69 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -33,7 +34,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ParentConstantScoreQuery; import org.elasticsearch.index.search.child.ParentQuery; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; @@ -136,7 +136,7 @@ public class HasParentQueryParser implements QueryParser { query.setBoost(boost); if (queryName != null) { - parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query)); + parseContext.addNamedQuery(queryName, query); } return query; } @@ -186,7 +186,7 @@ public class HasParentQueryParser implements QueryParser { parentsFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD); } } - parentFilter = Queries.wrap(parentsFilter); + parentFilter = new QueryWrapperFilter(parentsFilter); } if (parentFilter == null) { @@ -194,8 +194,8 @@ public class HasParentQueryParser implements QueryParser { } // wrap the query with type query - innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); - Filter childrenFilter = parseContext.cacheFilter(Queries.wrap(Queries.not(parentFilter)), null, parseContext.autoFilterCachePolicy()); + innerQuery = new FilteredQuery(innerQuery, parentDocMapper.typeFilter()); + Filter childrenFilter = new QueryWrapperFilter(Queries.not(parentFilter)); if (score) { return new ParentQuery(parentChildIndexFieldData, innerQuery, parentDocMapper.type(), childrenFilter); } else { diff --git a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java index 138557cd79a..23d4c9c1483 100644 --- a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java @@ -24,6 +24,7 @@ import com.google.common.collect.Iterables; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; @@ -109,7 +110,7 @@ public class IdsFilterParser implements FilterParser { types = parseContext.mapperService().types(); } - Filter filter = Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(types, ids))); + Filter filter = new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(types, ids))); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java index e2bcd353e11..4d04e8e675b 100644 --- a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java +++ b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java @@ -23,7 +23,6 @@ import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -94,8 +93,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { final BitsetFilterCache bitsetFilterCache; - final QueryCachingPolicy autoFilterCachePolicy; - private final Map queryParsers; private final Map filterParsers; @@ -111,7 +108,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { ScriptService scriptService, AnalysisService analysisService, MapperService mapperService, IndexCache indexCache, IndexFieldDataService fieldDataService, BitsetFilterCache bitsetFilterCache, - QueryCachingPolicy autoFilterCachePolicy, @Nullable SimilarityService similarityService, @Nullable Map namedQueryParsers, @Nullable Map namedFilterParsers) { @@ -123,7 +119,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { this.indexCache = indexCache; this.fieldDataService = fieldDataService; this.bitsetFilterCache = bitsetFilterCache; - this.autoFilterCachePolicy = autoFilterCachePolicy; this.defaultField = indexSettings.get(DEFAULT_FIELD, AllFieldMapper.NAME); this.queryStringLenient = indexSettings.getAsBoolean(QUERY_STRING_LENIENT, false); @@ -185,10 +180,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { return this.defaultField; } - public QueryCachingPolicy autoFilterCachePolicy() { - return autoFilterCachePolicy; - } - public boolean queryStringLenient() { return this.queryStringLenient; } diff --git a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java index 3f394ff735e..07af9717dc1 100644 --- a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; @@ -143,13 +144,8 @@ public class MissingFilterParser implements FilterParser { boolFilter.add(filter, BooleanClause.Occur.SHOULD); } - // we always cache this one, really does not change... (exists) - // its ok to cache under the fieldName cacheKey, since its per segment and the mapping applies to this data on this segment... - existenceFilter = Queries.wrap(boolFilter); - existenceFilter = parseContext.cacheFilter(existenceFilter, new HashedBytesRef("$exists$" + fieldPattern), parseContext.autoFilterCachePolicy()); - existenceFilter = Queries.wrap(Queries.not(existenceFilter)); - // cache the not filter as well, so it will be faster - existenceFilter = parseContext.cacheFilter(existenceFilter, new HashedBytesRef("$missing$" + fieldPattern), parseContext.autoFilterCachePolicy()); + existenceFilter = new QueryWrapperFilter(boolFilter); + existenceFilter = new QueryWrapperFilter(Queries.not(existenceFilter));; } if (nullValue) { @@ -157,10 +153,6 @@ public class MissingFilterParser implements FilterParser { MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(field); if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) { nullFilter = smartNameFieldMappers.mapper().nullValueFilter(); - if (nullFilter != null) { - // cache the not filter as well, so it will be faster - nullFilter = parseContext.cacheFilter(nullFilter, new HashedBytesRef("$null$" + fieldPattern), parseContext.autoFilterCachePolicy()); - } } } } @@ -172,7 +164,7 @@ public class MissingFilterParser implements FilterParser { combined.add(existenceFilter, BooleanClause.Occur.SHOULD); combined.add(nullFilter, BooleanClause.Occur.SHOULD); // cache the not filter as well, so it will be faster - filter = parseContext.cacheFilter(Queries.wrap(combined), null, parseContext.autoFilterCachePolicy()); + filter = new QueryWrapperFilter(combined); } else { filter = nullFilter; } diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java index 0b15bbfcf14..c64c7ef327e 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java @@ -32,8 +32,6 @@ public class NestedFilterBuilder extends BaseFilterBuilder { private final String path; private Boolean join; - private Boolean cache; - private String cacheKey; private String filterName; private QueryInnerHitBuilder innerHit = null; @@ -55,19 +53,6 @@ public class NestedFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public NestedFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public NestedFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the filter name for the filter that can be used when searching for matched_filters per hit. */ @@ -101,12 +86,6 @@ public class NestedFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (innerHit != null) { builder.startObject("inner_hits"); builder.value(innerHit); diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java index fc2237d6630..467705afca7 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java @@ -20,12 +20,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ToParentBlockJoinQuery; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; @@ -53,8 +52,6 @@ public class NestedFilterParser implements FilterParser { final NestedQueryParser.ToBlockJoinQueryBuilder builder = new NestedQueryParser.ToBlockJoinQueryBuilder(parseContext); float boost = 1.0f; - boolean cache = false; - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -62,6 +59,8 @@ public class NestedFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { builder.query(); @@ -79,10 +78,6 @@ public class NestedFilterParser implements FilterParser { boost = parser.floatValue(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parser.booleanValue(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[nested] filter does not support [" + currentFieldName + "]"); } @@ -92,10 +87,7 @@ public class NestedFilterParser implements FilterParser { ToParentBlockJoinQuery joinQuery = builder.build(); if (joinQuery != null) { joinQuery.getChildQuery().setBoost(boost); - Filter nestedFilter = Queries.wrap(joinQuery, parseContext); - if (cache) { - nestedFilter = parseContext.cacheFilter(nestedFilter, cacheKey, parseContext.autoFilterCachePolicy()); - } + Filter nestedFilter = new QueryWrapperFilter(joinQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, nestedFilter); } diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java index f835b025f9c..1c5ea06f077 100644 --- a/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java @@ -32,22 +32,12 @@ public class NotFilterBuilder extends BaseFilterBuilder { private FilterBuilder filter; - private Boolean cache; - private String filterName; public NotFilterBuilder(FilterBuilder filter) { this.filter = filter; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public NotFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - public NotFilterBuilder filterName(String filterName) { this.filterName = filterName; return this; @@ -58,9 +48,6 @@ public class NotFilterBuilder extends BaseFilterBuilder { builder.startObject(NotFilterParser.NAME); builder.field("filter"); filter.toXContent(builder, params); - if (cache != null) { - builder.field("_cache", cache); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java index 38bff1997bb..7ebf0fe82dc 100644 --- a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java @@ -20,8 +20,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; @@ -49,8 +49,6 @@ public class NotFilterParser implements FilterParser { Filter filter = null; boolean filterFound = false; - boolean cache = false; - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -58,6 +56,8 @@ public class NotFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("filter".equals(currentFieldName)) { filter = parseContext.parseInnerFilter(); @@ -72,12 +72,8 @@ public class NotFilterParser implements FilterParser { // its the filter, and the name is the field filter = parseContext.parseInnerFilter(currentFieldName); } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parser.booleanValue(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[not] filter does not support [" + currentFieldName + "]"); } @@ -92,10 +88,7 @@ public class NotFilterParser implements FilterParser { return null; } - Filter notFilter = Queries.wrap(Queries.not(filter)); - if (cache) { - notFilter = parseContext.cacheFilter(notFilter, cacheKey, parseContext.autoFilterCachePolicy()); - } + Filter notFilter = new QueryWrapperFilter(Queries.not(filter)); if (filterName != null) { parseContext.addNamedFilter(filterName, notFilter); } diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java index 04d516b00b7..2feca66fc56 100644 --- a/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java @@ -34,9 +34,6 @@ public class OrFilterBuilder extends BaseFilterBuilder { private ArrayList filters = Lists.newArrayList(); - private Boolean cache; - private String cacheKey; - private String filterName; public OrFilterBuilder(FilterBuilder... filters) { @@ -53,19 +50,6 @@ public class OrFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public OrFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public OrFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - public OrFilterBuilder filterName(String filterName) { this.filterName = filterName; return this; @@ -79,12 +63,6 @@ public class OrFilterBuilder extends BaseFilterBuilder { filter.toXContent(builder, params); } builder.endArray(); - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java index 22932ac8290..dae526563c6 100644 --- a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -56,9 +54,6 @@ public class OrFilterParser implements FilterParser { ArrayList filters = newArrayList(); boolean filtersFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token = parser.currentToken(); @@ -93,12 +88,8 @@ public class OrFilterParser implements FilterParser { } } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[or] filter does not support [" + currentFieldName + "]"); } @@ -119,10 +110,7 @@ public class OrFilterParser implements FilterParser { for (Filter filter : filters) { boolQuery.add(filter, Occur.SHOULD); } - Filter filter = Queries.wrap(boolQuery); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(boolQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java index bb41e4f104a..d202fcc281f 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java @@ -35,9 +35,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { private final String prefix; - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -60,19 +57,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public PrefixFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public PrefixFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(PrefixFilterParser.NAME); @@ -80,12 +64,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java index c6bf3fe0a95..d1f291e4606 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java @@ -22,11 +22,9 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; import org.apache.lucene.search.PrefixQuery; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -52,8 +50,6 @@ public class PrefixFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object value = null; @@ -66,10 +62,6 @@ public class PrefixFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { fieldName = currentFieldName; value = parser.objectBytes(); @@ -88,12 +80,9 @@ public class PrefixFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().prefixFilter(value, parseContext); } if (filter == null) { - filter = Queries.wrap(new PrefixQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); + filter = new QueryWrapperFilter(new PrefixQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java index cf5db0f0ac1..c4af2419b5c 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java @@ -32,8 +32,6 @@ public class QueryFilterBuilder extends BaseFilterBuilder { private final QueryBuilder queryBuilder; - private Boolean cache; - private String filterName; /** @@ -53,17 +51,9 @@ public class QueryFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public QueryFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { - if (filterName == null && cache == null) { + if (filterName == null) { builder.field(QueryFilterParser.NAME); queryBuilder.toXContent(builder, params); } else { @@ -73,9 +63,6 @@ public class QueryFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java index aaa5a9d1e99..bdc09dbee78 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java @@ -21,8 +21,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import java.io.IOException; @@ -48,6 +48,6 @@ public class QueryFilterParser implements FilterParser { if (query == null) { return null; } - return Queries.wrap(query, parseContext); + return new QueryWrapperFilter(query); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index 39c0543759b..aaf247e90cc 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -22,25 +22,17 @@ package org.elasticsearch.index.query; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queryparser.classic.MapperQueryParser; import org.apache.lucene.queryparser.classic.QueryParserSettings; -import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.search.similarities.Similarity; -import org.apache.lucene.util.Bits; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.NoCacheFilter; -import org.elasticsearch.common.lucene.search.NoCacheQuery; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.lucene.search.ResolvableFilter; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; @@ -55,7 +47,6 @@ import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.query.support.NestedScope; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; @@ -75,6 +66,9 @@ import java.util.Map; */ public class QueryParseContext { + private static final ParseField CACHE = new ParseField("_cache").withAllDeprecated("Elasticsearch makes its own caching decisions"); + private static final ParseField CACHE_KEY = new ParseField("_cache_key").withAllDeprecated("Filters are always used as cache keys"); + private static ThreadLocal typesContext = new ThreadLocal<>(); public static void setTypes(String[] types) { @@ -97,10 +91,6 @@ public class QueryParseContext { private final Index index; - private boolean propagateNoCache = false; - - private boolean requireCustomQueryWrappingFilter = false; - private final IndexQueryParserService indexQueryParser; private final Map namedFilters = Maps.newHashMap(); @@ -111,8 +101,6 @@ public class QueryParseContext { private EnumSet parseFlags = ParseField.EMPTY_FLAGS; - private final boolean disableFilterCaching; - private boolean allowUnmappedFields; private boolean mapUnmappedFieldAsString; @@ -120,14 +108,8 @@ public class QueryParseContext { private NestedScope nestedScope; public QueryParseContext(Index index, IndexQueryParserService indexQueryParser) { - this(index, indexQueryParser, false); - } - - public QueryParseContext(Index index, IndexQueryParserService indexQueryParser, boolean disableFilterCaching) { this.index = index; this.indexQueryParser = indexQueryParser; - this.propagateNoCache = disableFilterCaching; - this.disableFilterCaching = disableFilterCaching; } public void parseFlags(EnumSet parseFlags) { @@ -144,8 +126,6 @@ public class QueryParseContext { this.lookup = null; this.parser = jp; this.namedFilters.clear(); - this.requireCustomQueryWrappingFilter = false; - this.propagateNoCache = false; this.nestedScope = new NestedScope(); } @@ -190,24 +170,6 @@ public class QueryParseContext { return indexQueryParser.defaultField(); } - public QueryCachingPolicy autoFilterCachePolicy() { - return indexQueryParser.autoFilterCachePolicy(); - } - - public QueryCachingPolicy parseFilterCachePolicy() throws IOException { - final String text = parser.textOrNull(); - if (text == null || text.equals("auto")) { - return autoFilterCachePolicy(); - } else if (parser.booleanValue()) { - // cache without conditions on how many times the filter has been - // used or what the produced DocIdSet looks like, but ONLY on large - // segments to not pollute the cache - return QueryCachingPolicy.CacheOnLargeSegments.DEFAULT; - } else { - return null; - } - } - public boolean queryStringLenient() { return indexQueryParser.queryStringLenient(); } @@ -221,38 +183,6 @@ public class QueryParseContext { return indexQueryParser.bitsetFilterCache.getBitDocIdSetFilter(filter); } - public Filter cacheFilter(Filter filter, final @Nullable HashedBytesRef cacheKey, final QueryCachingPolicy cachePolicy) { - if (filter == null) { - return null; - } - if (this.disableFilterCaching || this.propagateNoCache || filter instanceof NoCacheFilter) { - return filter; - } - if (filter instanceof ResolvableFilter) { - final ResolvableFilter resolvableFilter = (ResolvableFilter) filter; - // We need to wrap it another filter, because this method is invoked at query parse time, which - // may not be during search execution time. (for example index alias filter and percolator) - return new Filter() { - @Override - public DocIdSet getDocIdSet(LeafReaderContext atomicReaderContext, Bits bits) throws IOException { - Filter filter = resolvableFilter.resolve(); - if (filter == null) { - return null; - } - filter = indexQueryParser.indexCache.filter().cache(filter, cacheKey, cachePolicy); - return filter.getDocIdSet(atomicReaderContext, bits); - } - - @Override - public String toString(String field) { - return "AnonymousResolvableFilter"; // TODO: not sure what is going on here - } - }; - } else { - return indexQueryParser.indexCache.filter().cache(filter, cacheKey, cachePolicy); - } - } - public > IFD getForField(FieldMapper mapper) { return indexQueryParser.fieldDataService.getForField(mapper); } @@ -262,7 +192,7 @@ public class QueryParseContext { } public void addNamedQuery(String name, Query query) { - namedFilters.put(name, Queries.wrap(query, this)); + namedFilters.put(name, new QueryWrapperFilter(query)); } public ImmutableMap copyNamedFilters() { @@ -315,16 +245,6 @@ public class QueryParseContext { // if we are at END_OBJECT, move to the next one... parser.nextToken(); } - if (result instanceof NoCacheQuery) { - propagateNoCache = true; - } - if (CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(result)) { - requireCustomQueryWrappingFilter = true; - // If later on, either directly or indirectly this query gets wrapped in a query filter it must never - // get cached even if a filter higher up the chain is configured to do this. This will happen, because - // the result filter will be instance of NoCacheFilter (CustomQueryWrappingFilter) which will in - // #executeFilterParser() set propagateNoCache to true. - } return result; } @@ -357,7 +277,7 @@ public class QueryParseContext { if (filterParser == null) { throw new QueryParsingException(this, "No filter registered for [" + filterName + "]"); } - Filter result = executeFilterParser(filterParser); + Filter result = filterParser.parse(this); if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) { // if we are at END_OBJECT, move to the next one... parser.nextToken(); @@ -370,18 +290,7 @@ public class QueryParseContext { if (filterParser == null) { throw new QueryParsingException(this, "No filter registered for [" + filterName + "]"); } - return executeFilterParser(filterParser); - } - - private Filter executeFilterParser(FilterParser filterParser) throws IOException { - final boolean propagateNoCache = this.propagateNoCache; // first safe the state that we need to restore - this.propagateNoCache = false; // parse the subfilter with caching, that's fine - Filter result = filterParser.parse(this); - // now make sure we set propagateNoCache to true if it is true already or if the result is - // an instance of NoCacheFilter or if we used to be true! all filters above will - // be not cached ie. wrappers of this filter! - this.propagateNoCache |= (result instanceof NoCacheFilter) || propagateNoCache; - return result; + return filterParser.parse(this); } public FieldMapper fieldMapper(String name) { @@ -475,11 +384,14 @@ public class QueryParseContext { return System.currentTimeMillis(); } - public boolean requireCustomQueryWrappingFilter() { - return requireCustomQueryWrappingFilter; - } - public NestedScope nestedScope() { return nestedScope; } + + /** + * Return whether the setting is deprecated. + */ + public boolean isDeprecatedSetting(String setting) { + return CACHE.match(setting) || CACHE_KEY.match(setting); + } } diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java index 80149821438..42753179b07 100644 --- a/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java @@ -41,9 +41,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder { private boolean includeUpper = true; - private Boolean cache; - private String cacheKey; - private String filterName; private String execution; @@ -341,19 +338,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to true. - */ - public RangeFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public RangeFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the execution mode that controls how the range filter is executed. Valid values are: "index" and "fielddata". *
      @@ -397,12 +381,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (execution != null) { builder.field("execution", execution); } diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java index 8b5f557d0ba..83650aecd5d 100644 --- a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java @@ -20,14 +20,12 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; @@ -57,8 +55,6 @@ public class RangeFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object from = null; Object to = null; @@ -74,6 +70,8 @@ public class RangeFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -112,10 +110,6 @@ public class RangeFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("execution".equals(currentFieldName)) { execution = parser.text(); } else { @@ -174,12 +168,9 @@ public class RangeFilterParser implements FilterParser { } if (filter == null) { - filter = Queries.wrap(new TermRangeQuery(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper)); + filter = new QueryWrapperFilter(new TermRangeQuery(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper)); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java index f199f83b5a5..f730c084be3 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java @@ -38,8 +38,6 @@ public class RegexpFilterBuilder extends BaseFilterBuilder { private int maxDeterminizedStates = Operations.DEFAULT_MAX_DETERMINIZED_STATES; private boolean maxDetermizedStatesSet; - private Boolean cache; - private String cacheKey; private String filterName; /** @@ -87,19 +85,6 @@ public class RegexpFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public RegexpFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public RegexpFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(RegexpFilterParser.NAME); @@ -118,12 +103,6 @@ public class RegexpFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java index 5f1d9174fc7..9612812ffe9 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java @@ -21,13 +21,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -53,8 +51,6 @@ public class RegexpFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; String secondaryFieldName = null; Object value = null; @@ -68,6 +64,8 @@ public class RegexpFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -91,10 +89,6 @@ public class RegexpFilterParser implements FilterParser { } else { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { secondaryFieldName = currentFieldName; secondaryValue = parser.objectBytes(); @@ -118,11 +112,7 @@ public class RegexpFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().regexpFilter(value, flagsValue, maxDeterminizedStates, parseContext); } if (filter == null) { - filter = Queries.wrap(new RegexpQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), flagsValue, maxDeterminizedStates)); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new RegexpQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), flagsValue, maxDeterminizedStates)); } if (filterName != null) { diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java index 75ffa386198..89f0fe7f033 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java @@ -37,9 +37,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { private String lang; - private Boolean cache; - private String cacheKey; - private String filterName; public ScriptFilterBuilder(String script) { @@ -79,19 +76,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public ScriptFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public ScriptFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(ScriptFilterParser.NAME); @@ -105,12 +89,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java index 54dbe6cc1db..7e4b59a5748 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java @@ -24,11 +24,9 @@ import org.apache.lucene.search.BitsFilteredDocIdSet; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocValuesDocIdSet; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.Bits; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; @@ -41,6 +39,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Map; +import java.util.Objects; import static com.google.common.collect.Maps.newHashMap; @@ -67,8 +66,6 @@ public class ScriptFilterParser implements FilterParser { XContentParser.Token token; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; // also, when caching, since its isCacheable is false, will result in loading all bit set... String script = null; String scriptLang; @@ -81,6 +78,8 @@ public class ScriptFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("params".equals(currentFieldName)) { params = parser.map(); @@ -90,10 +89,6 @@ public class ScriptFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if (!scriptParameterParser.token(currentFieldName, token, parser)){ throw new QueryParsingException(parseContext, "[script] filter does not support [" + currentFieldName + "]"); } @@ -115,9 +110,6 @@ public class ScriptFilterParser implements FilterParser { } Filter filter = new ScriptFilter(scriptLang, script, scriptType, params, parseContext.scriptService(), parseContext.lookup()); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } @@ -150,7 +142,7 @@ public class ScriptFilterParser implements FilterParser { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; ScriptFilter that = (ScriptFilter) o; @@ -162,8 +154,9 @@ public class ScriptFilterParser implements FilterParser { @Override public int hashCode() { - int result = script != null ? script.hashCode() : 0; - result = 31 * result + (params != null ? params.hashCode() : 0); + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(script); + result = 31 * result + Objects.hashCode(params); return result; } diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java index 74349a00603..3ca5069127d 100644 --- a/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java @@ -34,9 +34,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { private final Object value; - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -108,19 +105,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to true. - */ - public TermFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermFilterParser.NAME); @@ -128,12 +112,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java index ca077b91ee3..0224a6384da 100644 --- a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java @@ -21,12 +21,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -52,8 +50,6 @@ public class TermFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object value = null; @@ -63,6 +59,8 @@ public class TermFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { // also support a format of "term" : {"field_name" : { ... }} fieldName = currentFieldName; @@ -76,10 +74,6 @@ public class TermFilterParser implements FilterParser { value = parser.objectBytes(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[term] filter does not support [" + currentFieldName + "]"); } @@ -88,10 +82,6 @@ public class TermFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { fieldName = currentFieldName; value = parser.objectBytes(); @@ -113,11 +103,7 @@ public class TermFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().termFilter(value, parseContext); } if (filter == null) { - filter = Queries.wrap(new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); } if (filterName != null) { @@ -125,4 +111,4 @@ public class TermFilterParser implements FilterParser { } return filter; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java index a6331fb51a6..d753235e341 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java @@ -32,9 +32,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { private final Object values; - private Boolean cache; - private String cacheKey; - private String filterName; private String execution; @@ -134,19 +131,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public TermsFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermsFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermsFilterParser.NAME); @@ -159,12 +143,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java index 46c52b80f64..ce8a8122665 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java @@ -23,14 +23,13 @@ import com.google.common.collect.Lists; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -71,7 +70,6 @@ public class TermsFilterParser implements FilterParser { XContentParser parser = parseContext.parser(); MapperService.SmartNameFieldMappers smartNameFieldMappers; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); String filterName = null; String currentFieldName = null; @@ -81,13 +79,14 @@ public class TermsFilterParser implements FilterParser { String lookupPath = null; String lookupRouting = null; - HashedBytesRef cacheKey = null; XContentParser.Token token; List terms = Lists.newArrayList(); String fieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { if (fieldName != null) { throw new QueryParsingException(parseContext, "[terms] filter does not support multiple fields"); @@ -137,10 +136,6 @@ public class TermsFilterParser implements FilterParser { // ignore } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { throw new QueryParsingException(parseContext, "[terms] filter does not support [" + currentFieldName + "]"); } @@ -181,11 +176,7 @@ public class TermsFilterParser implements FilterParser { for (int i = 0; i < filterValues.length; i++) { filterValues[i] = BytesRefs.toBytesRef(terms.get(i)); } - filter = Queries.wrap(new TermsQuery(fieldName, filterValues)); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new TermsQuery(fieldName, filterValues)); } if (filterName != null) { diff --git a/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java index 1c23c8f338c..1a9473ede40 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java @@ -36,8 +36,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { private String lookupPath; private Boolean lookupCache; - private Boolean cache; - private String cacheKey; private String filterName; public TermsLookupFilterBuilder(String name) { @@ -94,16 +92,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { return this; } - public TermsLookupFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermsLookupFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermsFilterParser.NAME); @@ -126,12 +114,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java index 095a849b792..6c1b0e45aaa 100644 --- a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; @@ -29,7 +30,6 @@ import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.query.support.XContentStructure; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; import org.elasticsearch.index.search.child.TopChildrenQuery; @@ -130,11 +130,11 @@ public class TopChildrenQueryParser implements QueryParser { innerQuery.setBoost(boost); // wrap the query with type query - innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); + innerQuery = new FilteredQuery(innerQuery, childDocMapper.typeFilter()); ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper); TopChildrenQuery query = new TopChildrenQuery(parentChildIndexFieldData, innerQuery, childType, parentType, scoreType, factor, incrementalFactor, nonNestedDocsFilter); if (queryName != null) { - parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query)); + parseContext.addNamedFilter(queryName, new QueryWrapperFilter(query)); } return query; } diff --git a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java index a6248a4e228..6c9e9523e76 100644 --- a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java @@ -21,10 +21,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; @@ -68,10 +68,10 @@ public class TypeFilterParser implements FilterParser { //LUCENE 4 UPGRADE document mapper should use bytesref as well? DocumentMapper documentMapper = parseContext.mapperService().documentMapper(type.utf8ToString()); if (documentMapper == null) { - filter = Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, type))); + filter = new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, type))); } else { filter = documentMapper.typeFilter(); } - return parseContext.cacheFilter(filter, null, parseContext.autoFilterCachePolicy()); + return filter; } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java index 2b9363da61f..7b4faa3369e 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.XFilteredDocIdSetIterator; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; import org.elasticsearch.index.fielddata.AtomicParentChildFieldData; @@ -52,55 +53,41 @@ import java.util.Set; * */ // TODO: Remove me and move the logic to ChildrenQuery when needsScore=false -public class ChildrenConstantScoreQuery extends Query { +public class ChildrenConstantScoreQuery extends IndexCacheableQuery { private final IndexParentChildFieldData parentChildIndexFieldData; - private Query originalChildQuery; + private final Query childQuery; private final String parentType; private final String childType; private final Filter parentFilter; private final int shortCircuitParentDocSet; private final BitDocIdSetFilter nonNestedDocsFilter; - private Query rewrittenChildQuery; - private IndexReader rewriteIndexReader; - public ChildrenConstantScoreQuery(IndexParentChildFieldData parentChildIndexFieldData, Query childQuery, String parentType, String childType, Filter parentFilter, int shortCircuitParentDocSet, BitDocIdSetFilter nonNestedDocsFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; this.parentFilter = parentFilter; this.parentType = parentType; this.childType = childType; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.shortCircuitParentDocSet = shortCircuitParentDocSet; this.nonNestedDocsFilter = nonNestedDocsFilter; } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewrittenChildQuery = originalChildQuery.rewrite(reader); - rewriteIndexReader = reader; + final Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + ChildrenConstantScoreQuery rewritten = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childRewritten, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ChildrenConstantScoreQuery q = (ChildrenConstantScoreQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); IndexParentChildFieldData globalIfd = parentChildIndexFieldData.loadGlobal(searcher.getIndexReader()); - assert rewrittenChildQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); final long valueCount; List leaves = searcher.getIndexReader().leaves(); @@ -116,9 +103,9 @@ public class ChildrenConstantScoreQuery extends Query { return new BooleanQuery().createWeight(searcher, needsScores); } - Query childQuery = rewrittenChildQuery; IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); ParentOrdCollector collector = new ParentOrdCollector(globalIfd, valueCount, parentType); indexSearcher.search(childQuery, collector); @@ -141,12 +128,12 @@ public class ChildrenConstantScoreQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ChildrenConstantScoreQuery that = (ChildrenConstantScoreQuery) obj; - if (!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if (!childType.equals(that.childType)) { @@ -155,24 +142,21 @@ public class ChildrenConstantScoreQuery extends Query { if (shortCircuitParentDocSet != that.shortCircuitParentDocSet) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + childType.hashCode(); result = 31 * result + shortCircuitParentDocSet; - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @Override public String toString(String field) { - return "child_filter[" + childType + "/" + parentType + "](" + originalChildQuery + ')'; + return "child_filter[" + childType + "/" + parentType + "](" + childQuery + ')'; } private final class ParentWeight extends Weight { diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java index e253e1a848d..18f004f7133 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java @@ -40,6 +40,7 @@ import org.apache.lucene.util.ToStringUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; import org.elasticsearch.common.util.BigArrays; @@ -63,28 +64,25 @@ import java.util.Set; * all parent documents having the same uid value that is collected in the first phase are emitted as hit including * a score based on the aggregated child scores and score type. */ -public class ChildrenQuery extends Query { +public final class ChildrenQuery extends IndexCacheableQuery { protected final ParentChildIndexFieldData ifd; protected final String parentType; protected final String childType; protected final Filter parentFilter; protected final ScoreType scoreType; - protected Query originalChildQuery; + protected Query childQuery; protected final int minChildren; protected final int maxChildren; protected final int shortCircuitParentDocSet; protected final BitDocIdSetFilter nonNestedDocsFilter; - protected Query rewrittenChildQuery; - protected IndexReader rewriteIndexReader; - public ChildrenQuery(ParentChildIndexFieldData ifd, String parentType, String childType, Filter parentFilter, Query childQuery, ScoreType scoreType, int minChildren, int maxChildren, int shortCircuitParentDocSet, BitDocIdSetFilter nonNestedDocsFilter) { this.ifd = ifd; this.parentType = parentType; this.childType = childType; this.parentFilter = parentFilter; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.scoreType = scoreType; this.shortCircuitParentDocSet = shortCircuitParentDocSet; this.nonNestedDocsFilter = nonNestedDocsFilter; @@ -93,25 +91,33 @@ public class ChildrenQuery extends Query { this.maxChildren = maxChildren; } + @Override + public Query rewrite(IndexReader reader) throws IOException { + final Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + Query rewritten = new ChildrenQuery(ifd, parentType, childType, parentFilter, childRewritten, scoreType, minChildren, maxChildren, shortCircuitParentDocSet, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; + } + return super.rewrite(reader); + } + @Override public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ChildrenQuery that = (ChildrenQuery) obj; - if (!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if (!childType.equals(that.childType)) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } if (minChildren != that.minChildren) { return false; } @@ -123,9 +129,9 @@ public class ChildrenQuery extends Query { @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + childType.hashCode(); - result = 31 * result + Float.floatToIntBits(getBoost()); result = 31 * result + minChildren; result = 31 * result + maxChildren; return result; @@ -135,36 +141,12 @@ public class ChildrenQuery extends Query { public String toString(String field) { int max = maxChildren == 0 ? Integer.MAX_VALUE : maxChildren; return "ChildrenQuery[min(" + Integer.toString(minChildren) + ") max(" + Integer.toString(max) + ")of " + childType + "/" - + parentType + "](" + originalChildQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); + + parentType + "](" + childQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); } @Override - // See TopChildrenQuery#rewrite - public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewriteIndexReader = reader; - rewrittenChildQuery = originalChildQuery.rewrite(reader); - } - return this; - } - - @Override - public Query clone() { - ChildrenQuery q = (ChildrenQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); - assert rewrittenChildQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader - + " searcher.getIndexReader()=" + searcher.getIndexReader(); - final Query childQuery = rewrittenChildQuery; IndexParentChildFieldData globalIfd = ifd.loadGlobal(searcher.getIndexReader()); if (globalIfd == null) { @@ -173,6 +155,7 @@ public class ChildrenQuery extends Query { } IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); boolean abort = true; long numFoundParents; @@ -230,7 +213,7 @@ public class ChildrenQuery extends Query { } else { parentFilter = this.parentFilter; } - return new ParentWeight(this, rewrittenChildQuery.createWeight(searcher, needsScores), parentFilter, numFoundParents, collector, minChildren, + return new ParentWeight(this, childQuery.createWeight(searcher, needsScores), parentFilter, numFoundParents, collector, minChildren, maxChildren); } diff --git a/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java b/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java deleted file mode 100644 index 0adfff07e43..00000000000 --- a/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.search.child; - -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.*; -import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lucene.search.NoCacheFilter; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContext.Lifetime; - -import java.io.IOException; -import java.util.IdentityHashMap; - -/** - * Forked from {@link QueryWrapperFilter} to make sure the weight is only created once. - * This filter should never be cached! This filter only exists for internal usage. - * - * @elasticsearch.internal - */ -public class CustomQueryWrappingFilter extends NoCacheFilter implements Releasable { - - private final Query query; - - private IndexSearcher searcher; - private IdentityHashMap docIdSets; - - /** Constructs a filter which only matches documents matching - * query. - */ - public CustomQueryWrappingFilter(Query query) { - if (query == null) - throw new NullPointerException("Query may not be null"); - this.query = query; - } - - /** returns the inner Query */ - public final Query getQuery() { - return query; - } - - @Override - public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) throws IOException { - final SearchContext searchContext = SearchContext.current(); - if (docIdSets == null) { - assert searcher == null; - IndexSearcher searcher = searchContext.searcher(); - docIdSets = new IdentityHashMap<>(); - this.searcher = searcher; - searchContext.addReleasable(this, Lifetime.COLLECTION); - - final Weight weight = searcher.createNormalizedWeight(query, false); - for (final LeafReaderContext leaf : searcher.getTopReaderContext().leaves()) { - final DocIdSet set = new DocIdSet() { - @Override - public DocIdSetIterator iterator() throws IOException { - return weight.scorer(leaf, null); - } - @Override - public boolean isCacheable() { return false; } - - @Override - public long ramBytesUsed() { - return 0; - } - }; - docIdSets.put(leaf.reader(), set); - } - } else { - assert searcher == SearchContext.current().searcher(); - } - final DocIdSet set = docIdSets.get(context.reader()); - return BitsFilteredDocIdSet.wrap(set, acceptDocs); - } - - @Override - public void close() { - // We need to clear the docIdSets, otherwise this is leaved unused - // DocIdSets around and can potentially become a memory leak. - docIdSets = null; - searcher = null; - } - - @Override - public String toString(String field) { - return "CustomQueryWrappingFilter(" + query + ")"; - } - - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } - if (o != null && o instanceof CustomQueryWrappingFilter && - this.query.equals(((CustomQueryWrappingFilter)o).query)) { - return true; - } - - return false; - } - - @Override - public int hashCode() { - return query.hashCode() ^ 0x823D64C9; - } - - /** @return Whether {@link CustomQueryWrappingFilter} should be used. */ - public static boolean shouldUseCustomQueryWrappingFilter(Query query) { - if (query instanceof TopChildrenQuery || query instanceof ChildrenConstantScoreQuery - || query instanceof ChildrenQuery || query instanceof ParentConstantScoreQuery - || query instanceof ParentQuery) { - return true; - } else { - return false; - } - } -} diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java index ce1be7e25d3..5d2d1101ff7 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; import org.elasticsearch.index.fielddata.AtomicParentChildFieldData; @@ -47,48 +48,34 @@ import java.util.Set; /** * A query that only return child documents that are linked to the parent documents that matched with the inner query. */ -public class ParentConstantScoreQuery extends Query { +public class ParentConstantScoreQuery extends IndexCacheableQuery { private final ParentChildIndexFieldData parentChildIndexFieldData; - private Query originalParentQuery; + private Query parentQuery; private final String parentType; private final Filter childrenFilter; - private Query rewrittenParentQuery; - private IndexReader rewriteIndexReader; - public ParentConstantScoreQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query parentQuery, String parentType, Filter childrenFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalParentQuery = parentQuery; + this.parentQuery = parentQuery; this.parentType = parentType; this.childrenFilter = childrenFilter; } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenParentQuery == null) { - rewrittenParentQuery = originalParentQuery.rewrite(reader); - rewriteIndexReader = reader; + Query parentRewritten = parentQuery.rewrite(reader); + if (parentRewritten != parentQuery) { + Query rewritten = new ParentConstantScoreQuery(parentChildIndexFieldData, parentRewritten, parentType, childrenFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ParentConstantScoreQuery q = (ParentConstantScoreQuery) super.clone(); - q.originalParentQuery = originalParentQuery.clone(); - if (q.rewrittenParentQuery != null) { - q.rewrittenParentQuery = rewrittenParentQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { IndexParentChildFieldData globalIfd = parentChildIndexFieldData.loadGlobal(searcher.getIndexReader()); - assert rewrittenParentQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); final long maxOrd; List leaves = searcher.getIndexReader().leaves(); @@ -104,10 +91,10 @@ public class ParentConstantScoreQuery extends Query { return new BooleanQuery().createWeight(searcher, needsScores); } - final Query parentQuery = rewrittenParentQuery; ParentOrdsCollector collector = new ParentOrdsCollector(globalIfd, maxOrd, parentType); IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); indexSearcher.search(parentQuery, collector); if (collector.parentCount() == 0) { @@ -119,9 +106,9 @@ public class ParentConstantScoreQuery extends Query { @Override public int hashCode() { - int result = originalParentQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + parentQuery.hashCode(); result = 31 * result + parentType.hashCode(); - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @@ -130,26 +117,23 @@ public class ParentConstantScoreQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ParentConstantScoreQuery that = (ParentConstantScoreQuery) obj; - if (!originalParentQuery.equals(that.originalParentQuery)) { + if (!parentQuery.equals(that.parentQuery)) { return false; } if (!parentType.equals(that.parentType)) { return false; } - if (this.getBoost() != that.getBoost()) { - return false; - } return true; } @Override public String toString(String field) { - return "parent_filter[" + parentType + "](" + originalParentQuery + ')'; + return "parent_filter[" + parentType + "](" + parentQuery + ')'; } private final class ChildrenWeight extends Weight { diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java index cf16d78af77..cc34da404bb 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitDocIdSet; @@ -40,7 +41,6 @@ import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LongBitSet; import org.apache.lucene.util.SparseFixedBitSet; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.index.mapper.Uid; @@ -66,9 +66,9 @@ final class ParentIdsFilter extends Filter { BooleanQuery bq = new BooleanQuery(); bq.add(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))), Occur.MUST); bq.add(nonNestedDocsFilter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { - return Queries.wrap(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); + return new QueryWrapperFilter(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); } } else { BytesRefHash parentIds= null; @@ -98,9 +98,9 @@ final class ParentIdsFilter extends Filter { BooleanQuery bq = new BooleanQuery(); bq.add(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))), Occur.MUST); bq.add(nonNestedDocsFilter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { - return Queries.wrap(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); + return new QueryWrapperFilter(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); } } else { BytesRefHash parentIds = null; diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java index 388f5e46a35..ec3ed4862e8 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java @@ -24,19 +24,21 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Filter; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.apache.lucene.util.ToStringUtils; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; import org.elasticsearch.common.util.BigArrays; @@ -55,19 +57,16 @@ import java.util.Set; * connects the matching parent docs to the related child documents * using the {@link ParentChildIndexFieldData}. */ -public class ParentQuery extends Query { +public class ParentQuery extends IndexCacheableQuery { private final ParentChildIndexFieldData parentChildIndexFieldData; - private Query originalParentQuery; + private Query parentQuery; private final String parentType; private final Filter childrenFilter; - private Query rewrittenParentQuery; - private IndexReader rewriteIndexReader; - public ParentQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query parentQuery, String parentType, Filter childrenFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalParentQuery = parentQuery; + this.parentQuery = parentQuery; this.parentType = parentType; this.childrenFilter = childrenFilter; } @@ -77,26 +76,24 @@ public class ParentQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ParentQuery that = (ParentQuery) obj; - if (!originalParentQuery.equals(that.originalParentQuery)) { + if (!parentQuery.equals(that.parentQuery)) { return false; } if (!parentType.equals(that.parentType)) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalParentQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + parentQuery.hashCode(); result = 31 * result + parentType.hashCode(); result = 31 * result + Float.floatToIntBits(getBoost()); return result; @@ -104,31 +101,22 @@ public class ParentQuery extends Query { @Override public String toString(String field) { - return "ParentQuery[" + parentType + "](" + originalParentQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); + return "ParentQuery[" + parentType + "](" + parentQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenParentQuery == null) { - rewriteIndexReader = reader; - rewrittenParentQuery = originalParentQuery.rewrite(reader); + Query parentRewritten = parentQuery.rewrite(reader); + if (parentRewritten != parentQuery) { + Query rewritten = new ParentQuery(parentChildIndexFieldData, parentRewritten, parentType, childrenFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ParentQuery q = (ParentQuery) super.clone(); - q.originalParentQuery = originalParentQuery.clone(); - if (q.rewrittenParentQuery != null) { - q.rewrittenParentQuery = rewrittenParentQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); ChildWeight childWeight; boolean releaseCollectorResource = true; @@ -140,12 +128,10 @@ public class ParentQuery extends Query { } try { - assert rewrittenParentQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); - final Query parentQuery = rewrittenParentQuery; collector = new ParentOrdAndScoreCollector(sc, globalIfd, parentType); IndexSearcher indexSearcher = new IndexSearcher(sc.searcher().getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); indexSearcher.search(parentQuery, collector); if (collector.parentCount() == 0) { return new BooleanQuery().createWeight(searcher, needsScores); @@ -162,7 +148,7 @@ public class ParentQuery extends Query { return childWeight; } - private static class ParentOrdAndScoreCollector extends NoopCollector implements Releasable { + private static class ParentOrdAndScoreCollector implements Collector, Releasable { private final LongHash parentIdxs; private FloatArray scores; @@ -170,9 +156,6 @@ public class ParentQuery extends Query { private final BigArrays bigArrays; private final String parentType; - private Scorer scorer; - private SortedDocValues values; - ParentOrdAndScoreCollector(SearchContext searchContext, IndexParentChildFieldData globalIfd, String parentType) { this.bigArrays = searchContext.bigArrays(); this.parentIdxs = new LongHash(512, bigArrays); @@ -182,30 +165,36 @@ public class ParentQuery extends Query { } @Override - public void collect(int doc) throws IOException { - // It can happen that for particular segment no document exist for an specific type. This prevents NPE - if (values != null) { - long globalOrdinal = values.getOrd(doc); - if (globalOrdinal != SortedSetDocValues.NO_MORE_ORDS) { - long parentIdx = parentIdxs.add(globalOrdinal); - if (parentIdx >= 0) { - scores = bigArrays.grow(scores, parentIdx + 1); - scores.set(parentIdx, scorer.score()); - } else { - assert false : "parent id should only match once, since there can only be one parent doc"; + public boolean needsScores() { + return true; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final SortedDocValues values = globalIfd.load(context).getOrdinalsValues(parentType); + if (values == null) { + return NoopCollector.NOOP_COLLECTOR; + } + return new LeafCollector() { + Scorer scorer; + @Override + public void setScorer(Scorer scorer) throws IOException { + this.scorer = scorer; + } + @Override + public void collect(int doc) throws IOException { + long globalOrdinal = values.getOrd(doc); + if (globalOrdinal != SortedSetDocValues.NO_MORE_ORDS) { + long parentIdx = parentIdxs.add(globalOrdinal); + if (parentIdx >= 0) { + scores = bigArrays.grow(scores, parentIdx + 1); + scores.set(parentIdx, scorer.score()); + } else { + assert false : "parent id should only match once, since there can only be one parent doc"; + } } } - } - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - values = globalIfd.load(context).getOrdinalsValues(parentType); + }; } @Override @@ -262,12 +251,16 @@ public class ParentQuery extends Query { if (DocIdSets.isEmpty(childrenDocSet)) { return null; } + final DocIdSetIterator childIterator = childrenDocSet.iterator(); + if (childIterator == null) { + return null; + } SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType); if (bytesValues == null) { return null; } - return new ChildScorer(this, parentIdxs, scores, childrenDocSet.iterator(), bytesValues); + return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues); } } diff --git a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java index 0529350863f..4fc233b21b9 100644 --- a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.*; import org.apache.lucene.util.*; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.search.EmptyScorer; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.index.fielddata.IndexParentChildFieldData; @@ -53,7 +54,7 @@ import java.util.Set; * This query is most of the times faster than the {@link ChildrenQuery}. Usually enough parent documents can be returned * in the first child document query round. */ -public class TopChildrenQuery extends Query { +public class TopChildrenQuery extends IndexCacheableQuery { private static final ParentDocComparator PARENT_DOC_COMP = new ParentDocComparator(); @@ -63,17 +64,13 @@ public class TopChildrenQuery extends Query { private final ScoreType scoreType; private final int factor; private final int incrementalFactor; - private Query originalChildQuery; + private Query childQuery; private final BitDocIdSetFilter nonNestedDocsFilter; - // This field will hold the rewritten form of originalChildQuery, so that we can reuse it - private Query rewrittenChildQuery; - private IndexReader rewriteIndexReader; - // Note, the query is expected to already be filtered to only child type docs public TopChildrenQuery(IndexParentChildFieldData parentChildIndexFieldData, Query childQuery, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor, BitDocIdSetFilter nonNestedDocsFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.childType = childType; this.parentType = parentType; this.scoreType = scoreType; @@ -82,35 +79,19 @@ public class TopChildrenQuery extends Query { this.nonNestedDocsFilter = nonNestedDocsFilter; } - // Rewrite invocation logic: - // 1) query_then|and_fetch (default): Rewrite is execute as part of the createWeight invocation, when search child docs. - // 2) dfs_query_then|and_fetch:: First rewrite and then createWeight is executed. During query phase rewrite isn't - // executed any more because searchContext#queryRewritten() returns true. @Override public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewrittenChildQuery = originalChildQuery.rewrite(reader); - rewriteIndexReader = reader; + Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + Query rewritten = new TopChildrenQuery(parentChildIndexFieldData, childRewritten, childType, parentType, scoreType, factor, incrementalFactor, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - // We can always return the current instance, and we can do this b/c the child query is executed separately - // before the main query (other scope) in a different IS#search() invocation than the main query. - // In fact we only need override the rewrite method because for the dfs phase, to get also global document - // frequency for the child query. - return this; + return super.rewrite(reader); } @Override - public Query clone() { - TopChildrenQuery q = (TopChildrenQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { ObjectObjectOpenHashMap parentDocs = new ObjectObjectOpenHashMap<>(); SearchContext searchContext = SearchContext.current(); @@ -121,16 +102,9 @@ public class TopChildrenQuery extends Query { } int numChildDocs = requestedDocs * factor; - Query childQuery; - if (rewrittenChildQuery == null) { - childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery); - } else { - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); - childQuery = rewrittenChildQuery; - } - IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); while (true) { parentDocs.clear(); TopDocs topChildDocs = indexSearcher.search(childQuery, numChildDocs); @@ -155,7 +129,7 @@ public class TopChildrenQuery extends Query { } } - ParentWeight parentWeight = new ParentWeight(this, rewrittenChildQuery.createWeight(searcher, needsScores), parentDocs); + ParentWeight parentWeight = new ParentWeight(this, childQuery.createWeight(searcher, needsScores), parentDocs); searchContext.addReleasable(parentWeight, Lifetime.COLLECTION); return parentWeight; } @@ -251,12 +225,12 @@ public class TopChildrenQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } TopChildrenQuery that = (TopChildrenQuery) obj; - if (!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if (!childType.equals(that.childType)) { @@ -265,25 +239,22 @@ public class TopChildrenQuery extends Query { if (incrementalFactor != that.incrementalFactor) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + parentType.hashCode(); result = 31 * result + incrementalFactor; - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @Override public String toString(String field) { StringBuilder sb = new StringBuilder(); - sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery.toString(field)).append(')'); + sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(childQuery.toString(field)).append(')'); sb.append(ToStringUtils.boost(getBoost())); return sb.toString(); } diff --git a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java index f8020f4b95b..880b1f54254 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java @@ -22,8 +22,8 @@ package org.elasticsearch.index.search.geo; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; /** @@ -48,13 +48,13 @@ public class IndexedGeoBoundingBoxFilter { filter.add(fieldMapper.lonMapper().rangeFilter(null, bottomRight.lon(), true, true), Occur.SHOULD); filter.add(fieldMapper.lonMapper().rangeFilter(topLeft.lon(), null, true, true), Occur.SHOULD); filter.add(fieldMapper.latMapper().rangeFilter(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST); - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } private static Filter eastGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapper fieldMapper) { BooleanQuery filter = new BooleanQuery(); filter.add(fieldMapper.lonMapper().rangeFilter(topLeft.lon(), bottomRight.lon(), true, true), Occur.MUST); filter.add(fieldMapper.latMapper().rangeFilter(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST); - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index d8e37e40c2e..2f3c0371e15 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -555,7 +555,7 @@ public class IndexShard extends AbstractIndexShardComponent { Query query = queryParserService.parseQuery(source).query(); Filter searchFilter = mapperService.searchFilter(types); if (searchFilter != null) { - query = new FilteredQuery(query, indexCache.filter().cache(searchFilter, null, queryParserService.autoFilterCachePolicy())); + query = new FilteredQuery(query, searchFilter); } Filter aliasFilter = indexAliasesService.aliasFilter(filteringAliases); @@ -753,7 +753,7 @@ public class IndexShard extends AbstractIndexShardComponent { engine.flushAndClose(); } } finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times - IOUtils.close(engine); + IOUtils.close(engine, shardFilterCache); } } } @@ -1291,6 +1291,6 @@ public class IndexShard extends AbstractIndexShardComponent { }; return new EngineConfig(shardId, threadPool, indexingService, indexSettingsService, warmer, store, deletionPolicy, translog, mergePolicyProvider, mergeScheduler, - mapperAnalyzer, similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer); + mapperAnalyzer, similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.filter(), indexCache.filterPolicy()); } } diff --git a/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index ee0ff0c81c6..ddc04403e7f 100644 --- a/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices; import com.google.common.collect.Lists; import com.google.common.collect.Maps; + import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; diff --git a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java index 7447eea4d89..0ee4f07c7e2 100644 --- a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java +++ b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java @@ -19,212 +19,290 @@ package org.elasticsearch.indices.cache.filter; -import com.carrotsearch.hppc.ObjectOpenHashSet; -import com.google.common.base.Objects; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.LRUQueryCache; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCache; +import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Bits; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.ShardCoreKeyMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.MemorySizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; -import org.elasticsearch.node.settings.NodeSettingsService; -import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.index.cache.filter.FilterCacheStats; +import org.elasticsearch.index.shard.ShardId; -import java.util.Iterator; +import java.io.Closeable; +import java.io.IOException; +import java.util.HashMap; +import java.util.IdentityHashMap; +import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.ConcurrentHashMap; -public class IndicesFilterCache extends AbstractComponent implements RemovalListener { +public class IndicesFilterCache extends AbstractComponent implements QueryCache, Closeable { - private final ThreadPool threadPool; + public static final String INDICES_CACHE_QUERY_SIZE = "indices.cache.filter.size"; + public static final String INDICES_CACHE_QUERY_COUNT = "indices.cache.filter.count"; - private Cache cache; + private final LRUQueryCache cache; + private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); + private final Map shardStats = new ConcurrentHashMap<>(); + private volatile long sharedRamBytesUsed; - private volatile String size; - private volatile long sizeInBytes; - private volatile TimeValue expire; - private volatile int concurrencyLevel; - - private final TimeValue cleanInterval; - private final int minimumEntryWeight; - - private final Set readersKeysToClean = ConcurrentCollections.newConcurrentSet(); - - private volatile boolean closed; - - public static final String INDICES_CACHE_FILTER_SIZE = "indices.cache.filter.size"; - public static final String INDICES_CACHE_FILTER_EXPIRE = "indices.cache.filter.expire"; - public static final String INDICES_CACHE_FILTER_CONCURRENCY_LEVEL = "indices.cache.filter.concurrency_level"; - public static final String INDICES_CACHE_FILTER_CLEAN_INTERVAL = "indices.cache.filter.clean_interval"; - public static final String INDICES_CACHE_FILTER_MINIMUM_ENTRY_WEIGHT = "indices.cache.filter.minimum_entry_weight"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - boolean replace = false; - String size = settings.get(INDICES_CACHE_FILTER_SIZE, IndicesFilterCache.this.size); - if (!size.equals(IndicesFilterCache.this.size)) { - logger.info("updating [{}] from [{}] to [{}]", - INDICES_CACHE_FILTER_SIZE, IndicesFilterCache.this.size, size); - IndicesFilterCache.this.size = size; - replace = true; - } - TimeValue expire = settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, IndicesFilterCache.this.expire); - if (!Objects.equal(expire, IndicesFilterCache.this.expire)) { - logger.info("updating [{}] from [{}] to [{}]", - INDICES_CACHE_FILTER_EXPIRE, IndicesFilterCache.this.expire, expire); - IndicesFilterCache.this.expire = expire; - replace = true; - } - final int concurrencyLevel = settings.getAsInt(INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, IndicesFilterCache.this.concurrencyLevel); - if (concurrencyLevel <= 0) { - throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); - } - if (!Objects.equal(concurrencyLevel, IndicesFilterCache.this.concurrencyLevel)) { - logger.info("updating [{}] from [{}] to [{}]", - INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, IndicesFilterCache.this.concurrencyLevel, concurrencyLevel); - IndicesFilterCache.this.concurrencyLevel = concurrencyLevel; - replace = true; - } - if (replace) { - Cache oldCache = IndicesFilterCache.this.cache; - computeSizeInBytes(); - buildCache(); - oldCache.invalidateAll(); - } - } - } + // This is a hack for the fact that the close listener for the + // ShardCoreKeyMap will be called before onDocIdSetEviction + // See onDocIdSetEviction for more info + private final Map stats2 = new IdentityHashMap<>(); @Inject - public IndicesFilterCache(Settings settings, ThreadPool threadPool, NodeSettingsService nodeSettingsService) { + public IndicesFilterCache(Settings settings) { super(settings); - this.threadPool = threadPool; - this.size = settings.get(INDICES_CACHE_FILTER_SIZE, "10%"); - this.expire = settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, null); - this.minimumEntryWeight = settings.getAsInt(INDICES_CACHE_FILTER_MINIMUM_ENTRY_WEIGHT, 1024); // 1k per entry minimum - if (minimumEntryWeight <= 0) { - throw new IllegalArgumentException("minimum_entry_weight must be > 0 but was: " + minimumEntryWeight); + final String sizeString = settings.get(INDICES_CACHE_QUERY_SIZE, "10%"); + final ByteSizeValue size = MemorySizeValue.parseBytesSizeValueOrHeapRatio(sizeString); + final int count = settings.getAsInt(INDICES_CACHE_QUERY_COUNT, 100000); + logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], max filter count [{}]", + sizeString, size, count); + cache = new LRUQueryCache(count, size.bytes()) { + + private Stats getStats(Object coreKey) { + final ShardId shardId = shardKeyMap.getShardId(coreKey); + if (shardId == null) { + return null; + } + return shardStats.get(shardId); + } + + private Stats getOrCreateStats(Object coreKey) { + final ShardId shardId = shardKeyMap.getShardId(coreKey); + Stats stats = shardStats.get(shardId); + if (stats == null) { + stats = new Stats(); + shardStats.put(shardId, stats); + } + return stats; + } + + // It's ok to not protect these callbacks by a lock since it is + // done in LRUQueryCache + @Override + protected void onClear() { + assert Thread.holdsLock(this); + super.onClear(); + for (Stats stats : shardStats.values()) { + // don't throw away hit/miss + stats.cacheSize = 0; + stats.ramBytesUsed = 0; + } + sharedRamBytesUsed = 0; + } + + @Override + protected void onQueryCache(Query filter, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onQueryCache(filter, ramBytesUsed); + sharedRamBytesUsed += ramBytesUsed; + } + + @Override + protected void onQueryEviction(Query filter, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onQueryEviction(filter, ramBytesUsed); + sharedRamBytesUsed -= ramBytesUsed; + } + + @Override + protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onDocIdSetCache(readerCoreKey, ramBytesUsed); + final Stats shardStats = getOrCreateStats(readerCoreKey); + shardStats.cacheSize += 1; + shardStats.cacheCount += 1; + shardStats.ramBytesUsed += ramBytesUsed; + + StatsAndCount statsAndCount = stats2.get(readerCoreKey); + if (statsAndCount == null) { + statsAndCount = new StatsAndCount(shardStats); + stats2.put(readerCoreKey, statsAndCount); + } + statsAndCount.count += 1; + } + + @Override + protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) { + assert Thread.holdsLock(this); + super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed); + // We can't use ShardCoreKeyMap here because its core closed + // listener is called before the listener of the cache which + // triggers this eviction. So instead we use use stats2 that + // we only evict when nothing is cached anymore on the segment + // instead of relying on close listeners + final StatsAndCount statsAndCount = stats2.get(readerCoreKey); + final Stats shardStats = statsAndCount.stats; + shardStats.cacheSize -= numEntries; + shardStats.ramBytesUsed -= sumRamBytesUsed; + statsAndCount.count -= numEntries; + if (statsAndCount.count == 0) { + stats2.remove(readerCoreKey); + } + } + + @Override + protected void onHit(Object readerCoreKey, Query filter) { + assert Thread.holdsLock(this); + super.onHit(readerCoreKey, filter); + final Stats shardStats = getStats(readerCoreKey); + shardStats.hitCount += 1; + } + + @Override + protected void onMiss(Object readerCoreKey, Query filter) { + assert Thread.holdsLock(this); + super.onMiss(readerCoreKey, filter); + final Stats shardStats = getOrCreateStats(readerCoreKey); + shardStats.missCount += 1; + } + }; + sharedRamBytesUsed = 0; + } + + /** Get usage statistics for the given shard. */ + public FilterCacheStats getStats(ShardId shard) { + final Map stats = new HashMap<>(); + for (Map.Entry entry : shardStats.entrySet()) { + stats.put(entry.getKey(), entry.getValue().toQueryCacheStats()); } - this.cleanInterval = settings.getAsTime(INDICES_CACHE_FILTER_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60)); - // defaults to 4, but this is a busy map for all indices, increase it a bit - this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, 16); - if (concurrencyLevel <= 0) { - throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); + FilterCacheStats shardStats = new FilterCacheStats(); + FilterCacheStats info = stats.get(shard); + if (info == null) { + info = new FilterCacheStats(); } - computeSizeInBytes(); - buildCache(); - logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], expire [{}], clean_interval [{}]", - size, new ByteSizeValue(sizeInBytes), expire, cleanInterval); + shardStats.add(info); - nodeSettingsService.addListener(new ApplySettings()); - threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, new ReaderCleaner()); - } - - private void buildCache() { - CacheBuilder cacheBuilder = CacheBuilder.newBuilder() - .removalListener(this) - .maximumWeight(sizeInBytes).weigher(new WeightedFilterCache.FilterCacheValueWeigher(minimumEntryWeight)); - - cacheBuilder.concurrencyLevel(this.concurrencyLevel); - - if (expire != null) { - cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS); + // We also have some shared ram usage that we try to distribute to + // proportionally to their number of cache entries of each shard + long totalSize = 0; + for (FilterCacheStats s : stats.values()) { + totalSize += s.getCacheSize(); } - - cache = cacheBuilder.build(); - } - - private void computeSizeInBytes() { - this.sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size).bytes(); - } - - public void addReaderKeyToClean(Object readerKey) { - readersKeysToClean.add(readerKey); - } - - public void close() { - closed = true; - cache.invalidateAll(); - } - - public Cache cache() { - return this.cache; + final double weight = totalSize == 0 + ? 1d / stats.size() + : shardStats.getCacheSize() / totalSize; + final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed); + shardStats.add(new FilterCacheStats(additionalRamBytesUsed, 0, 0, 0, 0)); + return shardStats; } @Override - public void onRemoval(RemovalNotification removalNotification) { - WeightedFilterCache.FilterCacheKey key = removalNotification.getKey(); - if (key == null) { - return; - } - if (key.removalListener != null) { - key.removalListener.onRemoval(removalNotification); + public Weight doCache(Weight weight, QueryCachingPolicy policy) { + while (weight instanceof CachingWeightWrapper) { + weight = ((CachingWeightWrapper) weight).in; } + final Weight in = cache.doCache(weight, policy); + // We wrap the weight to track the readers it sees and map them with + // the shards they belong to + return new CachingWeightWrapper(in); } - /** - * The reason we need this class is because we need to clean all the filters that are associated - * with a reader. We don't want to do it every time a reader closes, since iterating over all the map - * is expensive. There doesn't seem to be a nicer way to do it (and maintaining a list per reader - * of the filters will cost more). - */ - class ReaderCleaner implements Runnable { + private class CachingWeightWrapper extends Weight { - // this is thread safe since we only schedule the next cleanup once the current one is - // done, so no concurrent execution - private final ObjectOpenHashSet keys = ObjectOpenHashSet.newInstance(); + private final Weight in; + + protected CachingWeightWrapper(Weight in) { + super(in.getQuery()); + this.in = in; + } @Override - public void run() { - if (closed) { - return; - } - if (readersKeysToClean.isEmpty()) { - schedule(); - return; - } - try { - threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() { - @Override - public void run() { - keys.clear(); - for (Iterator it = readersKeysToClean.iterator(); it.hasNext(); ) { - keys.add(it.next()); - it.remove(); - } - if (!keys.isEmpty()) { - for (Iterator it = cache.asMap().keySet().iterator(); it.hasNext(); ) { - WeightedFilterCache.FilterCacheKey filterCacheKey = it.next(); - if (keys.contains(filterCacheKey.readerKey())) { - // same as invalidate - it.remove(); - } - } - } - cache.cleanUp(); - schedule(); - keys.clear(); - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not run ReaderCleaner - execution rejected", ex); - } + public void extractTerms(Set terms) { + in.extractTerms(terms); } - private void schedule() { - try { - threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not schedule ReaderCleaner - execution rejected", ex); - } + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + shardKeyMap.add(context.reader()); + return in.explain(context, doc); + } + + @Override + public float getValueForNormalization() throws IOException { + return in.getValueForNormalization(); + } + + @Override + public void normalize(float norm, float topLevelBoost) { + in.normalize(norm, topLevelBoost); + } + + @Override + public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + shardKeyMap.add(context.reader()); + return in.scorer(context, acceptDocs); } } + + /** Clear all entries that belong to the given index. */ + public void clearIndex(String index) { + final Set coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index); + for (Object coreKey : coreCacheKeys) { + cache.clearCoreCacheKey(coreKey); + } + + // This cache stores two things: filters, and doc id sets. Calling + // clear only removes the doc id sets, but if we reach the situation + // that the cache does not contain any DocIdSet anymore, then it + // probably means that the user wanted to remove everything. + if (cache.getCacheSize() == 0) { + cache.clear(); + } + } + + @Override + public void close() { + assert shardKeyMap.size() == 0 : shardKeyMap.size(); + assert shardStats.isEmpty(); + assert stats2.isEmpty() : stats2; + cache.clear(); + } + + private static class Stats implements Cloneable { + + volatile long ramBytesUsed; + volatile long hitCount; + volatile long missCount; + volatile long cacheCount; + volatile long cacheSize; + + FilterCacheStats toQueryCacheStats() { + return new FilterCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize); + } + } + + private static class StatsAndCount { + int count; + final Stats stats; + + StatsAndCount(Stats stats) { + this.stats = stats; + this.count = 0; + } + } + + private boolean empty(Stats stats) { + if (stats == null) { + return true; + } + return stats.cacheSize == 0 && stats.ramBytesUsed == 0; + } + + public void onClose(ShardId shardId) { + assert empty(shardStats.get(shardId)); + shardStats.remove(shardId); + } } diff --git a/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java b/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java index 2f3063a484f..a074d975c04 100644 --- a/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java +++ b/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java @@ -71,7 +71,9 @@ class MultiDocumentPercolatorIndex implements PercolatorIndex { try { MultiReader mReader = new MultiReader(memoryIndices, true); LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader); - DocSearcher docSearcher = new DocSearcher(new IndexSearcher(slowReader), rootDocMemoryIndex); + final IndexSearcher slowSearcher = new IndexSearcher(slowReader); + slowSearcher.setQueryCache(null); + DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex); context.initialize(docSearcher, parsedDocument); } catch (IOException e) { throw new ElasticsearchException("Failed to create index for percolator with nested document ", e); diff --git a/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/src/main/java/org/elasticsearch/percolator/PercolateContext.java index b5dfc37bcd6..d752052e829 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -439,11 +439,6 @@ public class PercolateContext extends SearchContext { return bigArrays; } - @Override - public FilterCache filterCache() { - return indexService.cache().filter(); - } - @Override public BitsetFilterCache bitsetFilterCache() { return indexService.bitsetFilterCache(); diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 96e7f326e4c..1284edc4902 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.util.BytesRef; @@ -794,14 +795,13 @@ public class PercolatorService extends AbstractComponent { private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException { Filter percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter(); - percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter, null, context.queryParserService().autoFilterCachePolicy()); final Filter filter; if (context.aliasFilter() != null) { BooleanQuery booleanFilter = new BooleanQuery(); booleanFilter.add(context.aliasFilter(), BooleanClause.Occur.MUST); booleanFilter.add(percolatorTypeFilter, BooleanClause.Occur.MUST); - filter = Queries.wrap(booleanFilter); + filter = new QueryWrapperFilter(booleanFilter); } else { filter = percolatorTypeFilter; } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java index 8bd49e39d63..54d9948537e 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java @@ -89,9 +89,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { if (Fields.FIELDS.match(entry.getKey())) { clearIndicesCacheRequest.fields(request.paramAsStringArray(entry.getKey(), clearIndicesCacheRequest.fields())); } - if (Fields.FILTER_KEYS.match(entry.getKey())) { - clearIndicesCacheRequest.filterKeys(request.paramAsStringArray(entry.getKey(), clearIndicesCacheRequest.filterKeys())); - } } return clearIndicesCacheRequest; @@ -103,7 +100,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { public static final ParseField ID = new ParseField("id", "id_cache"); public static final ParseField RECYCLER = new ParseField("recycler"); public static final ParseField FIELDS = new ParseField("fields"); - public static final ParseField FILTER_KEYS = new ParseField("filter_keys"); } } diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java index 38f4e03a0f1..057b92a5912 100644 --- a/src/main/java/org/elasticsearch/search/SearchService.java +++ b/src/main/java/org/elasticsearch/search/SearchService.java @@ -52,6 +52,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -366,7 +367,9 @@ public class SearchService extends AbstractLifecycleComponent { final SearchContext context = findContext(request.id()); contextProcessing(context); try { - context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity())); + final IndexCache indexCache = context.indexShard().indexService().cache(); + context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(), + indexCache.filter(), indexCache.filterPolicy())); } catch (Throwable e) { freeContext(context.id()); cleanContext(context); @@ -436,7 +439,9 @@ public class SearchService extends AbstractLifecycleComponent { final SearchContext context = findContext(request.id()); contextProcessing(context); try { - context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity())); + final IndexCache indexCache = context.indexShard().indexService().cache(); + context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(), + indexCache.filter(), indexCache.filterPolicy())); } catch (Throwable e) { freeContext(context.id()); cleanContext(context); diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index abc0827a3cc..717834b045c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -20,8 +20,8 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.ImmutableMap; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilteredQuery; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.inject.Inject; @@ -119,9 +119,12 @@ public class AggregationPhase implements SearchPhase { if (!globals.isEmpty()) { BucketCollector globalsCollector = BucketCollector.wrap(globals); Query query = Queries.newMatchAllQuery(); - Filter searchFilter = context.searchFilter(context.types()); + Query searchFilter = context.searchFilter(context.types()); if (searchFilter != null) { - query = new FilteredQuery(query, searchFilter); + BooleanQuery filtered = new BooleanQuery(); + filtered.add(query, Occur.MUST); + filtered.add(searchFilter, Occur.FILTER); + query = filtered; } try { globalsCollector.preCollection(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java index aacd76b0b5b..2f50bbf69ee 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java @@ -84,8 +84,8 @@ public class ChildrenParser implements Aggregator.Parser { parentType = parentFieldMapper.type(); DocumentMapper parentDocMapper = context.mapperService().documentMapper(parentType); if (parentDocMapper != null) { - parentFilter = context.filterCache().cache(parentDocMapper.typeFilter(), null, context.queryParserService().autoFilterCachePolicy()); - childFilter = context.filterCache().cache(childDocMapper.typeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + parentFilter = parentDocMapper.typeFilter(); + childFilter = childDocMapper.typeFilter(); ParentChildIndexFieldData parentChildIndexFieldData = context.fieldData().getForField(parentFieldMapper); config.fieldContext(new FieldContext(parentFieldMapper.names().indexName(), parentChildIndexFieldData, parentFieldMapper)); } else { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index 3775ab853a2..e456e93c8a1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -20,16 +20,21 @@ package org.elasticsearch.search.aggregations.bucket.children; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.index.search.child.ConstantScorer; -import org.elasticsearch.search.aggregations.*; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -38,15 +43,19 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; // The RecordingPerReaderBucketCollector assumes per segment recording which isn't the case for this // aggregation, for this reason that collector can't be used public class ParentToChildrenAggregator extends SingleBucketAggregator { private final String parentType; - private final Filter childFilter; - private final Filter parentFilter; + private final Weight childFilter; + private final Weight parentFilter; private final ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource; // Maybe use PagedGrowableWriter? This will be less wasteful than LongArray, but then we don't have the reuse feature of BigArrays. @@ -69,8 +78,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { super(name, factories, aggregationContext, parent, reducers, metaData); this.parentType = parentType; // these two filters are cached in the parser - this.childFilter = childFilter; - this.parentFilter = parentFilter; + this.childFilter = aggregationContext.searchContext().searcher().createNormalizedWeight(childFilter, false); + this.parentFilter = aggregationContext.searchContext().searcher().createNormalizedWeight(parentFilter, false); this.parentOrdToBuckets = aggregationContext.bigArrays().newLongArray(maxOrd, false); this.parentOrdToBuckets.fill(0, maxOrd, -1); this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(aggregationContext.bigArrays()); @@ -100,13 +109,9 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); assert globalOrdinals != null; - DocIdSet parentDocIdSet = parentFilter.getDocIdSet(ctx, null); - // The DocIdSets.toSafeBits(...) can convert to FixedBitSet, but this - // will only happen if the none filter cache is used. (which only happens in tests) - // Otherwise the filter cache will produce a bitset based filter. - final Bits parentDocs = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), parentDocIdSet); - DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, null); - if (DocIdSets.isEmpty(childDocIdSet) == false) { + Scorer parentScorer = parentFilter.scorer(ctx, null); + final Bits parentDocs = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer); + if (childFilter.scorer(ctx, null) != null) { replay.add(ctx); } return new LeafBucketCollector() { @@ -141,18 +146,14 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { this.replay = null; for (LeafReaderContext ctx : replay) { - final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); - - final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); - DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, ctx.reader().getLiveDocs()); - if (childDocIdSet == null) { - continue; - } - DocIdSetIterator childDocsIter = childDocIdSet.iterator(); + DocIdSetIterator childDocsIter = childFilter.scorer(ctx, ctx.reader().getLiveDocs()); if (childDocsIter == null) { continue; } + final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); + final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); + // Set the scorer, since we now replay only the child docIds sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f)); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java index 6459ff83215..ed974279133 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java @@ -20,6 +20,8 @@ package org.elasticsearch.search.aggregations.bucket.filter; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -42,24 +44,23 @@ import java.util.Map; */ public class FilterAggregator extends SingleBucketAggregator { - private final Filter filter; + private final Weight filter; public FilterAggregator(String name, - org.apache.lucene.search.Filter filter, + Query filter, AggregatorFactories factories, AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); - this.filter = filter; + this.filter = aggregationContext.searchContext().searcher().createNormalizedWeight(filter, false); } @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - // TODO: use the iterator if the filter does not support random access // no need to provide deleted docs to the filter - final Bits bits = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.getDocIdSet(ctx, null)); + final Bits bits = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx, null)); return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java index 913d844cb6a..267833a8d95 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java @@ -22,7 +22,8 @@ package org.elasticsearch.search.aggregations.bucket.filters; import com.google.common.collect.Lists; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -49,15 +50,16 @@ public class FiltersAggregator extends BucketsAggregator { static class KeyedFilter { final String key; - final Filter filter; + final Query filter; - KeyedFilter(String key, Filter filter) { + KeyedFilter(String key, Query filter) { this.key = key; this.filter = filter; } } - private final KeyedFilter[] filters; + private final String[] keys; + private final Weight[] filters; private final boolean keyed; public FiltersAggregator(String name, AggregatorFactories factories, List filters, boolean keyed, AggregationContext aggregationContext, @@ -65,18 +67,23 @@ public class FiltersAggregator extends BucketsAggregator { throws IOException { super(name, factories, aggregationContext, parent, reducers, metaData); this.keyed = keyed; - this.filters = filters.toArray(new KeyedFilter[filters.size()]); + this.keys = new String[filters.size()]; + this.filters = new Weight[filters.size()]; + for (int i = 0; i < filters.size(); ++i) { + KeyedFilter keyedFilter = filters.get(i); + this.keys[i] = keyedFilter.key; + this.filters[i] = aggregationContext.searchContext().searcher().createNormalizedWeight(keyedFilter.filter, false); + } } @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - // TODO: use the iterator if the filter does not support random access // no need to provide deleted docs to the filter final Bits[] bits = new Bits[filters.length]; for (int i = 0; i < filters.length; ++i) { - bits[i] = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].filter.getDocIdSet(ctx, null)); - } + bits[i] = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx, null)); + } return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { @@ -92,10 +99,9 @@ public class FiltersAggregator extends BucketsAggregator { @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { List buckets = Lists.newArrayListWithCapacity(filters.length); - for (int i = 0; i < filters.length; i++) { - KeyedFilter filter = filters[i]; + for (int i = 0; i < keys.length; i++) { long bucketOrd = bucketOrd(owningBucketOrdinal, i); - InternalFilters.Bucket bucket = new InternalFilters.Bucket(filter.key, bucketDocCount(bucketOrd), bucketAggregations(bucketOrd), keyed); + InternalFilters.Bucket bucket = new InternalFilters.Bucket(keys[i], bucketDocCount(bucketOrd), bucketAggregations(bucketOrd), keyed); buckets.add(bucket); } return new InternalFilters(name, buckets, keyed, reducers(), metaData()); @@ -105,8 +111,8 @@ public class FiltersAggregator extends BucketsAggregator { public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = Lists.newArrayListWithCapacity(filters.length); - for (int i = 0; i < filters.length; i++) { - InternalFilters.Bucket bucket = new InternalFilters.Bucket(filters[i].key, 0, subAggs, keyed); + for (int i = 0; i < keys.length; i++) { + InternalFilters.Bucket bucket = new InternalFilters.Bucket(keys[i], 0, subAggs, keyed); buckets.add(bucket); } return new InternalFilters(name, buckets, keyed, reducers(), metaData()); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 0acf04210a1..e6a246162ce 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; @@ -57,9 +56,9 @@ public class NestedAggregator extends SingleBucketAggregator { private DocIdSetIterator childDocs; private BitSet parentDocs; - public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, List reducers, Map metaData, QueryCachingPolicy filterCachingPolicy) throws IOException { + public NestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, AggregationContext aggregationContext, Aggregator parentAggregator, List reducers, Map metaData) throws IOException { super(name, factories, aggregationContext, parentAggregator, reducers, metaData); - childFilter = aggregationContext.searchContext().filterCache().cache(objectMapper.nestedTypeFilter(), null, filterCachingPolicy); + childFilter = objectMapper.nestedTypeFilter(); } @Override @@ -145,12 +144,10 @@ public class NestedAggregator extends SingleBucketAggregator { public static class Factory extends AggregatorFactory { private final String path; - private final QueryCachingPolicy queryCachingPolicy; - public Factory(String name, String path, QueryCachingPolicy queryCachingPolicy) { + public Factory(String name, String path) { super(name, InternalNested.TYPE.name()); this.path = path; - this.queryCachingPolicy = queryCachingPolicy; } @Override @@ -170,7 +167,7 @@ public class NestedAggregator extends SingleBucketAggregator { if (!objectMapper.nested().isNested()) { throw new AggregationExecutionException("[nested] nested path [" + path + "] is not nested"); } - return new NestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData, queryCachingPolicy); + return new NestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData); } private final static class Unmapped extends NonCollectingAggregator { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java index 56da7f51b17..ddf6bf17b6e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java @@ -64,6 +64,6 @@ public class NestedParser implements Aggregator.Parser { parser.getTokenLocation()); } - return new NestedAggregator.Factory(aggregationName, path, context.queryParserService().autoFilterCachePolicy()); + return new NestedAggregator.Factory(aggregationName, path); } } diff --git a/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java b/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java index e65be1c314b..94071f8f6bd 100644 --- a/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java +++ b/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java @@ -36,10 +36,13 @@ public class CachedDfSource extends IndexSearcher { private final int maxDoc; - public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity) throws IOException { + public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity, + QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) throws IOException { super(reader); this.aggregatedDfs = aggregatedDfs; setSimilarity(similarity); + setQueryCache(queryCache); + setQueryCachingPolicy(queryCachingPolicy); if (aggregatedDfs.maxDoc() > Integer.MAX_VALUE) { maxDoc = Integer.MAX_VALUE; } else { diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 5eef114e5cb..0037bf322c3 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -390,7 +390,7 @@ public class FetchPhase implements SearchPhase { parentFilter = Queries.newNonNestedFilter(); } - Filter childFilter = context.filterCache().cache(nestedObjectMapper.nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + Filter childFilter = nestedObjectMapper.nestedTypeFilter(); if (childFilter == null) { current = nestedParentObjectMapper; continue; diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java index 92968309fcc..05cbb4e178a 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java @@ -22,8 +22,6 @@ import com.google.common.collect.Maps; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.IndexSearcher; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.InternalSearchHit; @@ -42,14 +40,12 @@ public interface FetchSubPhase { private LeafReaderContext readerContext; private int docId; private Map cache; - private IndexSearcher atomicIndexSearcher; public void reset(InternalSearchHit hit, LeafReaderContext context, int docId, IndexReader topLevelReader) { this.hit = hit; this.readerContext = context; this.docId = docId; this.topLevelReader = topLevelReader; - this.atomicIndexSearcher = null; } public InternalSearchHit hit() { @@ -64,15 +60,6 @@ public interface FetchSubPhase { return readerContext; } - public IndexSearcher searcher() { - if (atomicIndexSearcher == null) { - // Use the reader directly otherwise the IndexSearcher assertion will trip because it expects a top level - // reader context. - atomicIndexSearcher = new IndexSearcher(readerContext.reader()); - } - return atomicIndexSearcher; - } - public int docId() { return docId; } diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java index 6f36da8ee83..8f0e2a1799c 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; @@ -129,7 +130,7 @@ public final class InnerHitsContext { rawParentFilter = parentObjectMapper.nestedTypeFilter(); } BitDocIdSetFilter parentFilter = context.bitsetFilterCache().getBitDocIdSetFilter(rawParentFilter); - Filter childFilter = context.filterCache().cache(childObjectMapper.nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + Filter childFilter = childObjectMapper.nestedTypeFilter(); Query q = new FilteredQuery(query, new NestedChildrenFilter(parentFilter, childFilter, hitContext)); if (size() == 0) { @@ -166,6 +167,28 @@ public final class InnerHitsContext { this.atomicReader = hitContext.readerContext().reader(); } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + NestedChildrenFilter other = (NestedChildrenFilter) obj; + return parentFilter.equals(other.parentFilter) + && childFilter.equals(other.childFilter) + && docId == other.docId + && atomicReader.getCoreCacheKey() == other.atomicReader.getCoreCacheKey(); + } + + @Override + public int hashCode() { + int hash = super.hashCode(); + hash = 31 * hash + parentFilter.hashCode(); + hash = 31 * hash + childFilter.hashCode(); + hash = 31 * hash + docId; + hash = 31 * hash + atomicReader.getCoreCacheKey().hashCode(); + return hash; + } + @Override public String toString(String field) { return "NestedChildren(parent=" + parentFilter + ",child=" + childFilter + ")"; @@ -283,7 +306,7 @@ public final class InnerHitsContext { term = (String) fieldsVisitor.fields().get(ParentFieldMapper.NAME).get(0); } } - Filter filter = Queries.wrap(new TermQuery(new Term(field, term))); // Only include docs that have the current hit as parent + Filter filter = new QueryWrapperFilter(new TermQuery(new Term(field, term))); // Only include docs that have the current hit as parent Filter typeFilter = documentMapper.typeFilter(); // Only include docs that have this inner hits type. BooleanQuery filteredQuery = new BooleanQuery(); diff --git a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java index 8b227e1d224..f30a0545d95 100644 --- a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java +++ b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java @@ -122,7 +122,7 @@ public class PostingsHighlighter implements Highlighter { //we highlight every value separately calling the highlight method multiple times, only if we need to have back a snippet per value (whole value) int values = mergeValues ? 1 : textsToHighlight.size(); for (int i = 0; i < values; i++) { - Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.searcher(), hitContext.docId(), numberOfFragments); + Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.reader(), hitContext.docId(), numberOfFragments); if (fieldSnippets != null) { for (Snippet fieldSnippet : fieldSnippets) { if (Strings.hasText(fieldSnippet.getText())) { diff --git a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 0e38c150030..482e3ef9153 100644 --- a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -20,9 +20,10 @@ package org.elasticsearch.search.internal; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; @@ -151,7 +152,8 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { // this will only get applied to the actual search collector and not // to any scoped collectors, also, it will only be applied to the main collector // since that is where the filter should only work - collector = new FilteredCollector(collector, searchContext.parsedPostFilter().filter()); + final Weight filterWeight = createNormalizedWeight(searchContext.parsedPostFilter().filter(), false); + collector = new FilteredCollector(collector, filterWeight); } if (queryCollectors != null && !queryCollectors.isEmpty()) { ArrayList allCollectors = new ArrayList<>(queryCollectors.values()); @@ -194,7 +196,9 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { if (searchContext.aliasFilter() == null) { return super.explain(query, doc); } - FilteredQuery filteredQuery = new FilteredQuery(query, searchContext.aliasFilter()); + BooleanQuery filteredQuery = new BooleanQuery(); + filteredQuery.add(query, Occur.MUST); + filteredQuery.add(searchContext.aliasFilter(), Occur.FILTER); return super.explain(filteredQuery, doc); } finally { searchContext.clearReleasables(Lifetime.COLLECTION); diff --git a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index f0839688761..cd50594dc0e 100644 --- a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.ScoreDoc; @@ -44,7 +43,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.filter.FilterCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.FieldMapper; @@ -235,14 +233,17 @@ public class DefaultSearchContext extends SearchContext { if (queryBoost() != 1.0f) { parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new BoostScoreFunction(queryBoost)), parsedQuery())); } - Filter searchFilter = searchFilter(types()); + Query searchFilter = searchFilter(types()); if (searchFilter != null) { if (Queries.isConstantMatchAllQuery(query())) { Query q = new ConstantScoreQuery(searchFilter); q.setBoost(query().getBoost()); parsedQuery(new ParsedQuery(q, parsedQuery())); } else { - parsedQuery(new ParsedQuery(new FilteredQuery(query(), searchFilter), parsedQuery())); + BooleanQuery filtered = new BooleanQuery(); + filtered.add(query(), Occur.MUST); + filtered.add(searchFilter, Occur.FILTER); + parsedQuery(new ParsedQuery(filtered, parsedQuery())); } } } @@ -255,12 +256,12 @@ public class DefaultSearchContext extends SearchContext { } BooleanQuery bq = new BooleanQuery(); if (filter != null) { - bq.add(filterCache().cache(filter, null, indexService.queryParserService().autoFilterCachePolicy()), Occur.MUST); + bq.add(filter, Occur.MUST); } if (aliasFilter != null) { bq.add(aliasFilter, Occur.MUST); } - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } @Override @@ -480,11 +481,6 @@ public class DefaultSearchContext extends SearchContext { return bigArrays; } - @Override - public FilterCache filterCache() { - return indexService.cache().filter(); - } - @Override public BitsetFilterCache bitsetFilterCache() { return indexService.bitsetFilterCache(); diff --git a/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 3a6a48531f0..7fe5373b5e5 100644 --- a/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.internal; -import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; @@ -79,7 +78,7 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public Filter searchFilter(String[] types) { + public Query searchFilter(String[] types) { return in.searchFilter(types); } @@ -288,11 +287,6 @@ public abstract class FilteredSearchContext extends SearchContext { return in.bigArrays(); } - @Override - public FilterCache filterCache() { - return in.filterCache(); - } - @Override public BitsetFilterCache bitsetFilterCache() { return in.bitsetFilterCache(); @@ -364,7 +358,7 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public Filter aliasFilter() { + public Query aliasFilter() { return in.aliasFilter(); } diff --git a/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/src/main/java/org/elasticsearch/search/internal/SearchContext.java index f5377c98040..1ae74abaaf9 100644 --- a/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.internal; import com.google.common.collect.Iterables; import com.google.common.collect.Multimap; import com.google.common.collect.MultimapBuilder; -import org.apache.lucene.search.Filter; + import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; @@ -34,7 +34,6 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.filter.FilterCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.FieldMappers; @@ -43,7 +42,6 @@ import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.ParsedFilter; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; @@ -112,7 +110,7 @@ public abstract class SearchContext implements Releasable { */ public abstract void preProcess(); - public abstract Filter searchFilter(String[] types); + public abstract Query searchFilter(String[] types); public abstract long id(); @@ -213,8 +211,6 @@ public abstract class SearchContext implements Releasable { public abstract BigArrays bigArrays(); - public abstract FilterCache filterCache(); - public abstract BitsetFilterCache bitsetFilterCache(); public abstract IndexFieldDataService fieldData(); @@ -243,7 +239,7 @@ public abstract class SearchContext implements Releasable { public abstract ParsedFilter parsedPostFilter(); - public abstract Filter aliasFilter(); + public abstract Query aliasFilter(); public abstract SearchContext parsedQuery(ParsedQuery query); diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java index 4d8f618ba79..5e6a6df5fc0 100644 --- a/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java +++ b/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.search.lookup; -import org.apache.lucene.index.CompositeReader; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.util.MinimalMap; @@ -40,14 +40,11 @@ public class LeafIndexLookup extends MinimalMap { // The parent reader from which we can get proper field and term // statistics - private final CompositeReader parentReader; + private final IndexReader parentReader; // we need this later to get the field and term statistics of the shard private final IndexSearcher indexSearcher; - // we need this later to get the term statistics of the shard - private final IndexReaderContext indexReaderContext; - // current docId private int docId = -1; @@ -90,15 +87,9 @@ public class LeafIndexLookup extends MinimalMap { public LeafIndexLookup(LeafReaderContext ctx) { reader = ctx.reader(); - if (ctx.parent != null) { - parentReader = ctx.parent.reader(); - indexSearcher = new IndexSearcher(parentReader); - indexReaderContext = ctx.parent; - } else { - parentReader = null; - indexSearcher = null; - indexReaderContext = null; - } + parentReader = ReaderUtil.getTopLevelContext(ctx).reader(); + indexSearcher = new IndexSearcher(parentReader); + indexSearcher.setQueryCache(null); } public void setDocument(int docId) { @@ -175,13 +166,10 @@ public class LeafIndexLookup extends MinimalMap { } public IndexSearcher getIndexSearcher() { - if (indexSearcher == null) { - return new IndexSearcher(reader); - } return indexSearcher; } public IndexReaderContext getReaderContext() { - return indexReaderContext; + return getParentReader().getContext(); } } diff --git a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java index 4993c426629..ae16834f7af 100644 --- a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java @@ -166,9 +166,9 @@ public class GeoDistanceSortParser implements SortParser { BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index 7caf89e9c08..651e5ab3a8f 100644 --- a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -139,9 +139,9 @@ public class ScriptSortParser implements SortParser { BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { diff --git a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java index aa2f1315960..3dcaf5a4896 100644 --- a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java @@ -255,9 +255,9 @@ public class SortParseElement implements SearchParseElement { BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); Filter innerDocumentsFilter; if (nestedHelper.filterFound()) { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getInnerFilter(); } else { - innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); } nested = new Nested(rootDocumentsFilter, innerDocumentsFilter); } else { diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java index 01abea2b4f4..43165fa4b1c 100644 --- a/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java +++ b/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java @@ -82,7 +82,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("body", "highlighting")); BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -90,15 +90,15 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0); highlighter.setBreakIterator(new WholeBreakIterator()); - Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is a test. Just a test highlighting from postings highlighter.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the second value to perform highlighting on.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the third value to test highlighting with postings.")); @@ -106,15 +106,15 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //let's try without whole break iterator as well, to prove that highlighting works the same when working per value (not optimized though) highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("Just a test highlighting from postings highlighter.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the second value to perform highlighting on.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the third value to test highlighting with postings.")); @@ -177,7 +177,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("body", "highlighting")); BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -190,7 +190,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { boolean mergeValues = true; CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(4)); @@ -205,7 +205,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); List snippets2 = new ArrayList<>(); for (int i = 0; i < fieldValues.size(); i++) { - snippets2.addAll(Arrays.asList(highlighter.highlightDoc("body", queryTerms, searcher, docId, 5))); + snippets2.addAll(Arrays.asList(highlighter.highlightDoc("body", queryTerms, ir, docId, 5))); } assertThat(snippets2.size(), equalTo(4)); @@ -292,7 +292,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("body", "highlighting")); BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -305,7 +305,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { boolean mergeValues = true; CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(4)); @@ -379,7 +379,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("none", "highlighting")); IndexSearcher searcher = newSearcher(ir); SortedSet queryTerms = extractTerms(searcher, query); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -392,7 +392,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //no snippets with simulated require field match (we filter the terms ourselves) boolean requireFieldMatch = true; BytesRef[] filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch); - Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(0)); @@ -400,7 +400,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //one snippet without require field match, just passing in the query terms with no filtering on our side requireFieldMatch = false; filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch); - snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("Just a test highlighting from postings.")); @@ -435,7 +435,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { IndexSearcher searcher = newSearcher(ir); SortedSet queryTerms = extractTerms(searcher, query); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -446,11 +446,11 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new DefaultEncoder()); CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(0)); highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, scaledRandomIntBetween(1, 10)); - snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is a test.")); diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java index b20e544866d..7bd77155962 100644 --- a/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java +++ b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java @@ -26,8 +26,6 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java index 9a319f77b9e..1e533ef5333 100644 --- a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java +++ b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java @@ -195,7 +195,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { .get(); assertSearchResponse(searchResponse); Global global = searchResponse.getAggregations().get("global"); - Terms terms = global.getAggregations().get("test"); + Terms terms = global.getAggregations().get("test");System.out.println(searchResponse); assertThat(terms.getBuckets().size(), equalTo(4)); logger.info("--> checking single filtering alias search with global facets and sort"); @@ -948,7 +948,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { @Test public void testAliasFilterWithNowInRangeFilterAndQuery() throws Exception { assertAcked(prepareCreate("my-index").addMapping("my-type", "_timestamp", "enabled=true")); - assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", rangeFilter("_timestamp").cache(randomBoolean()).from("now-1d").to("now"))); + assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", rangeFilter("_timestamp").from("now-1d").to("now"))); assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter2", queryFilter(rangeQuery("_timestamp").from("now-1d").to("now")))); final int numDocs = scaledRandomIntBetween(5, 52); diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java index 086966cef6b..8445f8a1f45 100644 --- a/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java +++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.common.StopWatch; @@ -211,7 +210,7 @@ public class TimeDataHistogramAggregationBenchmark { private static SearchResponse doTermsAggsSearch(String name, String field, float matchPercentage) { SearchResponse response = client.prepareSearch() .setSize(0) - .setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.scriptFilter("random() terms) { + throw new UnsupportedOperationException(); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public float getValueForNormalization() throws IOException { + return 0; + } + + @Override + public void normalize(float norm, float topLevelBoost) { + } + + @Override + public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + return null; + } + + }; + } + } + + public void testBasics() throws IOException { + DummyIndexCacheableQuery query = new DummyIndexCacheableQuery(); + QueryUtils.check(query); + + Query rewritten = query.rewrite(new MultiReader(new IndexReader[0])); + QueryUtils.check(rewritten); + QueryUtils.checkUnequal(query, rewritten); + + Query rewritten2 = query.rewrite(new MultiReader(new IndexReader[0])); + QueryUtils.check(rewritten2); + QueryUtils.checkUnequal(rewritten, rewritten2); + } + + public void testCache() throws IOException { + Directory dir = newDirectory(); + LRUQueryCache cache = new LRUQueryCache(10000, Long.MAX_VALUE); + QueryCachingPolicy policy = QueryCachingPolicy.ALWAYS_CACHE; + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); + for (int i = 0; i < 10; ++i) { + writer.addDocument(new Document()); + } + + IndexReader reader = writer.getReader(); + IndexSearcher searcher = newSearcher(reader); + searcher.setQueryCache(cache); + searcher.setQueryCachingPolicy(policy); + + assertEquals(0, cache.getCacheSize()); + DummyIndexCacheableQuery query = new DummyIndexCacheableQuery(); + searcher.count(query); + int expectedCacheSize = reader.leaves().size(); + assertEquals(expectedCacheSize, cache.getCacheSize()); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + + writer.addDocument(new Document()); + + DirectoryReader reader2 = writer.getReader(); + searcher = newSearcher(reader2); + searcher.setQueryCache(cache); + searcher.setQueryCachingPolicy(policy); + + // since the query is only cacheable at the index level, it has to be recomputed on all leaves + expectedCacheSize += reader2.leaves().size(); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + + reader.close(); + reader2.close(); + writer.close(); + assertEquals(0, cache.getCacheSize()); + dir.close(); + } + +} diff --git a/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java b/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java new file mode 100644 index 00000000000..f6873d0c075 --- /dev/null +++ b/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +public class ShardCoreKeyMapTests extends ElasticsearchTestCase { + + public void testMissingShard() throws IOException { + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + w.addDocument(new Document()); + try (IndexReader reader = w.getReader()) { + ShardCoreKeyMap map = new ShardCoreKeyMap(); + for (LeafReaderContext ctx : reader.leaves()) { + try { + map.add(ctx.reader()); + fail(); + } catch (IllegalArgumentException expected) { + // ok + } + } + } + } + } + + public void testBasics() throws IOException { + Directory dir1 = newDirectory(); + RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1); + w1.addDocument(new Document()); + + Directory dir2 = newDirectory(); + RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2); + w2.addDocument(new Document()); + + Directory dir3 = newDirectory(); + RandomIndexWriter w3 = new RandomIndexWriter(random(), dir3); + w3.addDocument(new Document()); + + ShardId shardId1 = new ShardId("index1", 1); + ShardId shardId2 = new ShardId("index1", 3); + ShardId shardId3 = new ShardId("index2", 2); + + ElasticsearchDirectoryReader reader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1); + ElasticsearchDirectoryReader reader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2); + ElasticsearchDirectoryReader reader3 = ElasticsearchDirectoryReader.wrap(w3.getReader(), shardId3); + + ShardCoreKeyMap map = new ShardCoreKeyMap(); + for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) { + for (LeafReaderContext ctx : reader.leaves()) { + map.add(ctx.reader()); + } + } + assertEquals(3, map.size()); + + // Adding them back is a no-op + for (LeafReaderContext ctx : reader1.leaves()) { + map.add(ctx.reader()); + } + assertEquals(3, map.size()); + + for (LeafReaderContext ctx : reader2.leaves()) { + assertEquals(shardId2, map.getShardId(ctx.reader().getCoreCacheKey())); + } + + w1.addDocument(new Document()); + ElasticsearchDirectoryReader newReader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1); + reader1.close(); + reader1 = newReader1; + + // same for reader2, but with a force merge to trigger evictions + w2.addDocument(new Document()); + w2.forceMerge(1); + ElasticsearchDirectoryReader newReader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2); + reader2.close(); + reader2 = newReader2; + + for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) { + for (LeafReaderContext ctx : reader.leaves()) { + map.add(ctx.reader()); + } + } + + final Set index1Keys = new HashSet<>(); + for (DirectoryReader reader : Arrays.asList(reader1, reader2)) { + for (LeafReaderContext ctx : reader.leaves()) { + index1Keys.add(ctx.reader().getCoreCacheKey()); + } + } + index1Keys.removeAll(map.getCoreKeysForIndex("index1")); + assertEquals(Collections.emptySet(), index1Keys); + + reader1.close(); + w1.close(); + reader2.close(); + w2.close(); + reader3.close(); + w3.close(); + assertEquals(0, map.size()); + + dir1.close(); + dir2.close(); + dir3.close(); + } + +} diff --git a/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 38054992298..5aabe4a594c 100644 --- a/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ b/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; @@ -158,7 +159,7 @@ public class FreqTermsEnumTests extends ElasticsearchTestCase { } } } - filter = Queries.wrap(new TermsQuery(filterTerms)); + filter = new QueryWrapperFilter(new TermsQuery(filterTerms)); } private void addFreqs(Document doc, Map reference) { diff --git a/src/test/java/org/elasticsearch/count/query/CountQueryTests.java b/src/test/java/org/elasticsearch/count/query/CountQueryTests.java index 3e8b6dd8218..d06c2613e19 100644 --- a/src/test/java/org/elasticsearch/count/query/CountQueryTests.java +++ b/src/test/java/org/elasticsearch/count/query/CountQueryTests.java @@ -373,26 +373,6 @@ public class CountQueryTests extends ElasticsearchIntegrationTest { assertHitCount(client().prepareCount().setQuery(bool).get(), 1l); } - @Test - public void testFiltersWithCustomCacheKey() throws Exception { - createIndex("test"); - ensureGreen(); - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); - refresh(); - - CountResponse countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); - assertHitCount(countResponse, 1l); - } - @Test public void testMatchQueryNumeric() throws Exception { createIndex("test"); diff --git a/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java index 95f95defec2..ecf36582437 100644 --- a/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java +++ b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java @@ -66,8 +66,8 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest { assertThat(indexAliasesService.hasAlias("dogs"), equalTo(true)); assertThat(indexAliasesService.hasAlias("turtles"), equalTo(false)); - assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("cache(QueryWrapperFilter(animal:cat))")); - assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:cat)) cache(QueryWrapperFilter(animal:dog)))")); + assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("QueryWrapperFilter(animal:cat)")); + assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:cat) QueryWrapperFilter(animal:dog))")); // Non-filtering alias should turn off all filters because filters are ORed assertThat(indexAliasesService.aliasFilter("all"), nullValue()); @@ -76,7 +76,7 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest { indexAliasesService.add("cats", filter(termFilter("animal", "feline"))); indexAliasesService.add("dogs", filter(termFilter("animal", "canine"))); - assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:canine)) cache(QueryWrapperFilter(animal:feline)))")); + assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:canine) QueryWrapperFilter(animal:feline))")); } @Test @@ -86,13 +86,13 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest { indexAliasesService.add("dogs", filter(termFilter("animal", "dog"))); assertThat(indexAliasesService.aliasFilter(), nullValue()); - assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("cache(QueryWrapperFilter(animal:dog))")); - assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:dog)) cache(QueryWrapperFilter(animal:cat)))")); + assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("QueryWrapperFilter(animal:dog)")); + assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:dog) QueryWrapperFilter(animal:cat))")); indexAliasesService.add("cats", filter(termFilter("animal", "feline"))); indexAliasesService.add("dogs", filter(termFilter("animal", "canine"))); - assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:canine)) cache(QueryWrapperFilter(animal:feline)))")); + assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:canine) QueryWrapperFilter(animal:feline))")); } @Test(expected = InvalidAliasNameException.class) diff --git a/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java b/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java index 72b0134b4ca..a57e81ff7f3 100644 --- a/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java +++ b/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java @@ -31,11 +31,11 @@ import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.index.Index; import org.elasticsearch.test.ElasticsearchTestCase; @@ -72,7 +72,7 @@ public class BitSetFilterCacheTest extends ElasticsearchTestCase { IndexSearcher searcher = new IndexSearcher(reader); BitsetFilterCache cache = new BitsetFilterCache(new Index("test"), ImmutableSettings.EMPTY); - BitDocIdSetFilter filter = cache.getBitDocIdSetFilter(Queries.wrap(new TermQuery(new Term("field", "value")))); + BitDocIdSetFilter filter = cache.getBitDocIdSetFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "value")))); TopDocs docs = searcher.search(new ConstantScoreQuery(filter), 1); assertThat(docs.totalHits, equalTo(3)); diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 2ac8608fa96..d97c0fe769c 100644 --- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.engine; import com.carrotsearch.randomizedtesting.annotations.Repeat; import com.carrotsearch.randomizedtesting.annotations.Seed; import com.google.common.collect.ImmutableMap; + import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -31,7 +32,14 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.*; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexDeletionPolicy; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LiveIndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -260,7 +268,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test } - }, new TranslogHandler(shardId.index().getName())); + }, new TranslogHandler(shardId.index().getName()), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); return config; diff --git a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 87ea42c9dcf..69ae60591a6 100644 --- a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; @@ -252,7 +253,7 @@ public class ShadowEngineTests extends ElasticsearchTestCase { public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test } - }, null); + }, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); return config; diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index 5c29fe57713..b2964807d87 100644 --- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -91,7 +91,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.index.search.NumericRangeFieldDataFilter; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ParentConstantScoreQuery; import org.elasticsearch.index.search.geo.GeoDistanceFilter; import org.elasticsearch.index.search.geo.GeoPolygonFilter; @@ -861,7 +860,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false))).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -872,7 +871,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -883,7 +882,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -908,14 +907,14 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), boolFilter().must(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")).mustNot(termFilter("name.first", "shay2")).should(termFilter("name.first", "shay3")))).query(); BooleanQuery filter = new BooleanQuery(); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); filter.setMinimumNumberShouldMatch(1); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(filter)); + new QueryWrapperFilter(filter)); assertEquals(expected, parsedQuery); } @@ -926,14 +925,14 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery filter = new BooleanQuery(); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); filter.setMinimumNumberShouldMatch(1); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(filter)); + new QueryWrapperFilter(filter)); assertEquals(expected, parsedQuery); } @@ -942,9 +941,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), andFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(and)); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -954,11 +953,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -968,11 +967,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter-named.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -982,11 +981,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter2.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -995,9 +994,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), orFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(or)); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1007,11 +1006,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(or)); + new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1021,11 +1020,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter2.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(or)); + new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1033,7 +1032,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { public void testNotFilteredQueryBuilder() throws IOException { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), notFilter(termFilter("name.first", "shay1")))).query(); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1045,7 +1044,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { assertThat(parsedQuery, instanceOf(FilteredQuery.class)); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1056,7 +1055,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1067,7 +1066,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -2497,9 +2496,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = indexService.queryParserService(); Query parsedQuery = queryParser.parse(query).query(); assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class)); - assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(CustomQueryWrappingFilter.class)); - assertThat(((CustomQueryWrappingFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery(), instanceOf(ParentConstantScoreQuery.class)); - assertThat(((CustomQueryWrappingFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery().toString(), equalTo("parent_filter[foo](filtered(*:*)->cache(QueryWrapperFilter(_type:foo)))")); + assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(QueryWrapperFilter.class)); + assertThat(((QueryWrapperFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery(), instanceOf(ParentConstantScoreQuery.class)); + assertThat(((QueryWrapperFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery().toString(), equalTo("parent_filter[foo](filtered(*:*)->QueryWrapperFilter(_type:foo))")); SearchContext.removeCurrent(); } diff --git a/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java b/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java index 21bae1d20ba..d6aa83c341b 100644 --- a/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java @@ -135,10 +135,6 @@ public abstract class AbstractChildTests extends ElasticsearchSingleNodeTest { } } - static Filter wrap(Filter filter) { - return SearchContext.current().filterCache().cache(filter, null, SearchContext.current().indexShard().indexService().queryParserService().autoFilterCachePolicy()); - } - static BitDocIdSetFilter wrapWithBitSetFilter(Filter filter) { return SearchContext.current().bitsetFilterCache().getBitDocIdSetFilter(filter); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java index 4f7d62e1283..6dff9747127 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java @@ -38,6 +38,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; @@ -92,7 +93,7 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTests { Query childQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); Query query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, 12, wrapWithBitSetFilter(Queries.newNonNestedFilter())); QueryUtils.check(query); } @@ -125,7 +126,7 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTests { )); TermQuery childQuery = new TermQuery(new Term("field1", "value" + (1 + random().nextInt(3)))); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); int shortCircuitParentDocSet = random().nextInt(5); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java index 3f2d4413984..52ffbf022ea 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java @@ -45,6 +45,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -109,7 +110,7 @@ public class ChildrenQueryTests extends AbstractChildTests { ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)]; ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); int minChildren = random().nextInt(10); int maxChildren = scaledRandomIntBetween(minChildren, 10); Query query = new ChildrenQuery(parentChildIndexFieldData, "parent", "child", parentFilter, childQuery, scoreType, minChildren, diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java index 49496d8f6e6..feb320942b0 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java @@ -38,13 +38,13 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.Uid; @@ -93,7 +93,7 @@ public class ParentConstantScoreQueryTests extends AbstractChildTests { Query parentQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); + BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); Query query = new ParentConstantScoreQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java index 79b4a9bc79e..0614a6c2439 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java @@ -42,6 +42,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.join.BitDocIdSetFilter; @@ -49,7 +50,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.Uid; @@ -96,7 +96,7 @@ public class ParentQueryTests extends AbstractChildTests { Query parentQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); + BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); Query query = new ParentQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java index 4af03801c94..940e10e77df 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; @@ -216,8 +217,8 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData MultiValueMode sortMode = MultiValueMode.SUM; IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(parentFilter, childFilter)); ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); @@ -251,7 +252,7 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData assertThat(topDocs.scoreDocs[4].doc, equalTo(3)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9)); - childFilter = Queries.wrap(new TermQuery(new Term("filter_1", "T"))); + childFilter = new QueryWrapperFilter(new TermQuery(new Term("filter_1", "T"))); nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(parentFilter, childFilter)); query = new ToParentBlockJoinQuery( new FilteredQuery(new MatchAllDocsQuery(), childFilter), @@ -328,7 +329,7 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java index 800320323cc..12776cec73a 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; @@ -69,7 +70,7 @@ public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTests { @Override protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java index b1b1433cdfc..12cd10a2cd2 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; @@ -68,7 +69,7 @@ public class FloatNestedSortingTests extends DoubleNestedSortingTests { protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 1029523a325..e4885727434 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; @@ -117,8 +118,8 @@ public class NestedSortingTests extends AbstractFieldDataTests { } private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData indexFieldData, String missingValue, MultiValueMode sortMode, int n, boolean reverse) throws IOException { - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(new TermQuery(new Term("__type", "child"))); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "child"))); XFieldComparatorSource nestedComparatorSource = indexFieldData.comparatorSource(missingValue, sortMode, createNested(parentFilter, childFilter)); Query query = new ConstantScoreQuery(parentFilter); Sort sort = new Sort(new SortField("f", nestedComparatorSource, reverse)); @@ -283,8 +284,8 @@ public class NestedSortingTests extends AbstractFieldDataTests { MultiValueMode sortMode = MultiValueMode.MIN; IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); PagedBytesIndexFieldData indexFieldData = getForField("field2"); - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter)); ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); @@ -324,7 +325,7 @@ public class NestedSortingTests extends AbstractFieldDataTests { BooleanQuery bq = new BooleanQuery(); bq.add(parentFilter, Occur.MUST_NOT); bq.add(new TermQuery(new Term("filter_1", "T")), Occur.MUST); - childFilter = Queries.wrap(bq); + childFilter = new QueryWrapperFilter(bq); nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter)); query = new ToParentBlockJoinQuery( new FilteredQuery(new MatchAllDocsQuery(), childFilter), diff --git a/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java b/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java index 36bdfd15af6..9013156a59b 100644 --- a/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java +++ b/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.cache.query; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; diff --git a/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java b/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java index 4ba7f711429..d992991fa18 100644 --- a/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java +++ b/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java @@ -19,12 +19,13 @@ package org.elasticsearch.indices.stats; -import org.apache.lucene.util.Version; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; +import org.apache.lucene.util.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; +import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -39,11 +40,13 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.filter.FilterCacheModule; +import org.elasticsearch.index.cache.filter.FilterCacheStats; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.query.FilterBuilders; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.search.sort.SortOrder; @@ -59,9 +62,7 @@ import java.util.Random; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.filteredQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -77,39 +78,12 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { //Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. Thread.sleep for 60s is bad return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - .put("indices.cache.filter.clean_interval", "1ms") .put(IndicesQueryCache.INDICES_CACHE_QUERY_CLEAN_INTERVAL, "1ms") .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .build(); } - @Test - public void testClearCacheFilterKeys() { - client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet(); - ensureGreen(); - client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); - - NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - - SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), FilterBuilders.termFilter("field", "value").cacheKey("test_key"))).execute().actionGet(); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l)); - indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l)); - - client().admin().indices().prepareClearCache().setFilterKeys("test_key").execute().actionGet(); - nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - } - @Test public void testFieldDataStats() { client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet(); @@ -991,4 +965,90 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { } } + private void assertEquals(FilterCacheStats stats1, FilterCacheStats stats2) { + assertEquals(stats1.getCacheCount(), stats2.getCacheCount()); + assertEquals(stats1.getCacheSize(), stats2.getCacheSize()); + assertEquals(stats1.getEvictions(), stats2.getEvictions()); + assertEquals(stats1.getHitCount(), stats2.getHitCount()); + assertEquals(stats2.getMemorySizeInBytes(), stats2.getMemorySizeInBytes()); + assertEquals(stats1.getMissCount(), stats2.getMissCount()); + assertEquals(stats1.getTotalCount(), stats2.getTotalCount()); + } + + private void assertCumulativeFilterCacheStats(IndicesStatsResponse response) { + assertAllSuccessful(response); + FilterCacheStats total = response.getTotal().filterCache; + FilterCacheStats indexTotal = new FilterCacheStats(); + FilterCacheStats shardTotal = new FilterCacheStats(); + for (IndexStats indexStats : response.getIndices().values()) { + indexTotal.add(indexStats.getTotal().filterCache); + for (ShardStats shardStats : response.getShards()) { + shardTotal.add(shardStats.getStats().filterCache); + } + } + assertEquals(total, indexTotal); + assertEquals(total, shardTotal); + } + + public void testFilterCacheStats() throws Exception { + assertAcked(prepareCreate("index").setSettings("number_of_replicas", 0).get()); + indexRandom(true, + client().prepareIndex("index", "type", "1").setSource("foo", "bar"), + client().prepareIndex("index", "type", "2").setSource("foo", "baz")); + ensureGreen(); + + IndicesStatsResponse response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertEquals(0, response.getTotal().filterCache.getCacheSize()); + + SearchResponse r; + assertSearchResponse(r = client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + + assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + + assertTrue(client().prepareDelete("index", "type", "1").get().isFound()); + assertTrue(client().prepareDelete("index", "type", "2").get().isFound()); + refresh(); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getCacheCount(), greaterThan(0L)); + + indexRandom(true, + client().prepareIndex("index", "type", "1").setSource("foo", "bar"), + client().prepareIndex("index", "type", "2").setSource("foo", "baz")); + assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), greaterThan(0L)); + + assertAllSuccessful(client().admin().indices().prepareClearCache("index").setFilterCache(true).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), equalTo(0L)); + } + } diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index bd664694c9f..f2487ec9e4f 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -346,7 +346,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { .addAlias(new Alias("templated_alias-{index}")) .addAlias(new Alias("filtered_alias").filter("{\"type\":{\"value\":\"type2\"}}")) .addAlias(new Alias("complex_filtered_alias") - .filter(FilterBuilders.termsFilter("_type", "typeX", "typeY", "typeZ").execution("bool").cache(true))) + .filter(FilterBuilders.termsFilter("_type", "typeX", "typeY", "typeZ").execution("bool"))) .get(); assertAcked(prepareCreate("test_index").addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ")); diff --git a/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java index deb955ed51c..5802d20f1dd 100644 --- a/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java +++ b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.warmer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; + import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java index f585824a267..855f21de852 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -123,7 +122,7 @@ public class NestedAggregatorTest extends ElasticsearchSingleNodeTest { AggregationContext context = new AggregationContext(searchContext); AggregatorFactories.Builder builder = AggregatorFactories.builder(); - builder.addAggregator(new NestedAggregator.Factory("test", "nested_field", QueryCachingPolicy.ALWAYS_CACHE)); + builder.addAggregator(new NestedAggregator.Factory("test", "nested_field")); AggregatorFactories factories = builder.build(); searchContext.aggregations(new SearchContextAggregations(factories)); Aggregator[] aggs = factories.createTopLevelAggregators(context); diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index 44b57045965..357a19afa60 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.FieldMapper.Loading; import org.elasticsearch.index.mapper.MergeMappingException; @@ -73,9 +73,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; import static com.google.common.collect.Maps.newHashMap; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath; -import static org.elasticsearch.common.settings.ImmutableSettings.builder; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.FilterBuilders.boolFilter; @@ -106,7 +104,14 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFa import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; /** * @@ -118,7 +123,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // aggressive filter caching so that we can assert on the filter cache size - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) .build(); } @@ -418,15 +423,15 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { for (int i = 1; i <= 10; i++) { logger.info("Round {}", i); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(topChildrenQuery("child", matchAllQuery())).cache(true))).execute() + .setQuery(constantScoreQuery(queryFilter(topChildrenQuery("child", matchAllQuery())))).execute() .actionGet(); assertNoFailures(searchResponse); searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(hasChildQuery("child", matchAllQuery()).scoreType("max")).cache(true))) + .setQuery(constantScoreQuery(queryFilter(hasChildQuery("child", matchAllQuery()).scoreType("max")))) .get(); assertNoFailures(searchResponse); searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(hasParentQuery("parent", matchAllQuery()).scoreType("score")).cache(true))) + .setQuery(constantScoreQuery(queryFilter(hasParentQuery("parent", matchAllQuery()).scoreType("score")))) .get(); assertNoFailures(searchResponse); } @@ -843,7 +848,8 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max")) .get(); assertThat(explainResponse.isExists(), equalTo(true)); - assertThat(explainResponse.getExplanation().toString(), equalTo("1.0 = sum of:\n 1.0 = not implemented yet...\n 0.0 = match on required clause, product of:\n 0.0 = # clause\n 0.0 = Match on id 0\n")); + // TODO: improve test once explanations are actually implemented + assertThat(explainResponse.getExplanation().toString(), startsWith("1.0 =")); } List createDocBuilders() { @@ -1085,41 +1091,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { assertSearchHit(searchResponse, 1, hasId("2")); } - @Test - public void testHasChildAndHasParentWrappedInAQueryFilterShouldNeverGetCached() throws Exception { - assertAcked(prepareCreate("test") - .setSettings(ImmutableSettings.builder().put("index.cache.filter.type", "weighted")) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - ensureGreen(); - - client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get(); - client().prepareIndex("test", "child", "2").setParent("1").setSource("c_field", 1).get(); - refresh(); - - for (int i = 0; i < 10; i++) { - SearchResponse searchResponse = client().prepareSearch("test") - .setExplain(true) - .setQuery(constantScoreQuery(boolFilter() - .must(queryFilter(hasChildQuery("child", matchQuery("c_field", 1)))) - .cache(true) - )).get(); - assertSearchHit(searchResponse, 1, hasId("1")); - // Can't start with ConstantScore(cache(BooleanFilter( - assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), startsWith("ConstantScore(CustomQueryWrappingFilter(")); - - searchResponse = client().prepareSearch("test") - .setExplain(true) - .setQuery(constantScoreQuery(boolFilter() - .must(queryFilter(boolQuery().must(matchAllQuery()).must(hasChildQuery("child", matchQuery("c_field", 1))))) - .cache(true) - )).get(); - assertSearchHit(searchResponse, 1, hasId("1")); - // Can't start with ConstantScore(cache(BooleanFilter( - assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), startsWith("ConstantScore(CustomQueryWrappingFilter(")); - } - } - @Test public void testSimpleQueryRewrite() throws Exception { assertAcked(prepareCreate("test") @@ -1797,8 +1768,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { SearchResponse searchResponse = client().prepareSearch() .setQuery(filteredQuery(matchAllQuery(), boolFilter() .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red"))) - .must(matchAllFilter()) - .cache(true))) + .must(matchAllFilter()))) .get(); assertThat(searchResponse.getHits().totalHits(), equalTo(2l)); } @@ -1810,8 +1780,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { SearchResponse searchResponse = client().prepareSearch() .setQuery(filteredQuery(matchAllQuery(), boolFilter() .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red"))) - .must(matchAllFilter()) - .cache(true))) + .must(matchAllFilter()))) .get(); assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); @@ -1862,104 +1831,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { } } - @Test - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/10897") - public void testValidateThatHasChildAndHasParentFilterAreNeverCached() throws Exception { - assertAcked(prepareCreate("test") - .setSettings(builder().put(indexSettings()) - //we need 0 replicas here to make sure we always hit the very same shards - .put(SETTING_NUMBER_OF_REPLICAS, 0)) - .addMapping("child", "_parent", "type=parent")); - ensureGreen(); - - client().prepareIndex("test", "parent", "1").setSource("field", "value") - .get(); - client().prepareIndex("test", "child", "1").setParent("1").setSource("field", "value") - .setRefresh(true) - .get(); - - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - assertHitCount(searchResponse, 1l); - - searchResponse = client().prepareSearch("test") - .setQuery(hasParentQuery("parent", matchAllQuery())) - .get(); - assertHitCount(searchResponse, 1l); - - // Internally the has_child and has_parent use filter for the type field, which end up in the filter cache, - // so by first checking how much they take by executing has_child and has_parent *query* we can set a base line - // for the filter cache size in this test. - IndicesStatsResponse statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - long initialCacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true))) - .get(); - assertHitCount(searchResponse, 1l); - - statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize)); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true))) - .get(); - assertHitCount(searchResponse, 1l); - - // filter cache should not contain any thing, b/c has_child and has_parent can't be cached. - statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize)); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery( - matchAllQuery(), - FilterBuilders.boolFilter().cache(true) - .must(FilterBuilders.matchAllFilter()) - .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true)) - )) - .get(); - assertHitCount(searchResponse, 1l); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery( - matchAllQuery(), - FilterBuilders.boolFilter().cache(true) - .must(FilterBuilders.matchAllFilter()) - .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true)) - )) - .get(); - assertHitCount(searchResponse, 1l); - - // filter cache should not contain any thing, b/c has_child and has_parent can't be cached. - statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize)); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery( - matchAllQuery(), - FilterBuilders.boolFilter().cache(true) - .must(FilterBuilders.termFilter("field", "value").cache(true)) - .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true)) - )) - .get(); - assertHitCount(searchResponse, 1l); - - searchResponse = client().prepareSearch("test") - .setQuery(QueryBuilders.filteredQuery( - matchAllQuery(), - FilterBuilders.boolFilter().cache(true) - .must(FilterBuilders.termFilter("field", "value").cache(true)) - .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true)) - )) - .get(); - assertHitCount(searchResponse, 1l); - - // filter cache should not contain any thing, b/c has_child and has_parent can't be cached. - statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get(); - assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(initialCacheSize)); - } - // https://github.com/elasticsearch/elasticsearch/issues/5783 @Test public void testQueryBeforeChildType() throws Exception { diff --git a/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java b/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java index 8e390c4dcfd..5bf2a8a38d6 100644 --- a/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java +++ b/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java @@ -31,12 +31,12 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext.NestedInnerHits.NestedChildrenFilter; import org.elasticsearch.test.ElasticsearchTestCase; @@ -79,8 +79,8 @@ public class NestedChildrenFilterTest extends ElasticsearchTestCase { IndexSearcher searcher = new IndexSearcher(reader); FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - BitDocIdSetFilter parentFilter = new BitDocIdSetCachingWrapperFilter(Queries.wrap(new TermQuery(new Term("type", "parent")))); - Filter childFilter = Queries.wrap(new TermQuery(new Term("type", "child"))); + BitDocIdSetFilter parentFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("type", "parent")))); + Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("type", "child"))); int checkedParents = 0; for (LeafReaderContext leaf : reader.leaves()) { DocIdSetIterator parents = parentFilter.getDocIdSet(leaf).iterator(); diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java index 5157c235349..13f95320993 100644 --- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java +++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java @@ -172,7 +172,7 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest { SearchResponse responseWithWeights = client().search( searchRequest().source( searchSource().query( - functionScoreQuery(termFilter(TEXT_FIELD, "value").cache(false)) + functionScoreQuery(termFilter(TEXT_FIELD, "value")) .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km")) .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN).setWeight(2)) .add(scriptFunction("_index['" + TEXT_FIELD + "']['value'].tf()").setWeight(3)) @@ -184,7 +184,7 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest { responseWithWeights = client().search( searchRequest().source( searchSource().query( - functionScoreQuery(termFilter(TEXT_FIELD, "value").cache(false)) + functionScoreQuery(termFilter(TEXT_FIELD, "value")) .add(weightFactorFunction(4.0f)) ).explain(true))).actionGet(); assertThat(responseWithWeights.getHits().getAt(0).getExplanation().toString(), diff --git a/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java index bf76e8c3b5a..4b7eeadf59e 100644 --- a/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java +++ b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java @@ -28,7 +28,6 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.spatial.query.UnsupportedSpatialOperation; -import org.apache.lucene.util.LuceneTestCase.Slow; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkResponse; @@ -498,7 +497,6 @@ public class GeoFilterTests extends ElasticsearchIntegrationTest { Map expectedCounts = new HashMap<>(); Map expectedResults = new HashMap<>(); - Map cacheKeys = new HashMap<>(); expectedCounts.put(geoHashCellFilter("pin", geohash, false), 1L); @@ -516,19 +514,6 @@ public class GeoFilterTests extends ElasticsearchIntegrationTest { for (int j = filterBuilders.size() * 2 * randomIntBetween(1, 5); j > 0; j--) { Collections.shuffle(filterBuilders, getRandom()); for (GeohashCellFilter.Builder builder : filterBuilders) { - if (randomBoolean()) { - builder.cache(randomBoolean()); - } - if (randomBoolean()) { - String cacheKey = cacheKeys.get(builder); - if (cacheKey == null) { - cacheKey = randomUnicodeOfLength(6); - cacheKeys.put(builder, cacheKey); - } - builder.cacheKey(cacheKey); - } else { - builder.cacheKey(null); - } try { long expectedCount = expectedCounts.get(builder); SearchResponse response = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()) diff --git a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java index 8d8e948f769..e72cad5dfdd 100644 --- a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java +++ b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java @@ -808,10 +808,10 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { ensureGreen(); client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); refresh(); - SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); + SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); assertHitCount(searchResponse, 1l); - searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); + searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); assertHitCount(searchResponse, 1l); searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); diff --git a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java index 1f071e95bb5..286e7f72af8 100644 --- a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java +++ b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java @@ -20,12 +20,11 @@ package org.elasticsearch.search.scriptfilter; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -36,8 +35,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.FilterBuilders.scriptFilter; import static org.elasticsearch.index.query.QueryBuilders.filteredQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; /** @@ -50,7 +47,7 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // aggressive filter caching so that we can assert on the number of iterations of the script filters - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) .build(); } @@ -116,58 +113,4 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest { public static int incrementScriptCounter() { return scriptCounter.incrementAndGet(); } - - @Test - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/10897") - public void testCustomScriptCache() throws Exception { - assertAcked(prepareCreate("test").setSettings( - ImmutableSettings.settingsBuilder() - //needs to run without replicas to validate caching behaviour and make sure we always hit the very shame shard - .put(indexSettings()) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))); - client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("test", "1").field("num", 1.0f).endObject()).execute().actionGet(); - flush(); - client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("test", "2").field("num", 2.0f).endObject()).execute().actionGet(); - flush(); - client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("test", "3").field("num", 3.0f).endObject()).execute().actionGet(); - flushAndRefresh(); - - String script = "org.elasticsearch.search.scriptfilter.ScriptFilterSearchTests.incrementScriptCounter() > 0"; - - scriptCounter.set(0); - logger.info("running script filter the first time"); - SearchResponse response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(3)); - - scriptCounter.set(0); - logger.info("running script filter the second time"); - response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "2"), scriptFilter(script).cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(0)); - - scriptCounter.set(0); - logger.info("running script filter with new parameters"); - response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).addParam("param1", "1").cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(3)); - - scriptCounter.set(0); - logger.info("running script filter with same parameters"); - response = client().prepareSearch() - .setQuery(filteredQuery(matchAllQuery(), scriptFilter(script).addParam("param1", "1").cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(3l)); - assertThat(scriptCounter.get(), equalTo(0)); - } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java index 457c9278222..c048e1f5ed1 100644 --- a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java +++ b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.search.sort; -import com.carrotsearch.randomizedtesting.annotations.Repeat; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -134,7 +132,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { ensureYellow(); SearchResponse allDocsResponse = client().prepareSearch().setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.boolFilter().must(FilterBuilders.termFilter("foo", "bar"), - FilterBuilders.rangeFilter("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01").cache(randomBoolean())))) + FilterBuilders.rangeFilter("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")))) .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) .setSize(docs).get(); assertSearchResponse(allDocsResponse); @@ -143,7 +141,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numiters; i++) { SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.boolFilter().must(FilterBuilders.termFilter("foo", "bar"), - FilterBuilders.rangeFilter("timeUpdated").gte("2014/" + String.format(Locale.ROOT, "%02d", randomIntBetween(1, 7)) + "/01").cache(randomBoolean())))) + FilterBuilders.rangeFilter("timeUpdated").gte("2014/" + String.format(Locale.ROOT, "%02d", randomIntBetween(1, 7)) + "/01")))) .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) .setSize(scaledRandomIntBetween(1, docs)).get(); assertSearchResponse(searchResponse); diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 187afe1b658..109bd030023 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -114,7 +114,6 @@ import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.index.translog.fs.FsTranslogFile; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoverySettings; @@ -520,7 +519,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase if (random.nextBoolean()) { builder.put(IndicesQueryCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); builder.put(IndicesFieldDataCache.FIELDDATA_CACHE_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); - builder.put(IndicesFilterCache.INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); } if (random.nextBoolean()) { builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms"); diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 0f71b7239e0..02c02b2ed6e 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -30,7 +30,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; import com.google.common.base.Predicate; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.uninverting.UninvertingReader; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; @@ -60,7 +59,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.junit.*; import org.junit.rules.RuleChain; -import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Field; import java.nio.file.FileSystem; @@ -128,19 +126,6 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { protected void afterIfSuccessful() { } - // TODO: Parent/child and other things does not work with the query cache - // We must disable query cache for both suite and test to override lucene, but LTC resets it after the suite - - @BeforeClass - public static void disableQueryCacheSuite() { - IndexSearcher.setDefaultQueryCache(null); - } - - @Before - public final void disableQueryCache() { - IndexSearcher.setDefaultQueryCache(null); - } - // setup mock filesystems for this test run. we change PathUtils // so that all accesses are plumbed thru any mock wrappers diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 5d2d00c4870..4c857c24027 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -78,8 +78,8 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.cache.filter.none.NoneFilterCache; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.shard.IndexShardModule; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreModule; @@ -449,7 +449,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) { - builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? WeightedFilterCache.class : NoneFilterCache.class); + builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? IndexFilterCache.class : NoneFilterCache.class); } if (random.nextBoolean()) { diff --git a/src/test/java/org/elasticsearch/test/TestSearchContext.java b/src/test/java/org/elasticsearch/test/TestSearchContext.java index e1ccd525546..bcfa48a5813 100644 --- a/src/test/java/org/elasticsearch/test/TestSearchContext.java +++ b/src/test/java/org/elasticsearch/test/TestSearchContext.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.ParsedFilter; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; @@ -69,7 +68,6 @@ public class TestSearchContext extends SearchContext { final PageCacheRecycler pageCacheRecycler; final BigArrays bigArrays; final IndexService indexService; - final FilterCache filterCache; final IndexFieldDataService indexFieldDataService; final BitsetFilterCache fixedBitSetFilterCache; final ThreadPool threadPool; @@ -84,7 +82,6 @@ public class TestSearchContext extends SearchContext { this.pageCacheRecycler = pageCacheRecycler; this.bigArrays = bigArrays.withCircuitBreaking(); this.indexService = indexService; - this.filterCache = indexService.cache().filter(); this.indexFieldDataService = indexService.fieldData(); this.fixedBitSetFilterCache = indexService.bitsetFilterCache(); this.threadPool = threadPool; @@ -94,7 +91,6 @@ public class TestSearchContext extends SearchContext { this.pageCacheRecycler = null; this.bigArrays = null; this.indexService = null; - this.filterCache = null; this.indexFieldDataService = null; this.threadPool = null; this.fixedBitSetFilterCache = null; @@ -313,11 +309,6 @@ public class TestSearchContext extends SearchContext { return bigArrays; } - @Override - public FilterCache filterCache() { - return filterCache; - } - @Override public BitsetFilterCache bitsetFilterCache() { return fixedBitSetFilterCache; diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java index e0e1d2db4ea..b321a0dfbb2 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -24,6 +24,8 @@ import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.AssertingIndexSearcher; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.QueryCache; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; @@ -56,6 +58,8 @@ public final class MockEngineSupport { private final AtomicBoolean closing = new AtomicBoolean(false); private final ESLogger logger = Loggers.getLogger(Engine.class); private final ShardId shardId; + private final QueryCache filterCache; + private final QueryCachingPolicy filterCachingPolicy; private final SearcherCloseable searcherCloseable; private final MockContext mockContext; @@ -78,13 +82,15 @@ public final class MockEngineSupport { public MockEngineSupport(EngineConfig config) { Settings indexSettings = config.getIndexSettings(); shardId = config.getShardId(); + filterCache = config.getFilterCache(); + filterCachingPolicy = config.getFilterCachingPolicy(); final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l); Random random = new Random(seed); final double ratio = indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow Class wrapper = indexSettings.getAsClass(READER_WRAPPER_TYPE, AssertingDirectoryReader.class); boolean wrapReader = random.nextDouble() < ratio; if (logger.isTraceEnabled()) { - logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), config.getShardId(), seed, wrapReader); + logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), shardId, seed, wrapReader); } mockContext = new MockContext(random, wrapReader, wrapper, indexSettings); this.searcherCloseable = new SearcherCloseable(); @@ -123,6 +129,8 @@ public final class MockEngineSupport { // this executes basic query checks and asserts that weights are normalized only once etc. final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); + assertingIndexSearcher.setQueryCache(filterCache); + assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy); return assertingIndexSearcher; } From fe7d018f0cb431d16062f18d3f7552781caf4ea6 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 4 May 2015 10:33:47 +0200 Subject: [PATCH 236/236] [TEST] make LuceneTest extraFS proof --- .../java/org/elasticsearch/common/lucene/LuceneTest.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java index 0420f4b2966..816409675af 100644 --- a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java +++ b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java @@ -144,8 +144,11 @@ public class LuceneTest extends ElasticsearchTestCase { } Lucene.cleanLuceneIndex(dir); if (dir.listAll().length > 0) { - assertEquals(dir.listAll().length, 1); - assertEquals(dir.listAll()[0], "write.lock"); + for (String file : dir.listAll()) { + if (file.startsWith("extra") == false) { + assertEquals(file, "write.lock"); + } + } } dir.close(); } @@ -200,7 +203,7 @@ public class LuceneTest extends ElasticsearchTestCase { assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits, 0); for (String file : dir.listAll()) { - assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2")); + assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2") || file.startsWith("extra")); } open.close(); dir.close();

DE?qi7@jtkci%XpNhO6BtS zY-=W>2xoCYiXZqqjKD{N6TEeF=HPFBtWsFUWP-}Zy|RMvu*i}?wZ=r}mU^ek|B&L^ zIYpu@SB|fekBbBRYeN^ex;wz8`%!^{eH`_vJpyzi<2p+dv0_57@-_ z6GRoK(*EYC^-OO^C0l0@jpm zca*#^e_ZZ?GHL9h<$Ow0U*teWl4o4xfe0v&?l=L{t~-MW0d+sMD$)$T1A zmkYQv$=CXYCGxcb1K*g6o!%PKnY8Bp3391Jy?`570g0R_hT51=Nx_ z+}wHM_q$rxB2D-O6E_upt~*IA<6f#sE#o3xi{zhWW=?WAHQ}P0+4Y;Fbr*F&^e(oZ z2ReS#JQ=#13w7^xVw+3Fw2pcWk#MtZtQOsU-T@(s)HvlQd1P7c5-;xp@W$D4UMXfvhEZT za-g4|euV7XyY)>Ado;S@0L#ncfQ4#rA2HisFa!^J)g!}%=*?sl+a*u|K~Aa#ot^Nn^Z1@@$p``0f4 zoc<%qNibATooIhHZw;i1TlQ+2w0ro<=b=HVDnGiN;?d)<#TY4l!5jpLj{>_Ltuk)a z&~14;bdXqHZ6Eo7GZB|s?LDr`>&kkIdbq`(`K|HzP*`H{(SP}ufD4t?1%zZ|6l-Bg z=|Gr#xc-L?*CHXTR@zmcOuNJ}DPIFrDkjBq79)-yO9B1_t42!3eKY|!8PL8`UXXrS zW>&*|B$9?!j1;5c7atdj;t1ces}^ez}qJvVeA(7k25r4S6GCd zT(jcGZ-krbL7H90XgG`0Y7vBIQ`Gc6Jy02G=8sC%r^Z1k*{5D;yxSvt4Z9i767Hx~ zhC8}g2d;x){JEqM6KqQx5^Bz}`}1Mdi}MPY(A_-hp3L1`KkD8TgiPqGN~tacb=+X>j>x}Ot?`pc(fkCWLrzm>~-M%6}JzKXVc3!bL0@27vb`mzrY zjanDNM9(k>)1Mc7k7r(w$0pZ0CiOPEKZ`c*uq7*gYlIeUF`q1^_Q!$&<~s_fWNT`F zb*zGAODI}-+M23Lc>*9TAxfG7qwNP)U>)I6+)_ziW+$qz>> zToIzOV4k;hAp`}-RY%=49v|Tm(aUIInnz5u5pTdiQtP|49mucJR-vXo#NDp#*j0y` znbBZn?54S$=`)JeqLoB$9|s>3xVm70Wr0xRZkWp!a@ zxy-%GWPEJc&+=>zDc&OLn;Np`uHl!sIAnR2>vk@ni>u9ZO!o=q9p2l4o36;H+fjU} zTT>$y+KfyC4+4)3gJS+_v0ka$%b>Bnk=V1HGpR!`!CqyklNb8Tf(@rCOb|Y8c!xLK z;u(2x@A0aVtov^tujGytHs8cDKo)=j(Tf!5u>-DpF*Bl9MiT5*N8(Vdo?q3aK@bK| zq2q$!kwR);*lqb^zg(n9nwv!pdGM*byD#uJOalBGgp(i>(DcKNCIya`l}<3J;#J&C ziW}1X<`B?*tQcr1S7UHmE|e=*YN^$=h(xV^I^py0ar@c_m3;Q>xiL3RXI^G}FzyX_ z5G0+>Wfn6tGu?%w4PXoqgb^Fxu!BveZ(m4Ea>da$b^NMENzqF*6_3?mh!)gt=ZwE( zi;-kHxrrZ|S_2`|d^899(?nY7T>v2RaMJkWb%+1@LnX88>_JQS5Eq*#w|i521J{~O(uc6*Z+O4-*raxI?czUM9f*AGu7ha{B=Td)~*P(BYd7r!ZE=Fo8S*0%dXt4V$S$Ub40}QX zWSBA%qWPc5mk_+FcZ@9fUkwIB>=edmJUvDf$$8({dYY-g=AWi6%m{g60R%#xx}I>K ziQzz#-EbM7_kqcpi4VTxDH-ezhNpkCyTym>J3ThQd^P;Q1LXpN+MpZYRC`K0)XFfs z;O6rXXDtn}CsK%U>|49Iut`c-iLsEYljEv~QsSbp|yW>kl1?cA91FIUkm>-Z-T5?GiHer0Nz?DPbcbhyYJT8-P-D6xI0$)(B# z4BheR(BQJ^-ZAC2vuhXf!wP%K9;{Zl!44#YMBq%DG3CZm*xB()XEp@i<5<}zQ7ZNl_0|K zLB9OT<`JUm?^H(j3k1Bzxt6K$>n(R!Qc;>qHnT~OR_A?=bS}xyl9&#x{gk~a1_-*~ z8$U2N^}mJlm7CdPO&LmfCxZvvH6{Zw_0T*Ks+i}}g~m?EPhQ z{DXrU>^2{U$f1?^+UCbuzm5YSo}L5i^Blf*QJ5bSrX#6>8wHvrtap7Mwao{&BeuMP zpL3o^VOOJQ8XeV1WO8Ym2ZGTINZJVs)FPvz`$aRcV@VgCYMQ>Ta4b5i>hfilO4slPXGrp^e@_YV^fi5Z-Ya;P4OK6X{~u$*d26;kM`5NW=F zf|Tx01ViIN`BEeF<9x4!Wp(p_b@Y6|Z0j|Fr25y`0r_Lr#q_-+?WP&Szi}SZu!A}| zlNY4J$!@Y3)J1j8UQNF?l>j|7aOzH(rfsq%_?r?zxv+t;GEMeDpf!T^+iXzT0lS@& z{nd84&UyZF%Oh|OO&EuwPNB%0K3?DKWu=0@6(%+A)=#w8VXHy^|91gM!#5QCl}k3h z3<{7lb@|(AC@DBkEsj1X5?oGO+pLpPa6{hN!h1A5Q$J!JY~Vbs87d0mhdm^ z=x8t}AfRDzp?krL(FgNi)pdDp(NG|l0li=c@m1&&cidW$^o%pM z!b+XkdKEu*R^`sp#HNZB6QPL@cOg-gWHBEta1#>f0FH-OTK1OJGOc~pAW6Iani%#q zpiIamZZ$HhEIQLGvX7WS-wxIxUTs7=V|iVR$$4sDGa)G3ZMV{h$-+&++YSpDTa7{A zkxbu78}xC*w%OlaHb0f$K&(^pnK?prvSnxZ;Pbf-RMEJZ^?6x~>7E53`VBK!z_7N~ z^3&*GS|YmxiRX6H(n?~QDJ#pgQmzltIO3JYn8BcJnDNeCg+_ zWHgguMeJDPYLmo!lg`PF(h9tk?K7Q3u(cuqSXpCP`SgfJIB!k_{7joS#}sgP<9yO( zfG7sgEQ_L_jp%91=}qWq<71&GgmV>jNMJwldGp&A@^1+*f+>W>X zFop_DJIBrnRILANo5GBB-C>I|u|SPh^m~l5@ek#hhYO=f=ZEJk!%D-o&HGPI>T9Y} zm-qT4HEMU`B)wKVVuVjD7nw4eM`r}}Xbnl}nY_Pe3jqK4JI%AfUu@ zc}TX^#-CJo!r(R42xpxD0KB5)#Ay&Sr})b?}% zf4{Go-VFCKL$xl$CPEyI~y;AM~*jb_YiIKPS>DCU@Y{VVGcTaoBVH;NUe*QwFTR zXy7m;J&K&k8LMh?12n#;2mYy93HS(F0mF#QRjSA-3W=?1r`mR7%R|B!1~_LN$|{A7 zj8Dmvf}@|J&%qt=A=Vd4RW=ragVh{FaeT->-xy^p+DpB5kDO;62W3gM-WjKhZ1?sm zcK39EOzs9vJA!HEODOuJdV72{I{?SBWq;9JGR zN{PRww*C!9Wl1Sp2m{mgx{GnV-9&pm*%=%-9Lj;dsR&gA^cUY0pJ0j~mSQ$TWE}Xy zyJs0KSfE@CP}?b)tEjb9gws83YOGn#UmuIY;y6rO3FnHKshFzpHYSU>GGM-Al^>r#PY-yW_9x{h|Co{00yT|Zw2 zQ}Ar(vu{Zj;Q`f0coeKczY2JyYtdcDJKEw0b9Db?u?4_g+uq_tyw2!>`GI5oZ1!1C}29G2Dj>()N1TJi-!1m6CCOI9e zw%XGuU?0$dWf4#{;Z9U8;okZDh6nb>n&z#4r||eNQ_=i}2NrZ7O-h`^6df?gv!vuH z!#-yqL45*4f-$)v1w9A~`;i}Ew3M?El>;pX?-jERU6z;5lEuh4fb-GJ@EXe|M(y88 z^0&6_VHD^m4s&J`-`oAl8aKFI+tAuF()4-2-l?6l;9Oej?Qb*WCymiYRB>PB?Qt!y ziqLY_!#aOVewPibA$;e>sTEDIN~Y`&&Z${7DW`5n0m=5Vmhu`sif@Mye?`o?XI8GlC{trlpR3_c63zm4?9bDL$JPOHL^ArCAuZjY*L6rMd4 zC==d|^kdJ6vNvl&{CK&C|CQmI8lHytMn~IS7rrv4C8!1q=)c8ZR~BF!>gYNGq8OMi zjt#MiRP50CIlv9*snH9{KMFhUx8#1=`Gy{b(hgCJZCQ$$^GNm(NNAptptFY-VbHo# z7fG8Ai@M~AV{N;XOi+1J&H>)+VVW(d!Jey zQ!AlUrc8CGQ;cFSRnfY8T~RxF2pbd7t!Z@}G%8O>&=>6WMy#uS@sj^8rpSQ`9BHvp zT@D29yZF7bIh%JLx=f4eq&c#@~ zj&aiu%efGu|8LjG?h)@u=H(Q#FlS&PHT_DWv`x}@OtP5q`{TZF;(R%@&}Fx988fz4 zZLTCsVZYV|DDOZ(4O#p2Lh*Q4*pU0!p7K=gd)9}oj}O*XJi3tU$u;=8?2%v3hJsNz z#9EWUIV$^MXdM$81&ZQ4-eym+OmMC-X{iCyKRp2#7yu_|m)^(2BbGjMa>*#-mmhOe z(P~2EoKWwWx%n$^r>+L$JGp&5wI}TPTpNRtP2Sm{uJ!~Cl7kkgkC5{_OmAaWlj!E% zG0Uz~F={jhLdHaZ6n1#GF&Qd%c4~D}c1)b7(Zgp;E(N(%8-r5sp}az3G9LfOEDl|_ z@*;)!dL~?Wv3Av;qQZq|?57SfPoSnlWNU8hocVHQ1*+-h5z~%Oq}hrXdeuGOfW#Pl z=;=}Cy@6KDHoCH5V_b6R(k#crZu#52)bY#a1{qfbdQT$=|H#K&%%XUelmRX{a6(TD z9op}8`qB{iBi+MLf%^Nm?=iPKv5^w@woP%>y%5ojX1cY1`h^*iS&>@+e5k$J>m^;?o^6!_3#8 zmll_{V3=yKGqLy^MvqBe!;TpTC(|N@${IIev;d`608;uk4K)9vidI@nobERes>cUp z-~&n7qCw&B6j!(w7hDoNiLAn^?(YL&ck|DB`OA;9=voCLK_Qt*-_~OaSecTvxT#YX z!ADz#`V8!_yjPc5u_qF3{;|Pz;IjzBBW0XvIufQ53`rHzZeuw9yq3if54sw$4V~PBVLa>4 zfsmKtb|@x%yQBwo%Ccl)w0^v-hss6&d4)JIF)XLsAPvy*aNz@654BkE)B3)%yxg5Q za(LW8UGRFKe#Z^0h(8O|z-XSl;7k8l2M4}P6^UIViLkJy7!}Pp`~kirFbF%bdy-%y z8o*qD!*S3sX$t4g_FKHHF5mUv=~RGHh{1MIpDau}8+tpke+Jj?a>>ju2(0rPrNuAA zvX0Q^XPa+qO*)K1-B#pYFr3$}B-#!A4h+cahLYadnzG$tB^i#wQaAn@2Rb76n$XCO z5M)>I`n%C6WPMaDokHB~G)$(}QsDm+$H5fiAeTbj0jUFuqak~pZZj;$I+jDX^*3$z zG&BCWq#a&XhsmVF)lgKCM2IB`tE7C))FFPC(+j9iva&VSk%p&em-p2f%FPFMr~VjO z9y!EyHyl^)YN-^VyR}*&y!*K*6-R=I-QJ|iN+=g}us7-s-ox5gEp4FK+|4JX zXYz;A()nxm#g+X#uk8fWOh2aQZds@Qh|()$b`tj)_`Ce&p(25DAqNY4gbQ9!fX`4h zx_xXp@BS$6OJZz=q`fH9VWe_PK-GVnU4a{6*k%JZD-2DM@J!ugn;VB;Lq}0UZcfoZ z*3UoiY$~}i^rXk-jDDWQ|KMnifngZ0P7>?iD)Q=U5p1h#_^Tb4WgZ(d-Hv~a5l%|9Yd~`=w8E~;5C6||$ zRMo+7%@5JD5f&nc`CL78hkfFE^|3^MyMS$M2&wX$V@-`!AZgQOMXiOgBJWrFq{bVF8gK@Hw%DRmG zTF!^|cG@WWE_kqgufEckwR!BTS{-^;)A zimBEk?vJxO<+`z}ojr+miVNi+!!Q+tT{HL~P?i=&BXoQ_5;HW`*+pQ_J9#Z}%7%^c zjhc!ew}*_IWTDvHrs@<(tVHu>oHqRzW`YTT`j9x3s`=iyI$?fTz38J={&?yre9t&& zFezB#)~pvvHJvW$Fbdu}=H#vX_IN^(nA9=5`BhjAif5*~l9OZQpAhr3J zBe{7!c0Ej`YXxe(1s{#t~-dTZA_CgSKaRxF#rxABAN8f9BNiN@>hXnK3B(_VE9 z+JoTRpeM6VQrOKb$0{QE5TICf;uMF~cDIiD$yVKc-a=!|oZ~}0-)X-_wyqf3p#Lu$ z!vR}I+Z)oWfc@H-N28paX*^lv#8hF&6N^Aw1wr}N<&7LvqqP=R%fdkTGhjQ5R1T*%&|#sQ-&w%|_sy@V1LtLFQ)^4`J9AF0%`Z_Pf&mNfLyeS!SU zYD!;?EjiBes!=y%nr$7$Jfm%7QWtKG9{(=1@~lC&ds}e;6cyv_FNT( zfVKl?oNW@T3gu8^(`{`knDPVJ-NOmk)qNu9$ga?&@pSSDfiU6U_GQ=)M# zk9b?O20DQFSuY4Mi)J~iP<8Nz#ROH{Dk3Vx3w~1q#}+WkBndDv%Z%5&NaP((r z8fi8su*aLVl)ubJKlMzTX>`R*;7Q(VPRtd^o$brbFL;j>p+ZTGo`q!mI)FvhC_Xi_{fEzu5;|#M+XKL!mzJgSF z7(Hir_&v~3;1b`@Cm%On2Xd(?t^wXSJ9au#v*2nSqWJt1UxK44-FQ#r6A`u`#-C>rCs+VIlOLD4?T#|5F$*CRZU(m_~{^9C}$w)S?s& zte6R7c^dVXQ4Z5bfY{#z{wX;n(YPAvyUhsGF~m8z`PiRf1uzHFg;xLXrG^ESQbO8xQld+h=9FsH$s9 zjS}+T#>+y%o!L%mVZX9$sAObc{DksxStl%l>oRM1Q#yVCBc|bI>mqB}>rjHM?0V_@ z%9+>RtM04FUUBi=)OpH}M2HCKnEUkjOLLw)A5C9>c|oCA4hZ9(t$X0<2wT(0#DM2rQ5eK9v*2W=#ju1T&>+8+9ip{nTk zf?EQk`ZReAk*l~nAgK#y_9l?^(@&O#FVeaAhMQ5cA)pU}E!`fjpLs3)|Miw zx_7jDlyef1=S-HDn(6;_?}5!EjDK2;utI;A;^W%Z>RO#pg7Nj_^ZX&y>8YrF9>niM z!z6Yh--r2Vo3h{Q*(kvO>iXgXgad2))uLO+c?0j8|&1eze`zK`gSIs$rXX6C+=^ z#0;OH*lc=7&U!iQvfb9%Lh>-sAb8;cDMKwaFP&>iQS;1{Z2uFS-jy@QPel0>_YNw1 zo(V_kYz#l2k2o6ethZWxjnyTI2^1A95Pll;%Kn`=30*z)@wh6>15Q2|fh_IvIB%YN zLj}ls!GM1d8_dA$_x&wCa}(O#;z`N1TCFSn(s+WZDc82w9gGyuOb3+kDlf0NsCv=R z@T!i7vJ^e9C-cuIZ+u0s3W7te-;s6c8U7aFy|%+VJi`XvKJpt?U5jA=25L)xNH@`F zWz9tS6dK>D7yRKb3A*{8Z4~j5yznXGJE{J!tafEaV;hjP_scj?yYNxcVbX?4B^P)x z$I!Y-04#Bx@pX2evY!;WwWfrx-2<<;9`VeTppPV}B~Qy;j<47HXf|PFt5^2kYBKx` zY&2VG=0L*3!q+zCDg_=CK)9><@5J^ET!(}{rwA4;=}mHg3NrTIq~6=O7eB>`1!(?+ zXFom`8*z3ANSwx5?%mmf>U=x43M zX#rWGw+PB0SDg~3K??6+?y1-*p2XoTo7g%Nd#OWG8?bv4z^{sXcJ%Mp>Gx^mB9v;0 zPVc4W7e@00QT`uH?cH2=CzkUHIBvg&E&x^)j^(s{7X8*+U=Pn-Ch`KOA!3USG{CT2 zqVlu(_;rF!*Io_LQ9#@&MU>fgoDv^@XtIWY8TRkrs&MXxo$y$8zA+O;YXtLwGf%6+ zbzKe zP*$Q(?|b_gAFUWjKP|66;iXU?xPr}n^T`X9bmYkZ#2mZT5JEC1|H6iAff>he$l+V2Q9PC;@#Mj63df1ZpmqnC_Kn+YH%IwEbbL>57-Zro?D;m^v3p5K^UIQ+374ooV$U*cM*eWGrxhSMEY z%st@)QM=+FJ9o)xnhkl#Id;D-c60I1#$<6is1ZAI@56k&S~1rVo6&3aONm;oe0I-(Yj5q zzB(yn+K#y>1IiJI%3100M3nb1<+}S`N3v(h8_h!2|{rrbuAxLRl)3e=$W8n4l zLYf)rv{ed?S9V=P5>F7Xm_-rju|FfISEk(qmX?n~m|waZ6S#bQ6|&NDBpiTrfHSKA zixHKX_j(S7>6%Hz_s2F5`#d6o+<3Wy$m?;al{r&Ww1s%%kC^^GYyv5h2-Vo>M#_E% z(pH?rNl6p3xwYuO-}OamEz#%jffl_HY<_#O*;y|6=}vNkU1A}KMX<7Axa0+r7*XW+ z)1ovbWpYo_&uteVE6HxAAQbUrwx#R!n3W@3pW-ac?ySA(ih(KM?Pt84a>b<(mnbON z9D^QB>_j{8k^Z~9&TFnX?R~*}6E3~eYeFhvd^c{!c*N6RVqsjN>Nr@#$i=SQLTpHmUgJ)-YGWIuANtAT_bGu-I|Q+K_T2L%KOG8(rYM*!2lkyPS^0F7MW ztJHJQFoqXa7l*(f=tAD&OX3&koY*X2*P8-MEBaueXenX?`|MHgL#i5^J2f^ADKvuz z#t9jiq%&o>#^#<7^TqdqA9Z_a|8Y8MV#nV%Tg`jX^&pyF|GB8A>A!_BM-~!}j`|yQ zM+UCv*1g`vnVxaq>ev_tFj0jlk|xgu8VAe?M=Jqk;A^8(d^I`HBpD_^0z=meIm3uV zGpAODHj!C13LGKlF%d7{)m6d%J5RG$Pyi*JprbNCcw!&!*=V-Ay)8sTTX+U)y_^Z?fKVJ~kBc?t{?>!D4(DbS}(2Xyg zwQ&s?Gs^S&=i=uWC=2}?fCLaN^uc6?*xh$Vetiw=on$6M6%9-no{!}9g=@R@_H1uJ zaXTFuSNJkWzc;hl%0dWmPKOxR%se*QH*RpC2+z_oAnV8yAPt$)N;1l+(6Ap%T3ni* zJ6eLX(5(-{Y*g#G{fk3!q;y=AiZgap@_07#6mL}E<~k${(5MpjSaF=kxp4rmhZh(Y z&C-?jq2uzF4;*`P6i6gVI@JbrzNil1EBdLs%VHMAF4{tmlh9r}%4UbKLY){ELdF>& zXCTNG2JC~%8kTREHo*NcekF7rViQ=^5Huf;H~;6I%@qK6U!)54ZQVk-)$o)>vd zWUzpLVEw|l&f!r^^?MN8!*unvJf!$WymN_-L{GW%sJd5NOTwBcWh{ z#6j@CQZhn3D?Rjl^|ML#z;)wzn2u=a874}UAgz(;Jo8;`}1r_<9 z_H}#O%|!~BQd#^_JlYDiLqxoFZOhInD2rxs$~Ktz&rcNc`T#=?96sWJ1^AyU0!k-9 zd@`P|v|9zER-FJ>E`j&SW$*{Lu}zS!*y!K0G~<6oL=Y@_FO*-4cQs1o(h!U-cj_4Z zil8`m1mAA@xN?OmmQX}$>6p`D7y*R-*ptZZDC|P?rM)! zQ_Sv>`%O@;pZG)x2gbd2I-q>#8xOs@ACW3r@{cRdG^=N`$+FS*jnCt*CMCLlH$Fgf z3-B(A2JCTbe_)DI4RY0JNleeT_5tmj<<@7PIT#P23qO@|uwp^-haYebJ&rdMxIGr{<3XQ!6 z1>gV#Wkm*P=Ev6rUv?o==f91R`;3l!A1yW|YR~i7-6XuJ;aNI!-7$W*s@q>wa=#5G z&Aw=b-beSIb%d9xBvxA zGz_@dK+~JPJJq@lr8Z7^ycM)=KW9!KjoARZ8AC?aVMYuW8U7MgcEVmJj+B@F^%#05 z*H}cxQhu5)6;}XcOV`|6FQ5@+2j7X?cHJF4r4QP-8`ePFMQ;V z{p-?0tncihg1i9JOt^4Dw8wC(dH%}?EGHciO~*@j9up_l)5kl?Z2A+F#oc(BSpg2q zT}U#)jCuuJ{WO53AG`*6+~V-B8hl2c{;xB=g{m83|LXQOFo}4hETn(TaAA*utqR|a zw$jg&bl-IwaaZ%2Wdq}*;|=VVd8rS9042_BeR>e$Ncg~|y<$O-LH>oFX@D+HD@=t# zEF2uDld0L=n_-yznhv^}fn`Qd3;X{@gfc|J{eqG-1gaHf9^u?Zxju-@Rk{L(_U3}B zG4WbbezY7lM5z?_RF>N=Y?x0wilemTe{L5u21c)$>%K9YhAZ^%E66(2KACi$x>~+i z|2%9ih$i!&^}n9J%x_RgTR#+s{jyQjqwJkeldGr`;s5(zo|u44bhwyM@!U@$9s1gU z^<8|#F;0SW!;1$hMq(~#J4~B8P_I@Nr7m+M4X-PX?ug`xH|q*^M`M#A-#)@BhtkJe z-Zy8S>)i1L9_bIr$%U+j6$9rB4B&sZ=~8R}9RUzs#R~M~0*RmREwOmsD0#QgT{d9_ zJ`P!2mN23rOR;CJhK9-+{lCsC}QChl>yCZWL47{LtL&XcVT$ zb_BNR;|S&>szVIl$V(~(UHcxrs{e6<7;&rUqcUR$WyK{7`gf%=Ar z=HbvK+nk%dsL?1%`c6ht>|Gfo6NB-t&?fu4h~<$fTQ>hL2KX$B;`>GO9IUXzLm={} z7~vB3=_APiZ!e1eh6WS`CR&Vv3Tna%YcL~{Zdnx!$$f6bd56c5K{#01N#-bRI{xE2_U?W6-=r8Z_@)Qa`hF|&rImH^=Zo>Mx2yn9r$_MT~X%t z1c~TrxEnHTHf#N%|4<6RzyWql)Ig!Gb;JwR;D@Rl+?$D{aa$D!wvtiRSSZippBHVbC4GYw$A6>}0TE$T zErG6#xUgpr;{6qt8i1y?sem1Q!{lmr}#)@o~aJ8LfnQIiyO=MHBI39A1N?m8B*qR$4_&6AzoX+<>< zPpP>DZBeS4 zMo#TGL4T1>W|z_-vK_cdWD z8se?u?AT=P%WKZIZ}9%ss?M_p!+V$ZJoOcQGU6)jP&XG?y#Ai{ue}&hAW!x;$vxWX zARoE27pKdOZYwIvOm+KT%Z~)uDLjEcd-K4M;y&ij#dKJh8__0|TOfqzk!yD69KIre zDk4W#?XF#|l<@CZBM0UNj^7U?*}f=mafQyFi$Zd{Xrw&;-27TWqlQ;?D*lz+<|N1= zhAqFjw2(g9ssL>W&)fF9D6KY0w@u02J6qWC71&=3n(C#+lYzS9r|U!1Ej}E8&pMih zOP0SxL0$!EbX(dJlTvX{_3?yP(gR}`;b;l zzFj^WEG;;D;4m^UP1I?nzw2zNcf-~eI+$CBfXyFGNu(AjzE73Z$2%E`-@ zxn%vne1qbl?mqfHb&y5g<*Yu7kM#Av9$@0I$s3K+yyvz9g=6kfOcd7F%Uv0pT~+#g z;DC~MJaz$YykGF*&Tk-^{sW;Z9C&Mxq<5pH=;7Z}0<2VuK9@SMZzn{h;)#m--JsTP zlr83OB`?_aX#xHpD7!ojEf7D2ZWrV|yxmLu|Ch+n0C!u&Rel+?vn2w&Z2u&gH)RL2{NN6yNTJMg(#*6!72rkN&PRo%7z-f~#mhAoYUNrnWQQ{7LMfd1s>0)h4W$r56W4@>Xp?lRoaYHYc zYsrov-Lv!gXDiELyXuCW;2ISmW`hJM#060Gx!DE$Y%A8Y@Sqi<2yhGFZ)(fR((73n zt`%vc+~h82bq{tte*FGnd`qA=eI2%1*-z>so6)fLZdT$K7~ zprLyu9R{6Ci1?nQA9YO_y?+&;UUpXY!u;(FT9i8|ybLJ32^721mMXb8r8YHmLp_Z& za9Ousye+a{tbY>O$Pnv{o{y{ISMyz4E#Y&E6uGJH%dCb2wM_Vk|m(HDroYPG=8AtOt*@wJ2 zGrr&TwzmP0HT=Qg?1SDZKOei@+4>8DN7W*wu7!V$^w)~XEBmIE{FeoT0v4bYuJlrM z{=`|&6Xv@sG2ssNgj6-w-Db4?%G~u#q*{0{&#g*qVl>Xz@L3wNe0Sp0>-iZf>hOQ; zy=7CJThKO&O9<}nE`z%zxI4iK?(PmDNN|VXPH=a3cMIb}~0^?j)Y-ofabieu2#e4^5v2PK=M5h=M@e^VVs(=@7=0H; zhsMnptI16i|LzHi)`MWwOmk1MwRfB9_11c+lCI8QY%R{d-d~T(b<(hT%2Sca;2~0P z^>@f9^>=1IqWt-D3{AH=x=b%J{uTu%_onpGwo1w4{-5Y6f4?2?^2S4BKCMM8{WinGv(SgM^ zC+<{)6ly=Oqxteg5xFBKn%QZ){=w(Uwb++*AWxj>{qy-Sx$Bq@}Zy+A*+&wzcr4mH$U)(&Xty2Ns?WG3QZpeb5F&qTXvfW{==P%zvzswr2@Pm+U|CP{Ih@i;fN&llC_CrfpsqRoidc7w;%~g>-g_*;(q%&_BFMel zn-@v&n1Y8354`!^ZyhuBwEhd72Ehu@RsqQ2nepYHXjm_FJ`P|d2NJ^nOR!%w#+A*G z+Xa9}4q1%=tp9Dl@}vKT2?ZPjvlHm3FD~q1VV{G@#T^?Ult!M%sJ4Fj#dCZQ1dWkG z^8i2D)ER|$is=PjeWH<>;PQ+_DD_4CHU_%ae7b(2l=^_IH~=tcm**-ti*ii@rPkYz zwIb#NZ}t%Jp>HB9{>QSjfmzqyA4dPTGU5~;WaaXGWyE##QTvSvdv{wCf$iIh&`4U( zur2=<>3tO|gz>X2dk?m7LQ}`VbV2882LEYJe&?zg_=vo+>G37In zK<(InZu~;w;|`p7ThQG3I&VFZUln?AB3Tr#xR~smO0TJ|>yII^#b#xQVSB4ZJYnpfw?m1rUspLz$3qbUaANp*aSG>`-D6effhLS8D13^_yc7ieD%|O?sr`0)MBCf4bQxL z{)mfKjrZ1g9J=HW74s|$EXS@LQk2lZ@);SWXI`fP#XD9 ztw&iV(%3JnKUH@>2cZNqmloW?1D ziO6+$2({-Te#rZt8w|tTWGQG0YZ^!^E#F=_(OJ&HS4$m~sC!CGm)niom4W z=-C=T@!~Cv1$w)tgi}jLgzj`V1;ihwC0(CitYoQE>zo=qQc6q|k^-qfp00=53=p0S ztM+vSMNnH!6PELrQ=NP`3jj300qwPnEB;hi{=uNR4DRAk*WG&Q*8DvKweVmM#Et@5w@XN=+}d zweCu?Qpq9Sk4iQS=GH`er<_QFmrgOjXh9zhNABArl=e0+-7tWL_P(arVD#s0vgL5M zjMUACfRcb&s3Y)sHC+;4;fSl~6y1L_!rYZF+JR)hUQ?9tA9IEN5xG(=cB`F_e*}hN zAhJ84nVOf`D%eOjPwh)8L+#lTqvcGiPyzY^>eQR`wVABUap2CtF&QFe;$RUD!?;oc zyfuxg`?#0_sP7^_&=D znZFmC-IM!y(_}g;KOdvX5pnl8=%?5wY3TP~Q2ClD2@+G9&r&^V4&E0+4!T;0O6Nz) z4%I8F>={(sb!{1zJ+Ept(u6$1wjzX{8px}d`Nl7RRGJE^gIARhdJ2x_+%n|zl+i)#;1h{4abD?W1}O}cwGA`H|nk$(D=Kj{xQbfck&t*!$J zd;(5ua(ZQ`P-;@gl$T1gGU}dnUGPy|ny+1x%%DTZYEj(52(K+wk` z4qEK5rNShAX~NV>;hkHX)PEW_Tz zO&|W70?ccc#af1+zga5^F?6@2`=l-&O7P(ROW_#I;a{unqkTUR>axQ*S{Niy^h6}P>$OTpCgTe=_a z=v%afVZ09D7e68M*s_rdXaZ_YH0;af_~HvdLn;AOgZ@#&Lc;%?4p_@^mA-J&L|m#Q zp)kzK?aqR?l{*fmpg&Rud-Ol!cmR%XsofAaZc?dZECa4Vdn|MA6&VC_Va(VWjzJt6 zo`NXY-ot@N;&b=ttb3du1!2=(3>Lip<~z9n$o_9hq#F7aBp!zgrtF5*T?}0fkq#L+ zl`(g~|B|@33`)H|!_T^fgJ-a7kL?W10Y{brTxz>4)HPFZiK9 zLf~SMNHeVB^C;oC_efcP>aHoH*)Y~oe7uc9$_))uC<^Kyu237VhJ{iqBP;=F1{)8O zbDuH2tZ zrLp1=Ov6_JW3xJ>C)TcF+4tl}dLk7IiF_lQKDp9XDSAI}qhVK!TfGBU#9-qSGcUhq z?xM3k_IxWx$9J{^2^~&W&~;~pZpN+r6`gkF0hlDS@ChE@g;pLUO#Avk#XR4Fp5Wek za6ka-``Dy+u&)>VI3o1nNYtDW?Pck?8oH9v8D9EMKR7Vh*yxrD8tH0a@|4Jm$C_{& zQ^1L|!a^03&O{D+{rnbv#Zd6K;#+&GmqoNVRK!;Qp<5WGP+|!HxCmjMK(v^~M?7h?M{UYO=dt>vBJdO|62b~Of$ran%l zYE*B=tSlD?ei>8o36lQV>a4=dBCaLBqRJBvn-zv@!n9f(4)_7#i33*_m;C$@9wdB=VCbB*)P9vFw}%H@?{7Rvzx?t z!+)naF^_BvJB{SLE(5(>{RJDrdRJc)BR=jG<2_{Gc5GU==saE3vhk@et;ix?+RR0m zH_%r?8o2H2FT3y>>@~w}jV1Lc1ZTl=>L{yJy0!A*$5Pu&RN{$3blpcJjWAU;6c$7q zM?NwUBELBBNxL@3`-jfMt;l#i8LZf}wQXs4y($kP4GCE?0chZ@RPYWbdss4B+m6>a z(fI>8Y4YW&A2-5e)l$^0@B30Axo>_L5r5RFzI1fIXL5?B5?{!S4X8wn!W>qi?8Y>Q zw|e;H##`DktOF>;XnS44p{HLtaon5gx;C%eRDM;7E+rT-6EC}&+{-TsjanZ~VD5WF z4;vY8K;+@(RT0of25#~gWNx23k=Do*RT;{T(J9R2sVpB{@`Vd~0Zw zq~A*AI3`rfw6+aH2(Ft(O7C|uDs6b$k9cQKxn?!0eZ2RhHdeeiUe z9v4}EaT@mIRkIJUU3T*kzdxr4?g!CN(6w!iMuO3Ipucm%&_n@dg=unOpucTXz z=Xz1VnyDKc`OoWbM}f^(G$}I}KD_mzk!veFlf0C?R6k28K!buv&CoR*sq^>b$>}uW zK??!pTkW-)6}p)5OVjCuyi^Qh6z(6>9%UqqFjr@aytER|mHNMI3K>qOTEy%>c+l#wx zkqR9abtUf=VW~YgL#4RY7rP#@GLj`qjb8IL#K88XgkgaSjFZ{_>ZWscedAaDLx*gIefUqaeB_-UiP$U$Lxd5^^BHl{H9snj?BHB}^7ST} zyh3ll72|#lu^z%!chd@`P*yKm0k;ECwm0b7?9&byYbM+DFEEwH6jcO*-P zVfxpt%}|il8NNJFh}KzR_G%yD!Z?xO5OE`2EH55kL}{yLEf~)(j@194mi}XL6b{YiA)he*3WEiAl=?#($?c z+tELYaB-M}*`!DUyg^Vb3@*;RziT*P-lh}3^)#`R*YLkCnWp4qfI}%QpJS*biSm^Q#s;;f+<$r@}sLRA|mZZWp z+n{RkHX3O8pjRWH4>`3XOcX8aDkEBS9>{1ftiO~_f| z{*TXGe+sj6$5TIHZ>8(G7#W*7gPQPN`ACsyc>NlHEN$}p&jR8 zPT_vlAmk`XQAXQSJE-Pzox?I4jb=pErUJiP;Utch*GFP6iQ-|mxeLosGC|fqB^7i( z@&n9?a1;(Mpl^m!FUwb5RL)7K&d{ zwY)sp5UC2t2N9XuHrEPBP#5^3R#Iy7<|C%00;tE1S9%ImnvnSa?c>!5fC0_6BvnoeIn0rGi2zCw z&2k5U50s@8xwIt!YxCzDX<^LJpLi-syN}*hs!*m9O&eu-Dl}&~KTV9-XQt@vNLDE@ znk!#aT8d8e1Z_?x(cVXl?=1{X#XFw3pJ3n4Qt;OLPDc^qbXLM>$!H(7{wWJ*azVI5 zWkXg5+3(FrmRPyfIbw5p06X0@iDpQgB7qPNR8Xi-8{YV1dYXw5SEJTctP5$n%;GPW z!%bF8vagy|W6VL|5f-xnY56Hvz!s>Lc|z;YxGSXjtq3gR-OPp}nCitDF0Mu=;BYN5 zNOIZ|Gh0Fq=fxb`L=ABM5?h%o3Q@y5mX)5~QsT+ZKRqF@*)K>%sxH9LpM=xex~| zAVoCuc7=QFl3?a73hYkwTbh|Mz3sM48KT?zSx(2c(?=C?hXfSM4-f1l8ngyKmmZxMf6e+3sS$mWpX%np_b?siXG?;nHwI6@4dv)>x%B z(5%#v2Wjh}Q^LikRP(e;EY7iT{qyXDw+}IC2LGkeZQa1gW7~ znlS|KXV>nrYFC+8w$}TjJU4~r*k2cijZ7np`Z<5Ku8B)&x2u9P~!}H^!)xK3*L% z)uF$BAIx1N7wQbK7;zjaEGt4N1jv)!5m<^WPXL6*{qt}T2N<#&c%^t;>tWzEcklC~ zE$)|S@(q;UvE-o)2GgtJEzi5G=Br3b;GI%?(3FrG8Mkj7k>7C3#>i;`7t&F(N~e?a z=4b2Z#tn>`0vgPQFGow?uAUiuX@o$lD@r(ABJdm#!C=AB?)sq%q78>4CR*DBE}HRZ zWn)oeWm}*GMl?>UZW6o8D$+tojLZ`5b$Bv|8U8^?uwa3b=RyDgaat~umyf$MT%JaB zw>~y~Lyf3jl`j6n*R6{}^g-YjYHFyP7)x+v=;87#DY7NlwWaC|h|h0Er?xEy%Y2@4 zHnexeJDa6zbq=X<$J{zFTy}Jbj}=LEfcu71ZVWf-9y(QQWg!sX~JIgI+Dsjx+Q% z_(X&bpY0a7Hyqp)^*#pN*>JH)?=n%qkH4?ZN5s3{RTE7^>o(<64X5I^c}h6w z>pXoT>bjm$c!N3qiyK^;AyeLv-+NeT=T9ZG&&u`MarR;($j;qay3 z9VWS^kkDH*6T&2L$7>E5k+;>04CKB$6Bsb7uR!v3^-nxds+;k0X3>Gzx(uaMa60JKT$(%%JppzCwNwjhp3|s% zsiO6mGpe?_G)Bq))6Zxqszz+ZKk?m!rWQ)7d#FbMQv@2ra~GAQnNM;Ac5E@l;t@UW zB8!-QL^~8`(q#vI?y16SCChP|OA`2msh;SiBPeT28uz&^AG9(Muiu4~*D)e-3yXs$ zbtRHn9gEkJNPUQb8~eTe#5U7LSt0RP$P+R_zi%rWu9CI|vZ?KTU25L27)akYyBm+T zA^!uh`HqfmAEu=`%lg@)JgA^dgxHZNBOG`HS%=YVFEzj_Vz2b0wVVz;#VfSA|Kb?7lIe^|MeST{8P{VL?OVh4_4=o># zZ?M(3WNhM&)l!;UjiKe9P&;upeu#8vZv3l6yw7i;2I5p^Xfl?_Y$Zn20w=5Wlg(D7 z(P(I8L7`NEmVI5Ao0%!c(_K<#lat26Mj%Lg!w+>24g6tw!~X<5*6SnefB~}>?kGps zJ&_b9)Qt1=R$jOD)xU{}M*PIFD)P~fy9YL5q1&uh)vbP!h|DV9qUmhQeA|-s!D8{3 z@Ow`|srX|`%G4v5`>C~v2(?AWo%~Rf;}@zn=E`uH+D^C{7g#2e#P`3Ax2{-y-?5w@ zTpT8l5Klmp&yF+IyLZCi<4A;4S%Y~BMXzS*w^?GfyF2{sYY9htwy+y-m@@5l^=5M> z-I3ii)$#`TwD1L@6W&$`t%uxN!ZL@+T-ZP#g(oyQw>fq-Mxx0i|0$(B5n^=Lpuidn05tfO{3sAoA zZnL!FF;Gmd+aW|D0!7)cgE$7S>A=d6qwHTjynh8AK)rKneMi+*srz08}+DO;klc~ z9I73ueOS8HpE=)EI5XDIaZd!cV0B+sVjts~9a0i@r*^BCgpcUwg8FiN@K|m3PBig% zMZrEO{^@(ShLj955gnwiLauRppx5LwK7V%@ayCwx?_yXvk^&*_3(>^t*GwAZ8aF5H+ZM<5jafYX`iCr`m7J2EIH=bi#->|Nxn6Kqi;^w6ssb+y(I*!QJqDyO}70%17 zif&8!IzCT^t3?<)WLCT6uyzD{r^@wz^vH7#|7Wcj~x9d|v}66dGOZ z+X4-WVUhhSkCS*yW?BmOsb2JE2dgSwKe+!1f}!4-zsL)IZCmZdsFlwsgIAGox3S<) zmCOPS;v_@XiIRnO%-Ue5(h!qkuC{s=z$`cJ0+w>MA4XlFLtBmJ5?3hIhvlBh^y*Li zH&Zv+NwEEhB+N0cguIx%<_KtmZaIvVkYL2y7zGG4PsXZ0gee`8wdVljC((fLs z6eo~;{!eTHn`bRz5K18_T2&$Qj1rs?TOd~HB9LzLG-o6A**I7OcwGfqZgS!MD7C@63zh&)Hm3T4OUWU`Ua_HMDm~GVS7Je)b;2_g81&&*9 zSx+78#g^-bhlW`K9Qt}@_SdE+jbtP>? zskq_wg?ZX7B;~4*LMcCGHM#^3Sx7gw+F2iY_$P~Uo}nhS`G#^?>sIHlxSJ&{T5^$d zFS0G(MdG1vp4^FbcD`4a5yp+}nyD&4E06W?c05vfzhiK>HPrqF6DrC<=*{mFtZb|;GgBnY70rS0`tLoJXe zZaO4fID(#VE@|RE0B`-AZYf=_z+7k&bM@G%OIw`r(}M)sO!H*iY(>dd)Sgnk1eg3^ z)no7`;t}whv%B)luAn?Do$6B;_k-=l<;v+9vXjlr=Ca0%OA#}TVtR+wI-^OXvfDqL zPZQiiZCa^5UH%--b}2g0QaoReKl3w7sd&r^H9;0=i2`6acbRp}6{xzeuEIcXi3jQP z8^U2)JxqTw!(DH*(eKFX-GP+GUFV0mOPTR(Hg)SIDgGdPcJ@)ti6QMDo6~jj`nDrd z+ZinHH48e57{E=e>g1iNLc~qkGZ1xE2T{5Ly>Inuta`O}y2e1*NAi2Cbl1H&lBJN( zeRHj}dWSSJ%zB-1uP)PA>s|XA48XjL?{NU=YuiJYC_}^nyx&_+cdjB*g0O*5i4ufH zo(Ruze=`MiVNq{k)maF%H>%z3d&<4tBcO;@;b$TWK0bEssK!TI=w5vrY$CaV{6pN` zkh`}r%I2B!iN~seU(E5U4R1Yv&~9Pjv3Qq^uzWsuloa*Uk<;(` zyl)b!^fgoe?XRBew(3`!exrQw_C8V*0S8V-4}%WY?XVYY=d`&*s4d1l1RGflpj8(H zV}N*8ysJcgM&?ALerc=QdirxdL}rPPM`<-0end%B5zV zAg)h=%%eA7gVE zWzmgW>liOW#CFb#vYAE?2*n6*Vi<-`1R>{7cglYdS;|!4kF1plvJ{&rn*%NM2Q(upy5%7U3ZIjPZto@@is15ayQC>(o?b zeV@sQr%7Lr=;fpq63*Lj0}H0nOv(2BvQ>!9P2e$wPNkLy?Lg0q&EoZPtH4RBJnOpc z84W+_L3mhE9H>D)r0eq`hJ9=eDNI((lMs=wDMD+{o1*k;() zp2fe&?X4Mm++m2EOU1>Y>GsaKiHx_9&9Gq_?2jkcz@txdIF^5kGTU>|S>y<0un_tk z1T)T9+jb-qhjGYUn6)i~uW8mjG8|y%h!(`~rwy+S0}-GN)8`ts?THs3p!MG+MJQnZ9T7{9(BJFtVS&m@! z#rQ+^^->*J>C_miE(nRrb;Bof^AXNXXn6_Izc54;x^_2JU7ZuS;%H0{3@y}3G2OeF zOXHWHYEU5iLnF3uc}rUqJ4w|;$V-9i1_MXDR)}M=ncsH0H|p*Aan^p zr*MP{GfCg}2RXFdOMBV)>r&7ck1g=c5#rWLGpTTEd;7-kXZ{0;2|ty+I!Y-@P#}pWVBQuNYzcv`3~9QPX}GCR@9|nD zj49yg@<$ouZ;$;JhBEJ|Q!6$6;MpNugq)hjn8_v=t^QPPA*_OvPF#~((!CvD9f3Rm z^h}loJkQ{|HOCO0szpK6B1@f}Sx%Sf$q+y?(sT(qn+%}j;BnHO+IpeaDZzRQypOKo zEneEVB*3|gQHcYO%R~%DxQt3!io1@3NR={ehiWw zuIgexC1TId#_{Ir0&gVoIa8&{_lHkSk-5(!>pZ0iz0%8>92;kQ3(5V+9Je*P=?AA~ zzW#|@+M7N}PnJ8l`Qa6O+cWUR6>t=nutR${vABeyX`}OP7_pGeMH80;)_XCbD4(G_R za#u5aN2>>{X8+>R-gG`5-0muanU?Rci`)%E_B06xV>PMlwMY?>cwr(rMl;d($w=j@ z9U|(R;sL%yqsa{mr=t&)c)KMI7nAMX-8aOi{YT~9woUB(*>=>Vgryk06z@`Nf7jEq z9HH3fvQ9PPcchtq18Y5L)Q0PLlXUGMw-9=eKA3H4j9~a>vz1wZLu08sMYY%P8T=M0 zOmCr&`ODF9cj~kvl)Lyw>vSQXqiUaO{BgxTMsN-(tlTg1dx1JaH0Us=#pJS7=pwy~ z6M;)6AvpcXZ&qi#-~QIqiXObjH61=M_ocvoqaqGQ_~x_D+e2X$1rR+a5}A`8R(a{A zg_v;xQ(W7B6Pc5@$>j#kO&LjgHdMo2O{aD{xZ$?--bU%PCA=%)?j5o4OU8Fv*Q_Cx zNqFd*M6&NgSPp|NS}%sj>&$A^lbO_)E+cJF*fx()O7x zdqtV*!A2<#hb_*j8+pVF8hQk;Sl#9D7BwOH>W)SdxHE&KcfyRT+De&uI-FLi{Ae0H zn}j~ZQ3ShbToc%egwDV;fE)h9!7XN@lXA3rS5%N z5@g!bL#zppW;DK+>BT>)kj|8b8gZEEu1uS|qBaEJH6fOIblDstuA7)5Ii&E2XW5Bl zM9JtqEZwJzfE{vJ=@YI2;s=PaGpCC6kDqZQ2UGTJR_tqBo+ZkfTVJW}yFP8XU{D z*Xs=rSOg-c_cs>R;CI$~rhuU9u2~C6%Dwy0dKtk||Kd5?**NLJUiHCctGoSFj4k|r zRZ;S=TDPG5n|WD!r>*Uw^VH%5M2+9&C7VNiFmCt>Mk z$=|MZuL+JQ+4R}oMcDBMBY$zz&TbU`Ay5^F8P6G$&ECcsue8-?+A6XrK03fuB5HE+ zZoc!^Bo1*G4lAlU29m$?T`I%SPI(Wl49rbj_YXaeblSZ077qC!GIP# zi|JdHO?HDof^D%;c+=vuP7XqP9d~l3c=obe^3^WcM|D{?YYa8c8$>#q{EvsiNswmt7txAHMICA=Sc)(JXaCVmId z$H!TL^5lW%gL_wRzp^ju>8}^bL@A*+8>0?$XOj`Szxg|l!15R8h4^#;QW!VoQ$y{Z zWq6q8}+WhmrCtKN$ZeOm70eS zms88U5gtIQ6A$ZxuXNdfZ}`GB$D|d)vrCX@xS6A#>mt3xZgDlSZMBH2xzP z9fh6PTuOx<{|Kn-NiuiS%{Ls0Q%hUZm`1oH@vIAp{~o^GDfWSWxlAfz-Kx%xO%85U z!K}6FVOx2K*TDJ1vOVK>0eIJy_dnlqq|D7DT6z(#k5q{i7RzG9IlC745I@m8O0xoi z0eZW}mfG_`=avciMeVZ7|2d*Bgchv)h0{(a=?DMPA&#YR3WtgtTeW*b9=kV4uWzZ! zL`8(yK3N!@mj7gV;HW)mk=3x4piTApmq|dB2Le%^VMg;w^8I}HVnm~Z9u6iWCpXH~ zVS2ESc~gRTEp8Yl7DJ#ro=r81Jng2ARRy`cA3E_4B0(?&pxE%K8b0{5lGqUjYNxSd zA}x8uJf}l0Z!axJR0J)L>04h_bpzF8csDF=Q9kDXZ$SY_g$5ZAK_bFoA7Aw z9)*VrX>>A6SE57TxYLy`CoxHymbfv>j=g7De?KNs=aFO01=sfv-rfQ6^P{O`ZoTCke(yv72rj1)J;V!MO_!rFaTuFUGY z4ov^Z-Zk9!a&uXQmy4schGO2=7JHKpW9=ntaR=KVkS)B=;J8F8>FJz$!_yU^7h{yK>gKBnZmhNbmm7*Ccjk z1ooM%RSBq+o6mLeNleBUc)22XJ=$08;gDDT#3Ka##t3n&Du2&6cr|7t70NTxQrZq6=6D10TXgn03=sP&|Rp{5ekR0Vyg9142&l*vXx zbzt^=2c@gev{W98rn$ge zMUy%IsFdxvd_*1aFEGwA!A^?q7k_wUwQbX>+VB=!G^SQ#LV{9|^#x|roe{Ac>ITr) z5C*CrRhn%{SPeK#)_GN^XvIU*0)_mQK~ z^~Y8h@tDDXJcoGZ6SC>I-ua(Yyw|2QU=DMev-7K6nn!RV(P{o}Uf{Mn`vk}Q_OKMc zFP`?F9f?ffyh-i&IX&Seu&)fMYT(V6Yx7i>)kS3cSPHWiEA}iaAz1#if64Q}2udSS z+xZv0@V{~+n{a|nvP9wj+LD~C@w*!L^EmA#`?|3a{*Uf%Yvcl_n8L<(b1c?>bkf%c zwLwEgs&~FUC8?J5AAjP9Ks-S_RvMQ}oPE8@4M9TE(jC(7G}-cD=E&A#A@sX6-0SWQ zYpGcO(`!L!RMYvaF?>4O>VN&)f3E4>VED14W^OJS8%qBJ@Bj04T?%MM9u+MVRR33B z|Ftpy_f!9uhyVY_2AG$`-?{S)4EMi{m-jlJqa*a z&}8GF{U4kApBk=20t4IA`Q4I_dj7wL<>P~nwC`zxibHkPKgRTbUngE*fF`C%w^sfC z@U#Ev?dexvjJ4CLsS?Wn%)@_VI4Zq=p;>qJ;{Sftzb;Q3Cdr37t64eW#u5@#3VW_} zsOJ)1*T7`V4dPBp^lL6QyRx3oE~V>Pae3v-ggV5n)E7m$YSQi^q2i1FR^w$Jk~urs zrNXh^jMcw^na!fv6}UF#f5Y&hBlX8S{W-M=LVpS6XkR6e;xZU?S4CEO zPU}S9fpwt-X8AM>A_Jbb$kj}6BA70$Ke_Z=hNo2>m%6@9JE^r_abA`M5wUS0@Cjf4 zc+uTVi{vo4O5frnT*fqb6k%8WvkBwisu@Y@&pB5P4Z?g59|%VWv+ zI6T8ZVpr92)deN7`;oX*&NcMr*75DHydRs5ckouLsGgy;!_etj2Gp@u9UiSSVP{s3 zSjF8rEFO1UK-V9`+`fiYX!;d_^zF+Nc9er=ae<&B7PU%8HvXk1#I@9hu)j5iJ&o+q zflYbNhX^lPGM%6;&2*@`C)~lPiC?4GNpz0hSOrg3$JtiM6n^Sqfo#VMWU%5b1Nh6=`?w2?Gofkxs*#Lc)i?kt}^ev(kuw zFR)5_91u#HJL9e^(+c3#)1&441yC*TIjc*|&hA8yaHleQb)zl&Tb8xB=|0MN8E4Zu z2~2hnyZrCu`ocWUI%l@ANPi8po9ui#288!MQxbG~wK9C~j)8ArU>dqa(0>p+)4vIz znHdxs-xFE@ZK)+j%#VAjG+kv>-nQ({V8;O6W8du74%lb$V!)6N|I&FY?WsxvPXV~~ zn16$B_cB4Gu}Gd@wSR>3bkF~^6JI+DC}K&!Y_$U^FCG1|Ucb8;Al8>j<^uPcJkJ|B zWRS77RWdB@W6Gb7hx&61S5;4Lh5phB5HOzB>gmd+Y7+n5w6ELMQH#@_y}YXc@`k`| z_3%|8etcXjq$C+id4}Djq^!B!2P`@&J&<~KwMXk{>L$f4(v2f|yVa~zT=EhqK*Om~2j}?kz z?6_x16X2{S@U6}PQkAeh^ccu!+~y!M*cyfEV`Zq@5;{?Yle z-g_(%#q)ZDJ@u1hXIKB3SPNCB6#w5Kce%UB7TleazhQT)_YjRYv!k7}X+4DOXjvQX z>P9Lz~2%dOoZo4l|PjZ?OY9@rrbaU~!Cuzv;UW-pUK|^_oBpQ#< zcb#I$>Y_=`mQ1`YW@*IKl(mcdyzp{)PcuAw%DpEKQB1L12P+k*P5x;mNw;{)k6pB& z;;Af{IjD~Yivyewmudx06LX8W6{7HhUwvgFR?tqPfR#OSsd3VdDZ=p)>E4OBr}LAn zj?pRH0f8t)ixD(lT3v%_3f_=HVvKEkY6$7t2G8lA!j-#=(in+B38U^%K4Wyk!WAa% z&gqx*f9+xF{&<|{z=9)jeaF*rwPZJ@Wam(bPooy=Scg8hl@!R8mJuVO`xCtuRN(2A zgRYTqd0D8awn@-oUQ_Bd^GDOONXT4+m3kE1@Q$0UO4u1Vz$L_*3IX84VExW{4!DW(Bs<9))-b9FIq>BjhcLs8i#wV6V~Y_=|GD@lU(qD7G>?gA0Vj4{7!^1&=DP7 zZW2=kahb>03U_yryopCe+baxA$mYBWM;;pV>B%v;-1)*KA==5Zz&gZ2H>guDQS%Er7&G{5i7iS$hT&%3^j<_YIzI zPVE}8l2~&L0vdXu7~3Co>Wg*@14YYZ&Uw!d1wkE(SBLi4T}`rtj^ppV4>=e2mnWqJ z7gU;-)1IL=aiHiFS}XCt-WKvG@foc^(+M&6Vj^QWwt5q5^w^IQtvcGI(ErG#2CmY~ z?LSqz)=+6=w1tD2ZtB)02|Qke{Qxtp%I!9PIdZAX~1h(2_xl)h5f5rT=gP(oTd=4mM4RZyf}S zAGr|6bLMUxV8m^ocr6OwSa61vc!+1b-Yjyj8cS#PGQ2s_Krf5(`9GJexe~xLp&}W@ zP9mS45<(a#_i2TFrDxWCn%Dt<=(VD)=x4_eNLG(4>B~j-IV%Ch^XAnY zuK~lC2@XTQ!7<2O%|QqiQt@+Rg#3}C0 zYv&ZwFYNJA6shl5o+|l3T@-!Hr4~=D?_fzWWH_8l11+O6HY9`B-?3$iBYraF3(ON4Kv` zr&2}>K^o zHNFLdFa^#)!*{x1%FOZ4u*sCKG0vSFU?T>)>9X~bQ~lTYuB)SJ4*~bbEs$m` z7~eZhR~}csS9aj&t8Mxn#j8TN-jfxzft)R>GT@vzJ@3}m|BR?1u3LXAW7_g46pCW# zUYxZMR9k~##dFB85h@)?0xflO$XOz&r_P`&Ts+b}nCRMbW)M z(>;)?p7GHdHH^>eCQEW%-Cu{t4;26E_7BzRSb4d|EUv3T?IB)Xq`j=%9QvlOj>&w9 zj)%M2uM3{h8~4RuK?Rp+$eiy{7&3&s;o-$#Iq_QrL5B=B!Q)@SEq=C~cBRL*EOPs| zZReDy{vD1v^hd5Er|l&$+i@3yPJi=MJq=DBuOr;;IhpSQ^SK$t@rZmCkQOho=KYe` zv-)2Fi4<%Ii#ZublXuq7mKXBdY0W<~d)gZA7fd*L8KH<3!e7XqxX!K}ymD|dY(#N! zHC@ig`M`vli+wlcOxWiXNAodsTrNmZ28mh9iCklc6-C1a_P zbe}iqJq!6;hPL+WZOZ!P+WD}$MIi=waOE+V*IvKW-t3R)d1+7SqV}iis)Tn>PxpPl zJ!jf1Fyhi#BI&L|?U9q9J-eGr97`h0L}gqJ9`J{#$zu9V>yksFN8^2A$o<2@+^$+N zkYpIM2U$XO`}nW_5$T!Oi=Bd{OlJ~{8sXFen)dm`Z`W6|9jY(x{f}BL^)$E!1nRyX zBt{K8*VGJ?w9DH(E*+3`ZZ4EDhB})r+eYW-i{;3^U$8KdLm3OMhfoc@2GvVeT@l&` zu!4uVJlF!B%G7Q!4s^e8G7?X)b_~x zxKzAv#SapA4}gJ0RE)&h^@r5sG5F=qt&Q%MCSWCq%C6$##)u`G^7^ZO&66jt)a?l$ zTg|r3{;Zwn<-SrC9y4cp!Eb9A>>5IkweBTZlON}Z7^sQa}i*sAIi+0S* z(oSUehX1Fw>kMjYYu5;(f`Ig9B+|PQlwLv;r1yI07=Z|c-ULGL5D-M93P_QzNGJ5( zQAp$nN^b!nQUZo@<56esoO^$LGvA*5Z_k>&)|&mU_gT;L>~}T86Y&OP64kphBuO37 z3S7ESt(tH7b8em&lA%M%28k_7UvKOh*;2xt1@O?i@>VzvU-j*yS^n4d+k;-wg*T2W{FgJL#dNm@rE_`F%- zF}@=Sn1qSS$`Y7M!PO3$%!88-fueY4SsO^IA4MGn9L4tqU8M01{}TkU7d2ncObekhuMx0oR&(y^(8DoUpzB+&@y|kX=5( z-Z37@*4Mh%iuT>~sVB8VA@T?0_Scsd{=#5m&^k`*c->3{nYMiIR)cUefHvyVnYXUA zIELt@&9j6JF8#py$W*uQw}3M@unxr}HXXDz)l99g+bBYwOeH?HVbYH6Rz{S>7nRie ziVWhQjmm!-mwry?=u{qUvcm z-8<#{J)0_U)`xqdN=8Azf|8hMYxYmR$i|WbR&S!7wp?B=YVxPIsoFctL+(O-n+9k6D6DBK+D3=n^sUu+SczjwBWB33k; zX|Bj0TghU=N@>(wZ^BEvHa^xM&J2W?`p9O-j@xz6Hw~Lx;nTr{>LGM$K5_#?vhFtu zWah!+*IG2d66}RQ+S1j<9qo#}EgtRymN#`w;uzCbLz#fB^>fHMpHMsIaz^!;;JKlX zw<4LpfJ2#A9rW&If1J$DdiRwv75luvFPk_Im%#n?c!7j7%g^}c{&jr}UaH7yrU8W9 z^V>h4JV2@xW2betvS?WGHp_{-u20By3N2KZjbSh3A+1&+9&CqcFB}{FFC@|8WsWe4 zT11nw51-c4KK&9tb)ggQ*_PH77?p>%teM=1x9{5Z=wXqnFL_+LB=3_#Kcna`F zFT{|B#KRw)7_OT*$-qTD@?jBX;==YGzA$s&YRx3G(lPcV<;9#DpP1l;pxG)VY7XVU z8|`FQF`Ge#C`MN8sV z*jRG0b+j{myNK@*q$buxmX2brYBsRtuJO~Ep~0+-AI3*s)#-9r@1&U!wQsu>YbQ^k zrE8$G1Ffs`U_UL*m2zhh&exlGP1EPNWWiAi=`=$69e+LX?l+JBhh`&aZ_+uTn9ND4 zyUC>gn=t#s%KH(t#lt(Wj9>ocKSbM4&T9ce2oWSjPTy}$=?^nTXzBlgASdpAf9(8k zZj7ML5wv3TNT)gA--KF9=zsN9-fF}W6)IN)q6k?F% zfxy$OVt*vxt@6?=lZgYL;j)isvN0Y=FK)Yk%oKzXPZkrzWPWEpihueZYD3%P?S4}{ zN1_0(#~O{hOdQceO#@XmFmjJUqaO?OzxeFe16A+cNR8W=TIBY%z9b${F1Y%|(g7pO zZYKJB{N9Q`c|_YHJ+7as!5%NMqj+_vr+%PuQ!VyZoB93g`zl|n8qM5`P{f+&cTRCH z9BCw8gP{}oYRgddjj!lfr(BnC??zOWIZa zsGEIK#~SI@*(@S);XzV<*W3BtE_ILV)=F1g7NhLixHUuKBXP%e`E&mA`?6pH9D!=U z+?Q^@?tiHlP(`Hq!m*3-gOjm%La|ia59;%G@-~K|c&-~t_GTpIBjkIFEola3!qxGj z*49S1HEN90KKm9?e{NV?idUcM4lf@GFsolKRQ{&PUEIV1h|W>Ql_ug4w%e*7s7jKid!8;G&HLvIWEshZZzZps{|D~_O9GFgW2v*DG8r1BG*@x z!x`jg@sc0Be)%;+FQ3K{A{-9YMiGpfglW7T*1L(YTeJ-h_4uGXvDo~EjHB4;y>>Xa zFW`KK7u-pw{-cQBe^2OiUpFL?f2yUy?*z*43lu^da6caHs?!}+dN)K~aYF2yQ>mgH zk!P18M&7fFWb#934752ryxohzv1_Co955%EwqH;i1gebG(BOv)6l=vo=T(Y>GpdC?yR{cDc_AZt}{O4J>QO0JyhL0gt_k>s{`gCmLBsK5j`}5rK4Rf-K;4oi6T-|-+*L*=5M09 zlFlw!d6I7Q22Zzpus;Nw^e8Fxqpj4aS$hALTVu0IO}=%HQC~)4xqn89a1u?D43Wfy z%fL;zCznD}+T2J(Xwcm7QRTE30v~vED`Bcc z2v8x|AARePk0#-*lrx35&!>8Nwm+r>@J|QKjjBQEpTQc6pspp2r^T>mrc~VSL$uhl1dy{o^Y-e;68H18xw&{3C%|C5Ki^W>nh5BLvub*-LhB_|xwG{2 zT)N)6wo=OuMsEj`W)(`T=F!tvehMojqPsFDaov!%PgF3zh9+Id^}(g{gDL4b^n^T* zh2`zPMs)4Yl4+Zv;o5cgF`fe4&GlUdO{DOXHfDVYFNCeuslQA8A>BO6s;Mc2G~@X} z*H~qpr@7IJ;t(^c6p($#N@)`-_lf=s5OOuSpmsR&*x5Prc3>5`$^2d=Q1UEEbDJ`! zIkG|qZEJ2U3Pr388q=kpilH$FnQj;|t}nUCot3cOsf6hR~NQ^^iJe&J2!b@eFmdmGRUJsXXXc3R_;&3T5up{~eP_IAWz$6?0 z-kRrr?7eb1{PUUn)Db0P0|&AGvLINbi7IwH>Dd2sfU6UBPYPG)hp@{85$X%IXraF6 zim;H7xQNW;^p#flE#PywgCv6XWEVif8E*8F7v4eVoDyH`j&N{|oyoV~n+tz}7QV(5khL*rA%5}_=5Vs9FdA?YpjB&9 zFh?6}y{J7s8*O1^g6igBE98_#T=Nsm=;E{(hOrU~Fq;I@=5I?U?~@j1scpc7&p~Xl zu&ieTf4T|2%nGi@>*ku#{jxoPd2Ol4l;2-n6_J$HGI{SR6)7p~HSM{*2GTnGQM<8a zQLGTdY2kM~CtH}~tmhTkN8*#DLCYF#{mnkEPsTEXt0mW6knP)+xoJjJn-_#5%@Zm{ ziqqdj-rCms6(tMG1mWzkpue!}S0AimL(QB7t7AXhyct!-D9`y9RKQ&wVAJ>DK}-F$ zr$`eHycooa(a~W{Xu_H)n)KN(psFzN)5(ENG$;N)NK}EB_LWzKPUJ-4|AUtE2tYd7 pag>&j_WpHCrxG6h2dz3UuJ+3l>#Iy}2@nx3^#?jC)k?PE{{sMLEXV)= literal 0 HcmV?d00001 diff --git a/docs/reference/search/aggregations/reducers/images/double_prediction_global.png b/docs/reference/search/aggregations/reducers/images/double_prediction_global.png new file mode 100644 index 0000000000000000000000000000000000000000..faee6d22bc2b8db0aee280859181b0abcc2b2673 GIT binary patch literal 71898 zcmce+V|->yvpyViV%rnj=ESyb+nm_8or!HtJaHzrZQ~C9+2=gZJ{#}n_x`O9>t5B> zUDaKBdn|u<2lRcn0R5lks_#W4O?PnZnm@-X~-ttTdQ!Vn$vVMz?`;u%dP82K>A*v)4 zl4Lw8AAu_LLpem+1EKu#^26uj@|N$?H^=AVu)1quCAaHw#Vgl^9tgLxC@u~Nl@=&K z(7@OV)J9i~+)Q^(3kVq7AKXwNeh(kJz{J7==6$%HCCHp)KX@Re=0oH1L+gl#M|d6B zp9bmd0O@rB8zDz$@L#@c^xZnkp&`N_+^JbbU{G-KJMyZe<<@A3!hdolH95u z5Lnj-|3F+r+%vTe(Dwx6zrJ5u*z6$1gaA5%DG;q5=Mi9w792hA7 z)+(|v1Mi7jOX67fz}kd1=8*>K(0xw+80!*JZ$p30B(1ZH1~1I*o7g1&-}^x`M%uG!?ilAd7oq z5n0V&p9!6=J5oA|JYsTYZHIdUd4uPTeGeuw|X;Ksm_$0!YJY>BrrXd_>MTtHMq zFUDr}y$vYscwE)H`*bq%K@r4G{nQ&~-vM0DKC!}R!U)2sz`(*FL{LOHM<7RdM&L(K zMzDh538v-A){;FT#h^GsCkMIq;q)O$N{;FzQ6;G+btgF_wI#JsAx}n@1pyrx2MVQqR9ejUSQ#9e)0QmdVEH48fNBXI>Fq?;@14sJk$c?81u;Zh~^07SawFI)Ju|G zIJOX@V6pHqPp*i*#I873-yiD<%NcVG3mH=qa|Npevo5MHN;Jwh$|8y#s{`vW{V07e z{VW}xd7qKj7~3o)Jvc2o4K!^w{gMHJv8LX>&b-dOzNl8Y{;q+h!Lin|-m|`>UbPOs zzPkRp{bZ$s*EDtTmS_#p{BN)5UlkR6>J&Ls~4 zc`lJ2$0l00ZFiGSU&op2;AQy=>!{q8+QH+_)Q;VDF2)uT0}2O~JP`x2I8hnR5sABQ zt+2g@xcZ3TQIUM~bQD8HZM=P>eee|yQbAbA2vIz7Jg`D#S$$cLd4PEhrlhpsxEPIG zr5G+*HN^$BF5yNCPJ@<-+w?`jdOxx=_}cDD$ExcQy6kI zO;k42WYk{~$q@~bAd=OR&yxM4QKKoNrAfJE`pS(eF+W(!D$A(KLw;;1yp?~=Kd5ol zu`!Lk!k^+L^ZGjH_9JnwrY!byfBm%m@n*vTjt?$DG=9iS##aVmLUF=^R)Lnk3P)3~ zvAF5i%41Wii?WNMbB2q74(Ph_x-G9KFLzg@r_oEtUDrMJ-60(6ml#YI9K$a%Uzopy zMz=<-MUO@wL?=fXMXgGgNn16F&x%M+O5Fj)loFFuW z@B@F8k427+C^bYVA0TYij~TC)U2PwKg|dA~{2;pb?KP?~w$m%^!hU+Ynxd{OsqEPN z?WjU*T5K)05qF>4^Yy3$vnq|cPQFshdW`YiD15kN=gZD$3`z`uzz?4u&yUWR=qFJ= zUpzG?DR}xK$!M~1-H}@`3+mc`@dlY#Uxo3-J9e*##wdJ1h&^mEF z-W_1oFs~m+9H&kiPC;bjjZTkHaErOoy{w6pXqdX7+o@VE2tE8fE9Jc53_gTD?4OD~ z%s5!kMQpWy=zIB=8yV=a;-T`K^-TFZ296K@mdZk}g6@b`f>y2j(Dix~@iX~^IKLpv z(Aq)dIusd@5K}R#HYT+!H7m(p4X%Sj-#}+Ytwu%PGwH$G1)_qa%jng=FM27np4pU) zoUB%!R`J_5Kx?~&Z^N~V*LiqPc1&l#Bl!{Ra(~mTHCGc(MNDOC@o6qI*i#48kgXO6$yM^D4 z`iFy6sH)g3$~`%poJ#J33zgdfkCM0QdhxW&M_b=5U_D}eo>!8$sf()X-pA_&8^HQP z&^7II?cQs?wVd~?zZthaJ!X))eVvL=Z>9_0`ND?GSpUFv0e2vCLUC?BiFv3#qczi_ zvKz6BuzB2l>S=fTejfxFDE&44>;A_8!TLq0eItz~T?{QaT@`KVyD>ncxuElR-AOH1 z7kXQa*3>%UdfA7`&FO^c)`2Nhxb%uInty!6%?-#FfE6fA7Knk+12@2v{NqJO-<T;a*^g86gCo)-UaSZ#^0Yzq0`<%#3l3Zk zsaD^svu&9@uSq#_eOhFz{M1Wje49a#hUl) z_yz4Zx@ys=6RVnb&R6Xbo)<4>aF%exQA8-fN}$aAx6YAXDfCsoD-5fn%Nk3NTv@J_ z&YlOawws+}H}6e>y~zTGPKox5H?E zvpJufA0IR4&i3T$jH`3Cq_w3rWd!m~C3OJ4|NQpYY4qF%o}8KildFT@%B0Am%cE5< z52I&9g+jg9eW8ftL2`PKJ=$Gv*K&L@vfk8&;rU_h;vwU6Uf90yasY{deZgO8Nn=N4 zOXr@np0MKCI8n2HP-h5fH9VSlUCNsso7?2d>$x$lGoQGMvr;-Mcjk9ZY}acgZve&2 z!lV7bw3B=Qw-nW*9C$Kv@yq{}l?iv}j56oaxNBOsiiX}C++lqR-(2Hn@##c>K6Le%Nrd8et zMp;QZ&I=zNhfcMp4A-s$J=7x99VAsmMr0SXNaU$CAFTo@M^S9re)0~nNew<}BAGgo zAF|JFqh9mDyy`A7T};@f8NiyyD=p5RF8rsvaIIfVqVJ;4rPpKv$Kf-FHA&TTY9;Gs zn*X4aHr+NE#Giptg`0}|%(Sv{KG z?$aP|;2;Td%xP>a&27D%yY{WEq_vE_)^_h=zLO8i(Yxe$pl_JmY8T+F^Zs@!Wv`m5 z**po}@ZZRrk6X2W8XHg}7c