Save memory when histogram agg is not on top (backport of #57277) (#57377)

This saves some memory when the `histogram` aggregation is not a top-level
aggregation by dropping `asMultiBucketAggregator` in favor of
natively implementing multi-bucket storage in the aggregator. For the
most part this just uses the `LongKeyedBucketOrds` that we built the
first time we did this.
Author: Nik Everett, 2020-05-29 15:07:37 -04:00 (committed via GitHub)
parent b15a304155, commit 4263c25b2f
11 changed files with 465 additions and 288 deletions
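Background, for readers new to this code: `asMultiBucketAggregator` wraps an aggregator so that a fresh instance is created for every bucket of the parent aggregation, duplicating per-instance state such as the `LongHash` tables this commit removes. `LongKeyedBucketOrds` avoids that by keying buckets on the pair (owning bucket ordinal, long key), so one aggregator instance can collect into any parent bucket. The toy class below is purely illustrative of that contract; the real `LongKeyedBucketOrds` is an internal, `BigArrays`-backed structure:

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only; models the contract the aggregators below rely on.
class ToyLongKeyedBucketOrds {
    private final Map<Long, Map<Long, Long>> ords = new HashMap<>();
    private long nextOrd = 0;

    /**
     * Returns a new dense ordinal for (owningBucketOrd, key), or
     * {@code -1 - existingOrd} if the pair was seen before. The collectors
     * decode the second case with {@code bucketOrd = -1 - bucketOrd}.
     */
    long add(long owningBucketOrd, long key) {
        Map<Long, Long> perOwner = ords.computeIfAbsent(owningBucketOrd, o -> new HashMap<>());
        Long existing = perOwner.get(key);
        if (existing != null) {
            return -1 - existing;
        }
        long ord = nextOrd++;
        perOwner.put(key, ord);
        return ord;
    }

    /** Total bucket count across all owning buckets; reported as the total_buckets debug info below. */
    long size() {
        return nextOrd;
    }
}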

Changed file: REST test suite for the histogram aggregation (YAML)

@@ -492,7 +492,65 @@ setup:
   - match: { aggregations.histo.buckets.0.doc_count: 1 }

 ---
-"profiler":
+"histogram profiler":
+  - skip:
+      version: " - 7.8.99"
+      reason: debug info added in 7.9.0
+
+  - do:
+      indices.create:
+        index: test_2
+        body:
+          settings:
+            number_of_replicas: 0
+            number_of_shards: 1
+          mappings:
+            properties:
+              n:
+                type: long
+
+  - do:
+      bulk:
+        index: test_2
+        refresh: true
+        body:
+          - '{"index": {}}'
+          - '{"n": "1"}'
+          - '{"index": {}}'
+          - '{"n": "2"}'
+          - '{"index": {}}'
+          - '{"n": "10"}'
+          - '{"index": {}}'
+          - '{"n": "17"}'
+
+  - do:
+      search:
+        index: test_2
+        body:
+          size: 0
+          profile: true
+          aggs:
+            histo:
+              histogram:
+                field: n
+                interval: 5
+
+  - match: { hits.total.value: 4 }
+  - length: { aggregations.histo.buckets: 4 }
+  - match: { aggregations.histo.buckets.0.key: 0 }
+  - match: { aggregations.histo.buckets.0.doc_count: 2 }
+  - match: { aggregations.histo.buckets.1.key: 5 }
+  - match: { aggregations.histo.buckets.1.doc_count: 0 }
+  - match: { aggregations.histo.buckets.2.key: 10 }
+  - match: { aggregations.histo.buckets.2.doc_count: 1 }
+  - match: { aggregations.histo.buckets.3.key: 15 }
+  - match: { aggregations.histo.buckets.3.doc_count: 1 }
+  - match: { profile.shards.0.aggregations.0.type: NumericHistogramAggregator }
+  - match: { profile.shards.0.aggregations.0.description: histo }
+  - match: { profile.shards.0.aggregations.0.breakdown.collect_count: 4 }
+  - match: { profile.shards.0.aggregations.0.debug.total_buckets: 3 }
+
+---
+"date_histogram profiler":
   - skip:
       version: " - 7.8.99"
       reason: debug info added in 7.9.0

New file: AbstractHistogramAggregator.java

@@ -0,0 +1,124 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.aggregations.bucket.histogram;

import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo;
import org.elasticsearch.search.aggregations.bucket.terms.LongKeyedBucketOrds;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.function.BiConsumer;

/**
 * Base class for functionality shared between aggregators for this
 * {@code histogram} aggregation.
 */
public abstract class AbstractHistogramAggregator extends BucketsAggregator {
    protected final DocValueFormat formatter;
    protected final double interval;
    protected final double offset;
    protected final BucketOrder order;
    protected final boolean keyed;
    protected final long minDocCount;
    protected final double minBound;
    protected final double maxBound;
    protected final LongKeyedBucketOrds bucketOrds;

    public AbstractHistogramAggregator(
        String name,
        AggregatorFactories factories,
        double interval,
        double offset,
        BucketOrder order,
        boolean keyed,
        long minDocCount,
        double minBound,
        double maxBound,
        DocValueFormat formatter,
        SearchContext context,
        Aggregator parent,
        boolean collectsFromSingleBucket,
        Map<String, Object> metadata
    ) throws IOException {
        super(name, factories, context, parent, metadata);
        if (interval <= 0) {
            throw new IllegalArgumentException("interval must be positive, got: " + interval);
        }
        this.interval = interval;
        this.offset = offset;
        this.order = order;
        order.validate(this);
        this.keyed = keyed;
        this.minDocCount = minDocCount;
        this.minBound = minBound;
        this.maxBound = maxBound;
        this.formatter = formatter;
        bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), collectsFromSingleBucket);
    }

    @Override
    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
        return buildAggregationsForVariableBuckets(owningBucketOrds, bucketOrds,
            (bucketValue, docCount, subAggregationResults) -> {
                double roundKey = Double.longBitsToDouble(bucketValue);
                double key = roundKey * interval + offset;
                return new InternalHistogram.Bucket(key, docCount, keyed, formatter, subAggregationResults);
            }, buckets -> {
                // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
                CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator());
                EmptyBucketInfo emptyBucketInfo = null;
                if (minDocCount == 0) {
                    emptyBucketInfo = new EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
                }
                return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, metadata());
            });
    }

    @Override
    public InternalAggregation buildEmptyAggregation() {
        InternalHistogram.EmptyBucketInfo emptyBucketInfo = null;
        if (minDocCount == 0) {
            emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
        }
        return new InternalHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, metadata());
    }

    @Override
    public void doClose() {
        Releasables.close(bucketOrds);
    }

    @Override
    public void collectDebugInfo(BiConsumer<String, Object> add) {
        add.accept("total_buckets", bucketOrds.size());
        super.collectDebugInfo(add);
    }
}
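The `Double.longBitsToDouble` round-trip in `buildAggregations` works because the leaf collectors store the rounded bucket number, not the raw value, as the long key. A minimal worked example, assuming the collector computes the round key as `Math.floor((value - offset) / interval)` (that computation lives in the subclasses' leaf collectors and is elided from these hunks):

double interval = 5, offset = 0, value = 17;
double roundKey = Math.floor((value - offset) / interval);            // 3.0, the bucket number
long storedBits = Double.doubleToLongBits(roundKey);                  // the long key handed to bucketOrds
double key = Double.longBitsToDouble(storedBits) * interval + offset; // 15.0, the bucket key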

Changed file: HistogramAggregatorFactory.java

@@ -19,8 +19,8 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;

+import org.elasticsearch.common.collect.List;
 import org.elasticsearch.index.query.QueryShardContext;
-import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
@@ -36,8 +36,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry;
 import org.elasticsearch.search.internal.SearchContext;

 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.Map;

 /**
@@ -54,40 +52,11 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact
     static void registerAggregators(ValuesSourceRegistry.Builder builder) {
         builder.register(HistogramAggregationBuilder.NAME, CoreValuesSourceType.RANGE,
-            new HistogramAggregatorSupplier() {
-                @Override
-                public Aggregator build(String name, AggregatorFactories factories, double interval, double offset,
-                                        BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
-                                        ValuesSource valuesSource, DocValueFormat formatter, SearchContext context,
-                                        Aggregator parent,
-                                        Map<String, Object> metadata) throws IOException {
-                    ValuesSource.Range rangeValueSource = (ValuesSource.Range) valuesSource;
-                    if (rangeValueSource.rangeType().isNumeric() == false) {
-                        throw new IllegalArgumentException("Expected numeric range type but found non-numeric range ["
-                            + rangeValueSource.rangeType().name + "]");
-                    }
-                    return new RangeHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound,
-                        maxBound, rangeValueSource, formatter, context, parent, metadata);
-                }
-            }
-        );
+            (HistogramAggregatorSupplier) RangeHistogramAggregator::new);

         builder.register(HistogramAggregationBuilder.NAME,
-            Collections.unmodifiableList(Arrays.asList(CoreValuesSourceType.NUMERIC,
-                CoreValuesSourceType.DATE,
-                CoreValuesSourceType.BOOLEAN)),
-            new HistogramAggregatorSupplier() {
-                @Override
-                public Aggregator build(String name, AggregatorFactories factories, double interval, double offset,
-                                        BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
-                                        ValuesSource valuesSource, DocValueFormat formatter, SearchContext context,
-                                        Aggregator parent,
-                                        Map<String, Object> metadata) throws IOException {
-                    return new NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound,
-                        maxBound, (ValuesSource.Numeric) valuesSource, formatter, context, parent, metadata);
-                }
-            }
-        );
+            List.of(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN),
+            (HistogramAggregatorSupplier) NumericHistogramAggregator::new);
     }
@@ -123,10 +92,6 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact
                                           Aggregator parent,
                                           boolean collectsFromSingleBucket,
                                           Map<String, Object> metadata) throws IOException {
-        if (collectsFromSingleBucket == false) {
-            return asMultiBucketAggregator(this, searchContext, parent);
-        }
         AggregatorSupplier aggregatorSupplier = queryShardContext.getValuesSourceRegistry().getAggregator(config.valueSourceType(),
             HistogramAggregationBuilder.NAME);
         if (aggregatorSupplier instanceof HistogramAggregatorSupplier == false) {
@@ -135,7 +100,7 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact
         }
         HistogramAggregatorSupplier histogramAggregatorSupplier = (HistogramAggregatorSupplier) aggregatorSupplier;
         return histogramAggregatorSupplier.build(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound,
-            valuesSource, config.format(), searchContext, parent, metadata);
+            valuesSource, config.format(), searchContext, parent, collectsFromSingleBucket, metadata);
     }

     @Override
@@ -143,6 +108,6 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact
                                             Aggregator parent,
                                             Map<String, Object> metadata) throws IOException {
         return new NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound,
-            null, config.format(), searchContext, parent, metadata);
+            null, config.format(), searchContext, parent, false, metadata);
     }
 }

Changed file: NumericHistogramAggregator.java

@@ -21,27 +21,19 @@ package org.elasticsearch.search.aggregations.bucket.histogram;

 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.ScoreMode;
-import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.util.LongHash;
 import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.BucketOrder;
-import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
-import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.internal.SearchContext;

 import java.io.IOException;
-import java.util.Collections;
 import java.util.Map;
-import java.util.function.BiConsumer;

 /**
 * An aggregator for numeric values. For a given {@code interval},
@@ -49,39 +41,43 @@ import java.util.function.BiConsumer;
 * written as {@code interval * x + offset} and yet is less than or equal to
 * {@code value}.
 */
-public class NumericHistogramAggregator extends BucketsAggregator {
+public class NumericHistogramAggregator extends AbstractHistogramAggregator {
     private final ValuesSource.Numeric valuesSource;
-    private final DocValueFormat formatter;
-    private final double interval, offset;
-    private final BucketOrder order;
-    private final boolean keyed;
-    private final long minDocCount;
-    private final double minBound, maxBound;
-    private final LongHash bucketOrds;

-    public NumericHistogramAggregator(String name, AggregatorFactories factories, double interval, double offset,
-                                      BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
-                                      @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter,
-                                      SearchContext context, Aggregator parent, Map<String, Object> metadata) throws IOException {
-        super(name, factories, context, parent, metadata);
-        if (interval <= 0) {
-            throw new IllegalArgumentException("interval must be positive, got: " + interval);
-        }
-        this.interval = interval;
-        this.offset = offset;
-        this.order = order;
-        order.validate(this);
-        this.keyed = keyed;
-        this.minDocCount = minDocCount;
-        this.minBound = minBound;
-        this.maxBound = maxBound;
-        this.valuesSource = valuesSource;
-        this.formatter = formatter;
-
-        bucketOrds = new LongHash(1, context.bigArrays());
+    public NumericHistogramAggregator(
+        String name,
+        AggregatorFactories factories,
+        double interval,
+        double offset,
+        BucketOrder order,
+        boolean keyed,
+        long minDocCount,
+        double minBound,
+        double maxBound,
+        @Nullable ValuesSource valuesSource,
+        DocValueFormat formatter,
+        SearchContext context,
+        Aggregator parent,
+        boolean collectsFromSingleBucket,
+        Map<String, Object> metadata
+    ) throws IOException {
+        super(
+            name,
+            factories,
+            interval,
+            offset,
+            order,
+            keyed,
+            minDocCount,
+            minBound,
+            maxBound,
+            formatter,
+            context,
+            parent,
+            collectsFromSingleBucket,
+            metadata
+        );
+        this.valuesSource = (ValuesSource.Numeric) valuesSource;
     }

     @Override
@@ -102,8 +98,7 @@ public class NumericHistogramAggregator extends BucketsAggregator {
         final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
         return new LeafBucketCollectorBase(sub, values) {
             @Override
-            public void collect(int doc, long bucket) throws IOException {
-                assert bucket == 0;
+            public void collect(int doc, long owningBucketOrd) throws IOException {
                 if (values.advanceExact(doc)) {
                     final int valuesCount = values.docValueCount();
@@ -115,7 +110,7 @@ public class NumericHistogramAggregator extends BucketsAggregator {
                         if (key == previousKey) {
                             continue;
                         }
-                        long bucketOrd = bucketOrds.add(Double.doubleToLongBits(key));
+                        long bucketOrd = bucketOrds.add(owningBucketOrd, Double.doubleToLongBits(key));
                         if (bucketOrd < 0) { // already seen
                             bucketOrd = -1 - bucketOrd;
                             collectExistingBucket(sub, doc, bucketOrd);
@@ -128,43 +123,4 @@ public class NumericHistogramAggregator extends BucketsAggregator {
             }
         };
     }
-
-    @Override
-    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
-        return buildAggregationsForVariableBuckets(owningBucketOrds, bucketOrds,
-            (bucketValue, docCount, subAggregationResults) -> {
-                double roundKey = Double.longBitsToDouble(bucketValue);
-                double key = roundKey * interval + offset;
-                return new InternalHistogram.Bucket(key, docCount, keyed, formatter, subAggregationResults);
-            }, buckets -> {
-                // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
-                CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator());
-                EmptyBucketInfo emptyBucketInfo = null;
-                if (minDocCount == 0) {
-                    emptyBucketInfo = new EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
-                }
-                return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, metadata());
-            });
-    }
-
-    @Override
-    public InternalAggregation buildEmptyAggregation() {
-        EmptyBucketInfo emptyBucketInfo = null;
-        if (minDocCount == 0) {
-            emptyBucketInfo = new EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
-        }
-        return new InternalHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, metadata());
-    }
-
-    @Override
-    public void doClose() {
-        Releasables.close(bucketOrds);
-    }
-
-    @Override
-    public void collectDebugInfo(BiConsumer<String, Object> add) {
-        add.accept("total_buckets", bucketOrds.size());
-        super.collectDebugInfo(add);
-    }
 }
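One subtlety in the collector above: `valuesSource.doubleValues(ctx)` returns a document's values in sorted order, so two values that round into the same bucket are adjacent, and the `key == previousKey` check keeps a document from being counted twice in one bucket while still letting it land in several distinct buckets. A standalone sketch of that loop, using made-up example values:

double interval = 5, offset = 0;
double previousKey = Double.NEGATIVE_INFINITY;
for (double value : new double[] {11.0, 12.0, 17.0}) {    // one doc's values, sorted
    double key = Math.floor((value - offset) / interval); // 2.0, 2.0, 3.0
    if (key == previousKey) {
        continue; // 12.0 rounds to the same bucket as 11.0; count the doc there only once
    }
    System.out.println("doc counted in bucket " + (key * interval + offset)); // 10.0, then 15.0
    previousKey = key;
}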

Changed file: RangeHistogramAggregator.java

@@ -21,10 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram;

 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.util.LongHash;
 import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.index.mapper.RangeFieldMapper;
 import org.elasticsearch.index.mapper.RangeType;
@@ -32,51 +29,57 @@ import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.BucketOrder;
-import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
-import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.internal.SearchContext;

 import java.io.IOException;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;

-public class RangeHistogramAggregator extends BucketsAggregator {
+public class RangeHistogramAggregator extends AbstractHistogramAggregator {
     private final ValuesSource.Range valuesSource;
-    private final DocValueFormat formatter;
-    private final double interval, offset;
-    private final BucketOrder order;
-    private final boolean keyed;
-    private final long minDocCount;
-    private final double minBound, maxBound;
-    private final LongHash bucketOrds;

-    public RangeHistogramAggregator(String name, AggregatorFactories factories, double interval, double offset,
-                                    BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
-                                    @Nullable ValuesSource.Range valuesSource, DocValueFormat formatter,
-                                    SearchContext context, Aggregator parent, Map<String, Object> metadata) throws IOException {
-        super(name, factories, context, parent, metadata);
-        if (interval <= 0) {
-            throw new IllegalArgumentException("interval must be positive, got: " + interval);
-        }
-        this.interval = interval;
-        this.offset = offset;
-        this.order = order;
-        order.validate(this);
-        this.keyed = keyed;
-        this.minDocCount = minDocCount;
-        this.minBound = minBound;
-        this.maxBound = maxBound;
-        this.valuesSource = valuesSource;
-        this.formatter = formatter;
-
-        bucketOrds = new LongHash(1, context.bigArrays());
+    public RangeHistogramAggregator(
+        String name,
+        AggregatorFactories factories,
+        double interval,
+        double offset,
+        BucketOrder order,
+        boolean keyed,
+        long minDocCount,
+        double minBound,
+        double maxBound,
+        @Nullable ValuesSource valuesSource,
+        DocValueFormat formatter,
+        SearchContext context,
+        Aggregator parent,
+        boolean collectsFromSingleBucket,
+        Map<String, Object> metadata
+    ) throws IOException {
+        super(
+            name,
+            factories,
+            interval,
+            offset,
+            order,
+            keyed,
+            minDocCount,
+            minBound,
+            maxBound,
+            formatter,
+            context,
+            parent,
+            collectsFromSingleBucket,
+            metadata
+        );
+        this.valuesSource = (ValuesSource.Range) valuesSource;
+        if (this.valuesSource.rangeType().isNumeric() == false) {
+            throw new IllegalArgumentException(
+                "Expected numeric range type but found non-numeric range [" + this.valuesSource.rangeType().name + "]"
+            );
+        }
     }

     @Override
@@ -88,8 +91,7 @@ public class RangeHistogramAggregator extends BucketsAggregator {
         final RangeType rangeType = valuesSource.rangeType();
         return new LeafBucketCollectorBase(sub, values) {
             @Override
-            public void collect(int doc, long bucket) throws IOException {
-                assert bucket == 0;
+            public void collect(int doc, long owningBucketOrd) throws IOException {
                 if (values.advanceExact(doc)) {
                     // Is it possible for valuesCount to be > 1 here? Multiple ranges are encoded into the same BytesRef in the binary doc
                     // values, so it isn't clear what we'd be iterating over.
@@ -113,7 +115,7 @@ public class RangeHistogramAggregator extends BucketsAggregator {
                             continue;
                         }
                         // Bucket collection identical to NumericHistogramAggregator, could be refactored
-                        long bucketOrd = bucketOrds.add(Double.doubleToLongBits(key));
+                        long bucketOrd = bucketOrds.add(owningBucketOrd, Double.doubleToLongBits(key));
                         if (bucketOrd < 0) { // already seen
                             bucketOrd = -1 - bucketOrd;
                             collectExistingBucket(sub, doc, bucketOrd);
@@ -131,39 +133,4 @@ public class RangeHistogramAggregator extends BucketsAggregator {
             }
         };
     }
-
-    // TODO: buildAggregations and buildEmptyAggregation are literally just copied out of NumericHistogramAggregator. We could refactor
-    // this to an abstract super class, if we wanted to. Might be overkill.
-    @Override
-    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
-        return buildAggregationsForVariableBuckets(owningBucketOrds, bucketOrds,
-            (bucketValue, docCount, subAggregationResults) -> {
-                double roundKey = Double.longBitsToDouble(bucketValue);
-                double key = roundKey * interval + offset;
-                return new InternalHistogram.Bucket(key, docCount, keyed, formatter, subAggregationResults);
-            }, buckets -> {
-                // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
-                CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator());
-                EmptyBucketInfo emptyBucketInfo = null;
-                if (minDocCount == 0) {
-                    emptyBucketInfo = new EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
-                }
-                return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, metadata());
-            });
-    }
-
-    @Override
-    public InternalAggregation buildEmptyAggregation() {
-        InternalHistogram.EmptyBucketInfo emptyBucketInfo = null;
-        if (minDocCount == 0) {
-            emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
-        }
-        return new InternalHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, metadata());
-    }
-
-    @Override
-    public void doClose() {
-        Releasables.close(bucketOrds);
-    }
 }

Changed file: HistogramAggregatorSupplier.java

@@ -29,9 +29,21 @@ import java.io.IOException;
 import java.util.Map;

 public interface HistogramAggregatorSupplier extends AggregatorSupplier {
-    Aggregator build(String name, AggregatorFactories factories, double interval, double offset,
-                     BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
-                     @Nullable ValuesSource valuesSource, DocValueFormat formatter,
-                     SearchContext context, Aggregator parent,
-                     Map<String, Object> metadata) throws IOException;
+    Aggregator build(
+        String name,
+        AggregatorFactories factories,
+        double interval,
+        double offset,
+        BucketOrder order,
+        boolean keyed,
+        long minDocCount,
+        double minBound,
+        double maxBound,
+        @Nullable ValuesSource valuesSource,
+        DocValueFormat formatter,
+        SearchContext context,
+        Aggregator parent,
+        boolean collectsFromSingleBucket,
+        Map<String, Object> metadata
+    ) throws IOException;
 }
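This widened signature is what lets `HistogramAggregatorFactory` above register bare constructor references: once the constructors of `NumericHistogramAggregator` and `RangeHistogramAggregator` take exactly these parameters in this order, `(HistogramAggregatorSupplier) NumericHistogramAggregator::new` is a valid implementation of the functional interface. A stripped-down illustration of the same pattern, using hypothetical `Widget` names:

// Hypothetical types, only to illustrate the constructor-reference trick.
interface WidgetSupplier {
    Widget build(String name, long count);
}

class Widget {
    Widget(String name, long count) {
        // parameter list matches WidgetSupplier.build, so Widget::new fits the interface
    }
}

class WidgetRegistry {
    public static void main(String[] args) {
        WidgetSupplier supplier = Widget::new; // constructor reference implements build(...)
        Widget w = supplier.build("example", 3);
    }
}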

Changed file: NumericHistogramAggregatorTests.java

@@ -23,23 +23,32 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.index.mapper.DateFieldMapper;
-import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.metrics.InternalMin;
+import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;

+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.function.Consumer;

 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;

 public class NumericHistogramAggregatorTests extends AggregatorTestCase {
@@ -55,11 +64,9 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(5);
-        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, longField("field"));
             assertEquals(4, histogram.getBuckets().size());
             assertEquals(-10d, histogram.getBuckets().get(0).getKey());
             assertEquals(2, histogram.getBuckets().get(0).getDocCount());
@@ -86,11 +93,9 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(5);
-        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE);
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, doubleField("field"));
             assertEquals(4, histogram.getBuckets().size());
             assertEquals(-10d, histogram.getBuckets().get(0).getKey());
             assertEquals(2, histogram.getBuckets().get(0).getDocCount());
@@ -119,10 +124,7 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
             "2019-11-10T22:55:46");

         String fieldName = "date_field";
-        DateFieldMapper.Builder builder = new DateFieldMapper.Builder(fieldName);
-        DateFieldMapper.DateFieldType fieldType = builder.fieldType();
-        fieldType.setName(fieldName);
-        fieldType.setHasDocValues(true);
+        DateFieldMapper.DateFieldType fieldType = dateField(fieldName, DateFieldMapper.Resolution.MILLISECONDS);

         try (Directory dir = newDirectory();
              RandomIndexWriter indexWriter = new RandomIndexWriter(random(), dir)) {
@@ -161,11 +163,9 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(Math.PI);
-        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, longField("field"));
             assertEquals(4, histogram.getBuckets().size());
             assertEquals(-4 * Math.PI, histogram.getBuckets().get(0).getKey());
             assertEquals(1, histogram.getBuckets().get(0).getDocCount());
@@ -193,11 +193,9 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
             .field("field")
             .interval(10)
             .minDocCount(2);
-        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, longField("field"));
             assertEquals(2, histogram.getBuckets().size());
             assertEquals(-10d, histogram.getBuckets().get(0).getKey());
             assertEquals(2, histogram.getBuckets().get(0).getDocCount());
@@ -222,11 +220,9 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
             .field("field")
             .interval(5)
             .missing(2d);
-        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, longField("field"));
             assertEquals(4, histogram.getBuckets().size());
             assertEquals(-10d, histogram.getBuckets().get(0).getKey());
             assertEquals(2, histogram.getBuckets().get(0).getDocCount());
@@ -304,8 +300,7 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(5);
-        MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
-        fieldType.setName("field");
+        MappedFieldType fieldType = keywordField("field");
         fieldType.setHasDocValues(true);
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
@@ -331,11 +326,9 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
             .field("field")
             .interval(5)
             .offset(Math.PI);
-        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE);
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, doubleField("field"));
             assertEquals(3, histogram.getBuckets().size());
             assertEquals(-10 + Math.PI, histogram.getBuckets().get(0).getKey());
             assertEquals(2, histogram.getBuckets().get(0).getDocCount());
@@ -365,11 +358,9 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
             .field("field")
             .interval(interval)
             .offset(offset);
-        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE);
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, doubleField("field"));
             assertEquals(3, histogram.getBuckets().size());
             assertEquals(-10 + expectedOffset, histogram.getBuckets().get(0).getKey());
@@ -403,7 +394,7 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
         fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, doubleField("field"));
             assertEquals(6, histogram.getBuckets().size());
             assertEquals(-15d, histogram.getBuckets().get(0).getKey());
             assertEquals(0, histogram.getBuckets().get(0).getDocCount());
@@ -421,4 +412,37 @@ public class NumericHistogramAggregatorTests extends AggregatorTestCase {
             }
         }
     }
+
+    public void testAsSubAgg() throws IOException {
+        AggregationBuilder request = new HistogramAggregationBuilder("outer").field("outer").interval(5).subAggregation(
+            new HistogramAggregationBuilder("inner").field("inner").interval(5).subAggregation(
+                new MinAggregationBuilder("min").field("n")));
+        CheckedConsumer<RandomIndexWriter, IOException> buildIndex = iw -> {
+            List<List<IndexableField>> docs = new ArrayList<>();
+            for (int n = 0; n < 10000; n++) {
+                docs.add(org.elasticsearch.common.collect.List.of(
+                    new SortedNumericDocValuesField("outer", n % 100),
+                    new SortedNumericDocValuesField("inner", n / 100),
+                    new SortedNumericDocValuesField("n", n)
+                ));
+            }
+            iw.addDocuments(docs);
+        };
+        Consumer<InternalHistogram> verify = outer -> {
+            assertThat(outer.getBuckets(), hasSize(20));
+            for (int outerIdx = 0; outerIdx < 20; outerIdx++) {
+                InternalHistogram.Bucket outerBucket = outer.getBuckets().get(outerIdx);
+                assertThat(outerBucket.getKey(), equalTo(5.0 * outerIdx));
+                InternalHistogram inner = outerBucket.getAggregations().get("inner");
+                assertThat(inner.getBuckets(), hasSize(20));
+                for (int innerIdx = 0; innerIdx < 20; innerIdx++) {
+                    InternalHistogram.Bucket innerBucket = inner.getBuckets().get(innerIdx);
+                    assertThat(innerBucket.getKey(), equalTo(5.0 * innerIdx));
+                    InternalMin min = innerBucket.getAggregations().get("min");
+                    assertThat(min.getValue(), equalTo(outerIdx * 5.0 + innerIdx * 500.0));
+                }
+            }
+        };
+        testCase(request, new MatchAllDocsQuery(), buildIndex, verify, longField("outer"), longField("inner"), longField("n"));
+    }
 }
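The expected minimum asserted above follows from how the index was built: with `outer = n % 100` and `inner = n / 100`, the smallest `n` whose outer value opens bucket `outerIdx` (value `5 * outerIdx`) and whose inner value opens bucket `innerIdx` (value `5 * innerIdx`) is `n = 5 * outerIdx + 500 * innerIdx`. A brute-force check of that arithmetic for a few buckets:

// Verifies min(n : n % 100 in [5 * o, 5 * o + 5) and n / 100 in [5 * i, 5 * i + 5))
// equals 5 * o + 500 * i, matching the assertion in testAsSubAgg.
for (int o : new int[] {0, 3, 19}) {
    for (int i : new int[] {0, 7, 19}) {
        long min = Long.MAX_VALUE;
        for (int n = 0; n < 10000; n++) {
            boolean inOuter = n % 100 >= 5 * o && n % 100 < 5 * o + 5;
            boolean inInner = n / 100 >= 5 * i && n / 100 < 5 * i + 5;
            if (inOuter && inInner) {
                min = Math.min(min, n);
            }
        }
        assert min == 5 * o + 500 * i;
    }
}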

Changed file: RangeHistogramAggregatorTests.java

@@ -21,29 +21,35 @@ package org.elasticsearch.search.aggregations.bucket.histogram;

 import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.network.InetAddresses;
-import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.RangeFieldMapper;
 import org.elasticsearch.index.mapper.RangeType;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
-import org.junit.Rule;
-import org.junit.rules.ExpectedException;
+import org.elasticsearch.search.aggregations.metrics.InternalMin;
+import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;

+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Set;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Consumer;

+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;

 public class RangeHistogramAggregatorTests extends AggregatorTestCase {
-    @Rule
-    public final ExpectedException expectedException = ExpectedException.none();

     public void testDoubles() throws Exception {
         RangeType rangeType = RangeType.DOUBLE;
         try (Directory dir = newDirectory();
@@ -63,12 +69,9 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(5);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, rangeField("field", rangeType));

             assertEquals(6, histogram.getBuckets().size());
             assertEquals(-5d, histogram.getBuckets().get(0).getKey());
@@ -111,12 +114,9 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(5);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, rangeField("field", rangeType));

             assertEquals(6, histogram.getBuckets().size());
             assertEquals(-5d, histogram.getBuckets().get(0).getKey());
@@ -157,12 +157,9 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(5);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, rangeField("field", rangeType));

             assertEquals(6, histogram.getBuckets().size());
             assertEquals(-5d, histogram.getBuckets().get(0).getKey());
@@ -204,12 +201,9 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(5);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, rangeField("field", rangeType));

             assertEquals(3, histogram.getBuckets().size());
             assertEquals(0d, histogram.getBuckets().get(0).getKey());
@@ -243,12 +237,9 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(Math.PI);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, rangeField("field", rangeType));

             assertEquals(6, histogram.getBuckets().size());
             assertEquals(-1 * Math.PI, histogram.getBuckets().get(0).getKey());
@@ -292,12 +283,14 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
             .field("field")
             .interval(5)
             .minDocCount(2);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = searchAndReduce(
+                searcher,
+                new MatchAllDocsQuery(),
+                aggBuilder,
+                rangeField("field", rangeType)
+            );

             assertEquals(2, histogram.getBuckets().size());
             assertEquals(5d, histogram.getBuckets().get(0).getKey());
@@ -329,12 +322,9 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
             .field("field")
             .interval(5)
             .offset(4);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, rangeField("field", rangeType));

             //assertEquals(7, histogram.getBuckets().size());
             assertEquals(-6d, histogram.getBuckets().get(0).getKey());
@@ -387,12 +377,9 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
             .field("field")
             .interval(interval)
             .offset(offset);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, rangeField("field", rangeType));

             assertEquals(6, histogram.getBuckets().size());
             assertEquals(-5d + expectedOffset, histogram.getBuckets().get(0).getKey());
@@ -431,16 +418,64 @@ public class RangeHistogramAggregatorTests extends AggregatorTestCase {
         HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
             .field("field")
             .interval(5);
-        MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
-        fieldType.setName("field");
         try (IndexReader reader = w.getReader()) {
             IndexSearcher searcher = new IndexSearcher(reader);
-            expectedException.expect(IllegalArgumentException.class);
-            search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+            Exception e = expectThrows(IllegalArgumentException.class, () ->
+                search(searcher, new MatchAllDocsQuery(), aggBuilder, rangeField("field", rangeType)));
+            assertThat(e.getMessage(), equalTo("Expected numeric range type but found non-numeric range [ip_range]"));
         }
     }
+
+    public void testAsSubAgg() throws IOException {
+        AggregationBuilder request = new HistogramAggregationBuilder("outer").field("outer").interval(5).subAggregation(
+            new HistogramAggregationBuilder("inner").field("inner").interval(5).subAggregation(
+                new MinAggregationBuilder("min").field("n")));
+        CheckedConsumer<RandomIndexWriter, IOException> buildIndex = iw -> {
+            List<List<IndexableField>> docs = new ArrayList<>();
+            for (int n = 0; n < 10000; n++) {
+                BytesRef outerRange = RangeType.LONG.encodeRanges(org.elasticsearch.common.collect.Set.of(
+                    new RangeFieldMapper.Range(RangeType.LONG, n % 100, n % 100 + 10, true, true)
+                ));
+                BytesRef innerRange = RangeType.LONG.encodeRanges(org.elasticsearch.common.collect.Set.of(
+                    new RangeFieldMapper.Range(RangeType.LONG, n / 100, n / 100 + 10, true, true)
+                ));
+                docs.add(org.elasticsearch.common.collect.List.of(
+                    new BinaryDocValuesField("outer", outerRange),
+                    new BinaryDocValuesField("inner", innerRange),
+                    new SortedNumericDocValuesField("n", n)
+                ));
+            }
+            iw.addDocuments(docs);
+        };
+        Consumer<InternalHistogram> verify = outer -> {
+            assertThat(outer.getBuckets(), hasSize(22));
+            for (int outerIdx = 0; outerIdx < 22; outerIdx++) {
+                InternalHistogram.Bucket outerBucket = outer.getBuckets().get(outerIdx);
+                assertThat(outerBucket.getKey(), equalTo(5.0 * outerIdx));
+                InternalHistogram inner = outerBucket.getAggregations().get("inner");
+                assertThat(inner.getBuckets(), hasSize(22));
+                for (int innerIdx = 0; innerIdx < 22; innerIdx++) {
+                    InternalHistogram.Bucket innerBucket = inner.getBuckets().get(innerIdx);
+                    assertThat(innerBucket.getKey(), equalTo(5.0 * innerIdx));
+                    InternalMin min = innerBucket.getAggregations().get("min");
+                    int minOuterIdxWithOverlappingRange = Math.max(0, outerIdx - 2);
+                    int minInnerIdxWithOverlappingRange = Math.max(0, innerIdx - 2);
+                    assertThat(min.getValue(),
+                        equalTo(minOuterIdxWithOverlappingRange * 5.0 + minInnerIdxWithOverlappingRange * 500.0));
+                }
+            }
+        };
+        testCase(
+            request,
+            new MatchAllDocsQuery(),
+            buildIndex,
+            verify,
+            rangeField("outer", RangeType.LONG),
+            rangeField("inner", RangeType.LONG),
+            longField("n")
+        );
+    }
 }

Changed file: SignificantTermsAggregatorTests.java

@@ -416,12 +416,6 @@ public class SignificantTermsAggregatorTests extends AggregatorTestCase {
         }
     }

-    private NumberFieldMapper.NumberFieldType longField(String name) {
-        NumberFieldMapper.NumberFieldType type = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
-        type.setName(name);
-        return type;
-    }
-
     private void addMixedTextDocs(TextFieldType textFieldType, IndexWriter w) throws IOException {
         for (int i = 0; i < 10; i++) {
             Document doc = new Document();

Changed file: TermsAggregatorTests.java

@@ -1347,12 +1347,6 @@ public class TermsAggregatorTests extends AggregatorTestCase {
         }
     }

-    private NumberFieldMapper.NumberFieldType longField(String name) {
-        NumberFieldMapper.NumberFieldType type = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
-        type.setName(name);
-        return type;
-    }
-
     private void assertNestedTopHitsScore(InternalMultiBucketAggregation<?, ?> terms, boolean withScore) {
         assertThat(terms.getBuckets().size(), equalTo(9));
         int ptr = 9;

Changed file: AggregatorTestCase.java

@@ -73,9 +73,11 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.BinaryFieldMapper;
 import org.elasticsearch.index.mapper.CompletionFieldMapper;
 import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.index.mapper.FieldAliasMapper;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
+import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.Mapper.BuilderContext;
@@ -865,4 +867,50 @@
         Releasables.close(releasables);
         releasables.clear();
     }
+
+    /**
+     * Make a {@linkplain DateFieldMapper.DateFieldType} for a {@code date}.
+     */
+    protected DateFieldMapper.DateFieldType dateField(String name, DateFieldMapper.Resolution resolution) {
+        DateFieldMapper.Builder builder = new DateFieldMapper.Builder(name);
+        builder.withResolution(resolution);
+        Settings settings = Settings.builder().put("index.version.created", Version.CURRENT.id).build();
+        return builder.build(new BuilderContext(settings, new ContentPath())).fieldType();
+    }
+
+    /**
+     * Make a {@linkplain NumberFieldMapper.NumberFieldType} for a {@code double}.
+     */
+    protected NumberFieldMapper.NumberFieldType doubleField(String name) {
+        NumberFieldMapper.NumberFieldType result = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE);
+        result.setName(name);
+        return result;
+    }
+
+    /**
+     * Make a {@linkplain KeywordFieldMapper.KeywordFieldType} for a {@code keyword}.
+     */
+    protected KeywordFieldMapper.KeywordFieldType keywordField(String name) {
+        KeywordFieldMapper.KeywordFieldType result = new KeywordFieldMapper.KeywordFieldType();
+        result.setName(name);
+        return result;
+    }
+
+    /**
+     * Make a {@linkplain NumberFieldMapper.NumberFieldType} for a {@code long}.
+     */
+    protected NumberFieldMapper.NumberFieldType longField(String name) {
+        NumberFieldMapper.NumberFieldType result = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
+        result.setName(name);
+        return result;
+    }
+
+    /**
+     * Make a {@linkplain RangeFieldMapper.RangeFieldType} for a {@code range}.
+     */
+    protected RangeFieldMapper.RangeFieldType rangeField(String name, RangeType rangeType) {
+        RangeFieldMapper.RangeFieldType result = new RangeFieldMapper.Builder(name, rangeType).fieldType();
+        result.setName(name);
+        return result;
+    }
 }