DateHistogram: Fix 'extended_bounds' with 'offset' (#23789)
This fixes a bug in the 'date_histogram' aggregation that can occur when 'extended_bounds' is used together with an 'offset' parameter. The offset must be applied after rounding the extended bounds, and must also be applied when adding empty buckets during the reduce phase in InternalDateHistogram.

Closes #23776
commit f658a8d137
parent 0353bd1fb6
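To make the problem concrete before diving into the diff, here is a minimal, self-contained sketch. It is not code from this patch: plain epoch-millisecond arithmetic stands in for Elasticsearch's Rounding machinery, a fixed 24h day is assumed, and all names in it are hypothetical.

    // Hypothetical illustration of the bug: with interval=1d and offset=+6h,
    // real bucket keys are day boundaries shifted by 6h. Rounding an
    // extended_bounds value without re-applying the offset produces a key
    // that can never line up with the buckets holding actual documents.
    public class OffsetRoundingSketch {
        static final long DAY = 24L * 60 * 60 * 1000;
        static final long OFFSET = 6L * 60 * 60 * 1000; // +6h

        // Bucket key: floor onto the day grid shifted by OFFSET.
        static long bucketKey(long millis) {
            return Math.floorDiv(millis - OFFSET, DAY) * DAY + OFFSET;
        }

        public static void main(String[] args) {
            long min = java.time.Instant.parse("2016-01-01T06:00:00Z").toEpochMilli();
            long buggy = Math.floorDiv(min, DAY) * DAY; // offset dropped after rounding
            long fixed = bucketKey(min);
            System.out.println(java.time.Instant.ofEpochMilli(buggy)); // 2016-01-01T00:00:00Z
            System.out.println(java.time.Instant.ofEpochMilli(fixed)); // 2016-01-01T06:00:00Z
        }
    }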
InternalDateHistogram.java

@@ -369,8 +369,8 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
         Bucket firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;
         if (firstBucket == null) {
             if (bounds.getMin() != null && bounds.getMax() != null) {
-                long key = bounds.getMin();
-                long max = bounds.getMax();
+                long key = bounds.getMin() + offset;
+                long max = bounds.getMax() + offset;
                 while (key <= max) {
                     iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
                     key = nextKey(key).longValue();
@@ -378,7 +378,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
             }
         } else {
             if (bounds.getMin() != null) {
-                long key = bounds.getMin();
+                long key = bounds.getMin() + offset;
                 if (key < firstBucket.key) {
                     while (key < firstBucket.key) {
                         iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
@@ -405,12 +405,12 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
         }
 
         // finally, adding the empty buckets *after* the actual data (based on the extended_bounds.max requested by the user)
-        if (bounds != null && lastBucket != null && bounds.getMax() != null && bounds.getMax() > lastBucket.key) {
-            long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
-            long max = bounds.getMax();
+        if (bounds != null && lastBucket != null && bounds.getMax() != null && bounds.getMax() + offset > lastBucket.key) {
+            long key = nextKey(lastBucket.key).longValue();
+            long max = bounds.getMax() + offset;
             while (key <= max) {
                 iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
-                key = emptyBucketInfo.rounding.nextRoundingValue(key);
+                key = nextKey(key).longValue();
             }
         }
     }
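Note the switch from emptyBucketInfo.rounding.nextRoundingValue(key) to nextKey(key) in the hunk above: stepping with the raw rounding would land on un-shifted day boundaries. The body of nextKey is not part of the loaded hunks; a plausible sketch of its contract, under that assumption, is:

    // Hypothetical sketch (not shown in the loaded hunks): strip the offset,
    // advance to the next rounding value, then re-apply the offset so the
    // result is again a valid, offset-aligned bucket key.
    private Long nextKey(long key) {
        return emptyBucketInfo.rounding.nextRoundingValue(key - offset) + offset;
    }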
DateHistogramIT.java

@@ -1162,7 +1162,61 @@ public class DateHistogramIT extends ESIntegTestCase {
                 assertThat(bucket.getDocCount(), equalTo(0L));
             }
         }
-        internalCluster().wipeIndices("test12278");
+        internalCluster().wipeIndices(index);
     }
+
+    /**
+     * Test date histogram aggregation with day interval, offset and
+     * extended bounds (see https://github.com/elastic/elasticsearch/issues/23776)
+     */
+    public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception {
+        String index = "test23776";
+        prepareCreate(index)
+                .setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+                .execute().actionGet();
+
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+        builders.add(indexDoc(index, DateTime.parse("2016-01-03T08:00:00.000Z"), 1));
+        builders.add(indexDoc(index, DateTime.parse("2016-01-03T08:00:00.000Z"), 2));
+        builders.add(indexDoc(index, DateTime.parse("2016-01-06T08:00:00.000Z"), 3));
+        builders.add(indexDoc(index, DateTime.parse("2016-01-06T08:00:00.000Z"), 4));
+        indexRandom(true, builders);
+        ensureSearchable(index);
+
+        SearchResponse response = null;
+        // retrieve those docs with the same time zone and extended bounds
+        response = client()
+                .prepareSearch(index)
+                .addAggregation(
+                        dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.days(1)).offset("+6h").minDocCount(0)
+                                .extendedBounds(new ExtendedBounds("2016-01-01T06:00:00Z", "2016-01-08T08:00:00Z"))
+                ).execute().actionGet();
+        assertSearchResponse(response);
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        List<? extends Bucket> buckets = histo.getBuckets();
+        assertThat(buckets.size(), equalTo(8));
+
+        assertEquals("2016-01-01T06:00:00.000Z", buckets.get(0).getKeyAsString());
+        assertEquals(0, buckets.get(0).getDocCount());
+        assertEquals("2016-01-02T06:00:00.000Z", buckets.get(1).getKeyAsString());
+        assertEquals(0, buckets.get(1).getDocCount());
+        assertEquals("2016-01-03T06:00:00.000Z", buckets.get(2).getKeyAsString());
+        assertEquals(2, buckets.get(2).getDocCount());
+        assertEquals("2016-01-04T06:00:00.000Z", buckets.get(3).getKeyAsString());
+        assertEquals(0, buckets.get(3).getDocCount());
+        assertEquals("2016-01-05T06:00:00.000Z", buckets.get(4).getKeyAsString());
+        assertEquals(0, buckets.get(4).getDocCount());
+        assertEquals("2016-01-06T06:00:00.000Z", buckets.get(5).getKeyAsString());
+        assertEquals(2, buckets.get(5).getDocCount());
+        assertEquals("2016-01-07T06:00:00.000Z", buckets.get(6).getKeyAsString());
+        assertEquals(0, buckets.get(6).getDocCount());
+        assertEquals("2016-01-08T06:00:00.000Z", buckets.get(7).getKeyAsString());
+        assertEquals(0, buckets.get(7).getDocCount());
+
+        internalCluster().wipeIndices(index);
+    }
+
     public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception {
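As a sanity check on the expected keys: with a one-day interval and a +6h offset, a document timestamped 2016-01-03T08:00:00Z falls into the bucket spanning [2016-01-03T06:00, 2016-01-04T06:00), and the bounds 2016-01-01T06:00Z through 2016-01-08T08:00Z stretch the histogram to exactly the eight keys asserted above. The hypothetical sketch from the top of the page reproduces the key:

    // Reusing the hypothetical OffsetRoundingSketch.bucketKey from above:
    long t = java.time.Instant.parse("2016-01-03T08:00:00Z").toEpochMilli();
    // prints 2016-01-03T06:00:00Z, the key asserted for buckets.get(2)
    System.out.println(java.time.Instant.ofEpochMilli(OffsetRoundingSketch.bucketKey(t)));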