[TEST] Increase InternalDateHistogramTests coverage (#36064)

In this test we were randomizing different values, but minDocCount was hardcoded to 1. It's important to test other values, especially `0`, as that is the default. The way buckets are randomly generated needed some adapting: all aggs created as part of the same test run must share the same interval, minDocCount and emptyBucketInfo. The assertions also need to account for the fact that more (or fewer) buckets are expected depending on minDocCount.
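
As a rough, self-contained sketch of what this means for the assertions (plain Java with a TreeMap standing in for the reduced buckets; the interval and counts below are made up, not taken from the test): with minDocCount 0, the holes between the first and last bucket key must be filled with empty buckets, while any higher minDocCount instead filters buckets out.

    import java.util.TreeMap;

    public class MinDocCountSketch {
        public static void main(String[] args) {
            long intervalMillis = 1000L; // shared across all inputs, like the interval randomized once in setUp
            long minDocCount = 0;        // the default, now covered by the test

            TreeMap<Long, Long> reduced = new TreeMap<>();
            // summed doc counts per bucket key, with holes at 2000 and 4000
            reduced.put(1000L, 3L);
            reduced.put(3000L, 2L);
            reduced.put(5000L, 1L);

            if (minDocCount == 0) {
                // fill the holes between the first and last key with empty buckets
                for (long key = reduced.firstKey(); key < reduced.lastKey(); key += intervalMillis) {
                    reduced.putIfAbsent(key, 0L);
                }
            } else {
                // drop buckets whose doc count is below the threshold
                reduced.values().removeIf(count -> count < minDocCount);
            }
            System.out.println(reduced); // {1000=3, 2000=0, 3000=2, 4000=0, 5000=1}
        }
    }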
Luca Cavanna 2018-11-30 11:21:46 +01:00 committed by GitHub
parent 32f7fbd9f0
commit e3eb05b14b
3 changed files with 74 additions and 15 deletions

InternalDateHistogram.java

@@ -219,7 +219,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
     private final boolean keyed;
     private final long minDocCount;
     private final long offset;
-    private final EmptyBucketInfo emptyBucketInfo;
+    final EmptyBucketInfo emptyBucketInfo;

     InternalDateHistogram(String name, List<Bucket> buckets, BucketOrder order, long minDocCount, long offset,
                           EmptyBucketInfo emptyBucketInfo,

InternalDateHistogramTests.java

@@ -20,12 +20,14 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;

 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.rounding.Rounding;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
 import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.elasticsearch.test.InternalMultiBucketAggregationTestCase;
 import org.joda.time.DateTime;

 import java.util.ArrayList;
@@ -42,12 +44,38 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
     private boolean keyed;
     private DocValueFormat format;
+    private long intervalMillis;
+    private long baseMillis;
+    private long minDocCount;
+    private InternalDateHistogram.EmptyBucketInfo emptyBucketInfo;

     @Override
     public void setUp() throws Exception {
         super.setUp();
         keyed = randomBoolean();
         format = randomNumericDocValueFormat();
+        //in order for reduction to work properly (and be realistic) we need to use the same interval, minDocCount, emptyBucketInfo
+        //and base in all randomly created aggs as part of the same test run. This is particularly important when minDocCount is
+        //set to 0 as empty buckets need to be added to fill the holes.
+        long interval = randomIntBetween(1, 3);
+        intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
+        Rounding rounding = Rounding.builder(TimeValue.timeValueMillis(intervalMillis)).build();
+        baseMillis = rounding.round(System.currentTimeMillis());
+        if (randomBoolean()) {
+            minDocCount = randomIntBetween(1, 10);
+            emptyBucketInfo = null;
+        } else {
+            minDocCount = 0;
+            ExtendedBounds extendedBounds = null;
+            if (randomBoolean()) {
+                //it's ok if min and max are outside the range of the generated buckets, that will just mean that
+                //empty buckets won't be added before the first bucket and/or after the last one
+                long min = baseMillis - intervalMillis * randomNumberOfBuckets();
+                long max = baseMillis + randomNumberOfBuckets() * intervalMillis + randomNumberOfBuckets();
+                extendedBounds = new ExtendedBounds(min, max);
+            }
+            emptyBucketInfo = new InternalDateHistogram.EmptyBucketInfo(rounding, InternalAggregations.EMPTY, extendedBounds);
+        }
     }

     @Override
@@ -57,29 +85,58 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
                                                       InternalAggregations aggregations) {
         int nbBuckets = randomNumberOfBuckets();
         List<InternalDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
-        long startingDate = System.currentTimeMillis();
-        long interval = randomIntBetween(1, 3);
-        long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis();
+        //avoid having different random instance start from exactly the same base
+        long startingDate = baseMillis - intervalMillis * randomIntBetween(0, 100);
         for (int i = 0; i < nbBuckets; i++) {
-            long key = startingDate + (intervalMillis * i);
-            buckets.add(i, new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations));
+            //rarely leave some holes to be filled up with empty buckets in case minDocCount is set to 0
+            if (frequently()) {
+                long key = startingDate + intervalMillis * i;
+                buckets.add(new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations));
+            }
         }
-        BucketOrder order = randomFrom(BucketOrder.key(true), BucketOrder.key(false));
-        return new InternalDateHistogram(name, buckets, order, 1, 0L, null, format, keyed, pipelineAggregators, metaData);
+        BucketOrder order = BucketOrder.key(randomBoolean());
+        return new InternalDateHistogram(name, buckets, order, minDocCount, 0L, emptyBucketInfo, format, keyed,
+                pipelineAggregators, metaData);
     }

     @Override
     protected void assertReduced(InternalDateHistogram reduced, List<InternalDateHistogram> inputs) {
-        Map<Long, Long> expectedCounts = new TreeMap<>();
+        TreeMap<Long, Long> expectedCounts = new TreeMap<>();
         for (Histogram histogram : inputs) {
             for (Histogram.Bucket bucket : histogram.getBuckets()) {
                 expectedCounts.compute(((DateTime) bucket.getKey()).getMillis(),
                         (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
             }
         }
+        if (minDocCount == 0) {
+            long minBound = -1;
+            long maxBound = -1;
+            if (emptyBucketInfo.bounds != null) {
+                minBound = emptyBucketInfo.rounding.round(emptyBucketInfo.bounds.getMin());
+                maxBound = emptyBucketInfo.rounding.round(emptyBucketInfo.bounds.getMax());
+                if (expectedCounts.isEmpty() && minBound <= maxBound) {
+                    expectedCounts.put(minBound, 0L);
+                }
+            }
+            if (expectedCounts.isEmpty() == false) {
+                Long nextKey = expectedCounts.firstKey();
+                while (nextKey < expectedCounts.lastKey()) {
+                    expectedCounts.putIfAbsent(nextKey, 0L);
+                    nextKey += intervalMillis;
+                }
+                if (emptyBucketInfo.bounds != null) {
+                    while (minBound < expectedCounts.firstKey()) {
+                        expectedCounts.put(expectedCounts.firstKey() - intervalMillis, 0L);
+                    }
+                    while (expectedCounts.lastKey() < maxBound) {
+                        expectedCounts.put(expectedCounts.lastKey() + intervalMillis, 0L);
+                    }
+                }
+            }
+        } else {
+            expectedCounts.entrySet().removeIf(doubleLongEntry -> doubleLongEntry.getValue() < minDocCount);
+        }
+
         Map<Long, Long> actualCounts = new TreeMap<>();
         for (Histogram.Bucket bucket : reduced.getBuckets()) {
             actualCounts.compute(((DateTime) bucket.getKey()).getMillis(),
@@ -106,6 +163,7 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
         long minDocCount = instance.getMinDocCount();
         long offset = instance.getOffset();
         List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators();
+        InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = instance.emptyBucketInfo;
         Map<String, Object> metaData = instance.getMetaData();
         switch (between(0, 5)) {
         case 0:
@@ -121,6 +179,7 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
             break;
         case 3:
             minDocCount += between(1, 10);
+            emptyBucketInfo = null;
             break;
         case 4:
             offset += between(1, 20);
@@ -136,7 +195,7 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
         default:
             throw new AssertionError("Illegal randomisation branch");
         }
-        return new InternalDateHistogram(name, buckets, order, minDocCount, offset, null, format, keyed, pipelineAggregators,
+        return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators,
                 metaData);
     }
 }
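
The subtlest part of the new assertReduced logic above is extending the expected buckets out to the extended bounds when minDocCount is 0. Here is a standalone sketch of just that extension step (plain Java; the rounded bound values are invented, whereas in the test they come from emptyBucketInfo.rounding and emptyBucketInfo.bounds):

    import java.util.TreeMap;

    public class BoundsExtensionSketch {
        public static void main(String[] args) {
            long intervalMillis = 1000L;
            long minBound = 0L;    // stands in for rounding.round(bounds.getMin())
            long maxBound = 7000L; // stands in for rounding.round(bounds.getMax())

            // expected counts after summing the inputs and filling inner holes
            TreeMap<Long, Long> expectedCounts = new TreeMap<>();
            expectedCounts.put(2000L, 5L);
            expectedCounts.put(3000L, 1L);

            // prepend empty buckets from the first key down to the lower bound
            while (minBound < expectedCounts.firstKey()) {
                expectedCounts.put(expectedCounts.firstKey() - intervalMillis, 0L);
            }
            // append empty buckets from the last key up to the upper bound
            while (expectedCounts.lastKey() < maxBound) {
                expectedCounts.put(expectedCounts.lastKey() + intervalMillis, 0L);
            }
            System.out.println(expectedCounts);
            // {0=0, 1000=0, 2000=5, 3000=1, 4000=0, 5000=0, 6000=0, 7000=0}
        }
    }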

InternalHistogramTests.java

@@ -123,7 +123,7 @@ public class InternalHistogramTests extends InternalMultiBucketAggregationTestCa
         }
         if (minDocCount == 0) {
             double minBound = round(emptyBucketInfo.minBound);
-            if (expectedCounts.isEmpty() && emptyBucketInfo.minBound < emptyBucketInfo.maxBound) {
+            if (expectedCounts.isEmpty() && emptyBucketInfo.minBound <= emptyBucketInfo.maxBound) {
                 expectedCounts.put(minBound, 0L);
             }
             if (expectedCounts.isEmpty() == false) {
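
The one-character fix in InternalHistogramTests covers the corner case where both extended bounds round into the same bucket and no documents land in any bucket: exactly one empty bucket is still expected there. A tiny sketch with hypothetical bound values:

    public class SingleBucketBoundsSketch {
        public static void main(String[] args) {
            double minBound = 5.0;
            double maxBound = 5.0; // both bounds fall into the same bucket
            System.out.println(minBound < maxBound);  // false: the old check expected no empty bucket
            System.out.println(minBound <= maxBound); // true: the fixed check expects one empty bucket at minBound
        }
    }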