[Test] Add unit tests for Range aggregations (#24569)

Related to #22278
Tanguy Leroux 2017-05-10 12:22:52 +02:00 committed by GitHub
parent 0ff5933a55
commit 2fe53be0db
5 changed files with 364 additions and 2 deletions

InternalRange.java

@@ -34,6 +34,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
public class InternalRange<B extends InternalRange.Bucket, R extends InternalRange<B, R>> extends InternalMultiBucketAggregation<R, B>
implements Range {
@@ -172,6 +173,27 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
@Override
public void writeTo(StreamOutput out) throws IOException {
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
Bucket that = (Bucket) other;
return Objects.equals(from, that.from)
&& Objects.equals(to, that.to)
&& Objects.equals(docCount, that.docCount)
&& Objects.equals(aggregations, that.aggregations)
&& Objects.equals(key, that.key);
}
@Override
public int hashCode() {
return Objects.hash(getClass(), from, to, docCount, aggregations, key);
}
}
public static class Factory<B extends Bucket, R extends InternalRange<B, R>> {
@@ -245,8 +267,8 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
out.writeVInt(ranges.size());
for (B bucket : ranges) {
out.writeOptionalString(((Bucket) bucket).key);
out.writeDouble(((Bucket) bucket).from);
out.writeDouble(((Bucket) bucket).to);
out.writeDouble(bucket.from);
out.writeDouble(bucket.to);
out.writeVLong(((Bucket) bucket).docCount);
bucket.aggregations.writeTo(out);
}
@@ -317,4 +339,16 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
return builder;
}
@Override
protected int doHashCode() {
return Objects.hash(ranges, format, keyed);
}
@Override
protected boolean doEquals(Object obj) {
InternalRange<?,?> that = (InternalRange<?,?>) obj;
return Objects.equals(ranges, that.ranges)
&& Objects.equals(format, that.format)
&& Objects.equals(keyed, that.keyed);
}
}
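
The equals/hashCode methods added to InternalRange.Bucket above follow the standard value-equality pattern, which is what lets the new unit tests compare expected and reduced bucket instances. As a standalone, runnable illustration of the same pattern (a minimal sketch using a hypothetical SimpleRangeBucket class, not the Elasticsearch one):

import java.util.Objects;

// Hypothetical stand-in for a range bucket: a key, numeric bounds and a document count.
final class SimpleRangeBucket {
    final String key;
    final double from;
    final double to;
    final long docCount;

    SimpleRangeBucket(String key, double from, double to, long docCount) {
        this.key = key;
        this.from = from;
        this.to = to;
        this.docCount = docCount;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        SimpleRangeBucket that = (SimpleRangeBucket) other;
        return Double.compare(from, that.from) == 0
                && Double.compare(to, that.to) == 0
                && docCount == that.docCount
                && Objects.equals(key, that.key);
    }

    @Override
    public int hashCode() {
        return Objects.hash(getClass(), key, from, to, docCount);
    }

    public static void main(String[] args) {
        SimpleRangeBucket a = new SimpleRangeBucket("range_0", 0.0, 10.0, 42);
        SimpleRangeBucket b = new SimpleRangeBucket("range_0", 0.0, 10.0, 42);
        // Equal field values must imply equal objects and equal hash codes.
        System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true
    }
}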

InternalRangeTestCase.java

@@ -0,0 +1,68 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.range;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.junit.Before;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
public abstract class InternalRangeTestCase<T extends InternalAggregation & Range> extends InternalAggregationTestCase<T> {
private boolean keyed;
@Override
@Before
public void setUp() throws Exception {
super.setUp();
keyed = randomBoolean();
}
@Override
protected T createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
return createTestInstance(name, pipelineAggregators, metaData, keyed);
}
protected abstract T createTestInstance(String name,
List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData,
boolean keyed);
@Override
protected void assertReduced(T reduced, List<T> inputs) {
final Map<String, Long> expectedCounts = new TreeMap<>();
for (T input : inputs) {
for (Range.Bucket bucket : input.getBuckets()) {
expectedCounts.compute(bucket.getKeyAsString(),
(key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
}
}
final Map<String, Long> actualCounts = new TreeMap<>();
for (Range.Bucket bucket : reduced.getBuckets()) {
actualCounts.compute(bucket.getKeyAsString(),
(key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
}
assertEquals(expectedCounts, actualCounts);
}
}
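
As a standalone sketch of the doc-count merging that assertReduced performs above (hedged example with made-up bucket keys and counts, independent of the Elasticsearch classes), summing per-key counts across shard-level inputs and comparing the result to the reduced aggregation looks like this:

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class DocCountMergeSketch {
    // Sum document counts per bucket key, mirroring the compute(...) calls in assertReduced.
    static Map<String, Long> countsPerKey(List<Map<String, Long>> inputs) {
        final Map<String, Long> counts = new TreeMap<>();
        for (Map<String, Long> input : inputs) {
            for (Map.Entry<String, Long> bucket : input.entrySet()) {
                counts.compute(bucket.getKey(),
                        (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getValue());
            }
        }
        return counts;
    }

    public static void main(String[] args) {
        // Two "shard" responses with the same bucket keys but different doc counts.
        List<Map<String, Long>> shards = List.of(
                Map.of("*-100.0", 3L, "100.0-200.0", 7L),
                Map.of("*-100.0", 2L, "100.0-200.0", 1L));
        // Prints the merged counts: {*-100.0=5, 100.0-200.0=8}
        System.out.println(countsPerKey(shards));
    }
}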

InternalRangeTests.java

@@ -0,0 +1,82 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.range;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.junit.Before;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
public class InternalRangeTests extends InternalRangeTestCase<InternalRange> {
private DocValueFormat format;
private List<Tuple<Double, Double>> ranges;
@Override
@Before
public void setUp() throws Exception {
super.setUp();
format = randomNumericDocValueFormat();
final int interval = randomFrom(1, 5, 10, 25, 50, 100);
final int numRanges = randomIntBetween(1, 10);
List<Tuple<Double, Double>> listOfRanges = new ArrayList<>(numRanges);
for (int i = 0; i < numRanges; i++) {
double from = i * interval;
double to = from + interval;
listOfRanges.add(Tuple.tuple(from, to));
}
if (randomBoolean()) {
// Add some overlapping ranges
double max = (double) numRanges * interval;
listOfRanges.add(Tuple.tuple(0.0, max));
listOfRanges.add(Tuple.tuple(0.0, max / 2));
listOfRanges.add(Tuple.tuple(max / 3, max / 3 * 2));
}
ranges = Collections.unmodifiableList(listOfRanges);
}
@Override
protected InternalRange createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
boolean keyed) {
final List<InternalRange.Bucket> buckets = new ArrayList<>();
for (int i = 0; i < ranges.size(); ++i) {
Tuple<Double, Double> range = ranges.get(i);
int docCount = randomIntBetween(0, 1000);
double from = range.v1();
double to = range.v2();
buckets.add(new InternalRange.Bucket("range_" + i, from, to, docCount, InternalAggregations.EMPTY, keyed, format));
}
return new InternalRange<>(name, buckets, format, keyed, pipelineAggregators, Collections.emptyMap());
}
@Override
protected Writeable.Reader<InternalRange> instanceReader() {
return InternalRange::new;
}
}

InternalDateRangeTests.java

@@ -0,0 +1,97 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.range.date;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.range.InternalRangeTestCase;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.junit.Before;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
public class InternalDateRangeTests extends InternalRangeTestCase<InternalDateRange> {
private DocValueFormat format;
private List<Tuple<Double, Double>> dateRanges;
@Override
@Before
public void setUp() throws Exception {
super.setUp();
format = randomNumericDocValueFormat();
Function<DateTime, DateTime> interval = randomFrom(
    dateTime -> dateTime.plusSeconds(1),
    dateTime -> dateTime.plusMinutes(1),
    dateTime -> dateTime.plusHours(1),
    dateTime -> dateTime.plusDays(1),
    dateTime -> dateTime.plusMonths(1),
    dateTime -> dateTime.plusYears(1));
final int numRanges = randomIntBetween(1, 10);
final List<Tuple<Double, Double>> listOfRanges = new ArrayList<>(numRanges);
DateTime date = new DateTime(DateTimeZone.UTC);
double start = date.getMillis();
double end = 0;
for (int i = 0; i < numRanges; i++) {
double from = date.getMillis();
date = interval.apply(date);
double to = date.getMillis();
listOfRanges.add(Tuple.tuple(from, to));
if (to > end) {
end = to;
}
}
if (randomBoolean()) {
final int randomOverlaps = randomIntBetween(1, 5);
for (int i = 0; i < randomOverlaps; i++) {
listOfRanges.add(Tuple.tuple(start, randomDoubleBetween(start, end, false)));
}
}
dateRanges = Collections.unmodifiableList(listOfRanges);
}
@Override
protected InternalDateRange createTestInstance(String name,
List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData,
boolean keyed) {
final List<InternalDateRange.Bucket> buckets = new ArrayList<>();
for (int i = 0; i < dateRanges.size(); ++i) {
Tuple<Double, Double> range = dateRanges.get(i);
int docCount = randomIntBetween(0, 1000);
double from = range.v1();
double to = range.v2();
buckets.add(new InternalDateRange.Bucket("range_" + i, from, to, docCount, InternalAggregations.EMPTY, keyed, format));
}
return new InternalDateRange(name, buckets, format, keyed, pipelineAggregators, metaData);
}
@Override
protected Writeable.Reader<InternalDateRange> instanceReader() {
return InternalDateRange::new;
}
}

InternalGeoDistanceTests.java

@@ -0,0 +1,81 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.range.geodistance;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.range.InternalRangeTestCase;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.junit.Before;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
public class InternalGeoDistanceTests extends InternalRangeTestCase<InternalGeoDistance> {
private List<Tuple<Double, Double>> geoDistanceRanges;
@Override
@Before
public void setUp() throws Exception {
super.setUp();
final int interval = randomFrom(1, 5, 10, 25, 50, 100);
final int numRanges = randomIntBetween(1, 10);
List<Tuple<Double, Double>> listOfRanges = new ArrayList<>(numRanges);
for (int i = 0; i < numRanges; i++) {
double from = i * interval;
double to = from + interval;
listOfRanges.add(Tuple.tuple(from, to));
}
if (randomBoolean()) {
// Add some overlapping ranges
double max = (double) numRanges * interval;
listOfRanges.add(Tuple.tuple(0.0, max));
listOfRanges.add(Tuple.tuple(0.0, max / 2));
listOfRanges.add(Tuple.tuple(max / 3, max / 3 * 2));
}
geoDistanceRanges = Collections.unmodifiableList(listOfRanges);
}
@Override
protected Writeable.Reader<InternalGeoDistance> instanceReader() {
return InternalGeoDistance::new;
}
@Override
protected InternalGeoDistance createTestInstance(String name,
List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData,
boolean keyed) {
final List<InternalGeoDistance.Bucket> buckets = new ArrayList<>();
for (int i = 0; i < geoDistanceRanges.size(); ++i) {
Tuple<Double, Double> range = geoDistanceRanges.get(i);
int docCount = randomIntBetween(0, 1000);
double from = range.v1();
double to = range.v2();
buckets.add(new InternalGeoDistance.Bucket("range_" + i, from, to, docCount, InternalAggregations.EMPTY, keyed));
}
return new InternalGeoDistance(name, buckets, keyed, pipelineAggregators, metaData);
}
}