Support Range Fields in Histogram and Date Histogram (#46012)
Backport of 1a0dddf4ad24b3f2c751a1fe0e024fdbf8754f94 (AKA #445395)

* Add support for a Range field ValuesSource, including decode logic for range doc values and exposing RangeType as a first-class enum
* Provide hooks in ValuesSourceConfig for aggregations to control ValuesSource class selection on missing & script values
* Branch aggregator creation in Histogram and DateHistogram based on the ValuesSource class, to enable specialization based on type. This is similar to how the Terms aggregator works.
* Prioritize the field type, when available, for selecting the ValuesSource class type to use for an aggregation
This commit is contained in:
parent f28644c498
commit aec125faff
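The third bullet above describes branching aggregator creation on the concrete ValuesSource class. A minimal, self-contained sketch of that dispatch pattern follows; every type name in it is an invented stand-in for illustration, not the actual Elasticsearch aggregation API.

// Hypothetical sketch of "branch aggregator creation based on ValuesSource class".
// All names are invented stand-ins; the real Histogram/DateHistogram factories live in
// the Elasticsearch aggregations module and have different signatures.
public class ValuesSourceBranchingSketch {

    interface ValuesSource {}
    static class NumericValuesSource implements ValuesSource {}
    static class RangeValuesSource implements ValuesSource {}

    interface Aggregator { String describe(); }
    static class NumericHistogramAggregator implements Aggregator {
        public String describe() { return "numeric histogram aggregator"; }
    }
    static class RangeHistogramAggregator implements Aggregator {
        public String describe() { return "range histogram aggregator"; }
    }

    // The factory inspects the concrete ValuesSource class and picks a specialized
    // aggregator, analogous to how the commit describes Histogram/DateHistogram (and Terms) behaving.
    static Aggregator createHistogramAggregator(ValuesSource valuesSource) {
        if (valuesSource instanceof RangeValuesSource) {
            return new RangeHistogramAggregator();
        } else if (valuesSource instanceof NumericValuesSource) {
            return new NumericHistogramAggregator();
        }
        throw new IllegalArgumentException("unsupported values source: " + valuesSource.getClass());
    }

    public static void main(String[] args) {
        System.out.println(createHistogramAggregator(new RangeValuesSource()).describe());
        System.out.println(createHistogramAggregator(new NumericValuesSource()).describe());
    }
}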
@@ -68,7 +68,7 @@ import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeFieldMapper.RangeType;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.BoostingQueryBuilder;
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
@@ -32,6 +32,7 @@ import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.mapper.RangeType;

import java.io.IOException;
import java.util.Objects;
@@ -40,13 +41,13 @@ public final class BinaryDocValuesRangeQuery extends Query {

    private final String fieldName;
    private final QueryType queryType;
    private final LengthType lengthType;
    private final RangeType.LengthType lengthType;
    private final BytesRef from;
    private final BytesRef to;
    private final Object originalFrom;
    private final Object originalTo;

    public BinaryDocValuesRangeQuery(String fieldName, QueryType queryType, LengthType lengthType,
    public BinaryDocValuesRangeQuery(String fieldName, QueryType queryType, RangeType.LengthType lengthType,
                                     BytesRef from, BytesRef to,
                                     Object originalFrom, Object originalTo) {
        this.fieldName = fieldName;
@@ -178,42 +179,4 @@ public final class BinaryDocValuesRangeQuery extends Query {

    }

    public enum LengthType {
        FIXED_4 {
            @Override
            int readLength(byte[] bytes, int offset) {
                return 4;
            }
        },
        FIXED_8 {
            @Override
            int readLength(byte[] bytes, int offset) {
                return 8;
            }
        },
        FIXED_16 {
            @Override
            int readLength(byte[] bytes, int offset) {
                return 16;
            }
        },
        VARIABLE {
            @Override
            int readLength(byte[] bytes, int offset) {
                // the first bit encodes the sign and the next 4 bits encode the number
                // of additional bytes
                int token = Byte.toUnsignedInt(bytes[offset]);
                int length = (token >>> 3) & 0x0f;
                if ((token & 0x80) == 0) {
                    length = 0x0f - length;
                }
                return 1 + length;
            }
        };

        /**
         * Return the length of the value that starts at {@code offset} in {@code bytes}.
         */
        abstract int readLength(byte[] bytes, int offset);
    }
}
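For reference, a standalone sketch of the VARIABLE readLength logic shown in the removed enum above (the sample bytes are made up purely to exercise the method; this class is not part of the commit):

// Standalone illustration of the VARIABLE length token: the high bit is the sign flag and
// the next four bits hold the count of additional bytes (inverted for negative values).
public class VariableLengthDemo {

    static int readLength(byte[] bytes, int offset) {
        int token = Byte.toUnsignedInt(bytes[offset]);
        int length = (token >>> 3) & 0x0f;
        if ((token & 0x80) == 0) {
            length = 0x0f - length;
        }
        return 1 + length;
    }

    public static void main(String[] args) {
        // 0x88 = 1000_1000: sign bit set, length nibble 1 -> one additional byte, total length 2.
        System.out.println(readLength(new byte[] { (byte) 0x88, 0x01 }, 0)); // prints 2
        // 0x70 = 0111_0000: sign bit clear, nibble 14 is inverted to 1 -> total length 2.
        System.out.println(readLength(new byte[] { (byte) 0x70, 0x01 }, 0)); // prints 2
    }
}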
@@ -30,6 +30,7 @@ import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.indices.breaker.CircuitBreakerService;

import java.util.Set;
@@ -71,6 +72,7 @@ public abstract class DocValuesIndexFieldData {

        private NumericType numericType;
        private Function<SortedSetDocValues, ScriptDocValues<?>> scriptFunction = AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION;
        private RangeType rangeType;

        public Builder numericType(NumericType type) {
            this.numericType = type;
@@ -82,12 +84,17 @@ public abstract class DocValuesIndexFieldData {
            return this;
        }

        public Builder setRangeType(RangeType rangeType) {
            this.rangeType = rangeType;
            return this;
        }

        @Override
        public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
                                       CircuitBreakerService breakerService, MapperService mapperService) {
            // Ignore Circuit Breaker
            final String fieldName = fieldType.name();
            if (BINARY_INDEX_FIELD_NAMES.contains(fieldName)) {
            if (BINARY_INDEX_FIELD_NAMES.contains(fieldName) || rangeType != null) {
                assert numericType == null;
                return new BinaryDVIndexFieldData(indexSettings.getIndex(), fieldName);
            } else if (numericType != null) {
@@ -19,12 +19,17 @@

package org.elasticsearch.index.mapper;

import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.TriFunction;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
@@ -33,6 +38,32 @@ enum BinaryRangeUtil {

    ;

    static BytesRef encodeIPRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
        final byte[] encoded = new byte[5 + (16 * 2) * ranges.size()];
        ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
        out.writeVInt(ranges.size());
        for (RangeFieldMapper.Range range : ranges) {
            InetAddress fromValue = (InetAddress) range.from;
            byte[] encodedFromValue = InetAddressPoint.encode(fromValue);
            out.writeBytes(encodedFromValue, 0, encodedFromValue.length);

            InetAddress toValue = (InetAddress) range.to;
            byte[] encodedToValue = InetAddressPoint.encode(toValue);
            out.writeBytes(encodedToValue, 0, encodedToValue.length);
        }
        return new BytesRef(encoded, 0, out.getPosition());
    }
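A sizing note on the allocation above (my own arithmetic, not text from the commit): each endpoint is written as a 16-byte IPv6-form address, so every range contributes 32 bytes, and the leading 5 bytes reserve room for the largest possible vInt count. For three ranges that is 5 + 32 * 3 = 101 bytes allocated, while the returned BytesRef is trimmed to out.getPosition(), e.g. 1 + 96 = 97 bytes when the count fits in a single vInt byte.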

    static List<RangeFieldMapper.Range> decodeIPRanges(BytesRef encodedRanges) {
        return decodeRanges(encodedRanges, RangeType.IP, BinaryRangeUtil::decodeIP);
    }

    private static InetAddress decodeIP(byte[] bytes, int offset, int length) {
        // offset + length because copyOfRange wants a from and a to, not an offset & length
        byte[] slice = Arrays.copyOfRange(bytes, offset, offset + length);
        return InetAddressPoint.decode(slice);
    }

    static BytesRef encodeLongRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
        List<RangeFieldMapper.Range> sortedRanges = new ArrayList<>(ranges);
        Comparator<RangeFieldMapper.Range> fromComparator = Comparator.comparingLong(range -> ((Number) range.from).longValue());
@@ -51,6 +82,11 @@ enum BinaryRangeUtil {
        return new BytesRef(encoded, 0, out.getPosition());
    }

    static List<RangeFieldMapper.Range> decodeLongRanges(BytesRef encodedRanges) {
        return decodeRanges(encodedRanges, RangeType.LONG,
            BinaryRangeUtil::decodeLong);
    }

    static BytesRef encodeDoubleRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
        List<RangeFieldMapper.Range> sortedRanges = new ArrayList<>(ranges);
        Comparator<RangeFieldMapper.Range> fromComparator = Comparator.comparingDouble(range -> ((Number) range.from).doubleValue());
@@ -69,6 +105,43 @@ enum BinaryRangeUtil {
        return new BytesRef(encoded, 0, out.getPosition());
    }

    static List<RangeFieldMapper.Range> decodeDoubleRanges(BytesRef encodedRanges) {
        return decodeRanges(encodedRanges, RangeType.DOUBLE,
            BinaryRangeUtil::decodeDouble);
    }

    static List<RangeFieldMapper.Range> decodeFloatRanges(BytesRef encodedRanges) {
        return decodeRanges(encodedRanges, RangeType.FLOAT,
            BinaryRangeUtil::decodeFloat);
    }

    static List<RangeFieldMapper.Range> decodeRanges(BytesRef encodedRanges, RangeType rangeType,
                                                     TriFunction<byte[], Integer, Integer, Object> decodeBytes) {

        RangeType.LengthType lengthType = rangeType.lengthType;
        ByteArrayDataInput in = new ByteArrayDataInput();
        in.reset(encodedRanges.bytes, encodedRanges.offset, encodedRanges.length);
        int numRanges = in.readVInt();

        List<RangeFieldMapper.Range> ranges = new ArrayList<>(numRanges);

        final byte[] bytes = encodedRanges.bytes;
        int offset = in.getPosition();
        for (int i = 0; i < numRanges; i++) {
            int length = lengthType.readLength(bytes, offset);
            Object from = decodeBytes.apply(bytes, offset, length);
            offset += length;

            length = lengthType.readLength(bytes, offset);
            Object to = decodeBytes.apply(bytes, offset, length);
            offset += length;
            // TODO: Support for exclusive ranges, pending resolution of #40601
            RangeFieldMapper.Range decodedRange = new RangeFieldMapper.Range(rangeType, from, to, true, true);
            ranges.add(decodedRange);
        }
        return ranges;
    }
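To make the wire layout concrete (a worked example of mine, not part of the commit): two double ranges are encoded as a one-byte vInt count of 2 followed by four fixed 8-byte sortable values (from1, to1, from2, to2), 1 + 4 * 8 = 33 bytes in total; decodeRanges reads the count and then alternately consumes a from and a to of lengthType width until that many pairs have been decoded.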

    static BytesRef encodeFloatRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
        List<RangeFieldMapper.Range> sortedRanges = new ArrayList<>(ranges);
        Comparator<RangeFieldMapper.Range> fromComparator = Comparator.comparingDouble(range -> ((Number) range.from).floatValue());
@@ -93,12 +166,20 @@ enum BinaryRangeUtil {
        return encoded;
    }

    static double decodeDouble(byte[] bytes, int offset, int length){
        return NumericUtils.sortableLongToDouble(NumericUtils.sortableBytesToLong(bytes, offset));
    }

    static byte[] encodeFloat(float number) {
        byte[] encoded = new byte[4];
        NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(number), encoded, 0);
        return encoded;
    }

    static float decodeFloat(byte[] bytes, int offset, int length) {
        return NumericUtils.sortableIntToFloat(NumericUtils.sortableBytesToInt(bytes, offset));
    }

    /**
     * Encodes the specified number of type long in a variable-length byte format.
     * The byte format preserves ordering, which means the returned byte array can be used for comparing as is.
@@ -114,6 +195,23 @@ enum BinaryRangeUtil {
        return encode(number, sign);
    }

    static long decodeLong(byte[] bytes, int offset, int length) {
        boolean isNegative = (bytes[offset] & 128) == 0;
        // Start by masking off the last three bits of the first byte - that's the start of our number
        long decoded;
        if (isNegative) {
            decoded = -8 | bytes[offset];
        } else {
            decoded = bytes[offset] & 7;
        }
        for (int i = 1; i < length; i++) {
            decoded <<= 8;
            decoded += Byte.toUnsignedInt(bytes[offset + i]);
        }

        return decoded;
    }
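A worked decode (my own example bytes, not from the commit): for { 0x89, 0x2A } the sign bit of 0x89 is set, so the value is non-negative and starts from the low three bits, 0x89 & 7 = 1; the second byte then contributes (1 << 8) + 0x2A = 298. The same first byte also satisfies readLength above: its length nibble is 1, so the value occupies two bytes in total.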

    private static byte[] encode(long l, int sign) {
        assert l >= 0;

@@ -158,4 +256,5 @@ enum BinaryRangeUtil {
        }
        return encoded;
    }

}
@@ -327,7 +327,7 @@ public final class DateFieldMapper extends FieldMapper {
            return dateMathParser;
        }

        long parse(String value) {
        public long parse(String value) {
            return resolution.convert(DateFormatters.from(dateTimeFormatter().parse(value)).toInstant());
        }

@@ -19,31 +19,16 @@

package org.elasticsearch.index.mapper;

import org.apache.lucene.document.DoubleRange;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatRange;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.document.InetAddressRange;
import org.apache.lucene.document.IntRange;
import org.apache.lucene.document.LongRange;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BinaryDocValuesRangeQuery;
import org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FutureArrays;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.geo.ShapeRelation;
@@ -56,15 +41,14 @@ import org.elasticsearch.common.util.LocaleUtils;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
import org.elasticsearch.index.query.QueryShardContext;

import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -72,7 +56,6 @@ import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.BiFunction;

import static org.elasticsearch.index.query.RangeQueryBuilder.GTE_FIELD;
import static org.elasticsearch.index.query.RangeQueryBuilder.GT_FIELD;
@@ -229,6 +212,8 @@ public class RangeFieldMapper extends FieldMapper {
            }
        }

        public RangeType rangeType() { return rangeType; }

        @Override
        public MappedFieldType clone() {
            return new RangeFieldType(this);
@@ -249,6 +234,12 @@ public class RangeFieldMapper extends FieldMapper {
            return Objects.hash(super.hashCode(), rangeType, dateTimeFormatter);
        }

        @Override
        public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
            failIfNoDocValues();
            return new DocValuesIndexFieldData.Builder().setRangeType(rangeType);
        }

        @Override
        public String typeName() {
            return rangeType.name;
@ -438,557 +429,6 @@ public class RangeFieldMapper extends FieldMapper {
|
|||
}
|
||||
}
|
||||
|
||||
/** Enum defining the type of range */
|
||||
public enum RangeType {
|
||||
IP("ip_range") {
|
||||
@Override
|
||||
public Field getRangeField(String name, Range r) {
|
||||
return new InetAddressRange(name, (InetAddress)r.from, (InetAddress)r.to);
|
||||
}
|
||||
@Override
|
||||
public InetAddress parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||
throws IOException {
|
||||
InetAddress address = InetAddresses.forString(parser.text());
|
||||
return included ? address : nextUp(address);
|
||||
}
|
||||
@Override
|
||||
public InetAddress parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||
throws IOException {
|
||||
InetAddress address = InetAddresses.forString(parser.text());
|
||||
return included ? address : nextDown(address);
|
||||
}
|
||||
@Override
|
||||
public InetAddress parse(Object value, boolean coerce) {
|
||||
if (value instanceof InetAddress) {
|
||||
return (InetAddress) value;
|
||||
} else {
|
||||
if (value instanceof BytesRef) {
|
||||
value = ((BytesRef) value).utf8ToString();
|
||||
}
|
||||
return InetAddresses.forString(value.toString());
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public InetAddress minValue() {
|
||||
return InetAddressPoint.MIN_VALUE;
|
||||
}
|
||||
@Override
|
||||
public InetAddress maxValue() {
|
||||
return InetAddressPoint.MAX_VALUE;
|
||||
}
|
||||
@Override
|
||||
public InetAddress nextUp(Object value) {
|
||||
return InetAddressPoint.nextUp((InetAddress)value);
|
||||
}
|
||||
@Override
|
||||
public InetAddress nextDown(Object value) {
|
||||
return InetAddressPoint.nextDown((InetAddress)value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<Range> ranges) throws IOException {
|
||||
final byte[] encoded = new byte[5 + (16 * 2) * ranges.size()];
|
||||
ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
|
||||
out.writeVInt(ranges.size());
|
||||
for (Range range : ranges) {
|
||||
InetAddress fromValue = (InetAddress) range.from;
|
||||
byte[] encodedFromValue = InetAddressPoint.encode(fromValue);
|
||||
out.writeBytes(encodedFromValue, 0, encodedFromValue.length);
|
||||
|
||||
InetAddress toValue = (InetAddress) range.to;
|
||||
byte[] encodedToValue = InetAddressPoint.encode(toValue);
|
||||
out.writeBytes(encodedToValue, 0, encodedToValue.length);
|
||||
}
|
||||
return new BytesRef(encoded, 0, out.getPosition());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
if (includeFrom == false) {
|
||||
from = nextUp(from);
|
||||
}
|
||||
|
||||
if (includeTo == false) {
|
||||
to = nextDown(to);
|
||||
}
|
||||
|
||||
byte[] encodedFrom = InetAddressPoint.encode((InetAddress) from);
|
||||
byte[] encodedTo = InetAddressPoint.encode((InetAddress) to);
|
||||
return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_16,
|
||||
new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, from, to, includeFrom, includeTo,
|
||||
(f, t) -> InetAddressRange.newWithinQuery(field, f, t));
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, from, to, includeFrom, includeTo,
|
||||
(f, t) -> InetAddressRange.newContainsQuery(field, f, t ));
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, from, to, includeFrom, includeTo,
|
||||
(f, t) -> InetAddressRange.newIntersectsQuery(field, f ,t ));
|
||||
}
|
||||
|
||||
private Query createQuery(String field, Object lower, Object upper, boolean includeLower, boolean includeUpper,
|
||||
BiFunction<InetAddress, InetAddress, Query> querySupplier) {
|
||||
byte[] lowerBytes = InetAddressPoint.encode((InetAddress) lower);
|
||||
byte[] upperBytes = InetAddressPoint.encode((InetAddress) upper);
|
||||
if (FutureArrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) {
|
||||
throw new IllegalArgumentException(
|
||||
"Range query `from` value (" + lower + ") is greater than `to` value (" + upper + ")");
|
||||
}
|
||||
InetAddress correctedFrom = includeLower ? (InetAddress) lower : nextUp(lower);
|
||||
InetAddress correctedTo = includeUpper ? (InetAddress) upper : nextDown(upper);;
|
||||
lowerBytes = InetAddressPoint.encode(correctedFrom);
|
||||
upperBytes = InetAddressPoint.encode(correctedTo);
|
||||
if (FutureArrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) {
|
||||
return new MatchNoDocsQuery("float range didn't intersect anything");
|
||||
} else {
|
||||
return querySupplier.apply(correctedFrom, correctedTo);
|
||||
}
|
||||
}
|
||||
},
|
||||
DATE("date_range", NumberType.LONG) {
|
||||
@Override
|
||||
public Field getRangeField(String name, Range r) {
|
||||
return new LongRange(name, new long[] {((Number)r.from).longValue()}, new long[] {((Number)r.to).longValue()});
|
||||
}
|
||||
private Number parse(DateMathParser dateMathParser, String dateStr) {
|
||||
return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");})
|
||||
.toEpochMilli();
|
||||
}
|
||||
@Override
|
||||
public Number parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||
throws IOException {
|
||||
Number value = parse(fieldType.dateMathParser, parser.text());
|
||||
return included ? value : nextUp(value);
|
||||
}
|
||||
@Override
|
||||
public Number parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||
throws IOException{
|
||||
Number value = parse(fieldType.dateMathParser, parser.text());
|
||||
return included ? value : nextDown(value);
|
||||
}
|
||||
@Override
|
||||
public Long minValue() {
|
||||
return Long.MIN_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Long maxValue() {
|
||||
return Long.MAX_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Long nextUp(Object value) {
|
||||
return (long) LONG.nextUp(value);
|
||||
}
|
||||
@Override
|
||||
public Long nextDown(Object value) {
|
||||
return (long) LONG.nextDown(value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<Range> ranges) throws IOException {
|
||||
return LONG.encodeRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query rangeQuery(String field, boolean hasDocValues, Object lowerTerm, Object upperTerm, boolean includeLower,
|
||||
boolean includeUpper, ShapeRelation relation, @Nullable ZoneId timeZone,
|
||||
@Nullable DateMathParser parser, QueryShardContext context) {
|
||||
ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone;
|
||||
|
||||
DateMathParser dateMathParser = (parser == null) ?
|
||||
DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser;
|
||||
Long low = lowerTerm == null ? Long.MIN_VALUE :
|
||||
dateMathParser.parse(lowerTerm instanceof BytesRef ? ((BytesRef) lowerTerm).utf8ToString() : lowerTerm.toString(),
|
||||
context::nowInMillis, false, zone).toEpochMilli();
|
||||
Long high = upperTerm == null ? Long.MAX_VALUE :
|
||||
dateMathParser.parse(upperTerm instanceof BytesRef ? ((BytesRef) upperTerm).utf8ToString() : upperTerm.toString(),
|
||||
context::nowInMillis, false, zone).toEpochMilli();
|
||||
|
||||
return super.rangeQuery(field, hasDocValues, low, high, includeLower, includeUpper, relation, zone,
|
||||
dateMathParser, context);
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||
return LONG.withinQuery(field, from, to, includeLower, includeUpper);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||
return LONG.containsQuery(field, from, to, includeLower, includeUpper);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||
return LONG.intersectsQuery(field, from, to, includeLower, includeUpper);
|
||||
}
|
||||
},
|
||||
// todo support half_float
|
||||
FLOAT("float_range", NumberType.FLOAT) {
|
||||
@Override
|
||||
public Float minValue() {
|
||||
return Float.NEGATIVE_INFINITY;
|
||||
}
|
||||
@Override
|
||||
public Float maxValue() {
|
||||
return Float.POSITIVE_INFINITY;
|
||||
}
|
||||
@Override
|
||||
public Float nextUp(Object value) {
|
||||
return Math.nextUp(((Number)value).floatValue());
|
||||
}
|
||||
@Override
|
||||
public Float nextDown(Object value) {
|
||||
return Math.nextDown(((Number)value).floatValue());
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<Range> ranges) throws IOException {
|
||||
return BinaryRangeUtil.encodeFloatRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
if (includeFrom == false) {
|
||||
from = nextUp(from);
|
||||
}
|
||||
|
||||
if (includeTo == false) {
|
||||
to = nextDown(to);
|
||||
}
|
||||
|
||||
byte[] encodedFrom = BinaryRangeUtil.encodeFloat((Float) from);
|
||||
byte[] encodedTo = BinaryRangeUtil.encodeFloat((Float) to);
|
||||
return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_4,
|
||||
new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field getRangeField(String name, Range r) {
|
||||
return new FloatRange(name, new float[] {((Number)r.from).floatValue()}, new float[] {((Number)r.to).floatValue()});
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
|
||||
(f, t) -> FloatRange.newWithinQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
|
||||
(f, t) -> FloatRange.newContainsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
|
||||
(f, t) -> FloatRange.newIntersectsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
|
||||
}
|
||||
},
|
||||
DOUBLE("double_range", NumberType.DOUBLE) {
|
||||
@Override
|
||||
public Double minValue() {
|
||||
return Double.NEGATIVE_INFINITY;
|
||||
}
|
||||
@Override
|
||||
public Double maxValue() {
|
||||
return Double.POSITIVE_INFINITY;
|
||||
}
|
||||
@Override
|
||||
public Double nextUp(Object value) {
|
||||
return Math.nextUp(((Number)value).doubleValue());
|
||||
}
|
||||
@Override
|
||||
public Double nextDown(Object value) {
|
||||
return Math.nextDown(((Number)value).doubleValue());
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<Range> ranges) throws IOException {
|
||||
return BinaryRangeUtil.encodeDoubleRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
if (includeFrom == false) {
|
||||
from = nextUp(from);
|
||||
}
|
||||
|
||||
if (includeTo == false) {
|
||||
to = nextDown(to);
|
||||
}
|
||||
|
||||
byte[] encodedFrom = BinaryRangeUtil.encodeDouble((Double) from);
|
||||
byte[] encodedTo = BinaryRangeUtil.encodeDouble((Double) to);
|
||||
return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.FIXED_8,
|
||||
new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field getRangeField(String name, Range r) {
|
||||
return new DoubleRange(name, new double[] {((Number)r.from).doubleValue()}, new double[] {((Number)r.to).doubleValue()});
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
|
||||
(f, t) -> DoubleRange.newWithinQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
|
||||
(f, t) -> DoubleRange.newContainsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
|
||||
(f, t) -> DoubleRange.newIntersectsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
|
||||
}
|
||||
|
||||
},
|
||||
// todo add BYTE support
|
||||
// todo add SHORT support
|
||||
INTEGER("integer_range", NumberType.INTEGER) {
|
||||
@Override
|
||||
public Integer minValue() {
|
||||
return Integer.MIN_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Integer maxValue() {
|
||||
return Integer.MAX_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Integer nextUp(Object value) {
|
||||
return ((Number)value).intValue() + 1;
|
||||
}
|
||||
@Override
|
||||
public Integer nextDown(Object value) {
|
||||
return ((Number)value).intValue() - 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<Range> ranges) throws IOException {
|
||||
return LONG.encodeRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field getRangeField(String name, Range r) {
|
||||
return new IntRange(name, new int[] {((Number)r.from).intValue()}, new int[] {((Number)r.to).intValue()});
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo,
|
||||
(f, t) -> IntRange.newWithinQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo,
|
||||
(f, t) -> IntRange.newContainsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo,
|
||||
(f, t) -> IntRange.newIntersectsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
|
||||
}
|
||||
},
|
||||
LONG("long_range", NumberType.LONG) {
|
||||
@Override
|
||||
public Long minValue() {
|
||||
return Long.MIN_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Long maxValue() {
|
||||
return Long.MAX_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Long nextUp(Object value) {
|
||||
return ((Number)value).longValue() + 1;
|
||||
}
|
||||
@Override
|
||||
public Long nextDown(Object value) {
|
||||
return ((Number)value).longValue() - 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<Range> ranges) throws IOException {
|
||||
return BinaryRangeUtil.encodeLongRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, QueryType queryType, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
if (includeFrom == false) {
|
||||
from = nextUp(from);
|
||||
}
|
||||
|
||||
if (includeTo == false) {
|
||||
to = nextDown(to);
|
||||
}
|
||||
|
||||
byte[] encodedFrom = BinaryRangeUtil.encodeLong(((Number) from).longValue());
|
||||
byte[] encodedTo = BinaryRangeUtil.encodeLong(((Number) to).longValue());
|
||||
return new BinaryDocValuesRangeQuery(field, queryType, BinaryDocValuesRangeQuery.LengthType.VARIABLE,
|
||||
new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field getRangeField(String name, Range r) {
|
||||
return new LongRange(name, new long[] {((Number)r.from).longValue()},
|
||||
new long[] {((Number)r.to).longValue()});
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
|
||||
(f, t) -> LongRange.newWithinQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
|
||||
(f, t) -> LongRange.newContainsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
|
||||
(f, t) -> LongRange.newIntersectsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
|
||||
}
|
||||
};
|
||||
|
||||
RangeType(String name) {
|
||||
this.name = name;
|
||||
this.numberType = null;
|
||||
}
|
||||
|
||||
RangeType(String name, NumberType type) {
|
||||
this.name = name;
|
||||
this.numberType = type;
|
||||
}
|
||||
|
||||
/** Get the associated type name. */
|
||||
public final String typeName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal helper to create the actual {@link Query} using the provided supplier function. Before creating the query we check if
|
||||
* the intervals min > max, in which case an {@link IllegalArgumentException} is raised. The method adapts the interval bounds
|
||||
* based on whether the edges should be included or excluded. In case where after this correction the interval would be empty
|
||||
* because min > max, we simply return a {@link MatchNoDocsQuery}.
|
||||
* This helper handles all {@link Number} cases and dates, the IP range type uses its own logic.
|
||||
*/
|
||||
private static <T extends Comparable<T>> Query createQuery(String field, T from, T to, boolean includeFrom, boolean includeTo,
|
||||
BiFunction<T, T, Query> querySupplier, RangeType rangeType) {
|
||||
if (from.compareTo(to) > 0) {
|
||||
// wrong argument order, this is an error the user should fix
|
||||
throw new IllegalArgumentException("Range query `from` value (" + from + ") is greater than `to` value (" + to + ")");
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
T correctedFrom = includeFrom ? from : (T) rangeType.nextUp(from);
|
||||
@SuppressWarnings("unchecked")
|
||||
T correctedTo = includeTo ? to : (T) rangeType.nextDown(to);
|
||||
if (correctedFrom.compareTo(correctedTo) > 0) {
|
||||
return new MatchNoDocsQuery("range didn't intersect anything");
|
||||
} else {
|
||||
return querySupplier.apply(correctedFrom, correctedTo);
|
||||
}
|
||||
}
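A quick worked example of the bound correction above (mine, not from the commit): for an INTEGER range query with from = 5, to = 5 and includeFrom = false, correctedFrom becomes nextUp(5) = 6, which is greater than correctedTo = 5, so the method short-circuits to a MatchNoDocsQuery instead of building an IntRange query.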
|
||||
|
||||
public abstract Field getRangeField(String name, Range range);
|
||||
public List<IndexableField> createFields(ParseContext context, String name, Range range, boolean indexed,
|
||||
boolean docValued, boolean stored) {
|
||||
assert range != null : "range cannot be null when creating fields";
|
||||
List<IndexableField> fields = new ArrayList<>();
|
||||
if (indexed) {
|
||||
fields.add(getRangeField(name, range));
|
||||
}
|
||||
if (docValued) {
|
||||
BinaryRangesDocValuesField field = (BinaryRangesDocValuesField) context.doc().getByKey(name);
|
||||
if (field == null) {
|
||||
field = new BinaryRangesDocValuesField(name, range, this);
|
||||
context.doc().addWithKey(name, field);
|
||||
} else {
|
||||
field.add(range);
|
||||
}
|
||||
}
|
||||
if (stored) {
|
||||
fields.add(new StoredField(name, range.toString()));
|
||||
}
|
||||
return fields;
|
||||
}
|
||||
/** parses from value. rounds according to included flag */
|
||||
public Object parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException {
|
||||
Number value = numberType.parse(parser, coerce);
|
||||
return included ? value : (Number)nextUp(value);
|
||||
}
|
||||
/** parses to value. rounds according to included flag */
|
||||
public Object parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException {
|
||||
Number value = numberType.parse(parser, coerce);
|
||||
return included ? value : (Number)nextDown(value);
|
||||
}
|
||||
|
||||
public abstract Object minValue();
|
||||
public abstract Object maxValue();
|
||||
public abstract Object nextUp(Object value);
|
||||
public abstract Object nextDown(Object value);
|
||||
public abstract Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||
public abstract Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||
public abstract Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||
public Object parse(Object value, boolean coerce) {
|
||||
return numberType.parse(value, coerce);
|
||||
}
|
||||
public Query rangeQuery(String field, boolean hasDocValues, Object from, Object to, boolean includeFrom, boolean includeTo,
|
||||
ShapeRelation relation, @Nullable ZoneId timeZone, @Nullable DateMathParser dateMathParser,
|
||||
QueryShardContext context) {
|
||||
Object lower = from == null ? minValue() : parse(from, false);
|
||||
Object upper = to == null ? maxValue() : parse(to, false);
|
||||
Query indexQuery;
|
||||
if (relation == ShapeRelation.WITHIN) {
|
||||
indexQuery = withinQuery(field, lower, upper, includeFrom, includeTo);
|
||||
} else if (relation == ShapeRelation.CONTAINS) {
|
||||
indexQuery = containsQuery(field, lower, upper, includeFrom, includeTo);
|
||||
} else {
|
||||
indexQuery = intersectsQuery(field, lower, upper, includeFrom, includeTo);
|
||||
}
|
||||
if (hasDocValues) {
|
||||
final QueryType queryType;
|
||||
if (relation == ShapeRelation.WITHIN) {
|
||||
queryType = QueryType.WITHIN;
|
||||
} else if (relation == ShapeRelation.CONTAINS) {
|
||||
queryType = QueryType.CONTAINS;
|
||||
} else {
|
||||
queryType = QueryType.INTERSECTS;
|
||||
}
|
||||
Query dvQuery = dvRangeQuery(field, queryType, lower, upper, includeFrom, includeTo);
|
||||
return new IndexOrDocValuesQuery(indexQuery, dvQuery);
|
||||
} else {
|
||||
return indexQuery;
|
||||
}
|
||||
}
|
||||
|
||||
// No need to take into account Range#includeFrom or Range#includeTo, because from and to have already been
|
||||
// rounded up via parseFrom and parseTo methods.
|
||||
public abstract BytesRef encodeRanges(Set<Range> ranges) throws IOException;
|
||||
|
||||
public abstract Query dvRangeQuery(String field, QueryType queryType, Object from, Object to,
|
||||
boolean includeFrom, boolean includeTo);
|
||||
|
||||
public final String name;
|
||||
private final NumberType numberType;
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
/** Class defining a range */
|
||||
public static class Range {
|
||||
RangeType type;
|
||||
|
@ -1005,6 +445,27 @@ public class RangeFieldMapper extends FieldMapper {
|
|||
this.includeTo = includeTo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
Range range = (Range) o;
|
||||
return includeFrom == range.includeFrom &&
|
||||
includeTo == range.includeTo &&
|
||||
type == range.type &&
|
||||
from.equals(range.from) &&
|
||||
to.equals(range.to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(type, from, to, includeFrom, includeTo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
|
@ -1017,6 +478,14 @@ public class RangeFieldMapper extends FieldMapper {
|
|||
sb.append(includeTo ? ']' : ')');
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
public Object getFrom() {
|
||||
return from;
|
||||
}
|
||||
|
||||
public Object getTo() {
|
||||
return to;
|
||||
}
|
||||
}
|
||||
|
||||
static class BinaryRangesDocValuesField extends CustomDocValuesField {
|
||||
|
|
|
@ -0,0 +1,715 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANYDa
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.apache.lucene.document.DoubleRange;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.FloatRange;
|
||||
import org.apache.lucene.document.InetAddressPoint;
|
||||
import org.apache.lucene.document.InetAddressRange;
|
||||
import org.apache.lucene.document.IntRange;
|
||||
import org.apache.lucene.document.LongRange;
|
||||
import org.apache.lucene.document.StoredField;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.apache.lucene.queries.BinaryDocValuesRangeQuery;
|
||||
import org.apache.lucene.search.IndexOrDocValuesQuery;
|
||||
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.FutureArrays;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.geo.ShapeRelation;
|
||||
import org.elasticsearch.common.network.InetAddresses;
|
||||
import org.elasticsearch.common.time.DateMathParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetAddress;
|
||||
import java.time.ZoneId;
|
||||
import java.time.ZoneOffset;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
/** Enum defining the type of range */
|
||||
public enum RangeType {
|
||||
IP("ip_range", LengthType.FIXED_16) {
|
||||
@Override
|
||||
public Field getRangeField(String name, RangeFieldMapper.Range r) {
|
||||
return new InetAddressRange(name, (InetAddress)r.from, (InetAddress)r.to);
|
||||
}
|
||||
@Override
|
||||
public InetAddress parseFrom(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||
throws IOException {
|
||||
InetAddress address = InetAddresses.forString(parser.text());
|
||||
return included ? address : nextUp(address);
|
||||
}
|
||||
@Override
|
||||
public InetAddress parseTo(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||
throws IOException {
|
||||
InetAddress address = InetAddresses.forString(parser.text());
|
||||
return included ? address : nextDown(address);
|
||||
}
|
||||
@Override
|
||||
public InetAddress parse(Object value, boolean coerce) {
|
||||
if (value instanceof InetAddress) {
|
||||
return (InetAddress) value;
|
||||
} else {
|
||||
if (value instanceof BytesRef) {
|
||||
value = ((BytesRef) value).utf8ToString();
|
||||
}
|
||||
return InetAddresses.forString(value.toString());
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public InetAddress minValue() {
|
||||
return InetAddressPoint.MIN_VALUE;
|
||||
}
|
||||
@Override
|
||||
public InetAddress maxValue() {
|
||||
return InetAddressPoint.MAX_VALUE;
|
||||
}
|
||||
@Override
|
||||
public InetAddress nextUp(Object value) {
|
||||
return InetAddressPoint.nextUp((InetAddress)value);
|
||||
}
|
||||
@Override
|
||||
public InetAddress nextDown(Object value) {
|
||||
return InetAddressPoint.nextDown((InetAddress)value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
|
||||
return BinaryRangeUtil.encodeIPRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
|
||||
// TODO: Implement this.
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Double doubleValue (Object endpointValue) {
|
||||
throw new UnsupportedOperationException("IP ranges cannot be safely converted to doubles");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom,
|
||||
boolean includeTo) {
|
||||
if (includeFrom == false) {
|
||||
from = nextUp(from);
|
||||
}
|
||||
|
||||
if (includeTo == false) {
|
||||
to = nextDown(to);
|
||||
}
|
||||
|
||||
byte[] encodedFrom = InetAddressPoint.encode((InetAddress) from);
|
||||
byte[] encodedTo = InetAddressPoint.encode((InetAddress) to);
|
||||
return new BinaryDocValuesRangeQuery(field, queryType, LengthType.FIXED_16,
|
||||
new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, from, to, includeFrom, includeTo,
|
||||
(f, t) -> InetAddressRange.newWithinQuery(field, f, t));
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, from, to, includeFrom, includeTo,
|
||||
(f, t) -> InetAddressRange.newContainsQuery(field, f, t ));
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, from, to, includeFrom, includeTo,
|
||||
(f, t) -> InetAddressRange.newIntersectsQuery(field, f ,t ));
|
||||
}
|
||||
|
||||
private Query createQuery(String field, Object lower, Object upper, boolean includeLower, boolean includeUpper,
|
||||
BiFunction<InetAddress, InetAddress, Query> querySupplier) {
|
||||
byte[] lowerBytes = InetAddressPoint.encode((InetAddress) lower);
|
||||
byte[] upperBytes = InetAddressPoint.encode((InetAddress) upper);
|
||||
if (FutureArrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) {
|
||||
throw new IllegalArgumentException(
|
||||
"Range query `from` value (" + lower + ") is greater than `to` value (" + upper + ")");
|
||||
}
|
||||
InetAddress correctedFrom = includeLower ? (InetAddress) lower : nextUp(lower);
|
||||
InetAddress correctedTo = includeUpper ? (InetAddress) upper : nextDown(upper);;
|
||||
lowerBytes = InetAddressPoint.encode(correctedFrom);
|
||||
upperBytes = InetAddressPoint.encode(correctedTo);
|
||||
if (FutureArrays.compareUnsigned(lowerBytes, 0, lowerBytes.length, upperBytes, 0, upperBytes.length) > 0) {
|
||||
return new MatchNoDocsQuery("float range didn't intersect anything");
|
||||
} else {
|
||||
return querySupplier.apply(correctedFrom, correctedTo);
|
||||
}
|
||||
}
|
||||
},
|
||||
DATE("date_range", LengthType.VARIABLE, NumberFieldMapper.NumberType.LONG) {
|
||||
@Override
|
||||
public Field getRangeField(String name, RangeFieldMapper.Range r) {
|
||||
return new LongRange(name, new long[] {((Number)r.from).longValue()}, new long[] {((Number)r.to).longValue()});
|
||||
}
|
||||
private Number parse(DateMathParser dateMathParser, String dateStr) {
|
||||
return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");})
|
||||
.toEpochMilli();
|
||||
}
|
||||
@Override
|
||||
public Number parseFrom(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||
throws IOException {
|
||||
Number value = parse(fieldType.dateMathParser, parser.text());
|
||||
return included ? value : nextUp(value);
|
||||
}
|
||||
@Override
|
||||
public Number parseTo(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||
throws IOException{
|
||||
Number value = parse(fieldType.dateMathParser, parser.text());
|
||||
return included ? value : nextDown(value);
|
||||
}
|
||||
@Override
|
||||
public Long minValue() {
|
||||
return Long.MIN_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Long maxValue() {
|
||||
return Long.MAX_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Long nextUp(Object value) {
|
||||
return (long) LONG.nextUp(value);
|
||||
}
|
||||
@Override
|
||||
public Long nextDown(Object value) {
|
||||
return (long) LONG.nextDown(value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
|
||||
return LONG.encodeRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
|
||||
return LONG.decodeRanges(bytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Double doubleValue (Object endpointValue) {
|
||||
return LONG.doubleValue(endpointValue);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom,
|
||||
boolean includeTo) {
|
||||
return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query rangeQuery(String field, boolean hasDocValues, Object lowerTerm, Object upperTerm, boolean includeLower,
|
||||
boolean includeUpper, ShapeRelation relation, @Nullable ZoneId timeZone,
|
||||
@Nullable DateMathParser parser, QueryShardContext context) {
|
||||
ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone;
|
||||
|
||||
DateMathParser dateMathParser = (parser == null) ?
|
||||
DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser;
|
||||
Long low = lowerTerm == null ? Long.MIN_VALUE :
|
||||
dateMathParser.parse(lowerTerm instanceof BytesRef ? ((BytesRef) lowerTerm).utf8ToString() : lowerTerm.toString(),
|
||||
context::nowInMillis, false, zone).toEpochMilli();
|
||||
Long high = upperTerm == null ? Long.MAX_VALUE :
|
||||
dateMathParser.parse(upperTerm instanceof BytesRef ? ((BytesRef) upperTerm).utf8ToString() : upperTerm.toString(),
|
||||
context::nowInMillis, false, zone).toEpochMilli();
|
||||
|
||||
return super.rangeQuery(field, hasDocValues, low, high, includeLower, includeUpper, relation, zone,
|
||||
dateMathParser, context);
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||
return LONG.withinQuery(field, from, to, includeLower, includeUpper);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||
return LONG.containsQuery(field, from, to, includeLower, includeUpper);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||
return LONG.intersectsQuery(field, from, to, includeLower, includeUpper);
|
||||
}
|
||||
},
|
||||
// todo support half_float
|
||||
FLOAT("float_range", LengthType.FIXED_4, NumberFieldMapper.NumberType.FLOAT) {
|
||||
@Override
|
||||
public Float minValue() {
|
||||
return Float.NEGATIVE_INFINITY;
|
||||
}
|
||||
@Override
|
||||
public Float maxValue() {
|
||||
return Float.POSITIVE_INFINITY;
|
||||
}
|
||||
@Override
|
||||
public Float nextUp(Object value) {
|
||||
return Math.nextUp(((Number)value).floatValue());
|
||||
}
|
||||
@Override
|
||||
public Float nextDown(Object value) {
|
||||
return Math.nextDown(((Number)value).floatValue());
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
|
||||
return BinaryRangeUtil.encodeFloatRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
|
||||
return BinaryRangeUtil.decodeFloatRanges(bytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Double doubleValue(Object endpointValue) {
|
||||
assert endpointValue instanceof Float;
|
||||
return ((Float) endpointValue).doubleValue();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom,
|
||||
boolean includeTo) {
|
||||
if (includeFrom == false) {
|
||||
from = nextUp(from);
|
||||
}
|
||||
|
||||
if (includeTo == false) {
|
||||
to = nextDown(to);
|
||||
}
|
||||
|
||||
byte[] encodedFrom = BinaryRangeUtil.encodeFloat((Float) from);
|
||||
byte[] encodedTo = BinaryRangeUtil.encodeFloat((Float) to);
|
||||
return new BinaryDocValuesRangeQuery(field, queryType, LengthType.FIXED_4,
|
||||
new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field getRangeField(String name, RangeFieldMapper.Range r) {
|
||||
return new FloatRange(name, new float[] {((Number)r.from).floatValue()}, new float[] {((Number)r.to).floatValue()});
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
|
||||
(f, t) -> FloatRange.newWithinQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
|
||||
(f, t) -> FloatRange.newContainsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Float) from, (Float) to, includeFrom, includeTo,
|
||||
(f, t) -> FloatRange.newIntersectsQuery(field, new float[] { f }, new float[] { t }), RangeType.FLOAT);
|
||||
}
|
||||
},
|
||||
DOUBLE("double_range", LengthType.FIXED_8, NumberFieldMapper.NumberType.DOUBLE) {
|
||||
@Override
|
||||
public Double minValue() {
|
||||
return Double.NEGATIVE_INFINITY;
|
||||
}
|
||||
@Override
|
||||
public Double maxValue() {
|
||||
return Double.POSITIVE_INFINITY;
|
||||
}
|
||||
@Override
|
||||
public Double nextUp(Object value) {
|
||||
return Math.nextUp(((Number)value).doubleValue());
|
||||
}
|
||||
@Override
|
||||
public Double nextDown(Object value) {
|
||||
return Math.nextDown(((Number)value).doubleValue());
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
|
||||
return BinaryRangeUtil.encodeDoubleRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
|
||||
return BinaryRangeUtil.decodeDoubleRanges(bytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Double doubleValue(Object endpointValue) {
|
||||
assert endpointValue instanceof Double;
|
||||
return (Double) endpointValue;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom,
|
||||
boolean includeTo) {
|
||||
if (includeFrom == false) {
|
||||
from = nextUp(from);
|
||||
}
|
||||
|
||||
if (includeTo == false) {
|
||||
to = nextDown(to);
|
||||
}
|
||||
|
||||
byte[] encodedFrom = BinaryRangeUtil.encodeDouble((Double) from);
|
||||
byte[] encodedTo = BinaryRangeUtil.encodeDouble((Double) to);
|
||||
return new BinaryDocValuesRangeQuery(field, queryType, LengthType.FIXED_8,
|
||||
new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field getRangeField(String name, RangeFieldMapper.Range r) {
|
||||
return new DoubleRange(name, new double[] {((Number)r.from).doubleValue()}, new double[] {((Number)r.to).doubleValue()});
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
|
||||
(f, t) -> DoubleRange.newWithinQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
|
||||
(f, t) -> DoubleRange.newContainsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Double) from, (Double) to, includeFrom, includeTo,
|
||||
(f, t) -> DoubleRange.newIntersectsQuery(field, new double[] { f }, new double[] { t }), RangeType.DOUBLE);
|
||||
}
|
||||
|
||||
},
|
||||
// todo add BYTE support
|
||||
// todo add SHORT support
|
||||
INTEGER("integer_range", LengthType.VARIABLE, NumberFieldMapper.NumberType.INTEGER) {
|
||||
@Override
|
||||
public Integer minValue() {
|
||||
return Integer.MIN_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Integer maxValue() {
|
||||
return Integer.MAX_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Integer nextUp(Object value) {
|
||||
return ((Number)value).intValue() + 1;
|
||||
}
|
||||
@Override
|
||||
public Integer nextDown(Object value) {
|
||||
return ((Number)value).intValue() - 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
|
||||
return LONG.encodeRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
|
||||
return LONG.decodeRanges(bytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Double doubleValue(Object endpointValue) {
|
||||
return LONG.doubleValue(endpointValue);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom,
|
||||
boolean includeTo) {
|
||||
return LONG.dvRangeQuery(field, queryType, from, to, includeFrom, includeTo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field getRangeField(String name, RangeFieldMapper.Range r) {
|
||||
return new IntRange(name, new int[] {((Number)r.from).intValue()}, new int[] {((Number)r.to).intValue()});
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo,
|
||||
(f, t) -> IntRange.newWithinQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo,
|
||||
(f, t) -> IntRange.newContainsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Integer) from, (Integer) to, includeFrom, includeTo,
|
||||
(f, t) -> IntRange.newIntersectsQuery(field, new int[] { f }, new int[] { t }), RangeType.INTEGER);
|
||||
}
|
||||
},
|
||||
LONG("long_range", LengthType.VARIABLE, NumberFieldMapper.NumberType.LONG) {
|
||||
@Override
|
||||
public Long minValue() {
|
||||
return Long.MIN_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Long maxValue() {
|
||||
return Long.MAX_VALUE;
|
||||
}
|
||||
@Override
|
||||
public Long nextUp(Object value) {
|
||||
return ((Number)value).longValue() + 1;
|
||||
}
|
||||
@Override
|
||||
public Long nextDown(Object value) {
|
||||
return ((Number)value).longValue() - 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef encodeRanges(Set<RangeFieldMapper.Range> ranges) throws IOException {
|
||||
return BinaryRangeUtil.encodeLongRanges(ranges);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes) {
|
||||
return BinaryRangeUtil.decodeLongRanges(bytes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Double doubleValue(Object endpointValue) {
|
||||
assert endpointValue instanceof Long;
|
||||
return ((Long) endpointValue).doubleValue();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to, boolean includeFrom,
|
||||
boolean includeTo) {
|
||||
if (includeFrom == false) {
|
||||
from = nextUp(from);
|
||||
}
|
||||
|
||||
if (includeTo == false) {
|
||||
to = nextDown(to);
|
||||
}
|
||||
|
||||
byte[] encodedFrom = BinaryRangeUtil.encodeLong(((Number) from).longValue());
|
||||
byte[] encodedTo = BinaryRangeUtil.encodeLong(((Number) to).longValue());
|
||||
return new BinaryDocValuesRangeQuery(field, queryType, LengthType.VARIABLE,
|
||||
new BytesRef(encodedFrom), new BytesRef(encodedTo), from, to);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field getRangeField(String name, RangeFieldMapper.Range r) {
|
||||
return new LongRange(name, new long[] {((Number)r.from).longValue()},
|
||||
new long[] {((Number)r.to).longValue()});
|
||||
}
|
||||
@Override
|
||||
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
|
||||
(f, t) -> LongRange.newWithinQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
|
||||
}
|
||||
@Override
|
||||
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
|
||||
(f, t) -> LongRange.newContainsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
|
||||
}
|
||||
@Override
|
||||
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||
return createQuery(field, (Long) from, (Long) to, includeFrom, includeTo,
|
||||
(f, t) -> LongRange.newIntersectsQuery(field, new long[] { f }, new long[] { t }), RangeType.LONG);
|
||||
}
|
||||
};
|
||||
|
||||
RangeType(String name, LengthType lengthType) {
|
||||
this.name = name;
|
||||
this.numberType = null;
|
||||
this.lengthType = lengthType;
|
||||
}
|
||||
|
||||
RangeType(String name, LengthType lengthType, NumberFieldMapper.NumberType type) {
|
||||
this.name = name;
|
||||
this.numberType = type;
|
||||
this.lengthType = lengthType;
|
||||
}
|
||||
|
||||
/** Get the associated type name. */
|
||||
public final String typeName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
 * Internal helper to create the actual {@link Query} using the provided supplier function. Before creating the query we check
 * whether the interval's min is greater than its max, in which case an {@link IllegalArgumentException} is raised. The method
 * adapts the interval bounds based on whether the edges should be included or excluded. If, after this correction, the interval
 * is empty because min > max, we simply return a {@link MatchNoDocsQuery}.
 * This helper handles all {@link Number} cases and dates; the IP range type uses its own logic.
 */
private static <T extends Comparable<T>> Query createQuery(String field, T from, T to, boolean includeFrom, boolean includeTo,
                                                            BiFunction<T, T, Query> querySupplier, RangeType rangeType) {
    if (from.compareTo(to) > 0) {
        // wrong argument order, this is an error the user should fix
        throw new IllegalArgumentException("Range query `from` value (" + from + ") is greater than `to` value (" + to + ")");
    }

    @SuppressWarnings("unchecked")
    T correctedFrom = includeFrom ? from : (T) rangeType.nextUp(from);
    @SuppressWarnings("unchecked")
    T correctedTo = includeTo ? to : (T) rangeType.nextDown(to);
    if (correctedFrom.compareTo(correctedTo) > 0) {
        return new MatchNoDocsQuery("range didn't intersect anything");
    } else {
        return querySupplier.apply(correctedFrom, correctedTo);
    }
}
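// Illustrative trace (editorial, not part of this diff): for an integer range, createQuery(field, 5, 10, false, false, ...)
// corrects the bounds to [6, 9] before calling the query supplier, while createQuery(field, 5, 6, false, false, ...)
// corrects them to [6, 5] and therefore returns a MatchNoDocsQuery.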
|
||||
|
||||
public abstract Field getRangeField(String name, RangeFieldMapper.Range range);
|
||||
public List<IndexableField> createFields(ParseContext context, String name, RangeFieldMapper.Range range, boolean indexed,
|
||||
boolean docValued, boolean stored) {
|
||||
assert range != null : "range cannot be null when creating fields";
|
||||
List<IndexableField> fields = new ArrayList<>();
|
||||
if (indexed) {
|
||||
fields.add(getRangeField(name, range));
|
||||
}
|
||||
if (docValued) {
|
||||
RangeFieldMapper.BinaryRangesDocValuesField field = (RangeFieldMapper.BinaryRangesDocValuesField) context.doc().getByKey(name);
|
||||
if (field == null) {
|
||||
field = new RangeFieldMapper.BinaryRangesDocValuesField(name, range, this);
|
||||
context.doc().addWithKey(name, field);
|
||||
} else {
|
||||
field.add(range);
|
||||
}
|
||||
}
|
||||
if (stored) {
|
||||
fields.add(new StoredField(name, range.toString()));
|
||||
}
|
||||
return fields;
|
||||
}
|
||||
/** parses from value. rounds according to included flag */
|
||||
public Object parseFrom(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce,
|
||||
boolean included) throws IOException {
|
||||
Number value = numberType.parse(parser, coerce);
|
||||
return included ? value : (Number)nextUp(value);
|
||||
}
|
||||
/** parses to value. rounds according to included flag */
|
||||
public Object parseTo(RangeFieldMapper.RangeFieldType fieldType, XContentParser parser, boolean coerce,
|
||||
boolean included) throws IOException {
|
||||
Number value = numberType.parse(parser, coerce);
|
||||
return included ? value : (Number)nextDown(value);
|
||||
}
|
||||
|
||||
public abstract Object minValue();
|
||||
public abstract Object maxValue();
|
||||
public abstract Object nextUp(Object value);
|
||||
public abstract Object nextDown(Object value);
|
||||
public abstract Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||
public abstract Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||
public abstract Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||
public Object parse(Object value, boolean coerce) {
|
||||
return numberType.parse(value, coerce);
|
||||
}
|
||||
public Query rangeQuery(String field, boolean hasDocValues, Object from, Object to, boolean includeFrom, boolean includeTo,
                        ShapeRelation relation, @Nullable ZoneId timeZone, @Nullable DateMathParser dateMathParser,
                        QueryShardContext context) {
    Object lower = from == null ? minValue() : parse(from, false);
    Object upper = to == null ? maxValue() : parse(to, false);
    Query indexQuery;
    if (relation == ShapeRelation.WITHIN) {
        indexQuery = withinQuery(field, lower, upper, includeFrom, includeTo);
    } else if (relation == ShapeRelation.CONTAINS) {
        indexQuery = containsQuery(field, lower, upper, includeFrom, includeTo);
    } else {
        indexQuery = intersectsQuery(field, lower, upper, includeFrom, includeTo);
    }
    if (hasDocValues) {
        final BinaryDocValuesRangeQuery.QueryType queryType;
        if (relation == ShapeRelation.WITHIN) {
            queryType = BinaryDocValuesRangeQuery.QueryType.WITHIN;
        } else if (relation == ShapeRelation.CONTAINS) {
            queryType = BinaryDocValuesRangeQuery.QueryType.CONTAINS;
        } else {
            queryType = BinaryDocValuesRangeQuery.QueryType.INTERSECTS;
        }
        Query dvQuery = dvRangeQuery(field, queryType, lower, upper, includeFrom, includeTo);
        return new IndexOrDocValuesQuery(indexQuery, dvQuery);
    } else {
        return indexQuery;
    }
}
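// Note (editorial, not part of this diff): wrapping the points-based query and the doc-values query in an
// IndexOrDocValuesQuery lets Lucene choose per segment: the index query drives iteration when this clause leads,
// and the cheaper doc-values verification is used when another clause is more selective.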
|
||||
|
||||
// No need to take Range#includeFrom or Range#includeTo into account here, because from and to have already been
// adjusted (rounded up or down, respectively) by the parseFrom and parseTo methods.
public abstract BytesRef encodeRanges(Set<RangeFieldMapper.Range> ranges) throws IOException;
public abstract List<RangeFieldMapper.Range> decodeRanges(BytesRef bytes);
|
||||
|
||||
/**
|
||||
* Given the Range.to or Range.from Object value from a Range instance, converts that value into a Double. Before converting, it
|
||||
* asserts that the object is of the expected type. Operation is not supported on IP ranges (because of loss of precision)
|
||||
*
|
||||
* @param endpointValue Object value for Range.to or Range.from
|
||||
* @return endpointValue as a Double
|
||||
*/
|
||||
public abstract Double doubleValue(Object endpointValue);
|
||||
|
||||
public boolean isNumeric() {
|
||||
return numberType != null;
|
||||
}
|
||||
|
||||
public abstract Query dvRangeQuery(String field, BinaryDocValuesRangeQuery.QueryType queryType, Object from, Object to,
|
||||
boolean includeFrom, boolean includeTo);
|
||||
|
||||
public final String name;
|
||||
private final NumberFieldMapper.NumberType numberType;
|
||||
public final LengthType lengthType;
|
||||
|
||||
public enum LengthType {
|
||||
FIXED_4 {
|
||||
@Override
|
||||
public int readLength(byte[] bytes, int offset) {
|
||||
return 4;
|
||||
}
|
||||
},
|
||||
FIXED_8 {
|
||||
@Override
|
||||
public int readLength(byte[] bytes, int offset) {
|
||||
return 8;
|
||||
}
|
||||
},
|
||||
FIXED_16 {
|
||||
@Override
|
||||
public int readLength(byte[] bytes, int offset) {
|
||||
return 16;
|
||||
}
|
||||
},
|
||||
VARIABLE {
|
||||
@Override
|
||||
public int readLength(byte[] bytes, int offset) {
|
||||
// the first bit encodes the sign and the next 4 bits encode the number
|
||||
// of additional bytes
|
||||
int token = Byte.toUnsignedInt(bytes[offset]);
|
||||
int length = (token >>> 3) & 0x0f;
|
||||
if ((token & 0x80) == 0) {
|
||||
length = 0x0f - length;
|
||||
}
|
||||
return 1 + length;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Return the length of the value that starts at {@code offset} in {@code bytes}.
|
||||
*/
|
||||
public abstract int readLength(byte[] bytes, int offset);
|
||||
}
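// Worked example for LengthType.VARIABLE (editorial, not part of this diff): a leading byte of 0x88 has the sign
// bit (0x80) set, so the payload length is (0x88 >>> 3) & 0x0f = 1 and readLength returns 2 total bytes; a leading
// byte of 0x10 has the sign bit clear, so the four length bits are complemented (0x0f - 2 = 13) and readLength
// returns 14 total bytes.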
|
||||
}
|
|
@ -50,6 +50,7 @@ import org.elasticsearch.index.mapper.MetadataFieldMapper;
|
|||
import org.elasticsearch.index.mapper.NumberFieldMapper;
|
||||
import org.elasticsearch.index.mapper.ObjectMapper;
|
||||
import org.elasticsearch.index.mapper.RangeFieldMapper;
|
||||
import org.elasticsearch.index.mapper.RangeType;
|
||||
import org.elasticsearch.index.mapper.RoutingFieldMapper;
|
||||
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
|
||||
import org.elasticsearch.index.mapper.SourceFieldMapper;
|
||||
|
@ -118,7 +119,7 @@ public class IndicesModule extends AbstractModule {
|
|||
for (NumberFieldMapper.NumberType type : NumberFieldMapper.NumberType.values()) {
|
||||
mappers.put(type.typeName(), new NumberFieldMapper.TypeParser(type));
|
||||
}
|
||||
for (RangeFieldMapper.RangeType type : RangeFieldMapper.RangeType.values()) {
|
||||
for (RangeType type : RangeType.values()) {
|
||||
mappers.put(type.typeName(), new RangeFieldMapper.TypeParser(type));
|
||||
}
|
||||
mappers.put(BooleanFieldMapper.CONTENT_TYPE, new BooleanFieldMapper.TypeParser());
|
||||
|
|
|
@ -37,6 +37,7 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData;
|
|||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType.Relation;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.search.aggregations.AggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
|
@ -46,7 +47,6 @@ import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder;
|
|||
import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValueType;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
||||
|
@ -69,7 +69,7 @@ import static java.util.Collections.unmodifiableMap;
|
|||
/**
|
||||
* A builder for histograms on date fields.
|
||||
*/
|
||||
public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, DateHistogramAggregationBuilder>
|
||||
public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource, DateHistogramAggregationBuilder>
|
||||
implements MultiBucketAggregationBuilder, DateIntervalConsumer {
|
||||
|
||||
public static final String NAME = "date_histogram";
|
||||
|
@ -101,7 +101,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil
|
|||
private static final ObjectParser<DateHistogramAggregationBuilder, Void> PARSER;
|
||||
static {
|
||||
PARSER = new ObjectParser<>(DateHistogramAggregationBuilder.NAME);
|
||||
ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, true);
|
||||
ValuesSourceParserHelper.declareAnyFields(PARSER, true, true, true);
|
||||
|
||||
DateIntervalWrapper.declareIntervalFields(PARSER);
|
||||
|
||||
|
@ -137,7 +137,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil
|
|||
|
||||
/** Create a new builder with the given name. */
|
||||
public DateHistogramAggregationBuilder(String name) {
|
||||
super(name, ValuesSourceType.NUMERIC, ValueType.DATE);
|
||||
super(name, ValuesSourceType.ANY, ValueType.DATE);
|
||||
}
|
||||
|
||||
protected DateHistogramAggregationBuilder(DateHistogramAggregationBuilder clone,
|
||||
|
@ -158,7 +158,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil
|
|||
|
||||
/** Read from a stream, for internal use only. */
|
||||
public DateHistogramAggregationBuilder(StreamInput in) throws IOException {
|
||||
super(in, ValuesSourceType.NUMERIC, ValueType.DATE);
|
||||
super(in, ValuesSourceType.ANY, ValueType.DATE);
|
||||
order = InternalOrder.Streams.readHistogramOrder(in, true);
|
||||
keyed = in.readBoolean();
|
||||
minDocCount = in.readVLong();
|
||||
|
@ -167,6 +167,13 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil
|
|||
extendedBounds = in.readOptionalWriteable(ExtendedBounds::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ValuesSourceType resolveScriptAny(Script script) {
|
||||
// TODO: No idea how we'd support Range scripts here.
|
||||
return ValuesSourceType.NUMERIC;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected void innerWriteTo(StreamOutput out) throws IOException {
|
||||
InternalOrder.Streams.writeHistogramOrder(order, out, true);
|
||||
|
@ -484,7 +491,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil
|
|||
}
|
||||
|
||||
@Override
|
||||
protected ValuesSourceAggregatorFactory<Numeric> innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config,
|
||||
protected ValuesSourceAggregatorFactory<ValuesSource> innerBuild(SearchContext context, ValuesSourceConfig<ValuesSource> config,
|
||||
AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException {
|
||||
final ZoneId tz = timeZone();
|
||||
final Rounding rounding = dateHistogramInterval.createRounding(tz);
|
||||
|
|
|
@ -20,13 +20,13 @@
|
|||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.elasticsearch.common.Rounding;
|
||||
import org.elasticsearch.index.mapper.RangeType;
|
||||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.BucketOrder;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
@ -36,7 +36,7 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
public final class DateHistogramAggregatorFactory
|
||||
extends ValuesSourceAggregatorFactory<ValuesSource.Numeric> {
|
||||
extends ValuesSourceAggregatorFactory<ValuesSource> {
|
||||
|
||||
private final long offset;
|
||||
private final BucketOrder order;
|
||||
|
@ -46,7 +46,7 @@ public final class DateHistogramAggregatorFactory
|
|||
private final Rounding rounding;
|
||||
private final Rounding shardRounding;
|
||||
|
||||
public DateHistogramAggregatorFactory(String name, ValuesSourceConfig<Numeric> config,
|
||||
public DateHistogramAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config,
|
||||
long offset, BucketOrder order, boolean keyed, long minDocCount,
|
||||
Rounding rounding, Rounding shardRounding, ExtendedBounds extendedBounds, SearchContext context,
|
||||
AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder,
|
||||
|
@ -66,12 +66,34 @@ public final class DateHistogramAggregatorFactory
|
|||
}
|
||||
|
||||
@Override
protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
protected ValuesSource resolveMissingAny(Object missing) {
    if (missing instanceof Number) {
        return ValuesSource.Numeric.EMPTY;
    }
    throw new IllegalArgumentException("Only numeric missing values are supported for date histogram aggregation, found ["
        + missing + "]");
}

@Override
protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
                                      List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    if (collectsFromSingleBucket == false) {
        return asMultiBucketAggregator(this, context, parent);
    }
    return createAggregator(valuesSource, parent, pipelineAggregators, metaData);
    if (valuesSource instanceof ValuesSource.Numeric) {
        return createAggregator((ValuesSource.Numeric) valuesSource, parent, pipelineAggregators, metaData);
    } else if (valuesSource instanceof ValuesSource.Range) {
        ValuesSource.Range rangeValueSource = (ValuesSource.Range) valuesSource;
        if (rangeValueSource.rangeType() != RangeType.DATE) {
            throw new IllegalArgumentException("Expected date range type but found range type [" + rangeValueSource.rangeType().name
                + "]");
        }
        return createRangeAggregator((ValuesSource.Range) valuesSource, parent, pipelineAggregators, metaData);
    }
    else {
        throw new IllegalArgumentException("Expected one of [Date, Range] values source, found ["
            + valuesSource.toString() + "]");
    }
}
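// Routing summary (editorial, assuming the mapping types described in this change): a `date` field resolves to a
// ValuesSource.Numeric and keeps the existing numeric date histogram aggregator, a `date_range` field resolves to a
// ValuesSource.Range and uses the new DateRangeHistogramAggregator, and any other range type (e.g. `long_range`)
// is rejected by the RangeType.DATE check above.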
|
||||
|
||||
private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
|
||||
|
@ -80,6 +102,13 @@ public final class DateHistogramAggregatorFactory
|
|||
valuesSource, config.format(), context, parent, pipelineAggregators, metaData);
|
||||
}
|
||||
|
||||
private Aggregator createRangeAggregator(ValuesSource.Range valuesSource, Aggregator parent,
|
||||
List<PipelineAggregator> pipelineAggregators,
|
||||
Map<String, Object> metaData) throws IOException {
|
||||
return new DateRangeHistogramAggregator(name, factories, rounding, shardRounding, offset, order, keyed, minDocCount, extendedBounds,
|
||||
valuesSource, config.format(), context, parent, pipelineAggregators, metaData);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
|
||||
throws IOException {
|
||||
|
|
|
@ -0,0 +1,195 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.search.ScoreMode;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Rounding;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.util.LongHash;
|
||||
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
|
||||
import org.elasticsearch.index.mapper.RangeFieldMapper;
|
||||
import org.elasticsearch.index.mapper.RangeType;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories;
|
||||
import org.elasticsearch.search.aggregations.BucketOrder;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.InternalOrder;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollector;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
|
||||
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
 * An aggregator for date range values. Each range endpoint is rounded down using a configured {@link Rounding}, and a
 * document is counted once in every bucket that its range overlaps.
 *
 * @see Rounding
 */
class DateRangeHistogramAggregator extends BucketsAggregator {
|
||||
|
||||
private final ValuesSource.Range valuesSource;
|
||||
private final DocValueFormat formatter;
|
||||
private final Rounding rounding;
|
||||
private final Rounding shardRounding;
|
||||
private final BucketOrder order;
|
||||
private final boolean keyed;
|
||||
|
||||
private final long minDocCount;
|
||||
private final ExtendedBounds extendedBounds;
|
||||
|
||||
private final LongHash bucketOrds;
|
||||
private long offset;
|
||||
|
||||
DateRangeHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, Rounding shardRounding,
|
||||
long offset, BucketOrder order, boolean keyed,
|
||||
long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Range valuesSource,
|
||||
DocValueFormat formatter, SearchContext aggregationContext,
|
||||
Aggregator parent, List<PipelineAggregator> pipelineAggregators,
|
||||
Map<String, Object> metaData) throws IOException {
|
||||
|
||||
super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
|
||||
this.rounding = rounding;
|
||||
this.shardRounding = shardRounding;
|
||||
this.offset = offset;
|
||||
this.order = InternalOrder.validate(order, this);
|
||||
this.keyed = keyed;
|
||||
this.minDocCount = minDocCount;
|
||||
this.extendedBounds = extendedBounds;
|
||||
this.valuesSource = valuesSource;
|
||||
this.formatter = formatter;
|
||||
|
||||
bucketOrds = new LongHash(1, aggregationContext.bigArrays());
|
||||
}
|
||||
|
||||
@Override
|
||||
public ScoreMode scoreMode() {
|
||||
if (valuesSource != null && valuesSource.needsScores()) {
|
||||
return ScoreMode.COMPLETE;
|
||||
}
|
||||
return super.scoreMode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
|
||||
final LeafBucketCollector sub) throws IOException {
|
||||
if (valuesSource == null) {
|
||||
return LeafBucketCollector.NO_OP_COLLECTOR;
|
||||
}
|
||||
final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
|
||||
final RangeType rangeType = valuesSource.rangeType();
|
||||
return new LeafBucketCollectorBase(sub, values) {
|
||||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
assert bucket == 0;
|
||||
if (values.advanceExact(doc)) {
|
||||
// Is it possible for valuesCount to be > 1 here? Multiple ranges are encoded into the same BytesRef in the binary doc
|
||||
// values, so it isn't clear what we'd be iterating over.
|
||||
final int valuesCount = values.docValueCount();
|
||||
assert valuesCount == 1 : "Value count for ranges should always be 1";
|
||||
long previousKey = Long.MIN_VALUE;
|
||||
|
||||
for (int i = 0; i < valuesCount; i++) {
|
||||
BytesRef encodedRanges = values.nextValue();
|
||||
List<RangeFieldMapper.Range> ranges = rangeType.decodeRanges(encodedRanges);
|
||||
long previousFrom = Long.MIN_VALUE;
|
||||
for (RangeFieldMapper.Range range : ranges) {
|
||||
final Long from = (Long) range.getFrom();
|
||||
// The encoding should ensure that this assert is always true.
|
||||
assert from >= previousFrom : "Start of range not >= previous start";
|
||||
final Long to = (Long) range.getTo();
|
||||
final long startKey = offsetAwareRounding(shardRounding, from, offset);
|
||||
final long endKey = offsetAwareRounding(shardRounding, to, offset);
|
||||
for (long key = startKey > previousKey ? startKey : previousKey; key <= endKey;
|
||||
key = shardRounding.nextRoundingValue(key)) {
|
||||
if (key == previousKey) {
|
||||
continue;
|
||||
}
|
||||
// Bucket collection identical to NumericHistogramAggregator, could be refactored
|
||||
long bucketOrd = bucketOrds.add(key);
|
||||
if (bucketOrd < 0) { // already seen
|
||||
bucketOrd = -1 - bucketOrd;
|
||||
collectExistingBucket(sub, doc, bucketOrd);
|
||||
} else {
|
||||
collectBucket(sub, doc, bucketOrd);
|
||||
}
|
||||
}
|
||||
if (endKey > previousKey) {
|
||||
previousKey = endKey;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
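// Illustrative trace (editorial, not part of this diff): with monthly rounding, a document whose range runs from
// Jan 15 to Mar 10 gets startKey = Jan and endKey = Mar, so it is counted once in the Jan, Feb and Mar buckets;
// previousKey prevents a second overlapping range on the same document from incrementing those buckets again.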
|
||||
|
||||
private long offsetAwareRounding(Rounding rounding, long value, long offset) {
|
||||
return rounding.round(value - offset) + offset;
|
||||
}
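// Example (editorial, not part of this diff): with day-level rounding and offset = +6h, a value at 2020-01-02T03:00
// is shifted to 2020-01-01T21:00, rounded to 2020-01-01T00:00, and shifted back to 2020-01-01T06:00, so buckets
// span 06:00 to 06:00 rather than midnight to midnight.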
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
|
||||
assert owningBucketOrdinal == 0;
|
||||
consumeBucketsAndMaybeBreak((int) bucketOrds.size());
|
||||
|
||||
List<InternalDateHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
|
||||
for (long i = 0; i < bucketOrds.size(); i++) {
|
||||
buckets.add(new InternalDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), keyed, formatter, bucketAggregations(i)));
|
||||
}
|
||||
|
||||
// the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
|
||||
CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this));
|
||||
|
||||
// value source will be null for unmapped fields
|
||||
// Important: use `rounding` here, not `shardRounding`
|
||||
InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
|
||||
? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
|
||||
: null;
|
||||
return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed,
|
||||
pipelineAggregators(), metaData());
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildEmptyAggregation() {
|
||||
InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
|
||||
? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
|
||||
: null;
|
||||
return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo, formatter, keyed,
|
||||
pipelineAggregators(), metaData());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doClose() {
|
||||
Releasables.close(bucketOrds);
|
||||
}
|
||||
}
|
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.search.aggregations.AggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
|
@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.InternalOrder.CompoundOrder;
|
|||
import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValueType;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
||||
|
@ -48,9 +48,10 @@ import java.util.Map;
|
|||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A builder for histograms on numeric fields.
|
||||
* A builder for histograms on numeric fields. This builder can operate on either base numeric fields, or numeric range fields. IP range
|
||||
* fields are unsupported, and will throw at the factory layer.
|
||||
*/
|
||||
public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, HistogramAggregationBuilder>
|
||||
public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource, HistogramAggregationBuilder>
|
||||
implements MultiBucketAggregationBuilder {
|
||||
public static final String NAME = "histogram";
|
||||
|
||||
|
@ -65,7 +66,7 @@ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<
|
|||
private static final ObjectParser<HistogramAggregationBuilder, Void> PARSER;
|
||||
static {
|
||||
PARSER = new ObjectParser<>(HistogramAggregationBuilder.NAME);
|
||||
ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, false);
|
||||
ValuesSourceParserHelper.declareAnyFields(PARSER, true, true);
|
||||
|
||||
PARSER.declareDouble(HistogramAggregationBuilder::interval, Histogram.INTERVAL_FIELD);
|
||||
|
||||
|
@ -95,9 +96,15 @@ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<
|
|||
private boolean keyed = false;
|
||||
private long minDocCount = 0;
|
||||
|
||||
@Override
|
||||
protected ValuesSourceType resolveScriptAny(Script script) {
|
||||
// TODO: No idea how we'd support Range scripts here.
|
||||
return ValuesSourceType.NUMERIC;
|
||||
}
|
||||
|
||||
/** Create a new builder with the given name. */
|
||||
public HistogramAggregationBuilder(String name) {
|
||||
super(name, ValuesSourceType.NUMERIC, ValueType.DOUBLE);
|
||||
super(name, ValuesSourceType.ANY, ValueType.NUMERIC);
|
||||
}
|
||||
|
||||
protected HistogramAggregationBuilder(HistogramAggregationBuilder clone, Builder factoriesBuilder, Map<String, Object> metaData) {
|
||||
|
@ -118,7 +125,7 @@ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<
|
|||
|
||||
/** Read from a stream, for internal use only. */
|
||||
public HistogramAggregationBuilder(StreamInput in) throws IOException {
|
||||
super(in, ValuesSourceType.NUMERIC, ValueType.DOUBLE);
|
||||
super(in, ValuesSourceType.ANY, ValueType.NUMERIC);
|
||||
order = InternalOrder.Streams.readHistogramOrder(in, true);
|
||||
keyed = in.readBoolean();
|
||||
minDocCount = in.readVLong();
|
||||
|
@ -295,7 +302,7 @@ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder<
|
|||
}
|
||||
|
||||
@Override
|
||||
protected ValuesSourceAggregatorFactory<Numeric> innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config,
|
||||
protected ValuesSourceAggregatorFactory<ValuesSource> innerBuild(SearchContext context, ValuesSourceConfig<ValuesSource> config,
|
||||
AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException {
|
||||
return new HistogramAggregatorFactory(name, config, interval, offset, order, keyed, minDocCount, minBound, maxBound,
|
||||
context, parent, subFactoriesBuilder, metaData);
|
||||
|
|
|
@ -22,10 +22,9 @@ package org.elasticsearch.search.aggregations.bucket.histogram;
|
|||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.BucketOrder;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
@ -34,7 +33,11 @@ import java.io.IOException;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource.Numeric> {
|
||||
/**
|
||||
* Constructs the per-shard aggregator instance for histogram aggregation. Selects the numeric or range field implementation based on the
|
||||
* field type.
|
||||
*/
|
||||
public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource> {
|
||||
|
||||
private final double interval, offset;
|
||||
private final BucketOrder order;
|
||||
|
@ -42,10 +45,19 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact
|
|||
private final long minDocCount;
|
||||
private final double minBound, maxBound;
|
||||
|
||||
public HistogramAggregatorFactory(String name, ValuesSourceConfig<Numeric> config, double interval, double offset,
|
||||
BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
|
||||
SearchContext context, AggregatorFactory parent,
|
||||
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
|
||||
@Override
|
||||
protected ValuesSource resolveMissingAny(Object missing) {
|
||||
if (missing instanceof Number) {
|
||||
return ValuesSource.Numeric.EMPTY;
|
||||
}
|
||||
throw new IllegalArgumentException("Only numeric missing values are supported for histogram aggregation, found ["
|
||||
+ missing + "]");
|
||||
}
|
||||
|
||||
public HistogramAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config, double interval, double offset,
|
||||
BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
|
||||
SearchContext context, AggregatorFactory parent,
|
||||
AggregatorFactories.Builder subFactoriesBuilder, Map<String, Object> metaData) throws IOException {
|
||||
super(name, config, context, parent, subFactoriesBuilder, metaData);
|
||||
this.interval = interval;
|
||||
this.offset = offset;
|
||||
|
@ -61,24 +73,34 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact
|
|||
}
|
||||
|
||||
@Override
protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket,
                                      List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    if (collectsFromSingleBucket == false) {
        return asMultiBucketAggregator(this, context, parent);
    }
    return createAggregator(valuesSource, parent, pipelineAggregators, metaData);
}

private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
                                    Map<String, Object> metaData) throws IOException {

    return new HistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, valuesSource,
        config.format(), context, parent, pipelineAggregators, metaData);
    if (valuesSource instanceof ValuesSource.Numeric) {
        return new NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound,
            (ValuesSource.Numeric) valuesSource, config.format(), context, parent, pipelineAggregators, metaData);
    } else if (valuesSource instanceof ValuesSource.Range) {
        ValuesSource.Range rangeValueSource = (ValuesSource.Range) valuesSource;
        if (rangeValueSource.rangeType().isNumeric() == false) {
            throw new IllegalArgumentException("Expected numeric range type but found non-numeric range ["
                + rangeValueSource.rangeType().name + "]");
        }
        return new RangeHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound,
            (ValuesSource.Range) valuesSource, config.format(), context, parent, pipelineAggregators,
            metaData);
    }
    else {
        throw new IllegalArgumentException("Expected one of [Numeric, Range] values source, found ["
            + valuesSource.toString() + "]");
    }
}
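// Routing summary (editorial, assuming the mapping types described in this change): plain numeric fields resolve to
// a ValuesSource.Numeric and use NumericHistogramAggregator, numeric range fields such as `long_range` or
// `double_range` resolve to a ValuesSource.Range and use RangeHistogramAggregator, and non-numeric ranges such as
// `ip_range` fail the isNumeric() check above.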
|
||||
|
||||
@Override
|
||||
protected Aggregator createUnmapped(Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
|
||||
throws IOException {
|
||||
return createAggregator(null, parent, pipelineAggregators, metaData);
|
||||
return new NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound,
|
||||
null, config.format(), context, parent, pipelineAggregators, metaData);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,7 +52,7 @@ import java.util.Map;
|
|||
* written as {@code interval * x + offset} and yet is less than or equal to
|
||||
* {@code value}.
|
||||
*/
|
||||
class HistogramAggregator extends BucketsAggregator {
|
||||
class NumericHistogramAggregator extends BucketsAggregator {
|
||||
|
||||
private final ValuesSource.Numeric valuesSource;
|
||||
private final DocValueFormat formatter;
|
||||
|
@ -64,11 +64,11 @@ class HistogramAggregator extends BucketsAggregator {
|
|||
|
||||
private final LongHash bucketOrds;
|
||||
|
||||
HistogramAggregator(String name, AggregatorFactories factories, double interval, double offset,
|
||||
BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
|
||||
@Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter,
|
||||
SearchContext context, Aggregator parent,
|
||||
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
||||
NumericHistogramAggregator(String name, AggregatorFactories factories, double interval, double offset,
|
||||
BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
|
||||
@Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter,
|
||||
SearchContext context, Aggregator parent,
|
||||
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
||||
|
||||
super(name, factories, context, parent, pipelineAggregators, metaData);
|
||||
if (interval <= 0) {
|
|
@ -0,0 +1,175 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.util.LongHash;
|
||||
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
|
||||
import org.elasticsearch.index.mapper.RangeFieldMapper;
|
||||
import org.elasticsearch.index.mapper.RangeType;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.aggregations.Aggregator;
|
||||
import org.elasticsearch.search.aggregations.AggregatorFactories;
|
||||
import org.elasticsearch.search.aggregations.BucketOrder;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.InternalOrder;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollector;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
|
||||
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSource;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public class RangeHistogramAggregator extends BucketsAggregator {
|
||||
private final ValuesSource.Range valuesSource;
|
||||
private final DocValueFormat formatter;
|
||||
private final double interval, offset;
|
||||
private final BucketOrder order;
|
||||
private final boolean keyed;
|
||||
private final long minDocCount;
|
||||
private final double minBound, maxBound;
|
||||
|
||||
private final LongHash bucketOrds;
|
||||
|
||||
RangeHistogramAggregator(String name, AggregatorFactories factories, double interval, double offset,
|
||||
BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound,
|
||||
@Nullable ValuesSource.Range valuesSource, DocValueFormat formatter,
|
||||
SearchContext context, Aggregator parent,
|
||||
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
||||
|
||||
super(name, factories, context, parent, pipelineAggregators, metaData);
|
||||
if (interval <= 0) {
|
||||
throw new IllegalArgumentException("interval must be positive, got: " + interval);
|
||||
}
|
||||
this.interval = interval;
|
||||
this.offset = offset;
|
||||
this.order = InternalOrder.validate(order, this);
|
||||
this.keyed = keyed;
|
||||
this.minDocCount = minDocCount;
|
||||
this.minBound = minBound;
|
||||
this.maxBound = maxBound;
|
||||
this.valuesSource = valuesSource;
|
||||
this.formatter = formatter;
|
||||
|
||||
bucketOrds = new LongHash(1, context.bigArrays());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
|
||||
if (valuesSource == null) {
|
||||
return LeafBucketCollector.NO_OP_COLLECTOR;
|
||||
}
|
||||
final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
|
||||
final RangeType rangeType = valuesSource.rangeType();
|
||||
return new LeafBucketCollectorBase(sub, values) {
|
||||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
assert bucket == 0;
|
||||
if (values.advanceExact(doc)) {
|
||||
// Is it possible for valuesCount to be > 1 here? Multiple ranges are encoded into the same BytesRef in the binary doc
|
||||
// values, so it isn't clear what we'd be iterating over.
|
||||
final int valuesCount = values.docValueCount();
|
||||
assert valuesCount == 1 : "Value count for ranges should always be 1";
|
||||
double previousKey = Double.NEGATIVE_INFINITY;
|
||||
|
||||
for (int i = 0; i < valuesCount; i++) {
|
||||
BytesRef encodedRanges = values.nextValue();
|
||||
List<RangeFieldMapper.Range> ranges = rangeType.decodeRanges(encodedRanges);
|
||||
double previousFrom = Double.NEGATIVE_INFINITY;
|
||||
for (RangeFieldMapper.Range range : ranges) {
|
||||
final Double from = rangeType.doubleValue(range.getFrom());
|
||||
// The encoding should ensure that this assert is always true.
|
||||
assert from >= previousFrom : "Start of range not >= previous start";
|
||||
final Double to = rangeType.doubleValue(range.getTo());
|
||||
final double startKey = Math.floor((from - offset) / interval);
|
||||
final double endKey = Math.floor((to - offset) / interval);
|
||||
for (double key = startKey > previousKey ? startKey : previousKey; key <= endKey; key++) {
|
||||
if (key == previousKey) {
|
||||
continue;
|
||||
}
|
||||
// Bucket collection identical to NumericHistogramAggregator, could be refactored
|
||||
long bucketOrd = bucketOrds.add(Double.doubleToLongBits(key));
|
||||
if (bucketOrd < 0) { // already seen
|
||||
bucketOrd = -1 - bucketOrd;
|
||||
collectExistingBucket(sub, doc, bucketOrd);
|
||||
} else {
|
||||
collectBucket(sub, doc, bucketOrd);
|
||||
}
|
||||
}
|
||||
if (endKey > previousKey) {
|
||||
previousKey = endKey;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
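// Illustrative trace (editorial, not part of this diff): with interval = 5 and offset = 0, a range of [3, 12]
// yields startKey = floor(3 / 5) = 0 and endKey = floor(12 / 5) = 2, so the document is counted once in the buckets
// keyed 0, 5 and 10 (key * interval + offset in buildAggregation).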
|
||||
|
||||
// TODO: buildAggregation and buildEmptyAggregation are literally just copied out of NumericHistogramAggregator. We could refactor
|
||||
// this to an abstract super class, if we wanted to. Might be overkill.
|
||||
@Override
|
||||
public InternalAggregation buildAggregation(long bucket) throws IOException {
|
||||
assert bucket == 0;
|
||||
consumeBucketsAndMaybeBreak((int) bucketOrds.size());
|
||||
List<InternalHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
|
||||
for (long i = 0; i < bucketOrds.size(); i++) {
|
||||
double roundKey = Double.longBitsToDouble(bucketOrds.get(i));
|
||||
double key = roundKey * interval + offset;
|
||||
buckets.add(new InternalHistogram.Bucket(key, bucketDocCount(i), keyed, formatter, bucketAggregations(i)));
|
||||
}
|
||||
|
||||
// the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
|
||||
CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this));
|
||||
|
||||
InternalHistogram.EmptyBucketInfo emptyBucketInfo = null;
|
||||
if (minDocCount == 0) {
|
||||
emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
|
||||
}
|
||||
return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, pipelineAggregators(),
|
||||
metaData());
|
||||
}
|
||||
|
||||
@Override
|
||||
public InternalAggregation buildEmptyAggregation() {
|
||||
InternalHistogram.EmptyBucketInfo emptyBucketInfo = null;
|
||||
if (minDocCount == 0) {
|
||||
emptyBucketInfo = new InternalHistogram.EmptyBucketInfo(interval, offset, minBound, maxBound, buildEmptySubAggregations());
|
||||
}
|
||||
return new InternalHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed,
|
||||
pipelineAggregators(), metaData());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doClose() {
|
||||
Releasables.close(bucketOrds);
|
||||
}
|
||||
}
|
|
@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.bucket.missing;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;

@ -78,7 +79,7 @@ public class MissingAggregationBuilder extends ValuesSourceAggregationBuilder<Va
}

@Override
protected boolean serializeTargetValueType() {
protected boolean serializeTargetValueType(Version version) {
return true;
}

@ -18,6 +18,7 @@
*/
package org.elasticsearch.search.aggregations.bucket.significant;

import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -155,7 +156,7 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB
}

@Override
protected boolean serializeTargetValueType() {
protected boolean serializeTargetValueType(Version version) {
return true;
}

@ -18,6 +18,7 @@
*/
package org.elasticsearch.search.aggregations.bucket.terms;

import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -94,7 +95,7 @@ public class RareTermsAggregationBuilder extends ValuesSourceAggregationBuilder<
}

@Override
protected boolean serializeTargetValueType() {
protected boolean serializeTargetValueType(Version version) {
return true;
}

@ -18,6 +18,7 @@
*/
package org.elasticsearch.search.aggregations.bucket.terms;

import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -140,7 +141,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Valu
}

@Override
protected boolean serializeTargetValueType() {
protected boolean serializeTargetValueType(Version version) {
return true;
}

@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.metrics;

import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -96,7 +97,7 @@ public final class CardinalityAggregationBuilder
}

@Override
protected boolean serializeTargetValueType() {
protected boolean serializeTargetValueType(Version version) {
return true;
}

@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.metrics;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;

@ -78,7 +79,7 @@ public class ValueCountAggregationBuilder extends ValuesSourceAggregationBuilder
}

@Override
protected boolean serializeTargetValueType() {
protected boolean serializeTargetValueType(Version version) {
return true;
}

@ -49,6 +49,11 @@ public enum MissingValues {
SortedBinaryDocValues values = valuesSource.bytesValues(context);
return replaceMissing(values, missing);
}

@Override
public String toString() {
return "anon ValuesSource.Bytes of [" + super.toString() + "]";
}
};
}

@ -82,6 +87,10 @@ public enum MissingValues {
return missing;
}
}
@Override
public String toString() {
return "anon SortedBinaryDocValues of [" + super.toString() + "]";
}
};
}

@ -111,6 +120,10 @@ public enum MissingValues {
final SortedNumericDoubleValues values = valuesSource.doubleValues(context);
return replaceMissing(values, missing.doubleValue());
}
@Override
public String toString() {
return "anon ValuesSource.Numeric of [" + super.toString() + "]";
}
};
}

@ -145,6 +158,11 @@ public enum MissingValues {
return true;
}

@Override
public String toString() {
return "anon SortedNumericDocValues of [" + super.toString() + "]";
}

};
}

@ -179,6 +197,11 @@ public enum MissingValues {
return count == 0 ? 1 : count;
}

@Override
public String toString() {
return "anon SortedNumericDoubleValues of [" + super.toString() + "]";
}

};
}

@ -209,6 +232,12 @@ public enum MissingValues {
valuesSource.globalOrdinalsValues(context),
valuesSource.globalOrdinalsMapping(context), missing);
}

@Override
public String toString() {
return "anon ValuesSource.Bytes.WithOrdinals of [" + super.toString() + "]";
}

};
}

@ -263,6 +292,12 @@ public enum MissingValues {
// the document does not have a value
return true;
}

@Override
public String toString() {
return "anon AbstractSortedDocValues of [" + super.toString() + "]";
}

};
}

@ -316,6 +351,11 @@ public enum MissingValues {
// the document does not have a value
return true;
}

@Override
public String toString() {
return "anon AbstractSortedDocValues of [" + super.toString() + "]";
}
};
}

@ -369,6 +409,11 @@ public enum MissingValues {
final MultiGeoPointValues values = valuesSource.geoPointValues(context);
return replaceMissing(values, missing);
}

@Override
public String toString() {
return "anon ValuesSource.GeoPoint of [" + super.toString() + "]";
}
};
}

@ -402,6 +447,11 @@ public enum MissingValues {
return missing;
}
}

@Override
public String toString() {
return "anon MultiGeoPointValues of [" + super.toString() + "]";
}
};
}
}

@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.plain.BinaryDVIndexFieldData;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.search.DocValueFormat;

@ -48,7 +49,8 @@ public enum ValueType implements Writeable {
// TODO: what is the difference between "number" and "numeric"?
NUMERIC((byte) 7, "numeric", "numeric", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW),
GEOPOINT((byte) 8, "geo_point", "geo_point", ValuesSourceType.GEOPOINT, IndexGeoPointFieldData.class, DocValueFormat.GEOHASH),
BOOLEAN((byte) 9, "boolean", "boolean", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.BOOLEAN);
BOOLEAN((byte) 9, "boolean", "boolean", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.BOOLEAN),
RANGE((byte) 10, "range", "range", ValuesSourceType.RANGE, BinaryDVIndexFieldData.class, DocValueFormat.RAW);

final String description;
final ValuesSourceType valuesSourceType;

@ -42,6 +42,7 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.index.fielddata.SortingBinaryDocValues;
import org.elasticsearch.index.fielddata.SortingNumericDoubleValues;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.script.AggregationScript;
import org.elasticsearch.search.aggregations.support.ValuesSource.WithScript.BytesValues;
import org.elasticsearch.search.aggregations.support.values.ScriptBytesValues;

@ -65,6 +66,28 @@ public abstract class ValuesSource {
return false;
}

public static class Range extends ValuesSource {
private final RangeType rangeType;
protected final IndexFieldData<?> indexFieldData;

public Range(IndexFieldData<?> indexFieldData, RangeType rangeType) {
this.indexFieldData = indexFieldData;
this.rangeType = rangeType;
}

@Override
public SortedBinaryDocValues bytesValues(LeafReaderContext context) {
return indexFieldData.load(context).getBytesValues();
}

@Override
public DocValueBits docsWithValue(LeafReaderContext context) throws IOException {
final SortedBinaryDocValues bytes = bytesValues(context);
return org.elasticsearch.index.fielddata.FieldData.docsWithValue(bytes);
}

public RangeType rangeType() { return rangeType; }
}
public abstract static class Bytes extends ValuesSource {

@Override

@ -193,6 +216,7 @@ public abstract class ValuesSource {
public SortedBinaryDocValues bytesValues(LeafReaderContext context) {
return indexFieldData.load(context).getBytesValues();
}

}

public static class Script extends Bytes {

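The hunk above introduces ValuesSource.Range, which only exposes the raw binary doc values of a range field plus its RangeType; per the commit description, Histogram and DateHistogram now branch aggregator creation on the concrete ValuesSource class, the way the terms aggregator already does. Below is a minimal, self-contained sketch of that dispatch pattern; the Source/NumericSource/RangeSource classes and the factory method are hypothetical stand-ins, not the real Elasticsearch types.

// Standalone sketch of class-based values-source dispatch (hypothetical stand-in types).
public class ValuesSourceDispatchSketch {

    // Stand-ins for ValuesSource, ValuesSource.Numeric and ValuesSource.Range.
    abstract static class Source {}
    static class NumericSource extends Source {}
    static class RangeSource extends Source {
        final String rangeType; // stand-in for RangeType
        RangeSource(String rangeType) { this.rangeType = rangeType; }
    }

    // Analogous to the factory deciding which histogram aggregator to instantiate.
    static String createHistogramAggregator(Source source) {
        if (source instanceof RangeSource) {
            // Range-backed fields get a specialized aggregator that knows how to
            // decode the binary range doc values using the field's RangeType.
            return "range-histogram[" + ((RangeSource) source).rangeType + "]";
        } else if (source instanceof NumericSource) {
            return "numeric-histogram";
        }
        throw new IllegalArgumentException("Unsupported values source: " + source.getClass().getSimpleName());
    }

    public static void main(String[] args) {
        System.out.println(createHistogramAggregator(new NumericSource()));
        System.out.println(createHistogramAggregator(new RangeSource("LONG")));
    }
}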
@ -63,7 +63,7 @@ public abstract class ValuesSourceAggregationBuilder<VS extends ValuesSource, AB

/**
* Read an aggregation from a stream that serializes its targetValueType. This should only be used by subclasses that override
* {@link #serializeTargetValueType()} to return true.
* {@link #serializeTargetValueType(Version)} to return true.
*/
protected LeafOnly(StreamInput in, ValuesSourceType valuesSourceType) throws IOException {
super(in, valuesSourceType);

@ -110,24 +110,31 @@ public abstract class ValuesSourceAggregationBuilder<VS extends ValuesSource, AB
}

/**
* Read an aggregation from a stream that does not serialize its targetValueType. This should be used by most subclasses.
* Read an aggregation from a stream that has a sensible default for TargetValueType. This should be used by most subclasses.
* Subclasses needing to maintain backward compatibility to a version that did not serialize TargetValueType should use this
* constructor, providing the old, constant value for TargetValueType and override {@link #serializeTargetValueType(Version)} to return
* true only for versions that support the serialization.
*/
protected ValuesSourceAggregationBuilder(StreamInput in, ValuesSourceType valuesSourceType, ValueType targetValueType)
throws IOException {
super(in);
assert false == serializeTargetValueType() : "Wrong read constructor called for subclass that provides its targetValueType";
this.valuesSourceType = valuesSourceType;
this.targetValueType = targetValueType;
if (serializeTargetValueType(in.getVersion())) {
this.targetValueType = in.readOptionalWriteable(ValueType::readFromStream);
} else {
this.targetValueType = targetValueType;
}
read(in);
}

/**
* Read an aggregation from a stream that serializes its targetValueType. This should only be used by subclasses that override
* {@link #serializeTargetValueType()} to return true.
* {@link #serializeTargetValueType(Version)} to return true.
*/
protected ValuesSourceAggregationBuilder(StreamInput in, ValuesSourceType valuesSourceType) throws IOException {
super(in);
assert serializeTargetValueType() : "Wrong read constructor called for subclass that serializes its targetValueType";
// TODO: Can we get rid of this constructor and always use the three value version? Does this assert provide any value?
assert serializeTargetValueType(in.getVersion()) : "Wrong read constructor called for subclass that serializes its targetValueType";
this.valuesSourceType = valuesSourceType;
this.targetValueType = in.readOptionalWriteable(ValueType::readFromStream);
read(in);

@ -155,7 +162,7 @@ public abstract class ValuesSourceAggregationBuilder<VS extends ValuesSource, AB

@Override
protected final void doWriteTo(StreamOutput out) throws IOException {
if (serializeTargetValueType()) {
if (serializeTargetValueType(out.getVersion())) {
out.writeOptionalWriteable(targetValueType);
}
out.writeOptionalString(field);

@ -187,8 +194,9 @@ public abstract class ValuesSourceAggregationBuilder<VS extends ValuesSource, AB
/**
* Should this builder serialize its targetValueType? Defaults to false. All subclasses that override this to true should use the three
* argument read constructor rather than the four argument version.
* @param version For backwards compatibility, subclasses can change behavior based on the version
*/
protected boolean serializeTargetValueType() {
protected boolean serializeTargetValueType(Version version) {
return false;
}

@ -316,10 +324,31 @@ public abstract class ValuesSourceAggregationBuilder<VS extends ValuesSource, AB
return factory;
}

/**
* Provide a hook for aggregations to have finer grained control of the ValuesSourceType for script values. This will only be called if
* the user did not supply a type hint for the script. The script object is provided for reference.
*
* @param script - The user supplied script
* @return The ValuesSourceType we expect this script to yield.
*/
protected ValuesSourceType resolveScriptAny(Script script) {
return ValuesSourceType.BYTES;
}

/**
* Provide a hook for aggregations to have finer grained control of the ValueType for script values. This will only be called if the
* user did not supply a type hint for the script. The script object is provided for reference
* @param script - the user supplied script
* @return The ValueType we expect this script to yield
*/
protected ValueType defaultValueType(Script script) {
return valueType;
}

protected ValuesSourceConfig<VS> resolveConfig(SearchContext context) {
ValueType valueType = this.valueType != null ? this.valueType : targetValueType;
return ValuesSourceConfig.resolve(context.getQueryShardContext(),
valueType, field, script, missing, timeZone, format);
valueType, field, script, missing, timeZone, format, this::resolveScriptAny);
}

protected abstract ValuesSourceAggregatorFactory<VS> innerBuild(SearchContext context, ValuesSourceConfig<VS> config,

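The serializeTargetValueType(Version) hook above lets a builder that previously relied on a constant targetValueType start serializing it only when the stream's wire version supports it. The following standalone sketch shows just that control flow with plain JDK streams; the version id and field layout are illustrative assumptions, not the actual Elasticsearch wire format.

// Standalone sketch of the version-gated read/write pattern, using plain JDK streams.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VersionGatedSerializationSketch {

    static final int V_7_4_0 = 7_04_00_99; // hypothetical wire version id

    // Analogous to serializeTargetValueType(Version): gate the optional field on the
    // version of the node on the other end of the stream.
    static boolean serializeTargetValueType(int wireVersion) {
        return wireVersion >= V_7_4_0;
    }

    static void writeTo(DataOutputStream out, int wireVersion, String targetValueType) throws IOException {
        if (serializeTargetValueType(wireVersion)) {
            out.writeBoolean(targetValueType != null); // optional-writeable style
            if (targetValueType != null) {
                out.writeUTF(targetValueType);
            }
        }
        // ... the rest of the builder state is written unconditionally
    }

    static String readFrom(DataInputStream in, int wireVersion, String constantDefault) throws IOException {
        if (serializeTargetValueType(wireVersion)) {
            return in.readBoolean() ? in.readUTF() : null;
        }
        return constantDefault; // older peers never sent it: fall back to the old constant
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeTo(new DataOutputStream(bytes), V_7_4_0, "range");
        String roundTripped = readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), V_7_4_0, "numeric");
        System.out.println(roundTripped); // prints "range"
    }
}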
@ -28,6 +28,7 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.script.AggregationScript;
import org.elasticsearch.script.Script;

@ -48,12 +49,27 @@ public class ValuesSourceConfig<VS extends ValuesSource> {
* Resolve a {@link ValuesSourceConfig} given configuration parameters.
*/
public static <VS extends ValuesSource> ValuesSourceConfig<VS> resolve(
QueryShardContext context,
ValueType valueType,
String field, Script script,
Object missing,
ZoneId timeZone,
String format) {
QueryShardContext context,
ValueType valueType,
String field, Script script,
Object missing,
ZoneId timeZone,
String format) {
return resolve(context, valueType, field, script, missing, timeZone, format, s -> ValuesSourceType.BYTES);
}

/**
* Resolve a {@link ValuesSourceConfig} given configuration parameters.
*/
public static <VS extends ValuesSource> ValuesSourceConfig<VS> resolve(
QueryShardContext context,
ValueType valueType,
String field, Script script,
Object missing,
ZoneId timeZone,
String format,
Function<Script, ValuesSourceType> resolveScriptAny
) {

if (field == null) {
if (script == null) {

@ -67,7 +83,7 @@ public class ValuesSourceConfig<VS extends ValuesSource> {
// we need to have a specific value source
// type to know how to handle the script values, so we fallback
// on Bytes
valuesSourceType = ValuesSourceType.BYTES;
valuesSourceType = resolveScriptAny.apply(script);
}
ValuesSourceConfig<VS> config = new ValuesSourceConfig<>(valuesSourceType);
config.missing(missing);

@ -96,18 +112,21 @@ public class ValuesSourceConfig<VS extends ValuesSource> {
IndexFieldData<?> indexFieldData = context.getForField(fieldType);

ValuesSourceConfig<VS> config;
if (valueType == null) {
if (indexFieldData instanceof IndexNumericFieldData) {
config = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC);
} else if (indexFieldData instanceof IndexGeoPointFieldData) {
config = new ValuesSourceConfig<>(ValuesSourceType.GEOPOINT);
} else {
config = new ValuesSourceConfig<>(ValuesSourceType.BYTES);
}
if (indexFieldData instanceof IndexNumericFieldData) {
config = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC);
} else if (indexFieldData instanceof IndexGeoPointFieldData) {
config = new ValuesSourceConfig<>(ValuesSourceType.GEOPOINT);
} else if (fieldType instanceof RangeFieldMapper.RangeFieldType) {
config = new ValuesSourceConfig<>(ValuesSourceType.RANGE);
} else {
config = new ValuesSourceConfig<>(valueType.getValuesSourceType());
if (valueType == null) {
config = new ValuesSourceConfig<>(ValuesSourceType.BYTES);
} else {
config = new ValuesSourceConfig<>(valueType.getValuesSourceType());
}
}

config.fieldContext(new FieldContext(field, indexFieldData, fieldType));
config.missing(missing);
config.timezone(timeZone);

@ -303,6 +322,9 @@ public class ValuesSourceConfig<VS extends ValuesSource> {
if (valueSourceType() == ValuesSourceType.GEOPOINT) {
return (VS) geoPointField();
}
if (valueSourceType() == ValuesSourceType.RANGE) {
return (VS) rangeField();
}
// falling back to bytes values
return (VS) bytesField();
}

@ -352,4 +374,14 @@ public class ValuesSourceConfig<VS extends ValuesSource> {

return new ValuesSource.GeoPoint.Fielddata((IndexGeoPointFieldData) fieldContext().indexFieldData());
}

private ValuesSource rangeField() {
MappedFieldType fieldType = fieldContext.fieldType();

if (fieldType instanceof RangeFieldMapper.RangeFieldType == false) {
throw new IllegalStateException("Asked for range ValuesSource, but field is of type " + fieldType.name());
}
RangeFieldMapper.RangeFieldType rangeFieldType = (RangeFieldMapper.RangeFieldType)fieldType;
return new ValuesSource.Range(fieldContext().indexFieldData(), rangeFieldType.rangeType());
}
}

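The resolve(...) change above makes the mapped field type win over the user-supplied value type hint, with range-mapped fields selecting the new RANGE values source. A minimal standalone sketch of that decision order, using hypothetical stand-in classes rather than the real field data types:

// Standalone sketch of the resolution order used when a field is present
// (stand-in marker classes; not the real Elasticsearch field data types).
public class ValuesSourceResolutionSketch {

    enum SourceType { NUMERIC, GEOPOINT, RANGE, BYTES }

    static class FieldData {}
    static class NumericFieldData extends FieldData {}
    static class GeoPointFieldData extends FieldData {}
    static class FieldType {}
    static class RangeFieldType extends FieldType {}

    // Mirrors the new branch order: the concrete field data / field type wins,
    // and the user-supplied value type hint is only consulted as a fallback.
    static SourceType resolve(FieldData fieldData, FieldType fieldType, SourceType valueTypeHint) {
        if (fieldData instanceof NumericFieldData) {
            return SourceType.NUMERIC;
        } else if (fieldData instanceof GeoPointFieldData) {
            return SourceType.GEOPOINT;
        } else if (fieldType instanceof RangeFieldType) {
            return SourceType.RANGE;
        } else if (valueTypeHint != null) {
            return valueTypeHint;
        }
        return SourceType.BYTES;
    }

    public static void main(String[] args) {
        // A range-mapped field resolves to RANGE even if the request hinted at NUMERIC.
        System.out.println(resolve(new FieldData(), new RangeFieldType(), SourceType.NUMERIC)); // RANGE
        System.out.println(resolve(new NumericFieldData(), new FieldType(), null));             // NUMERIC
    }
}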
@ -34,9 +34,15 @@ public final class ValuesSourceParserHelper {
private ValuesSourceParserHelper() {} // utility class, no instantiation

public static <T> void declareAnyFields(
AbstractObjectParser<? extends ValuesSourceAggregationBuilder<ValuesSource, ?>, T> objectParser,
boolean scriptable, boolean formattable) {
declareFields(objectParser, scriptable, formattable, false, null);
AbstractObjectParser<? extends ValuesSourceAggregationBuilder<ValuesSource, ?>, T> objectParser,
boolean scriptable, boolean formattable) {
declareAnyFields(objectParser, scriptable, formattable, false);
}

public static <T> void declareAnyFields(
AbstractObjectParser<? extends ValuesSourceAggregationBuilder<ValuesSource, ?>, T> objectParser,
boolean scriptable, boolean formattable, boolean timezoneAware) {
declareFields(objectParser, scriptable, formattable, timezoneAware, null);
}

public static <T> void declareNumericFields(

@ -30,7 +30,8 @@ public enum ValuesSourceType implements Writeable {
ANY,
NUMERIC,
BYTES,
GEOPOINT;
GEOPOINT,
RANGE;

public static ValuesSourceType fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));

@ -24,6 +24,7 @@ import org.apache.lucene.search.BaseRangeFieldQueryTestCase;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;

import java.io.IOException;
import java.util.Collections;

@ -84,7 +85,7 @@ public abstract class BaseRandomBinaryDocValuesRangeQueryTestCase extends BaseRa

protected abstract String fieldName();

protected abstract RangeFieldMapper.RangeType rangeType();
protected abstract RangeType rangeType();

protected abstract static class AbstractRange<T> extends Range {

@ -27,6 +27,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

@ -41,7 +42,7 @@ public class BinaryDocValuesRangeQueryTests extends ESTestCase {

public void testBasics() throws Exception {
String fieldName = "long_field";
RangeFieldMapper.RangeType rangeType = RangeFieldMapper.RangeType.LONG;
RangeType rangeType = RangeType.LONG;
try (Directory dir = newDirectory()) {
try (RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) {
// intersects (within)

@ -127,7 +128,7 @@ public class BinaryDocValuesRangeQueryTests extends ESTestCase {

public void testNoField() throws IOException {
String fieldName = "long_field";
RangeFieldMapper.RangeType rangeType = RangeFieldMapper.RangeType.LONG;
RangeType rangeType = RangeType.LONG;

// no field in index
try (Directory dir = newDirectory()) {

@ -18,7 +18,7 @@
*/
package org.apache.lucene.queries;

import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;

public class DoubleRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDocValuesRangeQueryTestCase {

@ -28,8 +28,8 @@ public class DoubleRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinary
}

@Override
protected RangeFieldMapper.RangeType rangeType() {
return RangeFieldMapper.RangeType.DOUBLE;
protected RangeType rangeType() {
return RangeType.DOUBLE;
}

@Override

@ -18,7 +18,7 @@
*/
package org.apache.lucene.queries;

import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;

public class FloatRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDocValuesRangeQueryTestCase {

@ -28,8 +28,8 @@ public class FloatRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryD
}

@Override
protected RangeFieldMapper.RangeType rangeType() {
return RangeFieldMapper.RangeType.FLOAT;
protected RangeType rangeType() {
return RangeType.FLOAT;
}

@Override

@ -20,7 +20,7 @@ package org.apache.lucene.queries;

import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.util.FutureArrays;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;

import java.net.InetAddress;
import java.net.UnknownHostException;

@ -34,8 +34,8 @@ public class InetAddressRandomBinaryDocValuesRangeQueryTests extends BaseRandomB
}

@Override
protected RangeFieldMapper.RangeType rangeType() {
return RangeFieldMapper.RangeType.IP;
protected RangeType rangeType() {
return RangeType.IP;
}

@Override

@ -19,7 +19,7 @@
package org.apache.lucene.queries;

import org.apache.lucene.util.TestUtil;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;

public class IntegerRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDocValuesRangeQueryTestCase {

@ -29,8 +29,8 @@ public class IntegerRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinar
}

@Override
protected RangeFieldMapper.RangeType rangeType() {
return RangeFieldMapper.RangeType.INTEGER;
protected RangeType rangeType() {
return RangeType.INTEGER;
}

@Override

@ -19,7 +19,7 @@
package org.apache.lucene.queries;

import org.apache.lucene.util.TestUtil;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;

public class LongRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDocValuesRangeQueryTestCase {

@ -29,8 +29,8 @@ public class LongRandomBinaryDocValuesRangeQueryTests extends BaseRandomBinaryDo
}

@Override
protected RangeFieldMapper.RangeType rangeType() {
return RangeFieldMapper.RangeType.LONG;
protected RangeType rangeType() {
return RangeType.LONG;
}

@Override

@ -19,8 +19,14 @@
package org.elasticsearch.index.mapper;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.List;

import static java.util.Collections.singleton;

public class BinaryRangeUtilTests extends ESTestCase {

public void testBasics() {

@ -140,6 +146,81 @@ public class BinaryRangeUtilTests extends ESTestCase {
}
}

public void testDecodeLong() {
long[] cases = new long[] { Long.MIN_VALUE, -2049, -2048, -128, -3, -1, 0, 1, 3, 125, 2048, 2049, Long.MAX_VALUE};
for (long expected : cases) {
byte[] encoded = BinaryRangeUtil.encodeLong(expected);
int offset = 0;
int length = RangeType.LengthType.VARIABLE.readLength(encoded, offset);
assertEquals(expected, BinaryRangeUtil.decodeLong(encoded, offset, length));
}
}

public void testDecodeLongRanges() throws IOException {
int iters = randomIntBetween(32, 1024);
for (int i = 0; i < iters; i++) {
long start = randomLong();
long end = randomLongBetween(start + 1, Long.MAX_VALUE);
RangeFieldMapper.Range expected = new RangeFieldMapper.Range(RangeType.LONG, start, end, true, true);
List<RangeFieldMapper.Range> decoded = BinaryRangeUtil.decodeLongRanges(BinaryRangeUtil.encodeLongRanges(singleton(expected)));
assertEquals(1, decoded.size());
RangeFieldMapper.Range actual = decoded.get(0);
assertEquals(expected, actual);
}
}

public void testDecodeDoubleRanges() throws IOException {
int iters = randomIntBetween(32, 1024);
for (int i = 0; i < iters; i++) {
double start = randomDouble();
double end = randomDoubleBetween(Math.nextUp(start), Double.MAX_VALUE, false);
RangeFieldMapper.Range expected = new RangeFieldMapper.Range(RangeType.DOUBLE, start, end, true, true);
List<RangeFieldMapper.Range> decoded = BinaryRangeUtil.decodeDoubleRanges(BinaryRangeUtil.encodeDoubleRanges(
singleton(expected)));
assertEquals(1, decoded.size());
RangeFieldMapper.Range actual = decoded.get(0);
assertEquals(expected, actual);
}
}

public void testDecodeFloatRanges() throws IOException {
int iters = randomIntBetween(32, 1024);
for (int i = 0; i < iters; i++) {
float start = randomFloat();
// for some reason, ESTestCase doesn't provide randomFloatBetween
float end = randomFloat();
if (start > end) {
float temp = start;
start = end;
end = temp;
}
RangeFieldMapper.Range expected = new RangeFieldMapper.Range(RangeType.FLOAT, start, end, true, true);
List<RangeFieldMapper.Range> decoded = BinaryRangeUtil.decodeFloatRanges(BinaryRangeUtil.encodeFloatRanges(
singleton(expected)));
assertEquals(1, decoded.size());
RangeFieldMapper.Range actual = decoded.get(0);
assertEquals(expected, actual);
}
}

public void testDecodeIPRanges() throws IOException {
RangeFieldMapper.Range[] cases = {
createIPRange("192.168.0.1", "192.168.0.100"),
createIPRange("::ffff:c0a8:107", "2001:db8::")
};
for (RangeFieldMapper.Range expected : cases) {
List<RangeFieldMapper.Range> decoded = BinaryRangeUtil.decodeIPRanges(BinaryRangeUtil.encodeIPRanges(singleton(expected)));
assertEquals(1, decoded.size());
RangeFieldMapper.Range actual = decoded.get(0);
assertEquals(expected, actual);
}
}

private RangeFieldMapper.Range createIPRange(String start, String end) {
return new RangeFieldMapper.Range(RangeType.IP, InetAddresses.forString(start), InetAddresses.forString(end),
true, true);
}

private static int normalize(int cmp) {
if (cmp < 0) {
return -1;

@ -416,7 +416,7 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {

public void testIllegalArguments() throws Exception {
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", RangeFieldMapper.RangeType.INTEGER.name)
.startObject("properties").startObject("field").field("type", RangeType.INTEGER.name)
.field("format", DATE_FORMAT).endObject().endObject().endObject().endObject();

ThrowingRunnable runnable = () -> parser.parse("type", new CompressedXContent(Strings.toString(mapping)));

@ -69,7 +69,7 @@ public class RangeFieldQueryStringQueryBuilderTests extends AbstractQueryTestCas
public void testIntegerRangeQuery() throws Exception {
Query query = new QueryStringQueryBuilder(INTEGER_RANGE_FIELD_NAME + ":[-450 TO 45000]").toQuery(createShardContext());
Query range = IntRange.newIntersectsQuery(INTEGER_RANGE_FIELD_NAME, new int[]{-450}, new int[]{45000});
Query dv = RangeFieldMapper.RangeType.INTEGER.dvRangeQuery(INTEGER_RANGE_FIELD_NAME,
Query dv = RangeType.INTEGER.dvRangeQuery(INTEGER_RANGE_FIELD_NAME,
BinaryDocValuesRangeQuery.QueryType.INTERSECTS, -450, 45000, true, true);
assertEquals(new IndexOrDocValuesQuery(range, dv), query);
}

@ -77,7 +77,7 @@ public class RangeFieldQueryStringQueryBuilderTests extends AbstractQueryTestCas
public void testLongRangeQuery() throws Exception {
Query query = new QueryStringQueryBuilder(LONG_RANGE_FIELD_NAME + ":[-450 TO 45000]").toQuery(createShardContext());
Query range = LongRange.newIntersectsQuery(LONG_RANGE_FIELD_NAME, new long[]{-450}, new long[]{45000});
Query dv = RangeFieldMapper.RangeType.LONG.dvRangeQuery(LONG_RANGE_FIELD_NAME,
Query dv = RangeType.LONG.dvRangeQuery(LONG_RANGE_FIELD_NAME,
BinaryDocValuesRangeQuery.QueryType.INTERSECTS, -450, 45000, true, true);
assertEquals(new IndexOrDocValuesQuery(range, dv), query);
}

@ -85,7 +85,7 @@ public class RangeFieldQueryStringQueryBuilderTests extends AbstractQueryTestCas
public void testFloatRangeQuery() throws Exception {
Query query = new QueryStringQueryBuilder(FLOAT_RANGE_FIELD_NAME + ":[-450 TO 45000]").toQuery(createShardContext());
Query range = FloatRange.newIntersectsQuery(FLOAT_RANGE_FIELD_NAME, new float[]{-450}, new float[]{45000});
Query dv = RangeFieldMapper.RangeType.FLOAT.dvRangeQuery(FLOAT_RANGE_FIELD_NAME,
Query dv = RangeType.FLOAT.dvRangeQuery(FLOAT_RANGE_FIELD_NAME,
BinaryDocValuesRangeQuery.QueryType.INTERSECTS, -450.0f, 45000.0f, true, true);
assertEquals(new IndexOrDocValuesQuery(range, dv), query);
}

@ -93,7 +93,7 @@ public class RangeFieldQueryStringQueryBuilderTests extends AbstractQueryTestCas
public void testDoubleRangeQuery() throws Exception {
Query query = new QueryStringQueryBuilder(DOUBLE_RANGE_FIELD_NAME + ":[-450 TO 45000]").toQuery(createShardContext());
Query range = DoubleRange.newIntersectsQuery(DOUBLE_RANGE_FIELD_NAME, new double[]{-450}, new double[]{45000});
Query dv = RangeFieldMapper.RangeType.DOUBLE.dvRangeQuery(DOUBLE_RANGE_FIELD_NAME,
Query dv = RangeType.DOUBLE.dvRangeQuery(DOUBLE_RANGE_FIELD_NAME,
BinaryDocValuesRangeQuery.QueryType.INTERSECTS, -450.0, 45000.0, true, true);
assertEquals(new IndexOrDocValuesQuery(range, dv), query);
}

@ -106,7 +106,7 @@ public class RangeFieldQueryStringQueryBuilderTests extends AbstractQueryTestCas
Query range = LongRange.newIntersectsQuery(DATE_RANGE_FIELD_NAME,
new long[]{ parser.parse("2010-01-01", () -> 0).toEpochMilli()},
new long[]{ parser.parse("2018-01-01", () -> 0).toEpochMilli()});
Query dv = RangeFieldMapper.RangeType.DATE.dvRangeQuery(DATE_RANGE_FIELD_NAME,
Query dv = RangeType.DATE.dvRangeQuery(DATE_RANGE_FIELD_NAME,
BinaryDocValuesRangeQuery.QueryType.INTERSECTS,
parser.parse("2010-01-01", () -> 0).toEpochMilli(),
parser.parse("2018-01-01", () -> 0).toEpochMilli(), true, true);

@ -118,7 +118,7 @@ public class RangeFieldQueryStringQueryBuilderTests extends AbstractQueryTestCas
InetAddress upper = InetAddresses.forString("192.168.0.5");
Query query = new QueryStringQueryBuilder(IP_RANGE_FIELD_NAME + ":[192.168.0.1 TO 192.168.0.5]").toQuery(createShardContext());
Query range = InetAddressRange.newIntersectsQuery(IP_RANGE_FIELD_NAME, lower, upper);
Query dv = RangeFieldMapper.RangeType.IP.dvRangeQuery(IP_RANGE_FIELD_NAME,
Query dv = RangeType.IP.dvRangeQuery(IP_RANGE_FIELD_NAME,
BinaryDocValuesRangeQuery.QueryType.INTERSECTS,
lower, upper, true, true);
assertEquals(new IndexOrDocValuesQuery(range, dv), query);

@ -40,7 +40,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.RangeFieldMapper.RangeFieldType;
import org.elasticsearch.index.mapper.RangeFieldMapper.RangeType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.test.IndexSettingsModule;
import org.joda.time.DateTime;

@ -434,9 +433,9 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
}

public void testParseIp() {
assertEquals(InetAddresses.forString("::1"), RangeFieldMapper.RangeType.IP.parse(InetAddresses.forString("::1"), randomBoolean()));
assertEquals(InetAddresses.forString("::1"), RangeFieldMapper.RangeType.IP.parse("::1", randomBoolean()));
assertEquals(InetAddresses.forString("::1"), RangeFieldMapper.RangeType.IP.parse(new BytesRef("::1"), randomBoolean()));
assertEquals(InetAddresses.forString("::1"), RangeType.IP.parse(InetAddresses.forString("::1"), randomBoolean()));
assertEquals(InetAddresses.forString("::1"), RangeType.IP.parse("::1", randomBoolean()));
assertEquals(InetAddresses.forString("::1"), RangeType.IP.parse(new BytesRef("::1"), randomBoolean()));
}

public void testTermQuery() throws Exception {

@ -70,7 +70,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
histogram -> {
assertEquals(0, histogram.getBuckets().size());
assertFalse(AggregationInspectionHelper.hasValue(histogram));
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -78,11 +78,11 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
public void testMatchNoDocs() throws IOException {
testBothCases(new MatchNoDocsQuery(), dataset,
aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD),
histogram -> assertEquals(0, histogram.getBuckets().size())
histogram -> assertEquals(0, histogram.getBuckets().size()), false
);
testBothCases(new MatchNoDocsQuery(), dataset,
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD),
histogram -> assertEquals(0, histogram.getBuckets().size())
histogram -> assertEquals(0, histogram.getBuckets().size()), false
);
}

@ -94,21 +94,21 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
histogram -> {
assertEquals(6, histogram.getBuckets().size());
assertTrue(AggregationInspectionHelper.hasValue(histogram));
}
}, false
);
testSearchAndReduceCase(query, dataset,
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD),
histogram -> {
assertEquals(8, histogram.getBuckets().size());
assertTrue(AggregationInspectionHelper.hasValue(histogram));
}
}, false
);
testBothCases(query, dataset,
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD).minDocCount(1L),
histogram -> {
assertEquals(6, histogram.getBuckets().size());
assertTrue(AggregationInspectionHelper.hasValue(histogram));
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -122,33 +122,33 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
}
testSearchAndReduceCase(query, foo,
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD).order(BucketOrder.count(false)),
histogram -> assertEquals(8, histogram.getBuckets().size())
histogram -> assertEquals(8, histogram.getBuckets().size()), false
);

testSearchCase(query, dataset,
aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD),
histogram -> assertEquals(6, histogram.getBuckets().size())
histogram -> assertEquals(6, histogram.getBuckets().size()), false
);
testSearchAndReduceCase(query, dataset,
aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD),
histogram -> assertEquals(8, histogram.getBuckets().size())
histogram -> assertEquals(8, histogram.getBuckets().size()), false
);
testBothCases(query, dataset,
aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD).minDocCount(1L),
histogram -> assertEquals(6, histogram.getBuckets().size())
histogram -> assertEquals(6, histogram.getBuckets().size()), false
);

testSearchCase(query, dataset,
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD),
histogram -> assertEquals(6, histogram.getBuckets().size())
histogram -> assertEquals(6, histogram.getBuckets().size()), false
);
testSearchAndReduceCase(query, dataset,
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD),
histogram -> assertEquals(8, histogram.getBuckets().size())
histogram -> assertEquals(8, histogram.getBuckets().size()), false
);
testBothCases(query, dataset,
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD).minDocCount(1L),
histogram -> assertEquals(6, histogram.getBuckets().size())
histogram -> assertEquals(6, histogram.getBuckets().size()), false
);
}

@ -162,10 +162,10 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
histogram -> {
assertEquals(0, histogram.getBuckets().size());
assertFalse(AggregationInspectionHelper.hasValue(histogram));
}
}, false
);
testSearchAndReduceCase(query, dates, aggregation,
histogram -> assertNull(histogram)
histogram -> assertNull(histogram), false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -176,19 +176,19 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
Consumer<DateHistogramAggregationBuilder> aggregation = agg ->
agg.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD);
testSearchCase(query, dates, aggregation,
histogram -> assertEquals(0, histogram.getBuckets().size())
histogram -> assertEquals(0, histogram.getBuckets().size()), false
);
testSearchAndReduceCase(query, dates, aggregation,
histogram -> assertNull(histogram)
histogram -> assertNull(histogram), false
);

aggregation = agg ->
agg.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD);
testSearchCase(query, dates, aggregation,
histogram -> assertEquals(0, histogram.getBuckets().size())
histogram -> assertEquals(0, histogram.getBuckets().size()), false
);
testSearchAndReduceCase(query, dates, aggregation,
histogram -> assertNull(histogram)
histogram -> assertNull(histogram), false
);
}

@ -198,7 +198,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
histogram -> {
assertEquals(0, histogram.getBuckets().size());
assertFalse(AggregationInspectionHelper.hasValue(histogram));
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -206,11 +206,11 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
public void testAggregateWrongField() throws IOException {
testBothCases(new MatchAllDocsQuery(), dataset,
aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field("wrong_field"),
histogram -> assertEquals(0, histogram.getBuckets().size())
histogram -> assertEquals(0, histogram.getBuckets().size()), false
);
testBothCases(new MatchAllDocsQuery(), dataset,
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field("wrong_field"),
histogram -> assertEquals(0, histogram.getBuckets().size())
histogram -> assertEquals(0, histogram.getBuckets().size()), false
);
}

@ -232,7 +232,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -255,7 +255,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());
}
}, false
);
}

@ -278,7 +278,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-03-01T00:00:00.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -302,7 +302,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-03-01T00:00:00.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
}

@ -337,7 +337,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(3);
assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -373,7 +373,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(3);
assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());
}
}, false
);
testBothCases(new MatchAllDocsQuery(),
Arrays.asList(

@ -405,7 +405,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(3);
assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());
}
}, false
);
}

@ -451,7 +451,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(5);
assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -498,7 +498,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(5);
assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
testBothCases(new MatchAllDocsQuery(),
Arrays.asList(

@ -541,7 +541,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(5);
assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
}

@ -570,7 +570,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString());
assertEquals(2, bucket.getDocCount());
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -600,7 +600,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString());
assertEquals(2, bucket.getDocCount());
}
}, false
);
testBothCases(new MatchAllDocsQuery(),
Arrays.asList(

@ -626,7 +626,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString());
assertEquals(2, bucket.getDocCount());
}
}, false
);
}

@ -656,7 +656,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -687,7 +687,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
testBothCases(new MatchAllDocsQuery(),
Arrays.asList(

@ -714,7 +714,64 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(2);
assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
}

public void testNanosIntervalSecond() throws IOException {
testBothCases(new MatchAllDocsQuery(),
Arrays.asList(
"2017-02-01T00:00:05.015298384Z",
"2017-02-01T00:00:11.299954583Z",
"2017-02-01T00:00:11.074986434Z",
"2017-02-01T00:00:37.688314602Z",
"2017-02-01T00:00:37.210328172Z",
"2017-02-01T00:00:37.380889483Z"
),
aggregation -> aggregation.calendarInterval(DateHistogramInterval.SECOND).field(DATE_FIELD).minDocCount(1L),
histogram -> {
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
assertEquals(3, buckets.size());

Histogram.Bucket bucket = buckets.get(0);
assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());

bucket = buckets.get(1);
assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString());
assertEquals(2, bucket.getDocCount());

bucket = buckets.get(2);
assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}, true
);
testBothCases(new MatchAllDocsQuery(),
Arrays.asList(
"2017-02-01T00:00:05.015298384Z",
"2017-02-01T00:00:11.299954583Z",
"2017-02-01T00:00:11.074986434Z",
"2017-02-01T00:00:37.688314602Z",
"2017-02-01T00:00:37.210328172Z",
"2017-02-01T00:00:37.380889483Z"
),
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("1000ms")).field(DATE_FIELD).minDocCount(1L),
histogram -> {
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
assertEquals(3, buckets.size());

Histogram.Bucket bucket = buckets.get(0);
assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());

bucket = buckets.get(1);
assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString());
assertEquals(2, bucket.getDocCount());

bucket = buckets.get(2);
assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}, true
);
}

@ -750,7 +807,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(3);
assertEquals("2017-02-01T00:00:20.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());
}
}, false
);

// 5 sec interval with minDocCount = 3

@ -763,7 +820,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
Histogram.Bucket bucket = buckets.get(0);
assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -800,7 +857,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
bucket = buckets.get(3);
assertEquals("2017-02-01T00:00:20.000Z", bucket.getKeyAsString());
assertEquals(1, bucket.getDocCount());
}
}, false
);

// 5 sec interval with minDocCount = 3

@ -813,7 +870,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
Histogram.Bucket bucket = buckets.get(0);
assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString());
assertEquals(3, bucket.getDocCount());
}
}, false
);
}

@ -827,15 +884,15 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {

expectThrows(TooManyBucketsException.class, () -> testSearchCase(query, timestamps,
aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD),
histogram -> {}, 2));
histogram -> {}, 2, false));

expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps,
aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD),
histogram -> {}, 2));
histogram -> {}, 2, false));

expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps,
aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(0L),
histogram -> {}, 100));
histogram -> {}, 100, false));

expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps,
aggregation ->

@ -846,7 +903,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
.fixedInterval(DateHistogramInterval.seconds(5))
.field(DATE_FIELD)
),
histogram -> {}, 5));
histogram -> {}, 5, false));
}

public void testMaxBucketDeprecated() throws IOException {

@ -859,15 +916,15 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {

expectThrows(TooManyBucketsException.class, () -> testSearchCase(query, timestamps,
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD),
histogram -> {}, 2));
histogram -> {}, 2, false));

expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps,
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD),
histogram -> {}, 2));
histogram -> {}, 2, false));

expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps,
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(0L),
histogram -> {}, 100));
histogram -> {}, 100, false));

expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps,
aggregation ->

@ -878,7 +935,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
.dateHistogramInterval(DateHistogramInterval.seconds(5))
.field(DATE_FIELD)
),
histogram -> {}, 5));
histogram -> {}, 5, false));
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
}

@ -894,7 +951,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
"2017-02-05"
),
aggregation -> aggregation.fixedInterval(DateHistogramInterval.WEEK).field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("failed to parse setting [date_histogram.fixedInterval] with value [1w] as a time value: " +
"unit is missing or unrecognized"));

@ -912,7 +969,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
"2017-02-05"
),
aggregation -> aggregation.calendarInterval(new DateHistogramInterval("5d")).field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("The supplied interval [5d] could not be parsed as a calendar interval."));
}

@ -931,7 +988,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY)
.fixedInterval(new DateHistogramInterval("2d"))
.field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] with [calendar_interval] configuration option."));
}

@ -950,7 +1007,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d"))
.calendarInterval(DateHistogramInterval.DAY)
.field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [fixed_interval] configuration option."));
}

@ -969,7 +1026,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d"))
.dateHistogramInterval(DateHistogramInterval.DAY)
.field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options."));

@ -986,7 +1043,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY)
.dateHistogramInterval(DateHistogramInterval.DAY)
.field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options."));

@ -1003,7 +1060,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d"))
.interval(1000)
.field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options."));

@ -1020,7 +1077,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY)
.interval(1000)
.field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options."));
}

@ -1039,7 +1096,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation .dateHistogramInterval(DateHistogramInterval.DAY)
.fixedInterval(new DateHistogramInterval("2d"))
.field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] with [interval] configuration option."));

@ -1056,7 +1113,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.DAY)
.calendarInterval(DateHistogramInterval.DAY)
.field(DATE_FIELD),
histogram -> {}
histogram -> {}, false
));
assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [interval] configuration option."));

@ -1073,7 +1130,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
aggregation -> aggregation.interval(1000)
.fixedInterval(new DateHistogramInterval("2d"))
|
||||
.field(DATE_FIELD),
|
||||
histogram -> {}
|
||||
histogram -> {}, false
|
||||
));
|
||||
assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] with [interval] configuration option."));
|
||||
|
||||
|
@ -1090,7 +1147,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
aggregation -> aggregation.interval(1000)
|
||||
.calendarInterval(DateHistogramInterval.DAY)
|
||||
.field(DATE_FIELD),
|
||||
histogram -> {}
|
||||
histogram -> {}, false
|
||||
));
|
||||
assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [interval] configuration option."));
|
||||
|
||||
|
@ -1101,7 +1158,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(),
|
||||
Collections.emptyList(),
|
||||
aggregation -> aggregation.dateHistogramInterval(new DateHistogramInterval("foobar")).field(DATE_FIELD),
|
||||
histogram -> {}
|
||||
histogram -> {}, false
|
||||
));
|
||||
assertThat(e.getMessage(), equalTo("Unable to parse interval [foobar]"));
|
||||
assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
|
||||
|
@ -1109,50 +1166,59 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {

private void testSearchCase(Query query, List<String> dataset,
Consumer<DateHistogramAggregationBuilder> configure,
Consumer<InternalDateHistogram> verify) throws IOException {
testSearchCase(query, dataset, configure, verify, 10000);
Consumer<InternalDateHistogram> verify, boolean useNanosecondResolution) throws IOException {
testSearchCase(query, dataset, configure, verify, 10000, useNanosecondResolution);
}

private void testSearchCase(Query query, List<String> dataset,
Consumer<DateHistogramAggregationBuilder> configure,
Consumer<InternalDateHistogram> verify,
int maxBucket) throws IOException {
executeTestCase(false, query, dataset, configure, verify, maxBucket);
int maxBucket, boolean useNanosecondResolution) throws IOException {
executeTestCase(false, query, dataset, configure, verify, maxBucket, useNanosecondResolution);
}

private void testSearchAndReduceCase(Query query, List<String> dataset,
Consumer<DateHistogramAggregationBuilder> configure,
Consumer<InternalDateHistogram> verify) throws IOException {
testSearchAndReduceCase(query, dataset, configure, verify, 1000);
Consumer<InternalDateHistogram> verify, boolean useNanosecondResolution) throws IOException {
testSearchAndReduceCase(query, dataset, configure, verify, 1000, useNanosecondResolution);
}

private void testSearchAndReduceCase(Query query, List<String> dataset,
Consumer<DateHistogramAggregationBuilder> configure,
Consumer<InternalDateHistogram> verify,
int maxBucket) throws IOException {
executeTestCase(true, query, dataset, configure, verify, maxBucket);
int maxBucket, boolean useNanosecondResolution) throws IOException {
executeTestCase(true, query, dataset, configure, verify, maxBucket, useNanosecondResolution);
}

private void testBothCases(Query query, List<String> dataset,
Consumer<DateHistogramAggregationBuilder> configure,
Consumer<InternalDateHistogram> verify) throws IOException {
testBothCases(query, dataset, configure, verify, 10000);
Consumer<InternalDateHistogram> verify, boolean useNanosecondResolution) throws IOException {
testBothCases(query, dataset, configure, verify, 10000, useNanosecondResolution);
}

private void testBothCases(Query query, List<String> dataset,
Consumer<DateHistogramAggregationBuilder> configure,
Consumer<InternalDateHistogram> verify,
int maxBucket) throws IOException {
testSearchCase(query, dataset, configure, verify, maxBucket);
testSearchAndReduceCase(query, dataset, configure, verify, maxBucket);
int maxBucket, boolean useNanosecondResolution) throws IOException {
testSearchCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution);
testSearchAndReduceCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution);
}

private void executeTestCase(boolean reduced, Query query, List<String> dataset,
private void executeTestCase(boolean reduced,
Query query,
List<String> dataset,
Consumer<DateHistogramAggregationBuilder> configure,
Consumer<InternalDateHistogram> verify,
int maxBucket) throws IOException {
int maxBucket, boolean useNanosecondResolution) throws IOException {

try (Directory directory = newDirectory()) {
DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name");
if (useNanosecondResolution) {
builder.withResolution(DateFieldMapper.Resolution.NANOSECONDS);
}
DateFieldMapper.DateFieldType fieldType = builder.fieldType();
fieldType.setHasDocValues(true);

try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
Document document = new Document();
for (String date : dataset) {

@ -1160,7 +1226,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
indexWriter.commit();
}

long instant = asLong(date);
long instant = asLong(date, fieldType);
document.add(new SortedNumericDocValuesField(DATE_FIELD, instant));
document.add(new LongPoint(INSTANT_FIELD, instant));
indexWriter.addDocument(document);

@ -1176,9 +1242,6 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
configure.accept(aggregationBuilder);
}

DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name");
DateFieldMapper.DateFieldType fieldType = builder.fieldType();
fieldType.setHasDocValues(true);
fieldType.setName(aggregationBuilder.field());

InternalDateHistogram histogram;

@ -1195,4 +1258,8 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
private static long asLong(String dateTime) {
return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli();
}

private static long asLong(String dateTime, DateFieldMapper.DateFieldType fieldType) {
return fieldType.parse(dateTime);
}
}

@ -0,0 +1,684 @@
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.apache.lucene.document.BinaryDocValuesField;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.queries.BinaryDocValuesRangeQuery;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.CheckedConsumer;
|
||||
import org.elasticsearch.common.time.DateFormatters;
|
||||
import org.elasticsearch.index.mapper.DateFieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.RangeFieldMapper;
|
||||
import org.elasticsearch.index.mapper.RangeType;
|
||||
import org.elasticsearch.search.aggregations.AggregatorTestCase;
|
||||
import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.time.Instant;
|
||||
import java.time.ZoneOffset;
|
||||
import java.time.ZonedDateTime;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
import static java.util.Collections.singleton;
|
||||
|
||||
public class DateRangeHistogramAggregatorTests extends AggregatorTestCase {
|
||||
|
||||
public static final String FIELD_NAME = "fieldName";
|
||||
|
||||
public void testBasics() throws Exception {
|
||||
RangeFieldMapper.Range range = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-01T12:14:36"),
|
||||
asLong("2019-08-01T15:07:22"), true, true);
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.DAY),
|
||||
writer -> writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range))))),
|
||||
histo -> {
|
||||
assertEquals(1, histo.getBuckets().size());
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void testUnsupportedRangeType() throws Exception {
|
||||
RangeType rangeType = RangeType.LONG;
|
||||
final String fieldName = "field";
|
||||
|
||||
try (Directory dir = newDirectory();
|
||||
RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
|
||||
Document doc = new Document();
|
||||
BytesRef encodedRange =
|
||||
rangeType.encodeRanges(singleton(new RangeFieldMapper.Range(rangeType, 12234, 89765, true, true)));
|
||||
doc.add(new BinaryDocValuesField(fieldName, encodedRange));
|
||||
w.addDocument(doc);
|
||||
|
||||
DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("my_agg")
|
||||
.field(fieldName)
|
||||
.calendarInterval(DateHistogramInterval.MONTH);
|
||||
|
||||
MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType();
|
||||
fieldType.setName(fieldName);
|
||||
|
||||
try (IndexReader reader = w.getReader()) {
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
expectThrows(IllegalArgumentException.class, () -> createAggregator(aggBuilder, searcher, fieldType));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Test calendar interval behaves correctly on months over 30 days
|
||||
*/
|
||||
public void testLongMonthsCalendarInterval() throws Exception {
|
||||
RangeFieldMapper.Range julyRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T00:00:00"),
|
||||
asLong("2019-07-31T23:59:59"), true, true);
|
||||
RangeFieldMapper.Range augustRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-01T00:00:00"),
|
||||
asLong("2019-08-31T23:59:59"), true, true);
|
||||
RangeFieldMapper.Range septemberRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-09-01T00:00:00"),
|
||||
asLong("2019-09-30T23:59:59"), true, true);
|
||||
|
||||
// Calendar interval case - three months, three buckets
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.MONTH),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(julyRange)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(augustRange)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(septemberRange)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(3, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T00:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-08-01T00:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-09-01T00:00:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test fixed interval 30d behaves correctly with months over 30 days
|
||||
*/
|
||||
public void testLongMonthsFixedInterval() throws Exception {
|
||||
RangeFieldMapper.Range julyRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T00:00:00"),
|
||||
asLong("2019-07-31T23:59:59"), true, true);
|
||||
RangeFieldMapper.Range augustRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-01T00:00:00"),
|
||||
asLong("2019-08-31T23:59:59"), true, true);
|
||||
RangeFieldMapper.Range septemberRange = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-09-01T00:00:00"),
|
||||
asLong("2019-09-30T23:59:59"), true, true);
|
||||
|
||||
// Fixed interval case - 4 periods of 30 days
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.fixedInterval(new DateHistogramInterval("30d")),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(julyRange)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(augustRange)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(septemberRange)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(4, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-06-13T00:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-13T00:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(2, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-08-12T00:00:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(2, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-09-11T00:00:00"), histo.getBuckets().get(3).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(3).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void testOffsetCalendarInterval() throws Exception {
|
||||
|
||||
RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:15:00"),
|
||||
asLong("2019-07-01T03:20:00"), true, true);
|
||||
RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:45:00"),
|
||||
asLong("2019-07-01T03:50:00"), true, true);
|
||||
RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:55:00"),
|
||||
asLong("2019-07-01T04:05:00"), true, true);
|
||||
RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:17:00"),
|
||||
asLong("2019-07-01T04:19:00"), true, true);
|
||||
RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:55:00"),
|
||||
asLong("2019-07-01T05:05:00"), true, true);
|
||||
|
||||
// No offset, just to make sure the ranges line up as expected
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.HOUR),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(3, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T03:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(3, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T04:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(3, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T05:00:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
|
||||
// 10 minute offset should shift all data into one bucket
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.HOUR).offset("10m"),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(2, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T03:10:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(3, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T04:10:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(2, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void testOffsetFixedInterval() throws Exception {
|
||||
|
||||
RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:15:00"),
|
||||
asLong("2019-07-01T03:20:00"), true, true);
|
||||
RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:45:00"),
|
||||
asLong("2019-07-01T03:50:00"), true, true);
|
||||
RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:55:00"),
|
||||
asLong("2019-07-01T04:05:00"), true, true);
|
||||
RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:17:00"),
|
||||
asLong("2019-07-01T04:19:00"), true, true);
|
||||
RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:55:00"),
|
||||
asLong("2019-07-01T05:05:00"), true, true);
|
||||
|
||||
// No offset, just to make sure the ranges line up as expected
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.fixedInterval(new DateHistogramInterval("1h")),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(3, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T03:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(3, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T04:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(3, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T05:00:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
|
||||
// 10 minute offset should shift all data into one bucket
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.fixedInterval(new DateHistogramInterval("1h")).offset("10m"),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(2, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T03:10:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(3, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T04:10:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(2, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test that when incrementing the rounded bucket key, offsets are correctly taken into account at the <1hour scale
|
||||
*/
|
||||
public void testNextRoundingValueOffsetHours() throws Exception {
|
||||
RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:15:00"),
|
||||
asLong("2019-07-01T03:20:00"), true, true);
|
||||
RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T04:15:00"),
|
||||
asLong("2019-07-01T04:20:00"), true, true);
|
||||
RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T05:15:00"),
|
||||
asLong("2019-07-01T05:20:00"), true, true);
|
||||
RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T06:15:00"),
|
||||
asLong("2019-07-01T06:20:00"), true, true);
|
||||
RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T07:15:00"),
|
||||
asLong("2019-07-01T07:20:00"), true, true);
|
||||
RangeFieldMapper.Range range6 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T08:15:00"),
|
||||
asLong("2019-07-01T08:20:00"), true, true);
|
||||
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.fixedInterval(new DateHistogramInterval("1h")).offset("13m"),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(6, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T03:13:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T04:13:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T05:13:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T06:13:00"), histo.getBuckets().get(3).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(3).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T07:13:00"), histo.getBuckets().get(4).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(4).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T08:13:00"), histo.getBuckets().get(5).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(5).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.HOUR).offset("13m"),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(6, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T03:13:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T04:13:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T05:13:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T06:13:00"), histo.getBuckets().get(3).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(3).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T07:13:00"), histo.getBuckets().get(4).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(4).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T08:13:00"), histo.getBuckets().get(5).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(5).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
/*
|
||||
* Test that when incrementing the rounded bucket key, offsets are correctly taken into account when interval is on date scale and
|
||||
* offset is on time scale
|
||||
*/
|
||||
public void testNextRoundingValueOffsetDays() throws Exception {
|
||||
RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-01T03:15:00"),
|
||||
asLong("2019-07-01T03:20:00"), true, true);
|
||||
RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-02T04:15:00"),
|
||||
asLong("2019-07-02T04:20:00"), true, true);
|
||||
RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-03T05:15:00"),
|
||||
asLong("2019-07-03T05:20:00"), true, true);
|
||||
RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-04T06:15:00"),
|
||||
asLong("2019-07-04T06:20:00"), true, true);
|
||||
RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-05T07:15:00"),
|
||||
asLong("2019-07-05T07:20:00"), true, true);
|
||||
RangeFieldMapper.Range range6 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-07-06T08:15:00"),
|
||||
asLong("2019-07-06T08:20:00"), true, true);
|
||||
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.fixedInterval(new DateHistogramInterval("1d")).offset("36h"),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(6, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-06-30T12:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T12:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-02T12:00:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-03T12:00:00"), histo.getBuckets().get(3).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(3).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-04T12:00:00"), histo.getBuckets().get(4).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(4).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-05T12:00:00"), histo.getBuckets().get(5).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(5).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.DAY).offset("12h"),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(6, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-06-30T12:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-01T12:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-02T12:00:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-03T12:00:00"), histo.getBuckets().get(3).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(3).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-04T12:00:00"), histo.getBuckets().get(4).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(4).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-07-05T12:00:00"), histo.getBuckets().get(5).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(5).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void testMinDocCount() throws Exception {
|
||||
RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-01T12:14:36"),
|
||||
asLong("2019-08-01T15:07:22"), true, true);
|
||||
RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T12:14:36"),
|
||||
asLong("2019-08-02T15:07:22"), true, true);
|
||||
RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T12:14:36"),
|
||||
asLong("2019-08-02T15:07:22"), true, true);
|
||||
RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T12:14:36"),
|
||||
asLong("2019-08-03T15:07:22"), true, true);
|
||||
|
||||
// Guard case, make sure the agg buckets as expected without min doc count
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.DAY),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(3, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-08-01T00:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-08-02T00:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(3, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-08-03T00:00:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
|
||||
testCase(
|
||||
new MatchAllDocsQuery(),
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.DAY).minDocCount(2),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(1, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-08-02T00:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(3, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void testIntersectQuery() throws Exception {
|
||||
RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:15:00"),
|
||||
asLong("2019-08-02T02:45:00"), true, true);
|
||||
RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T05:15:00"),
|
||||
asLong("2019-08-02T05:45:00"), true, true);
|
||||
|
||||
RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T03:15:00"),
|
||||
asLong("2019-08-02T03:45:00"), true, true);
|
||||
RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T04:15:00"),
|
||||
asLong("2019-08-02T04:45:00"), true, true);
|
||||
RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T03:30:00"),
|
||||
asLong("2019-08-02T04:30:00"), true, true);
|
||||
|
||||
RangeFieldMapper.Range range6 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:15:00"),
|
||||
asLong("2019-08-02T03:45:00"), true, true);
|
||||
RangeFieldMapper.Range range7 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T04:15:00"),
|
||||
asLong("2019-08-02T05:45:00"), true, true);
|
||||
RangeFieldMapper.Range range8 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:30:00"),
|
||||
asLong("2019-08-02T05:30:00"), true, true);
|
||||
|
||||
Query query = RangeType.DATE.dvRangeQuery(FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, asLong("2019-08-02T03:00:00"),
|
||||
asLong("2019-08-02T05:00:00"), true, true);
|
||||
|
||||
|
||||
testCase(
|
||||
query,
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.HOUR).minDocCount(2),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range7)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range8)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(4, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-08-02T02:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(2, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-08-02T03:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(4, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-08-02T04:00:00"), histo.getBuckets().get(2).getKey());
|
||||
assertEquals(4, histo.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-08-02T05:00:00"), histo.getBuckets().get(3).getKey());
|
||||
assertEquals(2, histo.getBuckets().get(3).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void testWithinQuery() throws Exception {
|
||||
RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:15:00"),
|
||||
asLong("2019-08-02T02:45:00"), true, true);
|
||||
RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T05:15:00"),
|
||||
asLong("2019-08-02T05:45:00"), true, true);
|
||||
|
||||
RangeFieldMapper.Range range3 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T03:15:00"),
|
||||
asLong("2019-08-02T03:45:00"), true, true);
|
||||
RangeFieldMapper.Range range4 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T04:15:00"),
|
||||
asLong("2019-08-02T04:45:00"), true, true);
|
||||
RangeFieldMapper.Range range5 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T03:30:00"),
|
||||
asLong("2019-08-02T04:30:00"), true, true);
|
||||
|
||||
RangeFieldMapper.Range range6 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:15:00"),
|
||||
asLong("2019-08-02T03:45:00"), true, true);
|
||||
RangeFieldMapper.Range range7 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T04:15:00"),
|
||||
asLong("2019-08-02T05:45:00"), true, true);
|
||||
RangeFieldMapper.Range range8 = new RangeFieldMapper.Range(RangeType.DATE, asLong("2019-08-02T02:30:00"),
|
||||
asLong("2019-08-02T05:30:00"), true, true);
|
||||
|
||||
Query query = RangeType.DATE.dvRangeQuery(FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.WITHIN, asLong("2019-08-02T03:00:00"),
|
||||
asLong("2019-08-02T05:00:00"), true, true);
|
||||
|
||||
|
||||
testCase(
|
||||
query,
|
||||
builder -> builder.calendarInterval(DateHistogramInterval.HOUR).minDocCount(2),
|
||||
writer -> {
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range1)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range2)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range3)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range4)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range5)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range6)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range7)))));
|
||||
writer.addDocument(singleton(new BinaryDocValuesField(FIELD_NAME, RangeType.DATE.encodeRanges(singleton(range8)))));
|
||||
},
|
||||
histo -> {
|
||||
assertEquals(2, histo.getBuckets().size());
|
||||
|
||||
assertEquals(asZDT("2019-08-02T03:00:00"), histo.getBuckets().get(0).getKey());
|
||||
assertEquals(2, histo.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(asZDT("2019-08-02T04:00:00"), histo.getBuckets().get(1).getKey());
|
||||
assertEquals(2, histo.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histo));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
private void testCase(Query query,
|
||||
Consumer<DateHistogramAggregationBuilder> configure,
|
||||
CheckedConsumer<RandomIndexWriter, IOException> buildIndex,
|
||||
Consumer<InternalDateHistogram> verify) throws IOException {
|
||||
MappedFieldType fieldType = new RangeFieldMapper.Builder(FIELD_NAME, RangeType.DATE).fieldType();
|
||||
fieldType.setName(FIELD_NAME);
|
||||
final DateHistogramAggregationBuilder aggregationBuilder = new DateHistogramAggregationBuilder("_name").field(FIELD_NAME);
|
||||
if (configure != null) {
|
||||
configure.accept(aggregationBuilder);
|
||||
}
|
||||
testCase(aggregationBuilder, query, buildIndex, verify, fieldType);
|
||||
}
|
||||
|
||||
private void testCase(DateHistogramAggregationBuilder aggregationBuilder, Query query,
|
||||
CheckedConsumer<RandomIndexWriter, IOException> buildIndex, Consumer<InternalDateHistogram> verify,
|
||||
MappedFieldType fieldType) throws IOException {
|
||||
Directory directory = newDirectory();
|
||||
RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
|
||||
buildIndex.accept(indexWriter);
|
||||
indexWriter.close();
|
||||
|
||||
IndexReader indexReader = DirectoryReader.open(directory);
|
||||
IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
|
||||
|
||||
InternalDateHistogram histogram = searchAndReduce(indexSearcher, query, aggregationBuilder, fieldType);
|
||||
verify.accept(histogram);
|
||||
|
||||
indexReader.close();
|
||||
directory.close();
|
||||
}
|
||||
|
||||
private static long asLong(String dateTime) {
|
||||
return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli();
|
||||
}
|
||||
|
||||
private static ZonedDateTime asZDT(String dateTime) {
|
||||
return Instant.ofEpochMilli(asLong(dateTime)).atZone(ZoneOffset.UTC);
|
||||
}
|
||||
}
|
|
@ -34,10 +34,9 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;

import static org.hamcrest.Matchers.containsString;

public class HistogramAggregatorTests extends AggregatorTestCase {
public class NumericHistogramAggregatorTests extends AggregatorTestCase {

public void testLongs() throws Exception {
try (Directory dir = newDirectory();

@ -300,6 +299,44 @@ public class HistogramAggregatorTests extends AggregatorTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testRandomOffset() throws Exception {
|
||||
try (Directory dir = newDirectory();
|
||||
RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
|
||||
// Note, these values are carefully chosen to ensure that no matter what offset we pick, no two can end up in the same bucket
|
||||
for (double value : new double[] {9.3, 3.2, -5}) {
|
||||
Document doc = new Document();
|
||||
doc.add(new SortedNumericDocValuesField("field", NumericUtils.doubleToSortableLong(value)));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
|
||||
final double offset = randomDouble();
|
||||
final double interval = 5;
|
||||
final double expectedOffset = offset % interval;
|
||||
HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
|
||||
.field("field")
|
||||
.interval(interval)
|
||||
.offset(offset);
|
||||
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE);
|
||||
fieldType.setName("field");
|
||||
try (IndexReader reader = w.getReader()) {
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
|
||||
assertEquals(3, histogram.getBuckets().size());
|
||||
|
||||
assertEquals(-10 + expectedOffset, histogram.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(expectedOffset, histogram.getBuckets().get(1).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(5 + expectedOffset, histogram.getBuckets().get(2).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertTrue(AggregationInspectionHelper.hasValue(histogram));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testExtendedBounds() throws Exception {
|
||||
try (Directory dir = newDirectory();
|
||||
RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {

@ -0,0 +1,446 @@
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.aggregations.bucket.histogram;
|
||||
|
||||
import org.apache.lucene.document.BinaryDocValuesField;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.network.InetAddresses;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.RangeFieldMapper;
|
||||
import org.elasticsearch.index.mapper.RangeType;
|
||||
import org.elasticsearch.search.aggregations.AggregatorTestCase;
|
||||
import org.junit.Rule;
|
||||
import org.junit.rules.ExpectedException;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
import java.util.HashSet;
|
||||
|
||||
public class RangeHistogramAggregatorTests extends AggregatorTestCase {
|
||||
|
||||
@Rule
|
||||
public final ExpectedException expectedException = ExpectedException.none();
|
||||
|
||||
public void testDoubles() throws Exception {
|
||||
RangeType rangeType = RangeType.DOUBLE;
|
||||
try (Directory dir = newDirectory();
|
||||
RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
|
||||
for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
|
||||
new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0 5
|
||||
new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0
|
||||
new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10
|
||||
new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45
|
||||
}) {
|
||||
Document doc = new Document();
|
||||
BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
|
||||
doc.add(new BinaryDocValuesField("field", encodedRange));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
|
||||
HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
|
||||
.field("field")
|
||||
.interval(5);
|
||||
MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
|
||||
fieldType.setName("field");
|
||||
|
||||
try (IndexReader reader = w.getReader()) {
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
|
||||
assertEquals(6, histogram.getBuckets().size());
|
||||
|
||||
assertEquals(-5d, histogram.getBuckets().get(0).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(0).getDocCount());
|
||||
|
||||
assertEquals(0d, histogram.getBuckets().get(1).getKey());
|
||||
assertEquals(3, histogram.getBuckets().get(1).getDocCount());
|
||||
|
||||
assertEquals(5d, histogram.getBuckets().get(2).getKey());
|
||||
assertEquals(2, histogram.getBuckets().get(2).getDocCount());
|
||||
|
||||
assertEquals(10d, histogram.getBuckets().get(3).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(3).getDocCount());
|
||||
|
||||
assertEquals(40d, histogram.getBuckets().get(4).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(4).getDocCount());
|
||||
|
||||
assertEquals(45d, histogram.getBuckets().get(5).getKey());
|
||||
assertEquals(1, histogram.getBuckets().get(5).getDocCount());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    public void testLongs() throws Exception {
        RangeType rangeType = RangeType.LONG;
        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
                new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), // bucket 0 5
                new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), // bucket -5, 0
                new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true), // bucket 0, 5, 10
                new RangeFieldMapper.Range(rangeType, 42L, 49L, true, true), // bucket 40, 45
            }) {
                Document doc = new Document();
                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
                doc.add(new BinaryDocValuesField("field", encodedRange));
                w.addDocument(doc);
            }

            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(5);
            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
            fieldType.setName("field");

            try (IndexReader reader = w.getReader()) {
                IndexSearcher searcher = new IndexSearcher(reader);
                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
                assertEquals(6, histogram.getBuckets().size());

                assertEquals(-5d, histogram.getBuckets().get(0).getKey());
                assertEquals(1, histogram.getBuckets().get(0).getDocCount());

                assertEquals(0d, histogram.getBuckets().get(1).getKey());
                assertEquals(3, histogram.getBuckets().get(1).getDocCount());

                assertEquals(5d, histogram.getBuckets().get(2).getKey());
                assertEquals(2, histogram.getBuckets().get(2).getDocCount());

                assertEquals(10d, histogram.getBuckets().get(3).getKey());
                assertEquals(1, histogram.getBuckets().get(3).getDocCount());

                assertEquals(40d, histogram.getBuckets().get(4).getKey());
                assertEquals(1, histogram.getBuckets().get(4).getDocCount());

                assertEquals(45d, histogram.getBuckets().get(5).getKey());
                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
            }
        }
    }

    public void testMultipleRanges() throws Exception {
        RangeType rangeType = RangeType.LONG;
        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
            Document doc = new Document();
            Set<RangeFieldMapper.Range> multiRecord = new HashSet<>(4);
            multiRecord.add(new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true)); // bucket 0 5
            multiRecord.add(new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true)); // bucket -5, 0
            multiRecord.add(new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true)); // bucket 0, 5, 10
            multiRecord.add(new RangeFieldMapper.Range(rangeType, 42L, 49L, true, true)); // bucket 40, 45
            BytesRef encodedRange = rangeType.encodeRanges(multiRecord);
            doc.add(new BinaryDocValuesField("field", encodedRange));
            w.addDocument(doc);

            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(5);
            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
            fieldType.setName("field");

            try (IndexReader reader = w.getReader()) {
                IndexSearcher searcher = new IndexSearcher(reader);
                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
                assertEquals(6, histogram.getBuckets().size());

                assertEquals(-5d, histogram.getBuckets().get(0).getKey());
                assertEquals(1, histogram.getBuckets().get(0).getDocCount());

                assertEquals(0d, histogram.getBuckets().get(1).getKey());
                assertEquals(1, histogram.getBuckets().get(1).getDocCount());

                assertEquals(5d, histogram.getBuckets().get(2).getKey());
                assertEquals(1, histogram.getBuckets().get(2).getDocCount());

                assertEquals(10d, histogram.getBuckets().get(3).getKey());
                assertEquals(1, histogram.getBuckets().get(3).getDocCount());

                assertEquals(40d, histogram.getBuckets().get(4).getKey());
                assertEquals(1, histogram.getBuckets().get(4).getDocCount());

                assertEquals(45d, histogram.getBuckets().get(5).getKey());
                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
            }
        }

    }

    public void testMultipleRangesLotsOfOverlap() throws Exception {
        RangeType rangeType = RangeType.LONG;
        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
            Document doc = new Document();
            Set<RangeFieldMapper.Range> multiRecord = new HashSet<>(4);
            multiRecord.add(new RangeFieldMapper.Range(rangeType, 1L, 2L, true, true)); // bucket 0
            multiRecord.add(new RangeFieldMapper.Range(rangeType, 1L, 4L, true, true)); // bucket 0
            multiRecord.add(new RangeFieldMapper.Range(rangeType, 1L, 13L, true, true)); // bucket 0, 5, 10
            multiRecord.add(new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true)); // bucket 0, 5
            BytesRef encodedRange = rangeType.encodeRanges(multiRecord);
            doc.add(new BinaryDocValuesField("field", encodedRange));
            w.addDocument(doc);

            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(5);
            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
            fieldType.setName("field");

            try (IndexReader reader = w.getReader()) {
                IndexSearcher searcher = new IndexSearcher(reader);
                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
                assertEquals(3, histogram.getBuckets().size());

                assertEquals(0d, histogram.getBuckets().get(0).getKey());
                assertEquals(1, histogram.getBuckets().get(0).getDocCount());

                assertEquals(5d, histogram.getBuckets().get(1).getKey());
                assertEquals(1, histogram.getBuckets().get(1).getDocCount());

                assertEquals(10d, histogram.getBuckets().get(2).getKey());
                assertEquals(1, histogram.getBuckets().get(2).getDocCount());
            }
        }

    }

    public void testLongsIrrationalInterval() throws Exception {
        RangeType rangeType = RangeType.LONG;
        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
                new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true), // bucket 0, 1 * PI
                new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true), // bucket -1 * PI, 0, 1 * PI
                new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true), // bucket 1 * PI .. 4 * PI
            }) {
                Document doc = new Document();
                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
                doc.add(new BinaryDocValuesField("field", encodedRange));
                w.addDocument(doc);
            }

            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(Math.PI);
            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
            fieldType.setName("field");

            try (IndexReader reader = w.getReader()) {
                IndexSearcher searcher = new IndexSearcher(reader);
                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
                assertEquals(6, histogram.getBuckets().size());

                assertEquals(-1 * Math.PI, histogram.getBuckets().get(0).getKey());
                assertEquals(1, histogram.getBuckets().get(0).getDocCount());

                assertEquals(0 * Math.PI, histogram.getBuckets().get(1).getKey());
                assertEquals(2, histogram.getBuckets().get(1).getDocCount());

                assertEquals(1 * Math.PI, histogram.getBuckets().get(2).getKey());
                assertEquals(3, histogram.getBuckets().get(2).getDocCount());

                assertEquals(2 * Math.PI, histogram.getBuckets().get(3).getKey());
                assertEquals(1, histogram.getBuckets().get(3).getDocCount());

                assertEquals(3 * Math.PI, histogram.getBuckets().get(4).getKey());
                assertEquals(1, histogram.getBuckets().get(4).getDocCount());

                assertEquals(4 * Math.PI, histogram.getBuckets().get(5).getKey());
                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
            }
        }
    }

    public void testMinDocCount() throws Exception {
        RangeType rangeType = RangeType.LONG;
        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
                new RangeFieldMapper.Range(rangeType, -14L, -11L, true, true), // bucket -15
                new RangeFieldMapper.Range(rangeType, 0L, 9L, true, true), // bucket 0, 5
                new RangeFieldMapper.Range(rangeType, 6L, 12L, true, true), // bucket 5, 10
                new RangeFieldMapper.Range(rangeType, 13L, 14L, true, true), // bucket 10
            }) {
                Document doc = new Document();
                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
                doc.add(new BinaryDocValuesField("field", encodedRange));
                w.addDocument(doc);
            }

            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(5)
                .minDocCount(2);
            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
            fieldType.setName("field");

            try (IndexReader reader = w.getReader()) {
                IndexSearcher searcher = new IndexSearcher(reader);
                InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
                assertEquals(2, histogram.getBuckets().size());

                assertEquals(5d, histogram.getBuckets().get(0).getKey());
                assertEquals(2, histogram.getBuckets().get(0).getDocCount());

                assertEquals(10d, histogram.getBuckets().get(1).getKey());
                assertEquals(2, histogram.getBuckets().get(1).getDocCount());
            }
        }
    }

    public void testOffset() throws Exception {
        RangeType rangeType = RangeType.DOUBLE;
        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
                new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket -1, 4
                new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -6 -1 4
                new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 4, 9
                new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 39, 44, 49
            }) {
                Document doc = new Document();
                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
                doc.add(new BinaryDocValuesField("field", encodedRange));
                w.addDocument(doc);
            }

            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(5)
                .offset(4);
            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
            fieldType.setName("field");

            try (IndexReader reader = w.getReader()) {
                IndexSearcher searcher = new IndexSearcher(reader);
                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
                //assertEquals(7, histogram.getBuckets().size());

                assertEquals(-6d, histogram.getBuckets().get(0).getKey());
                assertEquals(1, histogram.getBuckets().get(0).getDocCount());

                assertEquals(-1d, histogram.getBuckets().get(1).getKey());
                assertEquals(2, histogram.getBuckets().get(1).getDocCount());

                assertEquals(4d, histogram.getBuckets().get(2).getKey());
                assertEquals(3, histogram.getBuckets().get(2).getDocCount());

                assertEquals(9d, histogram.getBuckets().get(3).getKey());
                assertEquals(1, histogram.getBuckets().get(3).getDocCount());

                assertEquals(39d, histogram.getBuckets().get(4).getKey());
                assertEquals(1, histogram.getBuckets().get(4).getDocCount());

                assertEquals(44d, histogram.getBuckets().get(5).getKey());
                assertEquals(1, histogram.getBuckets().get(5).getDocCount());

                assertEquals(49d, histogram.getBuckets().get(6).getKey());
                assertEquals(1, histogram.getBuckets().get(6).getDocCount());
            }
        }
    }

    public void testOffsetGtInterval() throws Exception {
        RangeType rangeType = RangeType.DOUBLE;
        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
                new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0 5
                new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0
                new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10
                new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45
            }) {
                Document doc = new Document();
                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
                doc.add(new BinaryDocValuesField("field", encodedRange));
                w.addDocument(doc);
            }

            // I'd like to randomize the offset here, like I did in the test for the numeric side, but there's no way I can think of to
            // construct the intervals such that they wouldn't "slosh" between buckets.
            final double offset = 20;
            final double interval = 5;
            final double expectedOffset = offset % interval;

            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(interval)
                .offset(offset);
            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
            fieldType.setName("field");

            try (IndexReader reader = w.getReader()) {
                IndexSearcher searcher = new IndexSearcher(reader);
                InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
                assertEquals(6, histogram.getBuckets().size());

                assertEquals(-5d + expectedOffset, histogram.getBuckets().get(0).getKey());
                assertEquals(1, histogram.getBuckets().get(0).getDocCount());

                assertEquals(0d + expectedOffset, histogram.getBuckets().get(1).getKey());
                assertEquals(3, histogram.getBuckets().get(1).getDocCount());

                assertEquals(5d + expectedOffset, histogram.getBuckets().get(2).getKey());
                assertEquals(2, histogram.getBuckets().get(2).getDocCount());

                assertEquals(10d + expectedOffset, histogram.getBuckets().get(3).getKey());
                assertEquals(1, histogram.getBuckets().get(3).getDocCount());

                assertEquals(40d + expectedOffset, histogram.getBuckets().get(4).getKey());
                assertEquals(1, histogram.getBuckets().get(4).getDocCount());

                assertEquals(45d + expectedOffset, histogram.getBuckets().get(5).getKey());
                assertEquals(1, histogram.getBuckets().get(5).getDocCount());
            }
        }
    }

    public void testIpRangesUnsupported() throws Exception {
        RangeType rangeType = RangeType.IP;
        try (Directory dir = newDirectory();
             RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
            Document doc = new Document();
            BytesRef encodedRange =
                rangeType.encodeRanges(Collections.singleton(new RangeFieldMapper.Range(rangeType, InetAddresses.forString("10.0.0.1"),
                    InetAddresses.forString("10.0.0.10"), true, true)));
            doc.add(new BinaryDocValuesField("field", encodedRange));
            w.addDocument(doc);

            HistogramAggregationBuilder aggBuilder = new HistogramAggregationBuilder("my_agg")
                .field("field")
                .interval(5);
            MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
            fieldType.setName("field");

            try (IndexReader reader = w.getReader()) {
                IndexSearcher searcher = new IndexSearcher(reader);
                expectedException.expect(IllegalArgumentException.class);
                search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
            }
        }

    }

}

@@ -19,6 +19,7 @@

package org.elasticsearch.search.aggregations.bucket.missing;

import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;

@@ -30,11 +31,13 @@ import org.apache.lucene.store.Directory;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;
import org.elasticsearch.search.aggregations.support.ValueType;

import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

@@ -85,6 +88,34 @@ public class MissingAggregatorTests extends AggregatorTestCase {
        });
    }

    public void testMatchSparseRangeField() throws IOException {
        int numDocs = randomIntBetween(100, 200);
        final AtomicInteger count = new AtomicInteger();
        final String fieldName = "field";
        RangeType rangeType = RangeType.DOUBLE;
        final BinaryDocValuesField field = new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Collections.singleton(
            new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true))));
        MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType();
        fieldType.setName(fieldName);
        testBothCases(numDocs,
            fieldName,
            Queries.newMatchAllQuery(),
            doc -> {
                if (randomBoolean()) {
                    doc.add(new SortedNumericDocValuesField("another_field", randomLong()));
                    count.incrementAndGet();
                } else {
                    doc.add(field);
                }
            },
            internalMissing -> {
                assertEquals(internalMissing.getDocCount(), count.get());
                count.set(0);
                assertTrue(AggregationInspectionHelper.hasValue(internalMissing));
            }, fieldType);
    }

    public void testMissingField() throws IOException {
        int numDocs = randomIntBetween(10, 20);
        testBothCases(numDocs,

@@ -104,8 +135,22 @@ public class MissingAggregatorTests extends AggregatorTestCase {
                               Query query,
                               Consumer<Document> consumer,
                               Consumer<InternalMissing> verify) throws IOException {
-        executeTestCase(numDocs, fieldName, query, consumer, verify, false);
-        executeTestCase(numDocs, fieldName, query, consumer, verify, true);
+        NumberFieldMapper.Builder mapperBuilder = new NumberFieldMapper.Builder("_name",
+            NumberFieldMapper.NumberType.LONG);
+        final MappedFieldType fieldType = mapperBuilder.fieldType();
+        fieldType.setHasDocValues(true);
+        fieldType.setName(fieldName);
+        testBothCases(numDocs, fieldName, query, consumer, verify, fieldType);
    }

+    private void testBothCases(int numDocs,
+                               String fieldName,
+                               Query query,
+                               Consumer<Document> consumer,
+                               Consumer<InternalMissing> verify,
+                               MappedFieldType fieldType) throws IOException {
+        executeTestCase(numDocs, fieldName, query, consumer, verify, false, fieldType);
+        executeTestCase(numDocs, fieldName, query, consumer, verify, true, fieldType);
+
+    }

@@ -114,7 +159,8 @@ public class MissingAggregatorTests extends AggregatorTestCase {
                                  Query query,
                                  Consumer<Document> consumer,
                                  Consumer<InternalMissing> verify,
-                                 boolean reduced) throws IOException {
+                                 boolean reduced,
+                                 MappedFieldType fieldType) throws IOException {
        try (Directory directory = newDirectory()) {
            try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
                Document document = new Document();

@@ -131,16 +177,9 @@ public class MissingAggregatorTests extends AggregatorTestCase {
            try (IndexReader indexReader = DirectoryReader.open(directory)) {
                IndexSearcher indexSearcher =
                    newSearcher(indexReader, true, true);
-                MissingAggregationBuilder builder =
-                    new MissingAggregationBuilder("_name", ValueType.LONG);
+                MissingAggregationBuilder builder = new MissingAggregationBuilder("_name", null);
                builder.field(fieldName);

-                NumberFieldMapper.Builder mapperBuilder = new NumberFieldMapper.Builder("_name",
-                    NumberFieldMapper.NumberType.LONG);
-                MappedFieldType fieldType = mapperBuilder.fieldType();
-                fieldType.setHasDocValues(true);
-                fieldType.setName(builder.field());
-
                InternalMissing missing;
                if (reduced) {
                    missing = searchAndReduce(indexSearcher, query, builder, fieldType);

@@ -20,6 +20,7 @@

package org.elasticsearch.search.aggregations.bucket.significant;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;

@@ -40,9 +41,12 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory.ExecutionMode;
import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;

@@ -50,6 +54,7 @@ import org.junit.Before;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

@@ -256,6 +261,44 @@ public class SignificantTermsAggregatorTests extends AggregatorTestCase {
        }
    }

    /**
     * Uses the significant terms aggregation on a range field
     */
    public void testRangeField() throws IOException {
        RangeType rangeType = RangeType.DOUBLE;
        final RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true);
        final RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(rangeType, 6.0D, 10.0D, true, true);
        final String fieldName = "rangeField";
        MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType();
        fieldType.setName(fieldName);

        IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
        indexWriterConfig.setMaxBufferedDocs(100);
        indexWriterConfig.setRAMBufferSizeMB(100); // flush on open to have a single segment
        try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, indexWriterConfig)) {
            for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
                new RangeFieldMapper.Range(rangeType, 1L, 5L, true, true),
                new RangeFieldMapper.Range(rangeType, -3L, 4L, true, true),
                new RangeFieldMapper.Range(rangeType, 4L, 13L, true, true),
                new RangeFieldMapper.Range(rangeType, 42L, 49L, true, true),
            }) {
                Document doc = new Document();
                BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
                doc.add(new BinaryDocValuesField("field", encodedRange));
                w.addDocument(doc);
            }

            // Attempt aggregation on range field
            SignificantTermsAggregationBuilder sigAgg = new SignificantTermsAggregationBuilder("sig_text", null).field(fieldName);
            sigAgg.executionHint(randomExecutionHint());

            try (IndexReader reader = DirectoryReader.open(w)) {
                IndexSearcher indexSearcher = newIndexSearcher(reader);
                expectThrows(AggregationExecutionException.class, () -> createAggregator(sigAgg, indexSearcher, fieldType));
            }
        }
    }

    public void testFieldAlias() throws IOException {
        TextFieldType textFieldType = new TextFieldType();
        textFieldType.setName("text");

@@ -18,6 +18,7 @@
 */
package org.elasticsearch.search.aggregations.bucket.terms;

import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongPoint;

@@ -44,11 +45,14 @@ import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorTestCase;

@@ -285,6 +289,36 @@ public class RareTermsAggregatorTests extends AggregatorTestCase {
        }
    }

    public void testRangeField() throws Exception {
        RangeType rangeType = RangeType.DOUBLE;
        try (Directory directory = newDirectory()) {
            try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
                for (RangeFieldMapper.Range range : new RangeFieldMapper.Range[] {
                    new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true), // bucket 0 5
                    new RangeFieldMapper.Range(rangeType, -3.1, 4.2, true, true), // bucket -5, 0
                    new RangeFieldMapper.Range(rangeType, 4.2, 13.3, true, true), // bucket 0, 5, 10
                    new RangeFieldMapper.Range(rangeType, 42.5, 49.3, true, true), // bucket 40, 45
                }) {
                    Document doc = new Document();
                    BytesRef encodedRange = rangeType.encodeRanges(Collections.singleton(range));
                    doc.add(new BinaryDocValuesField("field", encodedRange));
                    indexWriter.addDocument(doc);
                }
                MappedFieldType fieldType = new RangeFieldMapper.Builder("field", rangeType).fieldType();
                fieldType.setName("field");

                try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) {
                    IndexSearcher indexSearcher = newIndexSearcher(indexReader);
                    RareTermsAggregationBuilder aggregationBuilder = new RareTermsAggregationBuilder("_name", null)
                        .field("field");
                    expectThrows(AggregationExecutionException.class,
                        () -> createAggregator(aggregationBuilder, indexSearcher, fieldType));
                }
            }
        }
    }

    public void testNestedTerms() throws IOException {
        Query query = new MatchAllDocsQuery();

@@ -18,6 +18,7 @@
 */
package org.elasticsearch.search.aggregations.bucket.terms;

import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.InetAddressPoint;

@@ -49,6 +50,8 @@ import org.elasticsearch.index.mapper.IpFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.Uid;

@@ -85,6 +88,7 @@ import org.elasticsearch.test.geo.RandomGeoGenerator;

import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;

@@ -888,6 +892,34 @@ public class TermsAggregatorTests extends AggregatorTestCase {
        }
    }

    public void testRangeField() throws Exception {
        try (Directory directory = newDirectory()) {
            double start = randomDouble();
            double end = randomDoubleBetween(Math.nextUp(start), Double.MAX_VALUE, false);
            RangeType rangeType = RangeType.DOUBLE;
            final RangeFieldMapper.Range range = new RangeFieldMapper.Range(rangeType, start, end, true, true);
            final String fieldName = "field";
            final BinaryDocValuesField field = new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Collections.singleton(range)));
            try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
                Document document = new Document();
                document.add(field);
                indexWriter.addDocument(document);
                try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) {
                    MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType();
                    fieldType.setHasDocValues(true);
                    fieldType.setName(fieldName);

                    IndexSearcher indexSearcher = newIndexSearcher(indexReader);
                    TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", null).field(fieldName);
                    // Note - other places we throw IllegalArgumentException
                    expectThrows(AggregationExecutionException.class, () -> {
                        createAggregator(aggregationBuilder, indexSearcher, fieldType);
                    });
                }
            }
        }
    }

    public void testGeoPointField() throws Exception {
        try (Directory directory = newDirectory()) {
            GeoPoint point = RandomGeoGenerator.randomPoint(random());

@@ -19,6 +19,7 @@

package org.elasticsearch.search.aggregations.metrics;

import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;

@@ -34,12 +35,15 @@ import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;
import org.elasticsearch.search.aggregations.support.ValueType;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

import static java.util.Collections.singleton;

@@ -54,6 +58,28 @@ public class CardinalityAggregatorTests extends AggregatorTestCase {
        });
    }

    public void testRangeFieldValues() throws IOException {
        RangeType rangeType = RangeType.DOUBLE;
        final RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true);
        final RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(rangeType, 6.0D, 10.0D, true, true);
        final String fieldName = "rangeField";
        MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType();
        fieldType.setName(fieldName);
        final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name", null).field(fieldName);
        Set<RangeFieldMapper.Range> multiRecord = new HashSet<>(2);
        multiRecord.add(range1);
        multiRecord.add(range2);
        testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> {
            iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range1)))));
            iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range1)))));
            iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range2)))));
            iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(multiRecord))));
        }, card -> {
            assertEquals(3.0, card.getValue(), 0);
            assertTrue(AggregationInspectionHelper.hasValue(card));
        }, fieldType);
    }

    public void testNoMatchingField() throws IOException {
        testCase(new MatchAllDocsQuery(), iw -> {
            iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 7)));

@@ -155,8 +181,7 @@ public class CardinalityAggregatorTests extends AggregatorTestCase {
        MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(
            NumberFieldMapper.NumberType.LONG);
        fieldType.setName("number");
-        final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder(
-            "_name", ValueType.NUMERIC).field("number");
+        final CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder("_name", null).field("number");
        testCase(aggregationBuilder, query, buildIndex, verify, fieldType);
    }

@@ -19,6 +19,7 @@

package org.elasticsearch.search.aggregations.metrics;

import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;

@@ -41,12 +42,16 @@ import org.elasticsearch.index.mapper.IpFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RangeType;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;
import org.elasticsearch.search.aggregations.support.ValueType;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

import static java.util.Collections.singleton;

@@ -162,6 +167,28 @@ public class ValueCountAggregatorTests extends AggregatorTestCase {
        }, null);
    }

    public void testRangeFieldValues() throws IOException {
        RangeType rangeType = RangeType.DOUBLE;
        final RangeFieldMapper.Range range1 = new RangeFieldMapper.Range(rangeType, 1.0D, 5.0D, true, true);
        final RangeFieldMapper.Range range2 = new RangeFieldMapper.Range(rangeType, 6.0D, 10.0D, true, true);
        final String fieldName = "rangeField";
        MappedFieldType fieldType = new RangeFieldMapper.Builder(fieldName, rangeType).fieldType();
        fieldType.setName(fieldName);
        final ValueCountAggregationBuilder aggregationBuilder = new ValueCountAggregationBuilder("_name", null).field(fieldName);
        Set<RangeFieldMapper.Range> multiRecord = new HashSet<>(2);
        multiRecord.add(range1);
        multiRecord.add(range2);
        testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> {
            iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range1)))));
            iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range1)))));
            iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(singleton(range2)))));
            iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(multiRecord))));
        }, count -> {
            assertEquals(4.0, count.getValue(), 0);
            assertTrue(AggregationInspectionHelper.hasValue(count));
        }, fieldType);
    }

    private void testCase(Query query,
                          ValueType valueType,
                          CheckedConsumer<RandomIndexWriter, IOException> indexer,

@@ -215,6 +242,8 @@ public class ValueCountAggregatorTests extends AggregatorTestCase {
                return new IpFieldMapper.Builder("_name").fieldType();
            case GEOPOINT:
                return new GeoPointFieldMapper.Builder("_name").fieldType();
            case RANGE:
                return new RangeFieldMapper.Builder("_name", RangeType.DOUBLE).fieldType();
            default:
                throw new IllegalArgumentException("Test does not support value type [" + valueType + "]");
        }

@@ -37,6 +37,7 @@ public class ValuesSourceTypeTests extends AbstractWriteableEnumTestCase {
        assertThat(ValuesSourceType.NUMERIC.ordinal(), equalTo(1));
        assertThat(ValuesSourceType.BYTES.ordinal(), equalTo(2));
        assertThat(ValuesSourceType.GEOPOINT.ordinal(), equalTo(3));
        assertThat(ValuesSourceType.RANGE.ordinal(), equalTo(4));
    }

    @Override

@@ -45,6 +46,7 @@
        assertThat(ValuesSourceType.fromString("numeric"), equalTo(ValuesSourceType.NUMERIC));
        assertThat(ValuesSourceType.fromString("bytes"), equalTo(ValuesSourceType.BYTES));
        assertThat(ValuesSourceType.fromString("geopoint"), equalTo(ValuesSourceType.GEOPOINT));
        assertThat(ValuesSourceType.fromString("range"), equalTo(ValuesSourceType.RANGE));
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ValuesSourceType.fromString("does_not_exist"));
        assertThat(e.getMessage(),
            equalTo("No enum constant org.elasticsearch.search.aggregations.support.ValuesSourceType.DOES_NOT_EXIST"));

@@ -57,6 +59,7 @@
        assertReadFromStream(1, ValuesSourceType.NUMERIC);
        assertReadFromStream(2, ValuesSourceType.BYTES);
        assertReadFromStream(3, ValuesSourceType.GEOPOINT);
        assertReadFromStream(4, ValuesSourceType.RANGE);
    }

    @Override

@@ -65,5 +68,6 @@
        assertWriteToStream(ValuesSourceType.NUMERIC, 1);
        assertWriteToStream(ValuesSourceType.BYTES, 2);
        assertWriteToStream(ValuesSourceType.GEOPOINT, 3);
        assertWriteToStream(ValuesSourceType.RANGE, 4);
    }
}

@@ -100,7 +100,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
        ProfileResult histoAggResult = aggProfileResultsList.get(0);
        assertThat(histoAggResult, notNullValue());
        assertThat(histoAggResult.getQueryName(),
-                equalTo("HistogramAggregator"));
+                equalTo("NumericHistogramAggregator"));
        assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
        assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0));
        assertThat(histoAggResult.getTime(), greaterThan(0L));

@@ -145,7 +145,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
        ProfileResult histoAggResult = aggProfileResultsList.get(0);
        assertThat(histoAggResult, notNullValue());
        assertThat(histoAggResult.getQueryName(),
-                equalTo("HistogramAggregator"));
+                equalTo("NumericHistogramAggregator"));
        assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
        assertThat(histoAggResult.getTime(), greaterThan(0L));
        Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();

@@ -215,7 +215,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
        ProfileResult histoAggResult = aggProfileResultsList.get(0);
        assertThat(histoAggResult, notNullValue());
        assertThat(histoAggResult.getQueryName(),
-                equalTo("HistogramAggregator"));
+                equalTo("NumericHistogramAggregator"));
        assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
        assertThat(histoAggResult.getTime(), greaterThan(0L));
        Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();

@@ -346,7 +346,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
        ProfileResult histoAggResult = aggProfileResultsList.get(0);
        assertThat(histoAggResult, notNullValue());
        assertThat(histoAggResult.getQueryName(),
-                equalTo("HistogramAggregator"));
+                equalTo("NumericHistogramAggregator"));
        assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
        assertThat(histoAggResult.getTime(), greaterThan(0L));
        Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();