Add parsing methods for InternalDateHistogram and InternalHistogram (#24213)
parent fbd793d9e6
commit 2ac90b3de9
@@ -111,10 +111,9 @@ public final class XContentParserUtils {
     }

     /**
-     * This method expects that the current token is a {@code XContentParser.Token.FIELD_NAME} and
-     * that the current field name is the concatenation of a type, delimiter and name (ex: terms#foo
-     * where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry}, "#" is
-     * the delimiter and "foo" the name of the object to parse).
+     * This method expects that the current field name is the concatenation of a type, a delimiter and a name
+     * (ex: terms#foo where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry},
+     * "#" is the delimiter and "foo" the name of the object to parse).
      *
      * The method splits the field's name to extract the type and name and then parses the object
      * using the {@link XContentParser#namedObject(Class, String, Object)} method.
@@ -128,7 +127,6 @@ public final class XContentParserUtils {
      * from the field's name
      */
     public static <T> T parseTypedKeysObject(XContentParser parser, String delimiter, Class<T> objectClass) throws IOException {
-        ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
         String currentFieldName = parser.currentName();
         if (Strings.hasLength(currentFieldName)) {
             int position = currentFieldName.indexOf(delimiter);
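For orientation (not part of the commit): with the typed_keys option, an aggregation of type "terms" named "foo" is serialized under the single field name "terms#foo". A minimal sketch of the split this method performs, using hypothetical values for illustration:

    // Hypothetical values; the real method reads them from the parser.
    String currentFieldName = "terms#foo";   // field name produced with typed_keys
    String delimiter = "#";                  // Aggregation.TYPED_KEYS_DELIMITER
    int position = currentFieldName.indexOf(delimiter);
    String type = currentFieldName.substring(0, position);   // "terms", the registry lookup key
    String name = currentFieldName.substring(position + 1);  // "foo", the parsed object's name
    // parser.namedObject(objectClass, type, name) then dispatches to the registered parser.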
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParserUtils;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+
+public abstract class ParsedMultiBucketAggregation extends ParsedAggregation implements MultiBucketsAggregation {
+
+    protected final List<ParsedBucket<?>> buckets = new ArrayList<>();
+    protected boolean keyed;
+
+    @Override
+    protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+        if (keyed) {
+            builder.startObject(CommonFields.BUCKETS.getPreferredName());
+        } else {
+            builder.startArray(CommonFields.BUCKETS.getPreferredName());
+        }
+        for (ParsedBucket<?> bucket : buckets) {
+            bucket.toXContent(builder, params);
+        }
+        if (keyed) {
+            builder.endObject();
+        } else {
+            builder.endArray();
+        }
+        return builder;
+    }
+
+    protected static void declareMultiBucketAggregationFields(final ObjectParser<? extends ParsedMultiBucketAggregation, Void> objectParser,
+                                                              final CheckedFunction<XContentParser, ParsedBucket<?>, IOException> bucketParser,
+                                                              final CheckedFunction<XContentParser, ParsedBucket<?>, IOException> keyedBucketParser) {
+        declareAggregationFields(objectParser);
+        objectParser.declareField((parser, aggregation, context) -> {
+            XContentParser.Token token = parser.currentToken();
+            if (token == XContentParser.Token.START_OBJECT) {
+                aggregation.keyed = true;
+                while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+                    aggregation.buckets.add(keyedBucketParser.apply(parser));
+                }
+            } else if (token == XContentParser.Token.START_ARRAY) {
+                aggregation.keyed = false;
+                while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+                    aggregation.buckets.add(bucketParser.apply(parser));
+                }
+            }
+        }, CommonFields.BUCKETS, ObjectParser.ValueType.OBJECT_ARRAY);
+    }
+
+    public static class ParsedBucket<T> implements MultiBucketsAggregation.Bucket {
+
+        private Aggregations aggregations;
+        private T key;
+        private String keyAsString;
+        private long docCount;
+        private boolean keyed;
+
+        protected void setKey(T key) {
+            this.key = key;
+        }
+
+        @Override
+        public Object getKey() {
+            return key;
+        }
+
+        protected void setKeyAsString(String keyAsString) {
+            this.keyAsString = keyAsString;
+        }
+
+        @Override
+        public String getKeyAsString() {
+            return keyAsString;
+        }
+
+        protected void setDocCount(long docCount) {
+            this.docCount = docCount;
+        }
+
+        @Override
+        public long getDocCount() {
+            return docCount;
+        }
+
+        public void setKeyed(boolean keyed) {
+            this.keyed = keyed;
+        }
+
+        protected void setAggregations(Aggregations aggregations) {
+            this.aggregations = aggregations;
+        }
+
+        @Override
+        public Aggregations getAggregations() {
+            return aggregations;
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            if (keyed) {
+                // Subclasses can override the getKeyAsString method to handle specific cases like
+                // keyed bucket with RAW doc value format where the key_as_string field is not printed
+                // out but we still need to have a string version of the key to use as the bucket's name.
+                builder.startObject(getKeyAsString());
+            } else {
+                builder.startObject();
+            }
+            if (keyAsString != null) {
+                builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+            }
+            builder.field(CommonFields.KEY.getPreferredName(), key);
+            builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
+            aggregations.toXContentInternal(builder, params);
+            builder.endObject();
+            return builder;
+        }
+
+        protected static <T, B extends ParsedBucket<T>> B parseXContent(final XContentParser parser,
+                                                                        final boolean keyed,
+                                                                        final Supplier<B> bucketSupplier,
+                                                                        final CheckedFunction<XContentParser, T, IOException> keyParser)
+                throws IOException {
+            final B bucket = bucketSupplier.get();
+            bucket.setKeyed(keyed);
+            XContentParser.Token token = parser.currentToken();
+            String currentFieldName = parser.currentName();
+            if (keyed) {
+                ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
+                ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
+            }
+
+            List<Aggregation> aggregations = new ArrayList<>();
+            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                if (token == XContentParser.Token.FIELD_NAME) {
+                    currentFieldName = parser.currentName();
+                } else if (token.isValue()) {
+                    if (CommonFields.KEY_AS_STRING.getPreferredName().equals(currentFieldName)) {
+                        bucket.setKeyAsString(parser.text());
+                    } else if (CommonFields.KEY.getPreferredName().equals(currentFieldName)) {
+                        bucket.setKey(keyParser.apply(parser));
+                    } else if (CommonFields.DOC_COUNT.getPreferredName().equals(currentFieldName)) {
+                        bucket.setDocCount(parser.longValue());
+                    }
+                } else if (token == XContentParser.Token.START_OBJECT) {
+                    aggregations.add(XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class));
+                }
+            }
+            bucket.setAggregations(new Aggregations(aggregations));
+            return bucket;
+        }
+    }
+}
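For orientation (illustrative response excerpts, not part of the commit): the buckets field arrives in one of two shapes depending on the request's keyed flag, which is why declareMultiBucketAggregationFields branches on START_ARRAY versus START_OBJECT:

    "buckets": [ { "key": 1000, "doc_count": 4 }, ... ]            // keyed == false
    "buckets": { "1000": { "key": 1000, "doc_count": 4 }, ... }    // keyed == true, bucket named by its string key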
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class ParsedDateHistogram extends ParsedMultiBucketAggregation implements Histogram {
+
+    @Override
+    protected String getType() {
+        return DateHistogramAggregationBuilder.NAME;
+    }
+
+    @Override
+    public List<? extends Histogram.Bucket> getBuckets() {
+        return buckets.stream().map(bucket -> (Histogram.Bucket) bucket).collect(Collectors.toList());
+    }
+
+    private static ObjectParser<ParsedDateHistogram, Void> PARSER =
+            new ObjectParser<>(ParsedDateHistogram.class.getSimpleName(), true, ParsedDateHistogram::new);
+    static {
+        declareMultiBucketAggregationFields(PARSER,
+                parser -> ParsedBucket.fromXContent(parser, false),
+                parser -> ParsedBucket.fromXContent(parser, true));
+    }
+
+    public static ParsedDateHistogram fromXContent(XContentParser parser, String name) throws IOException {
+        ParsedDateHistogram aggregation = PARSER.parse(parser, null);
+        aggregation.setName(name);
+        return aggregation;
+    }
+
+    public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket<Long> implements Histogram.Bucket {
+
+        @Override
+        public Object getKey() {
+            return new DateTime(super.getKey(), DateTimeZone.UTC);
+        }
+
+        @Override
+        public String getKeyAsString() {
+            String keyAsString = super.getKeyAsString();
+            if (keyAsString != null) {
+                return keyAsString;
+            } else {
+                return DocValueFormat.RAW.format((Long) super.getKey());
+            }
+        }
+
+        static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException {
+            return parseXContent(parser, keyed, ParsedBucket::new, XContentParser::longValue);
+        }
+    }
+}
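How this plugs in (a minimal usage sketch, not part of the commit; the registry wiring mirrors the InternalAggregationTestCase hunk further down):

    // Register the parser under the date_histogram type name, then parse a
    // typed-keys response field such as "date_histogram#my_histo".
    NamedXContentRegistry registry = new NamedXContentRegistry(Collections.singletonList(
            new NamedXContentRegistry.Entry(Aggregation.class,
                    new ParseField(DateHistogramAggregationBuilder.NAME),
                    (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c))));
    // With a parser created against that registry and positioned on the FIELD_NAME token:
    Aggregation agg = XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class);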
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.histogram;
+
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class ParsedHistogram extends ParsedMultiBucketAggregation implements Histogram {
+
+    @Override
+    protected String getType() {
+        return HistogramAggregationBuilder.NAME;
+    }
+
+    @Override
+    public List<? extends Histogram.Bucket> getBuckets() {
+        return buckets.stream().map(bucket -> (Histogram.Bucket) bucket).collect(Collectors.toList());
+    }
+
+    private static ObjectParser<ParsedHistogram, Void> PARSER =
+            new ObjectParser<>(ParsedHistogram.class.getSimpleName(), true, ParsedHistogram::new);
+    static {
+        declareMultiBucketAggregationFields(PARSER,
+                parser -> ParsedBucket.fromXContent(parser, false),
+                parser -> ParsedBucket.fromXContent(parser, true));
+    }
+
+    public static ParsedHistogram fromXContent(XContentParser parser, String name) throws IOException {
+        ParsedHistogram aggregation = PARSER.parse(parser, null);
+        aggregation.setName(name);
+        return aggregation;
+    }
+
+    static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket<Double> implements Histogram.Bucket {
+
+        @Override
+        public String getKeyAsString() {
+            String keyAsString = super.getKeyAsString();
+            if (keyAsString != null) {
+                return keyAsString;
+            } else {
+                return DocValueFormat.RAW.format((Double) getKey());
+            }
+        }
+
+        static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException {
+            return parseXContent(parser, keyed, ParsedBucket::new, XContentParser::doubleValue);
+        }
+    }
+}
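A note on the getKeyAsString fallback shared by both bucket implementations (a sketch with hypothetical values, not part of the commit): in the keyed layout each bucket must be named by a string form of its key, but the RAW doc value format emits no key_as_string field, so the parsed bucket derives the name itself:

    // Hypothetical key value; mirrors the fallback branch above.
    double key = 42.0;
    String bucketName = DocValueFormat.RAW.format(key);  // becomes the bucket's object name in toXContent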
@@ -386,6 +386,7 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? extends Option>>>

        @SuppressWarnings("unchecked")
        public static Suggestion<? extends Entry<? extends Option>> fromXContent(XContentParser parser) throws IOException {
+            ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
            return XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Suggestion.class);
        }

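The net effect of this hunk and the first one (a sketch, not part of the commit; MyParsedObject is hypothetical): the FIELD_NAME check moved out of parseTypedKeysObject and into its callers, so any new call site must validate the token itself before delegating:

    // Callers now own the token check that parseTypedKeysObject used to perform.
    ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
    MyParsedObject parsed = XContentParserUtils.parseTypedKeysObject(parser, "#", MyParsedObject.class);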
@@ -65,12 +65,10 @@ public class XContentParserUtilsTests extends ESTestCase {
         BytesReference bytes = toXContent((builder, params) -> builder.field("test", 0), xContentType, randomBoolean());
         try (XContentParser parser = xContentType.xContent().createParser(namedXContentRegistry, bytes)) {
-            parser.nextToken();
-            ParsingException e = expectThrows(ParsingException.class, () -> parseTypedKeysObject(parser, delimiter, Boolean.class));
-            assertEquals("Failed to parse object: expecting token of type [FIELD_NAME] but found [START_OBJECT]", e.getMessage());
-
-            parser.nextToken();
-            e = expectThrows(ParsingException.class, () -> parseTypedKeysObject(parser, delimiter, Boolean.class));
+            ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
+            ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation);
+            ParsingException e = expectThrows(ParsingException.class, () -> parseTypedKeysObject(parser, delimiter, Boolean.class));
             assertEquals("Cannot parse object of class [Boolean] without type information. Set [typed_keys] parameter " +
                     "on the request to ensure the type information is added to the response output", e.getMessage());
         }
@@ -34,6 +34,10 @@ import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram;
 import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.avg.ParsedAvg;
 import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder;
@@ -121,6 +125,8 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
                 (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c));
         namedXContents.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c));
         namedXContents.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c));
+        namedXContents.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c));
+        namedXContents.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c));

         return namedXContents.entrySet().stream()
                 .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue()))
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.emptyMap;
+
+public abstract class InternalMultiBucketAggregationTestCase<T extends InternalAggregation & MultiBucketsAggregation>
+        extends InternalAggregationTestCase<T> {
+
+    private boolean hasSubAggregations;
+
+    @Before
+    public void initHasSubAggregations() {
+        hasSubAggregations = randomBoolean();
+    }
+
+    @Override
+    protected final T createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
+        List<InternalAggregation> internal = new ArrayList<>();
+        if (hasSubAggregations) {
+            final int numAggregations = randomIntBetween(1, 3);
+            for (int i = 0; i < numAggregations; i++) {
+                internal.add(createTestInstance(randomAlphaOfLength(5), pipelineAggregators, emptyMap(), InternalAggregations.EMPTY));
+            }
+        }
+        return createTestInstance(name, pipelineAggregators, metaData, new InternalAggregations(internal));
+    }
+
+    protected abstract T createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
+                                            Map<String, Object> metaData, InternalAggregations aggregations);
+
+    protected abstract Class<? extends ParsedMultiBucketAggregation> implementationClass();
+
+    @Override
+    protected final void assertFromXContent(T aggregation, ParsedAggregation parsedAggregation) {
+        assertMultiBucketsAggregation(aggregation, parsedAggregation, false);
+    }
+
+    public void testIterators() throws IOException {
+        final T aggregation = createTestInstance();
+        assertMultiBucketsAggregation(aggregation, parseAndAssert(aggregation, false), true);
+    }
+
+    private void assertMultiBucketsAggregation(Aggregation expected, Aggregation actual, boolean checkOrder) {
+        assertTrue(expected instanceof MultiBucketsAggregation);
+        MultiBucketsAggregation expectedMultiBucketsAggregation = (MultiBucketsAggregation) expected;
+
+        assertTrue(actual instanceof MultiBucketsAggregation);
+        MultiBucketsAggregation actualMultiBucketsAggregation = (MultiBucketsAggregation) actual;
+
+        Class<? extends ParsedMultiBucketAggregation> parsedClass = implementationClass();
+        assertTrue(parsedClass != null && parsedClass.isInstance(actual));
+
+        assertTrue(expected instanceof InternalAggregation && actual instanceof ParsedAggregation);
+        assertEquals(expected.getName(), actual.getName());
+        assertEquals(expected.getMetaData(), actual.getMetaData());
+        assertEquals(((InternalAggregation) expected).getType(), ((ParsedAggregation) actual).getType());
+
+        List<? extends MultiBucketsAggregation.Bucket> expectedBuckets = expectedMultiBucketsAggregation.getBuckets();
+        List<? extends MultiBucketsAggregation.Bucket> actualBuckets = actualMultiBucketsAggregation.getBuckets();
+        assertEquals(expectedBuckets.size(), actualBuckets.size());
+
+        if (checkOrder) {
+            Iterator<? extends MultiBucketsAggregation.Bucket> expectedIt = expectedBuckets.iterator();
+            Iterator<? extends MultiBucketsAggregation.Bucket> actualIt = actualBuckets.iterator();
+            while (expectedIt.hasNext()) {
+                MultiBucketsAggregation.Bucket expectedBucket = expectedIt.next();
+                MultiBucketsAggregation.Bucket actualBucket = actualIt.next();
+                assertBucket(expectedBucket, actualBucket, true);
+            }
+        } else {
+            for (MultiBucketsAggregation.Bucket expectedBucket : expectedBuckets) {
+                boolean found = false;
+                for (MultiBucketsAggregation.Bucket actualBucket : actualBuckets) {
+                    if (actualBucket.getKey().equals(expectedBucket.getKey())) {
+                        found = true;
+                        assertBucket(expectedBucket, actualBucket, false);
+                        break;
+                    }
+                }
+                assertTrue("Failed to find bucket with key [" + expectedBucket.getKey() + "]", found);
+            }
+        }
+    }
+
+    private void assertBucket(MultiBucketsAggregation.Bucket expected, MultiBucketsAggregation.Bucket actual, boolean checkOrder) {
+        assertTrue(expected instanceof InternalMultiBucketAggregation.InternalBucket);
+        assertTrue(actual instanceof ParsedMultiBucketAggregation.ParsedBucket);
+
+        assertEquals(expected.getKey(), actual.getKey());
+        assertEquals(expected.getKeyAsString(), actual.getKeyAsString());
+        assertEquals(expected.getDocCount(), actual.getDocCount());
+
+        Aggregations expectedAggregations = expected.getAggregations();
+        Aggregations actualAggregations = actual.getAggregations();
+        assertEquals(expectedAggregations.asList().size(), actualAggregations.asList().size());
+
+        if (checkOrder) {
+            Iterator<Aggregation> expectedIt = expectedAggregations.iterator();
+            Iterator<Aggregation> actualIt = actualAggregations.iterator();
+
+            while (expectedIt.hasNext()) {
+                Aggregation expectedAggregation = expectedIt.next();
+                Aggregation actualAggregation = actualIt.next();
+                assertMultiBucketsAggregation(expectedAggregation, actualAggregation, true);
+            }
+        } else {
+            for (Aggregation expectedAggregation : expectedAggregations) {
+                Aggregation actualAggregation = actualAggregations.get(expectedAggregation.getName());
+                assertNotNull(actualAggregation);
+                assertMultiBucketsAggregation(expectedAggregation, actualAggregation, false);
+            }
+        }
+    }
+}
@@ -19,14 +19,14 @@

 package org.elasticsearch.search.aggregations.bucket.histogram;

-import org.apache.lucene.util.TestUtil;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.search.DocValueFormat;
-import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.InternalMultiBucketAggregationTestCase;
+import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.joda.time.DateTime;
+import org.junit.Before;

 import java.util.ArrayList;
 import java.util.List;
@@ -37,14 +37,22 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueHours;
 import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
 import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;

-public class InternalDateHistogramTests extends InternalAggregationTestCase<InternalDateHistogram> {
+public class InternalDateHistogramTests extends InternalMultiBucketAggregationTestCase<InternalDateHistogram> {
+
+    private boolean keyed;
+    private DocValueFormat format;
+
+    @Before
+    public void init() {
+        keyed = randomBoolean();
+        format = randomNumericDocValueFormat();
+    }

     @Override
-    protected InternalDateHistogram createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
-                                                       Map<String, Object> metaData) {
-        boolean keyed = randomBoolean();
-        DocValueFormat format = DocValueFormat.RAW;
+    protected InternalDateHistogram createTestInstance(String name,
+                                                       List<PipelineAggregator> pipelineAggregators,
+                                                       Map<String, Object> metaData,
+                                                       InternalAggregations aggregations) {
         int nbBuckets = randomInt(10);
         List<InternalDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
         long startingDate = System.currentTimeMillis();
@@ -54,7 +62,7 @@ public class InternalDateHistogramTests extends InternalAggregationTestCase<InternalDateHistogram> {

         for (int i = 0; i < nbBuckets; i++) {
             long key = startingDate + (intervalMillis * i);
-            buckets.add(i, new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, InternalAggregations.EMPTY));
+            buckets.add(i, new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations));
         }

         InternalOrder order = (InternalOrder) randomFrom(InternalHistogram.Order.KEY_ASC, InternalHistogram.Order.KEY_DESC);
@@ -82,4 +90,9 @@ public class InternalDateHistogramTests extends InternalAggregationTestCase<InternalDateHistogram> {
     protected Writeable.Reader<InternalDateHistogram> instanceReader() {
         return InternalDateHistogram::new;
     }
+
+    @Override
+    protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
+        return ParsedDateHistogram.class;
+    }
 }
@@ -24,30 +24,42 @@ import org.elasticsearch.common.io.stream.Writeable.Reader;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.InternalMultiBucketAggregationTestCase;
+import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.junit.Before;
+
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;

-public class InternalHistogramTests extends InternalAggregationTestCase<InternalHistogram> {
+public class InternalHistogramTests extends InternalMultiBucketAggregationTestCase<InternalHistogram> {
+
+    private boolean keyed;
+    private DocValueFormat format;
+
+    @Before
+    public void init() {
+        keyed = randomBoolean();
+        format = randomNumericDocValueFormat();
+    }

     @Override
-    protected InternalHistogram createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
-                                                   Map<String, Object> metaData) {
-        final boolean keyed = randomBoolean();
-        final DocValueFormat format = DocValueFormat.RAW;
+    protected InternalHistogram createTestInstance(String name,
+                                                   List<PipelineAggregator> pipelineAggregators,
+                                                   Map<String, Object> metaData,
+                                                   InternalAggregations aggregations) {
         final int base = randomInt(50) - 30;
         final int numBuckets = randomInt(10);
         final int interval = randomIntBetween(1, 3);
         List<InternalHistogram.Bucket> buckets = new ArrayList<>();
         for (int i = 0; i < numBuckets; ++i) {
             final int docCount = TestUtil.nextInt(random(), 1, 50);
-            buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, keyed, format, InternalAggregations.EMPTY));
+            buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, keyed, format, aggregations));
         }
-        return new InternalHistogram(name, buckets, (InternalOrder) InternalHistogram.Order.KEY_ASC,
-                1, null, format, keyed, pipelineAggregators, metaData);
+        InternalOrder order = (InternalOrder) randomFrom(InternalHistogram.Order.KEY_ASC, InternalHistogram.Order.KEY_DESC);
+        return new InternalHistogram(name, buckets, order, 1, null, format, keyed, pipelineAggregators, metaData);
     }

     @Override
@@ -72,4 +84,8 @@ public class InternalHistogramTests extends InternalAggregationTestCase<InternalHistogram> {
         return InternalHistogram::new;
     }
+
+    @Override
+    protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
+        return ParsedHistogram.class;
+    }
 }