Add parsing for String/Long/Double Terms aggregations (#24521)
commit 3c66ac06ae
parent 01b976071f
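The commit registers XContent parsers for the three concrete terms aggregations so a client can rebuild a Terms aggregation from a typed-keys search response. A minimal sketch of the wiring and of consuming the parsed result through the Terms interface (the local variable names and the sample term are illustrative, not part of this commit):

    // Registration mirrors the InternalAggregationTestCase change below: each
    // typed-keys aggregation name maps to the matching fromXContent factory.
    Map<String, ContextParser<Object, ? extends Aggregation>> namedXContents = new HashMap<>();
    namedXContents.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c));
    namedXContents.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c));
    namedXContents.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c));

    // Once parsed, the aggregation is used through the Terms interface:
    Terms terms = parsedStringTerms;                       // a ParsedStringTerms instance
    Terms.Bucket bucket = terms.getBucketByKey("a-term");  // sample term, illustrative
    long docCount = bucket != null ? bucket.getDocCount() : 0;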
@@ -47,7 +47,7 @@ public class Aggregations implements Iterable<Aggregation>, ToXContent {
     protected Aggregations() {
     }
 
-    protected Aggregations(List<? extends Aggregation> aggregations) {
+    public Aggregations(List<? extends Aggregation> aggregations) {
         this.aggregations = aggregations;
     }
 

@@ -19,6 +19,7 @@
 
 package org.elasticsearch.search.aggregations;
 
+import org.elasticsearch.common.CheckedBiConsumer;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -33,10 +34,11 @@ import java.util.function.Supplier;
 
 import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
 
-public abstract class ParsedMultiBucketAggregation extends ParsedAggregation implements MultiBucketsAggregation {
+public abstract class ParsedMultiBucketAggregation<B extends ParsedMultiBucketAggregation.Bucket>
+        extends ParsedAggregation implements MultiBucketsAggregation {
 
-    protected final List<ParsedBucket<?>> buckets = new ArrayList<>();
-    protected boolean keyed;
+    protected final List<B> buckets = new ArrayList<>();
+    protected boolean keyed = false;
 
     @Override
     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
@@ -45,7 +47,7 @@ public abstract class ParsedMultiBucketAggregation extends ParsedAggregation imp
         } else {
             builder.startArray(CommonFields.BUCKETS.getPreferredName());
         }
-        for (ParsedBucket<?> bucket : buckets) {
+        for (B bucket : buckets) {
             bucket.toXContent(builder, params);
         }
         if (keyed) {
@@ -57,8 +59,8 @@ public abstract class ParsedMultiBucketAggregation extends ParsedAggregation imp
     }
 
     protected static void declareMultiBucketAggregationFields(final ObjectParser<? extends ParsedMultiBucketAggregation, Void> objectParser,
-            final CheckedFunction<XContentParser, ParsedBucket<?>, IOException> bucketParser,
-            final CheckedFunction<XContentParser, ParsedBucket<?>, IOException> keyedBucketParser) {
+            final CheckedFunction<XContentParser, ParsedBucket, IOException> bucketParser,
+            final CheckedFunction<XContentParser, ParsedBucket, IOException> keyedBucketParser) {
         declareAggregationFields(objectParser);
         objectParser.declareField((parser, aggregation, context) -> {
             XContentParser.Token token = parser.currentToken();
@@ -76,23 +78,13 @@ public abstract class ParsedMultiBucketAggregation extends ParsedAggregation imp
         }, CommonFields.BUCKETS, ObjectParser.ValueType.OBJECT_ARRAY);
     }
 
-    public static class ParsedBucket<T> implements MultiBucketsAggregation.Bucket {
+    public static abstract class ParsedBucket implements MultiBucketsAggregation.Bucket {
 
         private Aggregations aggregations;
-        private T key;
         private String keyAsString;
         private long docCount;
         private boolean keyed;
 
-        protected void setKey(T key) {
-            this.key = key;
-        }
-
-        @Override
-        public Object getKey() {
-            return key;
-        }
-
         protected void setKeyAsString(String keyAsString) {
             this.keyAsString = keyAsString;
         }
@@ -137,17 +129,21 @@ public abstract class ParsedMultiBucketAggregation extends ParsedAggregation imp
             if (keyAsString != null) {
                 builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
             }
-            builder.field(CommonFields.KEY.getPreferredName(), key);
+            keyToXContent(builder);
             builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount);
             aggregations.toXContentInternal(builder, params);
             builder.endObject();
             return builder;
         }
 
-        protected static <T, B extends ParsedBucket<T>> B parseXContent(final XContentParser parser,
-                                                                        final boolean keyed,
-                                                                        final Supplier<B> bucketSupplier,
-                                                                        final CheckedFunction<XContentParser, T, IOException> keyParser)
+        protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+            return builder.field(CommonFields.KEY.getPreferredName(), getKey());
+        }
+
+        protected static <B extends ParsedBucket> B parseXContent(final XContentParser parser,
+                                                                  final boolean keyed,
+                                                                  final Supplier<B> bucketSupplier,
+                                                                  final CheckedBiConsumer<XContentParser, B, IOException> keyConsumer)
                 throws IOException {
            final B bucket = bucketSupplier.get();
            bucket.setKeyed(keyed);
@@ -166,7 +162,7 @@ public abstract class ParsedMultiBucketAggregation extends ParsedAggregation imp
                     if (CommonFields.KEY_AS_STRING.getPreferredName().equals(currentFieldName)) {
                         bucket.setKeyAsString(parser.text());
                     } else if (CommonFields.KEY.getPreferredName().equals(currentFieldName)) {
-                        bucket.setKey(keyParser.apply(parser));
+                        keyConsumer.accept(parser, bucket);
                     } else if (CommonFields.DOC_COUNT.getPreferredName().equals(currentFieldName)) {
                         bucket.setDocCount(parser.longValue());
                     }

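The signature change above is the heart of the refactor: instead of a generic key type T parsed by a CheckedFunction, the base class now hands the parser to a CheckedBiConsumer that writes the key straight into the concrete bucket. A sketch of the pattern, taken from the ParsedDateHistogram change below:

    // Each concrete bucket keeps its own typed key field and tells the base
    // parser how to fill it when the "key" field is encountered:
    static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException {
        return parseXContent(parser, keyed, ParsedBucket::new, (p, bucket) -> bucket.key = p.longValue());
    }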
@@ -20,17 +20,16 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;
 
 import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.stream.Collectors;
 
-public class ParsedDateHistogram extends ParsedMultiBucketAggregation implements Histogram {
+public class ParsedDateHistogram extends ParsedMultiBucketAggregation<ParsedDateHistogram.ParsedBucket> implements Histogram {
 
     @Override
     protected String getType() {
@@ -39,7 +38,7 @@ public class ParsedDateHistogram extends ParsedMultiBucketAggregation implements
 
     @Override
     public List<? extends Histogram.Bucket> getBuckets() {
-        return buckets.stream().map(bucket -> (Histogram.Bucket) bucket).collect(Collectors.toList());
+        return buckets;
     }
 
     private static ObjectParser<ParsedDateHistogram, Void> PARSER =
@@ -56,11 +55,16 @@ public class ParsedDateHistogram extends ParsedMultiBucketAggregation implements
         return aggregation;
     }
 
-    public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket<Long> implements Histogram.Bucket {
+    public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket {
 
+        private Long key;
+
         @Override
         public Object getKey() {
-            return new DateTime(super.getKey(), DateTimeZone.UTC);
+            if (key != null) {
+                return new DateTime(key, DateTimeZone.UTC);
+            }
+            return null;
         }
 
         @Override
@@ -68,13 +72,20 @@ public class ParsedDateHistogram extends ParsedMultiBucketAggregation implements
             String keyAsString = super.getKeyAsString();
             if (keyAsString != null) {
                 return keyAsString;
-            } else {
-                return DocValueFormat.RAW.format((Long) super.getKey());
             }
+            if (key != null) {
+                return Long.toString(key);
+            }
+            return null;
         }
 
+        @Override
+        protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+            return builder.field(CommonFields.KEY.getPreferredName(), key);
+        }
+
         static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException {
-            return parseXContent(parser, keyed, ParsedBucket::new, XContentParser::longValue);
+            return parseXContent(parser, keyed, ParsedBucket::new, (p, bucket) -> bucket.key = p.longValue());
         }
     }
 }

@@ -21,14 +21,12 @@ package org.elasticsearch.search.aggregations.bucket.histogram;
 
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.stream.Collectors;
 
-public class ParsedHistogram extends ParsedMultiBucketAggregation implements Histogram {
+public class ParsedHistogram extends ParsedMultiBucketAggregation<ParsedHistogram.ParsedBucket> implements Histogram {
 
     @Override
     protected String getType() {
@@ -37,7 +35,7 @@ public class ParsedHistogram extends ParsedMultiBucketAggregation implements His
 
     @Override
     public List<? extends Histogram.Bucket> getBuckets() {
-        return buckets.stream().map(bucket -> (Histogram.Bucket) bucket).collect(Collectors.toList());
+        return buckets;
     }
 
     private static ObjectParser<ParsedHistogram, Void> PARSER =
@@ -54,20 +52,29 @@ public class ParsedHistogram extends ParsedMultiBucketAggregation implements His
         return aggregation;
     }
 
-    static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket<Double> implements Histogram.Bucket {
+    static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket {
 
+        private Double key;
+
+        @Override
+        public Object getKey() {
+            return key;
+        }
+
         @Override
         public String getKeyAsString() {
             String keyAsString = super.getKeyAsString();
             if (keyAsString != null) {
                 return keyAsString;
-            } else {
-                return DocValueFormat.RAW.format((Double) getKey());
             }
+            if (key != null) {
+                return Double.toString(key);
+            }
+            return null;
         }
 
         static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException {
-            return parseXContent(parser, keyed, ParsedBucket::new, XContentParser::doubleValue);
+            return parseXContent(parser, keyed, ParsedBucket::new, (p, bucket) -> bucket.key = p.doubleValue());
         }
     }
 }

@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class ParsedDoubleTerms extends ParsedTerms {
+
+    @Override
+    protected String getType() {
+        return DoubleTerms.NAME;
+    }
+
+    private static ObjectParser<ParsedDoubleTerms, Void> PARSER =
+            new ObjectParser<>(ParsedDoubleTerms.class.getSimpleName(), true, ParsedDoubleTerms::new);
+    static {
+        declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+    }
+
+    public static ParsedDoubleTerms fromXContent(XContentParser parser, String name) throws IOException {
+        ParsedDoubleTerms aggregation = PARSER.parse(parser, null);
+        aggregation.setName(name);
+        return aggregation;
+    }
+
+    public static class ParsedBucket extends ParsedTerms.ParsedBucket {
+
+        private Double key;
+
+        @Override
+        public Object getKey() {
+            return key;
+        }
+
+        @Override
+        public String getKeyAsString() {
+            String keyAsString = super.getKeyAsString();
+            if (keyAsString != null) {
+                return keyAsString;
+            }
+            if (key != null) {
+                return Double.toString(key);
+            }
+            return null;
+        }
+
+        public Number getKeyAsNumber() {
+            return key;
+        }
+
+        @Override
+        protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+            builder.field(CommonFields.KEY.getPreferredName(), key);
+            if (super.getKeyAsString() != null) {
+                builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+            }
+            return builder;
+        }
+
+        static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+            return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> bucket.key = p.doubleValue());
+        }
+    }
+}

@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class ParsedLongTerms extends ParsedTerms {
+
+    @Override
+    protected String getType() {
+        return LongTerms.NAME;
+    }
+
+    private static ObjectParser<ParsedLongTerms, Void> PARSER =
+            new ObjectParser<>(ParsedLongTerms.class.getSimpleName(), true, ParsedLongTerms::new);
+    static {
+        declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+    }
+
+    public static ParsedLongTerms fromXContent(XContentParser parser, String name) throws IOException {
+        ParsedLongTerms aggregation = PARSER.parse(parser, null);
+        aggregation.setName(name);
+        return aggregation;
+    }
+
+    public static class ParsedBucket extends ParsedTerms.ParsedBucket {
+
+        private Long key;
+
+        @Override
+        public Object getKey() {
+            return key;
+        }
+
+        @Override
+        public String getKeyAsString() {
+            String keyAsString = super.getKeyAsString();
+            if (keyAsString != null) {
+                return keyAsString;
+            }
+            if (key != null) {
+                return Long.toString(key);
+            }
+            return null;
+        }
+
+        public Number getKeyAsNumber() {
+            return key;
+        }
+
+        @Override
+        protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+            builder.field(CommonFields.KEY.getPreferredName(), key);
+            if (super.getKeyAsString() != null) {
+                builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), getKeyAsString());
+            }
+            return builder;
+        }
+
+        static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+            return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> bucket.key = p.longValue());
+        }
+    }
+}

@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class ParsedStringTerms extends ParsedTerms {
+
+    @Override
+    protected String getType() {
+        return StringTerms.NAME;
+    }
+
+    private static ObjectParser<ParsedStringTerms, Void> PARSER =
+            new ObjectParser<>(ParsedStringTerms.class.getSimpleName(), true, ParsedStringTerms::new);
+    static {
+        declareParsedTermsFields(PARSER, ParsedBucket::fromXContent);
+    }
+
+    public static ParsedStringTerms fromXContent(XContentParser parser, String name) throws IOException {
+        ParsedStringTerms aggregation = PARSER.parse(parser, null);
+        aggregation.setName(name);
+        return aggregation;
+    }
+
+    public static class ParsedBucket extends ParsedTerms.ParsedBucket {
+
+        private BytesRef key;
+
+        @Override
+        public Object getKey() {
+            return getKeyAsString();
+        }
+
+        @Override
+        public String getKeyAsString() {
+            String keyAsString = super.getKeyAsString();
+            if (keyAsString != null) {
+                return keyAsString;
+            }
+            if (key != null) {
+                return key.utf8ToString();
+            }
+            return null;
+        }
+
+        public Number getKeyAsNumber() {
+            if (key != null) {
+                return Double.parseDouble(key.utf8ToString());
+            }
+            return null;
+        }
+
+        @Override
+        protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException {
+            return builder.field(CommonFields.KEY.getPreferredName(), getKey());
+        }
+
+        static ParsedBucket fromXContent(XContentParser parser) throws IOException {
+            return parseTermsBucketXContent(parser, ParsedBucket::new, (p, bucket) -> bucket.key = p.utf8BytesOrNull());
+        }
+    }
+}

@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.terms;
+
+import org.elasticsearch.common.CheckedBiConsumer;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParserUtils;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.search.aggregations.bucket.terms.InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME;
+import static org.elasticsearch.search.aggregations.bucket.terms.InternalTerms.SUM_OF_OTHER_DOC_COUNTS;
+
+public abstract class ParsedTerms extends ParsedMultiBucketAggregation<ParsedTerms.ParsedBucket> implements Terms {
+
+    protected long docCountErrorUpperBound;
+    protected long sumOtherDocCount;
+
+    @Override
+    public long getDocCountError() {
+        return docCountErrorUpperBound;
+    }
+
+    @Override
+    public long getSumOfOtherDocCounts() {
+        return sumOtherDocCount;
+    }
+
+    @Override
+    public List<? extends Terms.Bucket> getBuckets() {
+        return buckets;
+    }
+
+    @Override
+    public Terms.Bucket getBucketByKey(String term) {
+        for (Terms.Bucket bucket : getBuckets()) {
+            if (bucket.getKeyAsString().equals(term)) {
+                return bucket;
+            }
+        }
+        return null;
+    }
+
+    @Override
+    protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+        builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError());
+        builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), getSumOfOtherDocCounts());
+        builder.startArray(CommonFields.BUCKETS.getPreferredName());
+        for (Terms.Bucket bucket : getBuckets()) {
+            bucket.toXContent(builder, params);
+        }
+        builder.endArray();
+        return builder;
+    }
+
+    static void declareParsedTermsFields(final ObjectParser<? extends ParsedTerms, Void> objectParser,
+                                         final CheckedFunction<XContentParser, ParsedBucket, IOException> bucketParser) {
+        declareMultiBucketAggregationFields(objectParser, bucketParser::apply, bucketParser::apply);
+        objectParser.declareLong((parsedTerms, value) -> parsedTerms.docCountErrorUpperBound = value,
+                DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME);
+        objectParser.declareLong((parsedTerms, value) -> parsedTerms.sumOtherDocCount = value,
+                SUM_OF_OTHER_DOC_COUNTS);
+    }
+
+    public abstract static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Terms.Bucket {
+
+        boolean showDocCountError = false;
+        protected long docCountError;
+
+        @Override
+        public int compareTerm(Terms.Bucket other) {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public long getDocCountError() {
+            return docCountError;
+        }
+
+        @Override
+        public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            keyToXContent(builder);
+            builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
+            if (showDocCountError) {
+                builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError());
+            }
+            getAggregations().toXContentInternal(builder, params);
+            builder.endObject();
+            return builder;
+        }
+
+
+        static <B extends ParsedBucket> B parseTermsBucketXContent(final XContentParser parser, final Supplier<B> bucketSupplier,
+                                                                   final CheckedBiConsumer<XContentParser, B, IOException> keyConsumer)
+                throws IOException {
+
+            final B bucket = bucketSupplier.get();
+            final List<Aggregation> aggregations = new ArrayList<>();
+
+            XContentParser.Token token;
+            String currentFieldName = parser.currentName();
+            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                if (token == XContentParser.Token.FIELD_NAME) {
+                    currentFieldName = parser.currentName();
+                } else if (token.isValue()) {
+                    if (CommonFields.KEY_AS_STRING.getPreferredName().equals(currentFieldName)) {
+                        bucket.setKeyAsString(parser.text());
+                    } else if (CommonFields.KEY.getPreferredName().equals(currentFieldName)) {
+                        keyConsumer.accept(parser, bucket);
+                    } else if (CommonFields.DOC_COUNT.getPreferredName().equals(currentFieldName)) {
+                        bucket.setDocCount(parser.longValue());
+                    } else if (DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName().equals(currentFieldName)) {
+                        bucket.docCountError = parser.longValue();
+                        bucket.showDocCountError = true;
+                    }
+                } else if (token == XContentParser.Token.START_OBJECT) {
+                    aggregations.add(XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class));
+                }
+            }
+            bucket.setAggregations(new Aggregations(aggregations));
+            return bucket;
+        }
+    }
+}

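For reference, ParsedTerms consumes the standard terms-aggregation response body; the field names match the constants used in the code above, while the values here are only illustrative:

    {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 12,
      "buckets": [
        { "key": "java", "doc_count": 7 },
        { "key": "rust", "doc_count": 3, "doc_count_error_upper_bound": 0 }
      ]
    }

Nested sub-aggregations inside a bucket arrive as typed-keys objects (for example "avg#my_avg") and are dispatched via XContentParserUtils.parseTypedKeysObject, as the START_OBJECT branch in parseTermsBucketXContent shows.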
@@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.ParsedAggregation;
 
 import java.io.IOException;
@@ -60,7 +59,7 @@ public abstract class ParsedPercentiles extends ParsedAggregation implements Ite
         }
         Double value = getPercentile(percent);
         if (value != null) {
-            return DocValueFormat.RAW.format(value);
+            return Double.toString(value);
         }
         return null;
     }

@@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.ContextParser;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParserUtils;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.rest.action.search.RestSearchAction;
@@ -38,6 +39,12 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggre
 import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram;
 import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram;
+import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
 import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.avg.ParsedAvg;
 import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder;
@@ -127,6 +134,9 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
         namedXContents.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c));
         namedXContents.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c));
         namedXContents.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c));
+        namedXContents.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c));
+        namedXContents.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c));
+        namedXContents.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c));
 
         return namedXContents.entrySet().stream()
                 .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue()))
@@ -259,12 +269,7 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
             assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
             assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
 
-            String currentName = parser.currentName();
-            int i = currentName.indexOf(InternalAggregation.TYPED_KEYS_DELIMITER);
-            String aggType = currentName.substring(0, i);
-            String aggName = currentName.substring(i + 1);
-
-            parsedAggregation = parser.namedObject(Aggregation.class, aggType, aggName);
+            parsedAggregation = XContentParserUtils.parseTypedKeysObject(parser, Aggregation.TYPED_KEYS_DELIMITER, Aggregation.class);
 
             assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken());
             assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
@@ -277,7 +282,7 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
             assertEquals(aggregation.getType(), ((ParsedAggregation) parsedAggregation).getType());
         }
 
-        BytesReference parsedBytes = toXContent((ToXContent) parsedAggregation, xContentType, params, humanReadable);
+        BytesReference parsedBytes = toXContent(parsedAggregation, xContentType, params, humanReadable);
         assertToXContentEquivalent(originalBytes, parsedBytes, xContentType);
 
         return (P) parsedAggregation;

@@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.bucket.terms;
 import org.elasticsearch.common.io.stream.Writeable.Reader;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.util.ArrayList;
@@ -33,17 +34,17 @@ import java.util.Set;
 public class DoubleTermsTests extends InternalTermsTestCase {
 
     @Override
-    protected InternalTerms<?, ?> createTestInstance(
-            String name,
-            List<PipelineAggregator> pipelineAggregators,
-            Map<String, Object> metaData) {
+    protected InternalTerms<?, ?> createTestInstance(String name,
+                                                     List<PipelineAggregator> pipelineAggregators,
+                                                     Map<String, Object> metaData,
+                                                     InternalAggregations aggregations,
+                                                     boolean showTermDocCountError,
+                                                     long docCountError) {
         Terms.Order order = Terms.Order.count(false);
         long minDocCount = 1;
         int requiredSize = 3;
         int shardSize = requiredSize + 2;
-        DocValueFormat format = DocValueFormat.RAW;
-        boolean showTermDocCountError = false;
-        long docCountError = -1;
+        DocValueFormat format = randomNumericDocValueFormat();
         long otherDocCount = 0;
         List<DoubleTerms.Bucket> buckets = new ArrayList<>();
         final int numBuckets = randomInt(shardSize);
@@ -51,8 +52,7 @@ public class DoubleTermsTests extends InternalTermsTestCase {
         for (int i = 0; i < numBuckets; ++i) {
             double term = randomValueOtherThanMany(d -> terms.add(d) == false, random()::nextDouble);
             int docCount = randomIntBetween(1, 100);
-            buckets.add(new DoubleTerms.Bucket(term, docCount, InternalAggregations.EMPTY,
-                    showTermDocCountError, docCountError, format));
+            buckets.add(new DoubleTerms.Bucket(term, docCount, aggregations, showTermDocCountError, docCountError, format));
         }
         return new DoubleTerms(name, order, requiredSize, minDocCount, pipelineAggregators,
                 metaData, format, shardSize, showTermDocCountError, otherDocCount, buckets, docCountError);
@@ -63,4 +63,9 @@ public class DoubleTermsTests extends InternalTermsTestCase {
         return DoubleTerms::new;
     }
 
+    @Override
+    protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
+        return ParsedDoubleTerms.class;
+    }
+
 }

@@ -19,18 +19,44 @@
 
 package org.elasticsearch.search.aggregations.bucket.terms;
 
-import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.InternalMultiBucketAggregationTestCase;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.junit.Before;
 
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Optional;
+import java.util.Map.Entry;
+import java.util.Optional;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-public abstract class InternalTermsTestCase extends InternalAggregationTestCase<InternalTerms<?,?>> {
+public abstract class InternalTermsTestCase extends InternalMultiBucketAggregationTestCase<InternalTerms<?, ?>> {
+
+    private boolean showDocCount;
+    private long docCountError;
+
+    @Before
+    public void init() {
+        showDocCount = randomBoolean();
+        docCountError = showDocCount ? randomInt(1000) : -1;
+    }
+
+    @Override
+    protected InternalTerms<?, ?> createTestInstance(String name,
+                                                     List<PipelineAggregator> pipelineAggregators,
+                                                     Map<String, Object> metaData,
+                                                     InternalAggregations aggregations) {
+        return createTestInstance(name, pipelineAggregators, metaData, aggregations, showDocCount, docCountError);
+    }
+
+    protected abstract InternalTerms<?, ?> createTestInstance(String name,
+                                                              List<PipelineAggregator> pipelineAggregators,
+                                                              Map<String, Object> metaData,
+                                                              InternalAggregations aggregations,
+                                                              boolean showTermDocCountError,
+                                                              long docCountError);
 
     @Override
     protected InternalTerms<?, ?> createUnmappedInstance(

@@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.bucket.terms;
 import org.elasticsearch.common.io.stream.Writeable.Reader;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.util.ArrayList;
|
|||
public class LongTermsTests extends InternalTermsTestCase {
|
||||
|
||||
@Override
|
||||
protected InternalTerms<?, ?> createTestInstance(
|
||||
String name,
|
||||
List<PipelineAggregator> pipelineAggregators,
|
||||
Map<String, Object> metaData) {
|
||||
protected InternalTerms<?, ?> createTestInstance(String name,
|
||||
List<PipelineAggregator> pipelineAggregators,
|
||||
Map<String, Object> metaData,
|
||||
InternalAggregations aggregations,
|
||||
boolean showTermDocCountError,
|
||||
long docCountError) {
|
||||
Terms.Order order = Terms.Order.count(false);
|
||||
long minDocCount = 1;
|
||||
int requiredSize = 3;
|
||||
int shardSize = requiredSize + 2;
|
||||
DocValueFormat format = DocValueFormat.RAW;
|
||||
boolean showTermDocCountError = false;
|
||||
long docCountError = -1;
|
||||
DocValueFormat format = randomNumericDocValueFormat();
|
||||
long otherDocCount = 0;
|
||||
List<LongTerms.Bucket> buckets = new ArrayList<>();
|
||||
final int numBuckets = randomInt(shardSize);
|
||||
|
@@ -51,8 +52,7 @@ public class LongTermsTests extends InternalTermsTestCase {
         for (int i = 0; i < numBuckets; ++i) {
             long term = randomValueOtherThanMany(l -> terms.add(l) == false, random()::nextLong);
             int docCount = randomIntBetween(1, 100);
-            buckets.add(new LongTerms.Bucket(term, docCount, InternalAggregations.EMPTY,
-                    showTermDocCountError, docCountError, format));
+            buckets.add(new LongTerms.Bucket(term, docCount, aggregations, showTermDocCountError, docCountError, format));
         }
         return new LongTerms(name, order, requiredSize, minDocCount, pipelineAggregators,
                 metaData, format, shardSize, showTermDocCountError, otherDocCount, buckets, docCountError);
@@ -63,4 +63,8 @@ public class LongTermsTests extends InternalTermsTestCase {
         return LongTerms::new;
     }
 
+    @Override
+    protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
+        return ParsedLongTerms.class;
+    }
 }

@@ -23,6 +23,7 @@ import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.io.stream.Writeable.Reader;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.util.ArrayList;
@@ -34,17 +35,17 @@ import java.util.Set;
 public class StringTermsTests extends InternalTermsTestCase {
 
     @Override
-    protected InternalTerms<?, ?> createTestInstance(
-            String name,
-            List<PipelineAggregator> pipelineAggregators,
-            Map<String, Object> metaData) {
+    protected InternalTerms<?, ?> createTestInstance(String name,
+                                                     List<PipelineAggregator> pipelineAggregators,
+                                                     Map<String, Object> metaData,
+                                                     InternalAggregations aggregations,
+                                                     boolean showTermDocCountError,
+                                                     long docCountError) {
         Terms.Order order = Terms.Order.count(false);
         long minDocCount = 1;
         int requiredSize = 3;
         int shardSize = requiredSize + 2;
         DocValueFormat format = DocValueFormat.RAW;
-        boolean showTermDocCountError = false;
-        long docCountError = -1;
         long otherDocCount = 0;
         List<StringTerms.Bucket> buckets = new ArrayList<>();
         final int numBuckets = randomInt(shardSize);
@@ -52,8 +53,7 @@ public class StringTermsTests extends InternalTermsTestCase {
         for (int i = 0; i < numBuckets; ++i) {
             BytesRef term = randomValueOtherThanMany(b -> terms.add(b) == false, () -> new BytesRef(randomAlphaOfLength(10)));
             int docCount = randomIntBetween(1, 100);
-            buckets.add(new StringTerms.Bucket(term, docCount, InternalAggregations.EMPTY,
-                    showTermDocCountError, docCountError, format));
+            buckets.add(new StringTerms.Bucket(term, docCount, aggregations, showTermDocCountError, docCountError, format));
         }
         return new StringTerms(name, order, requiredSize, minDocCount, pipelineAggregators,
                 metaData, format, shardSize, showTermDocCountError, otherDocCount, buckets, docCountError);
@@ -64,4 +64,8 @@ public class StringTermsTests extends InternalTermsTestCase {
         return StringTerms::new;
     }
 
+    @Override
+    protected Class<? extends ParsedMultiBucketAggregation> implementationClass() {
+        return ParsedStringTerms.class;
+    }
 }