Migrate top_hits, histogram, and ip_range aggregations to NamedWriteable

This is just another step towards removing AggregationStreams
in favor of NamedWriteable.
Nik Everett 2016-07-11 17:57:03 -04:00
parent f2978f41b9
commit d14e06ce51
9 changed files with 222 additions and 274 deletions
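For context, the migration this commit continues swaps the Streamable idiom (a blank instance mutated by readFrom(), looked up through AggregationStreams) for the NamedWriteable idiom (a StreamInput constructor plus getWriteableName(), looked up through SearchModule). A minimal sketch of the two shapes, with simplified hypothetical classes rather than the exact Elasticsearch types:

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

// Before: the Streamable idiom (simplified). A blank instance is created,
// then readFrom() mutates it, so fields cannot be final, and a stream id
// registered with AggregationStreams picks the reader.
class OldStyleResult {
    long docCount;

    OldStyleResult() {} // blank instance for deserialization

    void readFrom(StreamInput in) throws IOException {
        docCount = in.readVLong();
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(docCount);
    }
}

// After: the NamedWriteable idiom (simplified). Deserialization happens in
// a constructor, fields can be final, and getWriteableName() selects the
// reader that SearchModule registered for that name.
class NewStyleResult {
    final long docCount;

    NewStyleResult(StreamInput in) throws IOException {
        docCount = in.readVLong();
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(docCount);
    }

    String getWriteableName() {
        return "new_style"; // hypothetical name; matched against the registry
    }
}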

org/elasticsearch/search/SearchModule.java

@@ -536,8 +536,11 @@ public class SearchModule extends AbstractModule {
                 RangeAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalRange::new));
         registerAggregation(new AggregationSpec(DateRangeAggregationBuilder::new, new DateRangeParser(),
                 DateRangeAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalDateRange::new));
-        registerAggregation(IpRangeAggregationBuilder::new, new IpRangeParser(), IpRangeAggregationBuilder.AGGREGATION_NAME_FIELD);
-        registerAggregation(HistogramAggregationBuilder::new, new HistogramParser(), HistogramAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(
+                new AggregationSpec(IpRangeAggregationBuilder::new, new IpRangeParser(), IpRangeAggregationBuilder.AGGREGATION_NAME_FIELD)
+                        .addResultReader(InternalBinaryRange::new));
+        registerAggregation(new AggregationSpec(HistogramAggregationBuilder::new, new HistogramParser(),
+                HistogramAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalHistogram::new));
         registerAggregation(DateHistogramAggregationBuilder::new, new DateHistogramParser(),
                 DateHistogramAggregationBuilder.AGGREGATION_NAME_FIELD);
         registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder::new, new GeoDistanceParser(),
@@ -548,8 +551,8 @@ public class SearchModule extends AbstractModule {
                 NestedAggregationBuilder.AGGREGATION_FIELD_NAME).addResultReader(InternalNested::new));
         registerAggregation(new AggregationSpec(ReverseNestedAggregationBuilder::new, ReverseNestedAggregationBuilder::parse,
                 ReverseNestedAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalReverseNested::new));
-        registerAggregation(TopHitsAggregationBuilder::new, TopHitsAggregationBuilder::parse,
-                TopHitsAggregationBuilder.AGGREGATION_NAME_FIELD);
+        registerAggregation(new AggregationSpec(TopHitsAggregationBuilder::new, TopHitsAggregationBuilder::parse,
+                TopHitsAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalTopHits::new));
         registerAggregation(new AggregationSpec(GeoBoundsAggregationBuilder::new, new GeoBoundsParser(),
                 GeoBoundsAggregationBuilder.AGGREGATION_NAME_FIED).addResultReader(InternalGeoBounds::new));
         registerAggregation(new AggregationSpec(GeoCentroidAggregationBuilder::new, new GeoCentroidParser(),
@@ -821,11 +824,6 @@ public class SearchModule extends AbstractModule {
     }

     static {
-        // buckets
-        InternalBinaryRange.registerStream();
-        InternalHistogram.registerStream();
-        InternalTopHits.registerStreams();
-
         // Pipeline Aggregations
         DerivativePipelineAggregator.registerStreams();
         InternalDerivative.registerStreams();
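Conceptually, addResultReader is what replaces those static registerStream() blocks: instead of each result class pushing a reader into a global AggregationStreams table at class-load time, SearchModule builds a name-to-reader mapping up front. A hypothetical, stripped-down version of that dispatch (the real AggregationSpec carries more than this):

import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of name-based result dispatch, not the real ES types.
class ResultReaderRegistry {
    interface Reader<T> {
        T read(StreamInput in) throws IOException;
    }

    private final Map<String, Reader<?>> readers = new HashMap<>();

    // Called once during module setup, e.g. addResultReader(InternalHistogram::new).
    void addResultReader(String name, Reader<?> reader) {
        readers.put(name, reader);
    }

    // Called when deserializing: the writeable name written by the sender
    // selects the matching constructor reference on the receiver.
    Object read(String name, StreamInput in) throws IOException {
        Reader<?> reader = readers.get(name);
        if (reader == null) {
            throw new IllegalArgumentException("unknown writeable [" + name + "]");
        }
        return reader.read(in);
    }
}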

org/elasticsearch/search/aggregations/InternalAggregation.java

@@ -126,7 +126,7 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Streamable {
     private List<PipelineAggregator> pipelineAggregators;

     /** Constructs an un initialized addAggregation (used for serialization) **/
-    protected InternalAggregation() {}
+    protected InternalAggregation() {} // NORELEASE remove when removing Streamable

     /**
      * Constructs an get with a given name.

org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.search.aggregations.bucket.histogram;

+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.InternalAggregation.Type;
@@ -26,8 +27,10 @@ import org.elasticsearch.search.aggregations.support.ValueType;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;

+import java.io.IOException;
+
 /**
- *
+ * Results of a date_histogram aggregation.
  */
 public class InternalDateHistogram {
@@ -35,16 +38,18 @@ public class InternalDateHistogram {
     static final Type TYPE = new Type("date_histogram", "dhisto");

     static class Bucket extends InternalHistogram.Bucket {

-        Bucket(boolean keyed, DocValueFormat formatter, InternalHistogram.Factory<Bucket> factory) {
-            super(keyed, formatter, factory);
-        }
-
         Bucket(long key, long docCount, InternalAggregations aggregations, boolean keyed, DocValueFormat formatter,
                 InternalHistogram.Factory<Bucket> factory) {
             super(key, docCount, keyed, formatter, factory, aggregations);
         }

+        /**
+         * Read from a stream.
+         */
+        Bucket(StreamInput in, boolean keyed, DocValueFormat formatter, InternalHistogram.Factory<Bucket> factory) throws IOException {
+            super(in, keyed, formatter, factory);
+        }
+
         @Override
         public String getKeyAsString() {
             return format.format(key);
@@ -94,8 +99,8 @@ public class InternalDateHistogram {
         }

         @Override
-        protected InternalDateHistogram.Bucket createEmptyBucket(boolean keyed, DocValueFormat formatter) {
-            return new Bucket(keyed, formatter, this);
+        protected Bucket readBucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {
+            return new Bucket(in, keyed, format, this);
         }
     }
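That readBucket override is the polymorphism point: InternalHistogram owns the shared wire format, but delegates per-bucket construction to whichever factory the aggregation was built with, so a date_histogram comes back with date-aware buckets from the same deserialization code. A conceptual sketch of the hook, using hypothetical simplified types rather than the ES classes:

import java.io.DataInputStream;
import java.io.IOException;

// Hypothetical simplified types illustrating the factory hook.
abstract class HistoFactory<B> {
    /** Each concrete factory knows how to read its own bucket subtype. */
    abstract B readBucket(DataInputStream in) throws IOException;
}

class DateHistoFactory extends HistoFactory<DateBucket> {
    @Override
    DateBucket readBucket(DataInputStream in) throws IOException {
        // Same two longs as a plain histogram bucket; the key is
        // interpreted as epoch millis by the date-aware subtype.
        return new DateBucket(in.readLong(), in.readLong());
    }
}

class DateBucket {
    final long keyMillis;
    final long docCount;

    DateBucket(long keyMillis, long docCount) {
        this.keyMillis = keyMillis;
        this.docCount = docCount;
    }
}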

org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java

@@ -26,7 +26,6 @@ import org.elasticsearch.common.rounding.Rounding;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
-import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -46,50 +45,49 @@ import java.util.Map;
  * TODO should be renamed to InternalNumericHistogram (see comment on {@link Histogram})?
  */
 public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation<InternalHistogram<B>, B>
-        implements
-        Histogram {
+        implements Histogram {

     public static final Factory<Bucket> HISTOGRAM_FACTORY = new Factory<Bucket>();
-    static final Type TYPE = new Type("histogram", "histo");
-
-    private static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
-        @SuppressWarnings("rawtypes")
-        @Override
-        public InternalHistogram readResult(StreamInput in) throws IOException {
-            InternalHistogram histogram = new InternalHistogram();
-            histogram.readFrom(in);
-            return histogram;
-        }
-    };
-
-    public static void registerStream() {
-        AggregationStreams.registerStream(STREAM, TYPE.stream());
-    }
+    static final Type TYPE = new Type("histogram");

     public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket {

-        long key;
-        long docCount;
-        InternalAggregations aggregations;
+        final long key;
+        final long docCount;
+        final InternalAggregations aggregations;
         private final transient boolean keyed;
         protected final transient DocValueFormat format;
-        private Factory<?> factory;
+        private final Factory<?> factory;

-        public Bucket(boolean keyed, DocValueFormat formatter, Factory<?> factory) {
-            this.format = formatter;
+        public Bucket(long key, long docCount, boolean keyed, DocValueFormat format, Factory<?> factory,
+                InternalAggregations aggregations) {
+            this.format = format;
             this.keyed = keyed;
             this.factory = factory;
-        }
-
-        public Bucket(long key, long docCount, boolean keyed, DocValueFormat formatter, Factory<?> factory,
-                InternalAggregations aggregations) {
-            this(keyed, formatter, factory);
             this.key = key;
             this.docCount = docCount;
             this.aggregations = aggregations;
         }

+        /**
+         * Read from a stream.
+         */
+        public Bucket(StreamInput in, boolean keyed, DocValueFormat format, Factory<?> factory) throws IOException {
+            this.format = format;
+            this.keyed = keyed;
+            this.factory = factory;
+            key = in.readLong();
+            docCount = in.readVLong();
+            aggregations = InternalAggregations.readAggregations(in);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeLong(key);
+            out.writeVLong(docCount);
+            aggregations.writeTo(out);
+        }
+
         protected Factory<?> getFactory() {
             return factory;
         }
@@ -144,20 +142,6 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation<InternalHistogram<B>, B>
             return builder;
         }

-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            key = in.readLong();
-            docCount = in.readVLong();
-            aggregations = InternalAggregations.readAggregations(in);
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            out.writeLong(key);
-            out.writeVLong(docCount);
-            aggregations.writeTo(out);
-        }
-
         public DocValueFormat getFormatter() {
             return format;
         }
@@ -244,21 +228,18 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation<InternalHistogram<B>, B>
         }

         @SuppressWarnings("unchecked")
-        protected B createEmptyBucket(boolean keyed, DocValueFormat formatter) {
-            return (B) new Bucket(keyed, formatter, this);
+        protected B readBucket(StreamInput in, boolean keyed, DocValueFormat format) throws IOException {
+            return (B) new Bucket(in, keyed, format, this);
         }
     }

-    protected List<B> buckets;
-    private InternalOrder order;
-    private DocValueFormat format;
-    private boolean keyed;
-    private long minDocCount;
-    private EmptyBucketInfo emptyBucketInfo;
-    protected Factory<B> factory;
-
-    InternalHistogram() {} // for serialization
+    private final List<B> buckets;
+    private final InternalOrder order;
+    private final DocValueFormat format;
+    private final boolean keyed;
+    private final long minDocCount;
+    private final EmptyBucketInfo emptyBucketInfo;
+    private final Factory<B> factory;

     InternalHistogram(String name, List<B> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,
             DocValueFormat formatter, boolean keyed, Factory<B> factory, List<PipelineAggregator> pipelineAggregators,
@@ -274,6 +255,53 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation<InternalHistogram<B>, B>
         this.factory = factory;
     }

+    /**
+     * Read from a stream.
+     */
+    public InternalHistogram(StreamInput in) throws IOException {
+        super(in);
+        factory = resolveFactory(in.readString());
+        order = InternalOrder.Streams.readOrder(in);
+        minDocCount = in.readVLong();
+        if (minDocCount == 0) {
+            emptyBucketInfo = EmptyBucketInfo.readFrom(in);
+        } else {
+            emptyBucketInfo = null;
+        }
+        format = in.readNamedWriteable(DocValueFormat.class);
+        keyed = in.readBoolean();
+        buckets = in.readList(stream -> factory.readBucket(stream, keyed, format));
+    }
+
+    @SuppressWarnings("unchecked")
+    protected static <B extends InternalHistogram.Bucket> Factory<B> resolveFactory(String factoryType) {
+        if (factoryType.equals(InternalDateHistogram.TYPE.name())) {
+            return (Factory<B>) new InternalDateHistogram.Factory();
+        } else if (factoryType.equals(TYPE.name())) {
+            return new Factory<>();
+        } else {
+            throw new IllegalStateException("Invalid histogram factory type [" + factoryType + "]");
+        }
+    }
+
+    @Override
+    protected void doWriteTo(StreamOutput out) throws IOException {
+        out.writeString(factory.type().name());
+        InternalOrder.Streams.writeOrder(order, out);
+        out.writeVLong(minDocCount);
+        if (minDocCount == 0) {
+            EmptyBucketInfo.writeTo(emptyBucketInfo, out);
+        }
+        out.writeNamedWriteable(format);
+        out.writeBoolean(keyed);
+        out.writeList(buckets);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return HistogramAggregationBuilder.NAME;
+    }
+
     @Override
     public Type type() {
         return TYPE;
@@ -465,53 +493,6 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation<InternalHistogram<B>, B>
                 getMetaData());
     }

-    @Override
-    protected void doReadFrom(StreamInput in) throws IOException {
-        this.factory = resolveFactory(in.readString());
-        order = InternalOrder.Streams.readOrder(in);
-        minDocCount = in.readVLong();
-        if (minDocCount == 0) {
-            emptyBucketInfo = EmptyBucketInfo.readFrom(in);
-        }
-        format = in.readNamedWriteable(DocValueFormat.class);
-        keyed = in.readBoolean();
-        int size = in.readVInt();
-        List<B> buckets = new ArrayList<>(size);
-        for (int i = 0; i < size; i++) {
-            B bucket = getFactory().createEmptyBucket(keyed, format);
-            bucket.readFrom(in);
-            buckets.add(bucket);
-        }
-        this.buckets = buckets;
-    }
-
-    @SuppressWarnings("unchecked")
-    protected static <B extends InternalHistogram.Bucket> Factory<B> resolveFactory(String factoryType) {
-        if (factoryType.equals(InternalDateHistogram.TYPE.name())) {
-            return (Factory<B>) new InternalDateHistogram.Factory();
-        } else if (factoryType.equals(TYPE.name())) {
-            return new Factory<>();
-        } else {
-            throw new IllegalStateException("Invalid histogram factory type [" + factoryType + "]");
-        }
-    }
-
-    @Override
-    protected void doWriteTo(StreamOutput out) throws IOException {
-        out.writeString(factory.type().name());
-        InternalOrder.Streams.writeOrder(order, out);
-        out.writeVLong(minDocCount);
-        if (minDocCount == 0) {
-            EmptyBucketInfo.writeTo(emptyBucketInfo, out);
-        }
-        out.writeNamedWriteable(format);
-        out.writeBoolean(keyed);
-        out.writeVInt(buckets.size());
-        for (B bucket : buckets) {
-            bucket.writeTo(out);
-        }
-    }
-
     @Override
     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
         if (keyed) {
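The net effect of this file is the pattern in miniature: one constructor that reads, one doWriteTo() that writes, in exactly the same field order, with every field final. A self-contained sketch of that round trip using plain java.io streams (a hypothetical Bucket stand-in, not the ES classes; run with -ea to enable the assert):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical stand-in for the "read constructor + writeTo" idiom.
final class RoundTripBucket {
    final long key;
    final long docCount;

    RoundTripBucket(long key, long docCount) {
        this.key = key;
        this.docCount = docCount;
    }

    /** Read from a stream, mirroring writeTo's field order exactly. */
    RoundTripBucket(DataInputStream in) throws IOException {
        key = in.readLong();
        docCount = in.readLong();
    }

    void writeTo(DataOutputStream out) throws IOException {
        out.writeLong(key);
        out.writeLong(docCount);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new RoundTripBucket(42, 7).writeTo(new DataOutputStream(bytes));
        RoundTripBucket copy = new RoundTripBucket(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        assert copy.key == 42 && copy.docCount == 7;
    }
}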

org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java

@@ -19,11 +19,14 @@
 package org.elasticsearch.search.aggregations.bucket.range;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;

+import static java.util.Collections.emptyList;
+
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.util.BytesRef;
@@ -318,19 +321,18 @@ public final class BinaryRangeAggregator extends BucketsAggregator {
         @Override
         public InternalAggregation buildAggregation(long bucket) throws IOException {
-            InternalBinaryRange.Bucket[] buckets = new InternalBinaryRange.Bucket[ranges.length];
-            for (int i = 0; i < buckets.length; ++i) {
+            List<InternalBinaryRange.Bucket> buckets = new ArrayList<>(ranges.length);
+            for (int i = 0; i < ranges.length; ++i) {
                 long bucketOrd = bucket * ranges.length + i;
-                buckets[i] = new InternalBinaryRange.Bucket(format, keyed,
+                buckets.add(new InternalBinaryRange.Bucket(format, keyed,
                         ranges[i].key, ranges[i].from, ranges[i].to,
-                        bucketDocCount(bucketOrd), bucketAggregations(bucketOrd));
+                        bucketDocCount(bucketOrd), bucketAggregations(bucketOrd)));
             }
             return new InternalBinaryRange(name, format, keyed, buckets, pipelineAggregators(), metaData());
         }

         @Override
         public InternalAggregation buildEmptyAggregation() {
-            return new InternalBinaryRange(name, format, keyed, new InternalBinaryRange.Bucket[0], pipelineAggregators(), metaData());
+            return new InternalBinaryRange(name, format, keyed, emptyList(), pipelineAggregators(), metaData());
         }
     }

org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java

@@ -23,50 +23,38 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.DocValueFormat;
-import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
+import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregationBuilder;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;

+import static java.util.Collections.unmodifiableList;
+
 /** A range aggregation for data that is encoded in doc values using a binary representation. */
 public final class InternalBinaryRange
         extends InternalMultiBucketAggregation<InternalBinaryRange, InternalBinaryRange.Bucket>
         implements Range {

-    public static final Type TYPE = new Type("binary_range");
-
-    private static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
-        @Override
-        public InternalBinaryRange readResult(StreamInput in) throws IOException {
-            InternalBinaryRange range = new InternalBinaryRange();
-            range.readFrom(in);
-            return range;
-        }
-    };
-
-    public static void registerStream() {
-        AggregationStreams.registerStream(STREAM, TYPE.stream());
-    }
-
     public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Range.Bucket {

         private final transient DocValueFormat format;
         private final transient boolean keyed;
-        private String key;
-        private BytesRef from, to;
-        private long docCount;
-        private InternalAggregations aggregations;
+        private final String key;
+        private final BytesRef from, to;
+        private final long docCount;
+        private final InternalAggregations aggregations;

         public Bucket(DocValueFormat format, boolean keyed, String key, BytesRef from, BytesRef to,
                 long docCount, InternalAggregations aggregations) {
-            this(format, keyed);
+            this.format = format;
+            this.keyed = keyed;
             this.key = key;
             this.from = from;
             this.to = to;
@@ -75,9 +63,37 @@ public final class InternalBinaryRange
         }

         // for serialization
-        private Bucket(DocValueFormat format, boolean keyed) {
+        private Bucket(StreamInput in, DocValueFormat format, boolean keyed) throws IOException {
             this.format = format;
             this.keyed = keyed;
+            key = in.readOptionalString();
+            if (in.readBoolean()) {
+                from = in.readBytesRef();
+            } else {
+                from = null;
+            }
+            if (in.readBoolean()) {
+                to = in.readBytesRef();
+            } else {
+                to = null;
+            }
+            docCount = in.readLong();
+            aggregations = InternalAggregations.readAggregations(in);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeOptionalString(key);
+            out.writeBoolean(from != null);
+            if (from != null) {
+                out.writeBytesRef(from);
+            }
+            out.writeBoolean(to != null);
+            if (to != null) {
+                out.writeBytesRef(to);
+            }
+            out.writeLong(docCount);
+            aggregations.writeTo(out);
         }

         @Override
@@ -130,38 +146,6 @@ public final class InternalBinaryRange
             return builder;
         }

-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            key = in.readOptionalString();
-            if (in.readBoolean()) {
-                from = in.readBytesRef();
-            } else {
-                from = null;
-            }
-            if (in.readBoolean()) {
-                to = in.readBytesRef();
-            } else {
-                to = null;
-            }
-            docCount = in.readLong();
-            aggregations = InternalAggregations.readAggregations(in);
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            out.writeOptionalString(key);
-            out.writeBoolean(from != null);
-            if (from != null) {
-                out.writeBytesRef(from);
-            }
-            out.writeBoolean(to != null);
-            if (to != null) {
-                out.writeBytesRef(to);
-            }
-            out.writeLong(docCount);
-            aggregations.writeTo(out);
-        }
-
         @Override
         public Object getFrom() {
             return getFromAsString();
@@ -184,11 +168,11 @@ public final class InternalBinaryRange
         }
     }

-    private DocValueFormat format;
-    private boolean keyed;
-    private Bucket[] buckets;
+    private final DocValueFormat format;
+    private final boolean keyed;
+    private final List<Bucket> buckets;

-    public InternalBinaryRange(String name, DocValueFormat format, boolean keyed, Bucket[] buckets,
+    public InternalBinaryRange(String name, DocValueFormat format, boolean keyed, List<Bucket> buckets,
             List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
         super(name, pipelineAggregators, metaData);
         this.format = format;
@@ -196,17 +180,37 @@ public final class InternalBinaryRange
         this.buckets = buckets;
     }

-    private InternalBinaryRange() {} // for serialization
+    /**
+     * Read from a stream.
+     */
+    public InternalBinaryRange(StreamInput in) throws IOException {
+        super(in);
+        format = in.readNamedWriteable(DocValueFormat.class);
+        keyed = in.readBoolean();
+        buckets = in.readList(stream -> new Bucket(stream, format, keyed));
+    }
+
+    @Override
+    protected void doWriteTo(StreamOutput out) throws IOException {
+        out.writeNamedWriteable(format);
+        out.writeBoolean(keyed);
+        out.writeList(buckets);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return IpRangeAggregationBuilder.NAME;
+    }

     @Override
     public List<Range.Bucket> getBuckets() {
-        return Arrays.asList(buckets);
+        return unmodifiableList(buckets);
     }

     @Override
     public InternalBinaryRange create(List<Bucket> buckets) {
-        return new InternalBinaryRange(name, format, keyed, buckets.toArray(new Bucket[0]),
-                pipelineAggregators(), metaData);
+        return new InternalBinaryRange(name, format, keyed, buckets, pipelineAggregators(), metaData);
     }

     @Override
@@ -214,34 +218,29 @@ public final class InternalBinaryRange
         return new Bucket(format, keyed, prototype.key, prototype.from, prototype.to, prototype.docCount, aggregations);
     }

-    @Override
-    public Type type() {
-        return TYPE;
-    }
-
     @Override
     public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
-        long[] docCounts = new long[buckets.length];
-        InternalAggregations[][] aggs = new InternalAggregations[buckets.length][];
+        long[] docCounts = new long[buckets.size()];
+        InternalAggregations[][] aggs = new InternalAggregations[buckets.size()][];
         for (int i = 0; i < aggs.length; ++i) {
             aggs[i] = new InternalAggregations[aggregations.size()];
         }
         for (int i = 0; i < aggregations.size(); ++i) {
             InternalBinaryRange range = (InternalBinaryRange) aggregations.get(i);
-            if (range.buckets.length != buckets.length) {
-                throw new IllegalStateException("Expected " + buckets.length + " buckets, but got " + range.buckets.length);
+            if (range.buckets.size() != buckets.size()) {
+                throw new IllegalStateException("Expected [" + buckets.size() + "] buckets, but got [" + range.buckets.size() + "]");
             }
-            for (int j = 0; j < buckets.length; ++j) {
-                Bucket bucket = range.buckets[j];
+            for (int j = 0; j < buckets.size(); ++j) {
+                Bucket bucket = range.buckets.get(j);
                 docCounts[j] += bucket.docCount;
                 aggs[j][i] = bucket.aggregations;
             }
         }
-        Bucket[] buckets = new Bucket[this.buckets.length];
-        for (int i = 0; i < buckets.length; ++i) {
-            Bucket b = this.buckets[i];
-            buckets[i] = new Bucket(format, keyed, b.key, b.from, b.to, docCounts[i],
-                    InternalAggregations.reduce(Arrays.asList(aggs[i]), reduceContext));
+        List<Bucket> buckets = new ArrayList<>(this.buckets.size());
+        for (int i = 0; i < this.buckets.size(); ++i) {
+            Bucket b = this.buckets.get(i);
+            buckets.add(new Bucket(format, keyed, b.key, b.from, b.to, docCounts[i],
+                    InternalAggregations.reduce(Arrays.asList(aggs[i]), reduceContext)));
         }
         return new InternalBinaryRange(name, format, keyed, buckets, pipelineAggregators(), metaData);
     }
@@ -264,28 +263,4 @@ public final class InternalBinaryRange
         }
         return builder;
     }
-
-    @Override
-    protected void doWriteTo(StreamOutput out) throws IOException {
-        out.writeNamedWriteable(format);
-        out.writeBoolean(keyed);
-        out.writeVInt(buckets.length);
-        for (Bucket bucket : buckets) {
-            bucket.writeTo(out);
-        }
-    }
-
-    @Override
-    protected void doReadFrom(StreamInput in) throws IOException {
-        format = in.readNamedWriteable(DocValueFormat.class);
-        keyed = in.readBoolean();
-        Bucket[] buckets = new Bucket[in.readVInt()];
-        for (int i = 0; i < buckets.length; ++i) {
-            Bucket bucket = new Bucket(format, keyed);
-            bucket.readFrom(in);
-            buckets[i] = bucket;
-        }
-        this.buckets = buckets;
-    }
 }
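Switching buckets from Bucket[] to List<Bucket> is what lets the class lean on writeList/readList, replacing the hand-rolled writeVInt-plus-loop code deleted above. A self-contained sketch of the shape of those helpers in plain Java (conceptual only; the real StreamInput/StreamOutput versions use variable-length ints and Writeable types):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class ListWireFormat {
    /** Conceptual stand-in for a reader lambda, e.g. stream -> new Bucket(stream, format, keyed). */
    interface Reader<T> {
        T read(DataInputStream in) throws IOException;
    }

    interface Writer<T> {
        void write(DataOutputStream out, T value) throws IOException;
    }

    static <T> void writeList(DataOutputStream out, List<T> list, Writer<T> writer) throws IOException {
        out.writeInt(list.size()); // ES writes a vint here
        for (T value : list) {
            writer.write(out, value);
        }
    }

    static <T> List<T> readList(DataInputStream in, Reader<T> reader) throws IOException {
        int size = in.readInt();
        List<T> list = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            list.add(reader.read(in)); // element count and order must match writeList
        }
        return list;
    }
}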

org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java

@@ -52,7 +52,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceType;
 public final class IpRangeAggregationBuilder
         extends ValuesSourceAggregationBuilder<ValuesSource.Bytes, IpRangeAggregationBuilder> {
-    private static final String NAME = "ip_range";
+    public static final String NAME = "ip_range";
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);
     private static final InternalAggregation.Type TYPE = new InternalAggregation.Type(NAME);

org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java

@@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.SearchHits;
-import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
@@ -40,32 +39,14 @@ import java.util.List;
 import java.util.Map;

 /**
+ * Results of the {@link TopHitsAggregator}.
  */
 public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
-    public static final InternalAggregation.Type TYPE = new Type("top_hits");
-
-    public static final AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
-        @Override
-        public InternalTopHits readResult(StreamInput in) throws IOException {
-            InternalTopHits buckets = new InternalTopHits();
-            buckets.readFrom(in);
-            return buckets;
-        }
-    };
-
-    public static void registerStreams() {
-        AggregationStreams.registerStream(STREAM, TYPE.stream());
-    }
-
     private int from;
     private int size;
     private TopDocs topDocs;
     private InternalSearchHits searchHits;

-    InternalTopHits() {
-    }
-
     public InternalTopHits(String name, int from, int size, TopDocs topDocs, InternalSearchHits searchHits,
             List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
         super(name, pipelineAggregators, metaData);
@@ -75,9 +56,29 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
         this.searchHits = searchHits;
     }

+    /**
+     * Read from a stream.
+     */
+    public InternalTopHits(StreamInput in) throws IOException {
+        super(in);
+        from = in.readVInt();
+        size = in.readVInt();
+        topDocs = Lucene.readTopDocs(in);
+        assert topDocs != null;
+        searchHits = InternalSearchHits.readSearchHits(in);
+    }
+
     @Override
-    public Type type() {
-        return TYPE;
+    protected void doWriteTo(StreamOutput out) throws IOException {
+        out.writeVInt(from);
+        out.writeVInt(size);
+        Lucene.writeTopDocs(out, topDocs);
+        searchHits.writeTo(out);
+    }
+
+    @Override
+    public String getWriteableName() {
+        return TopHitsAggregationBuilder.NAME;
     }

     @Override
@@ -139,23 +140,6 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
         }
     }

-    @Override
-    protected void doReadFrom(StreamInput in) throws IOException {
-        from = in.readVInt();
-        size = in.readVInt();
-        topDocs = Lucene.readTopDocs(in);
-        assert topDocs != null;
-        searchHits = InternalSearchHits.readSearchHits(in);
-    }
-
-    @Override
-    protected void doWriteTo(StreamOutput out) throws IOException {
-        out.writeVInt(from);
-        out.writeVInt(size);
-        Lucene.writeTopDocs(out, topDocs);
-        searchHits.writeTo(out);
-    }
-
     @Override
     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
         searchHits.toXContent(builder, params);

org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregationBuilder.java

@@ -32,7 +32,9 @@ import org.elasticsearch.script.Script;
 import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregationInitializationException;
 import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
+import org.elasticsearch.search.aggregations.InternalAggregation.Type;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
@@ -52,7 +54,8 @@ import java.util.Objects;
 import java.util.Set;

 public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHitsAggregationBuilder> {
-    public static final String NAME = InternalTopHits.TYPE.name();
+    public static final String NAME = "top_hits";
+    private static final InternalAggregation.Type TYPE = new Type(NAME);
     public static final ParseField AGGREGATION_NAME_FIELD = new ParseField(NAME);

     private int from = 0;
@@ -68,14 +71,14 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHitsAggregationBuilder> {
     private FetchSourceContext fetchSourceContext;

     public TopHitsAggregationBuilder(String name) {
-        super(name, InternalTopHits.TYPE);
+        super(name, TYPE);
     }

     /**
      * Read from a stream.
      */
    public TopHitsAggregationBuilder(StreamInput in) throws IOException {
-        super(in, InternalTopHits.TYPE);
+        super(in, TYPE);
         explain = in.readBoolean();
         fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
         if (in.readBoolean()) {