Refactor common code for unmapped aggregators into NonCollectingAggregator.

Our aggregators that are dedicated to unmapped fields all look more or less
the same, so this hopefully helps remove some spaghetti code.

Close #5528
Adrien Grand 2014-03-25 10:13:42 +01:00
parent fdcc843627
commit 7f640dbcc6
8 changed files with 92 additions and 211 deletions

File: NonCollectingAggregator.java (new file)

@ -0,0 +1,61 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations;

import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.search.aggregations.support.AggregationContext;

import java.io.IOException;

/**
 * An aggregator that is not collected, this can typically be used when running an aggregation over a field that doesn't have
 * a mapping.
 */
public abstract class NonCollectingAggregator extends Aggregator {

    protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent) {
        super(name, BucketAggregationMode.MULTI_BUCKETS, AggregatorFactories.EMPTY, 0, context, parent);
    }

    private void fail() {
        throw new IllegalStateException("This aggregator should not be collected");
    }

    @Override
    public final void setNextReader(AtomicReaderContext reader) {
        fail();
    }

    @Override
    public final boolean shouldCollect() {
        return false;
    }

    @Override
    public final void collect(int doc, long owningBucketOrdinal) throws IOException {
        fail();
    }

    @Override
    public final InternalAggregation buildAggregation(long owningBucketOrdinal) {
        return buildEmptyAggregation();
    }
}
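The factory changes below all use this class in the same way: createUnmapped() builds the aggregation's empty result once and returns an anonymous NonCollectingAggregator that hands it back from buildEmptyAggregation(). A minimal sketch of that shared pattern, where InternalFoo is a placeholder for whichever empty result type the aggregation uses (InternalGeoHashGrid, UnmappedTerms, ...), not a real class in this commit:

    // Sketch of the shared pattern, inside an aggregator factory's createUnmapped().
    // InternalFoo is a hypothetical stand-in for the real empty aggregation type.
    @Override
    protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
        // Build the empty result once; the aggregator never collects, so
        // buildAggregation() always falls through to buildEmptyAggregation().
        final InternalAggregation aggregation = new InternalFoo(name);
        return new NonCollectingAggregator(name, aggregationContext, parent) {
            @Override
            public InternalAggregation buildEmptyAggregation() {
                return aggregation;
            }
        };
    }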

File: GeoHashGridAggregator.java

@ -20,12 +20,12 @@ package org.elasticsearch.search.aggregations.bucket.geogrid;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.index.fielddata.LongValues;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
-import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource;
@ -134,38 +134,6 @@ public class GeoHashGridAggregator extends BucketsAggregator {
@Override
public void doRelease() {
Releasables.release(bucketOrds);
}
-public static class Unmapped extends Aggregator {
-private int requiredSize;
-public Unmapped(String name, int requiredSize, AggregationContext aggregationContext, Aggregator parent) {
-super(name, BucketAggregationMode.PER_BUCKET, AggregatorFactories.EMPTY, 0, aggregationContext, parent);
-this.requiredSize=requiredSize;
-}
-@Override
-public boolean shouldCollect() {
-return false;
-}
-@Override
-public void setNextReader(AtomicReaderContext reader) {
-}
-@Override
-public void collect(int doc, long owningBucketOrdinal) throws IOException {
-}
-@Override
-public InternalGeoHashGrid buildAggregation(long owningBucketOrdinal) {
-return (InternalGeoHashGrid) buildEmptyAggregation();
-}
-@Override
-public InternalGeoHashGrid buildEmptyAggregation() {
-return new InternalGeoHashGrid(name, requiredSize, Collections.<InternalGeoHashGrid.Bucket>emptyList());
-}
-}
}
}

File: GeoHashGridParser.java

@ -26,6 +26,8 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.query.GeoBoundingBoxFilterBuilder;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.NonCollectingAggregator;
import org.elasticsearch.search.aggregations.bucket.BucketUtils;
import org.elasticsearch.search.aggregations.support.*;
import org.elasticsearch.search.aggregations.support.geopoints.GeoPointValuesSource;
@ -33,6 +35,7 @@ import org.elasticsearch.search.aggregations.support.numeric.NumericValuesSource
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
+import java.util.Collections;
/**
* Aggregates Geo information into cells determined by geohashes of a given precision.
@ -129,7 +132,12 @@ public class GeoHashGridParser implements Aggregator.Parser {
@Override
protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
-return new GeoHashGridAggregator.Unmapped(name, requiredSize, aggregationContext, parent);
+final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, Collections.<InternalGeoHashGrid.Bucket>emptyList());
+return new NonCollectingAggregator(name, aggregationContext, parent) {
+public InternalAggregation buildEmptyAggregation() {
+return aggregation;
+}
+};
}
@Override

File: RangeAggregator.java

@ -22,10 +22,7 @@ import com.google.common.collect.Lists;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.elasticsearch.index.fielddata.DoubleValues;
-import org.elasticsearch.search.aggregations.Aggregator;
-import org.elasticsearch.search.aggregations.AggregatorFactories;
-import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.*;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValueSourceAggregatorFactory;
@ -237,7 +234,7 @@ public class RangeAggregator extends BucketsAggregator {
}.sort(0, ranges.length);
}
-public static class Unmapped extends Aggregator {
+public static class Unmapped extends NonCollectingAggregator {
private final List<RangeAggregator.Range> ranges;
private final boolean keyed;
@ -254,7 +251,7 @@ public class RangeAggregator extends BucketsAggregator {
Aggregator parent,
InternalRange.Factory factory) {
-super(name, BucketAggregationMode.MULTI_BUCKETS, AggregatorFactories.EMPTY, 0, aggregationContext, parent);
+super(name, aggregationContext, parent);
this.ranges = ranges;
for (Range range : this.ranges) {
range.process(parser, context);
@ -266,25 +263,7 @@ public class RangeAggregator extends BucketsAggregator {
}
@Override
-public boolean shouldCollect() {
-return false;
-}
-@Override
-public void setNextReader(AtomicReaderContext reader) {
-}
-@Override
-public void collect(int doc, long owningBucketOrdinal) throws IOException {
-}
-@Override
-public InternalRange buildAggregation(long owningBucketOrdinal) {
-return buildEmptyAggregation();
-}
-@Override
-public InternalRange buildEmptyAggregation() {
+public InternalAggregation buildEmptyAggregation() {
InternalAggregations subAggs = buildEmptySubAggregations();
List<org.elasticsearch.search.aggregations.bucket.range.Range.Bucket> buckets =
new ArrayList<>(ranges.size());

File: SignificantTermsAggregatorFactory.java

@ -31,8 +31,7 @@ import org.elasticsearch.common.util.BytesRefHash;
import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.search.aggregations.AggregationExecutionException;
-import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.*;
import org.elasticsearch.search.aggregations.Aggregator.BucketAggregationMode;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.aggregations.support.AggregationContext;
@ -86,7 +85,13 @@ public class SignificantTermsAggregatorFactory extends ValueSourceAggregatorFactory {
@Override
protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
-return new UnmappedSignificantTermsAggregator(name, requiredSize, minDocCount, aggregationContext, parent, this);
+final InternalAggregation aggregation = new UnmappedSignificantTerms(name, requiredSize, minDocCount);
+return new NonCollectingAggregator(name, aggregationContext, parent) {
+@Override
+public InternalAggregation buildEmptyAggregation() {
+return aggregation;
+}
+};
}
private static boolean hasParentBucketAggregator(Aggregator parent) {

File: UnmappedSignificantTermsAggregator.java (deleted)

@ -1,77 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.significant;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import java.io.IOException;
/**
*
*/
public class UnmappedSignificantTermsAggregator extends Aggregator {
private final int requiredSize;
private final long minDocCount;
private final SignificantTermsAggregatorFactory termsAggFactory;
public UnmappedSignificantTermsAggregator(String name, int requiredSize, long minDocCount, AggregationContext aggregationContext,
Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory) {
super(name, BucketAggregationMode.PER_BUCKET, AggregatorFactories.EMPTY, 0, aggregationContext, parent);
this.requiredSize = requiredSize;
this.minDocCount = minDocCount;
this.termsAggFactory = termsAggFactory;
}
@Override
public boolean shouldCollect() {
return false;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
}
@Override
public void collect(int doc, long owningBucketOrdinal) throws IOException {
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
assert owningBucketOrdinal == 0;
return new UnmappedSignificantTerms(name, requiredSize, minDocCount);
}
@Override
public InternalAggregation buildEmptyAggregation() {
return new UnmappedSignificantTerms(name, requiredSize, minDocCount);
}
@Override
protected void doRelease() {
Releasables.release(termsAggFactory);
}
}

File: TermsAggregatorFactory.java

@ -21,8 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.terms;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.ParseField;
-import org.elasticsearch.search.aggregations.AggregationExecutionException;
-import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.*;
import org.elasticsearch.search.aggregations.Aggregator.BucketAggregationMode;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
@ -107,7 +106,13 @@ public class TermsAggregatorFactory extends ValueSourceAggregatorFactory {
@Override
protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent) {
-return new UnmappedTermsAggregator(name, order, requiredSize, minDocCount, aggregationContext, parent);
+final InternalAggregation aggregation = new UnmappedTerms(name, order, requiredSize, minDocCount);
+return new NonCollectingAggregator(name, aggregationContext, parent) {
+@Override
+public InternalAggregation buildEmptyAggregation() {
+return aggregation;
+}
+};
}
private static boolean hasParentBucketAggregator(Aggregator parent) {

File: UnmappedTermsAggregator.java (deleted)

@ -1,68 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.terms;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import java.io.IOException;
/**
*
*/
public class UnmappedTermsAggregator extends Aggregator {
private final InternalOrder order;
private final int requiredSize;
private final long minDocCount;
public UnmappedTermsAggregator(String name, InternalOrder order, int requiredSize, long minDocCount, AggregationContext aggregationContext, Aggregator parent) {
super(name, BucketAggregationMode.PER_BUCKET, AggregatorFactories.EMPTY, 0, aggregationContext, parent);
this.order = order;
this.requiredSize = requiredSize;
this.minDocCount = minDocCount;
}
@Override
public boolean shouldCollect() {
return false;
}
@Override
public void setNextReader(AtomicReaderContext reader) {
}
@Override
public void collect(int doc, long owningBucketOrdinal) throws IOException {
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
assert owningBucketOrdinal == 0;
return new UnmappedTerms(name, order, requiredSize, minDocCount);
}
@Override
public InternalAggregation buildEmptyAggregation() {
return new UnmappedTerms(name, order, requiredSize, minDocCount);
}
}