LUCENE-5144: remove FacetRequest.createAggregator, rename StandardFacetsAccumulator to OldFA and move it and associated classes under o.a.l.facet.old

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1508085 13f79535-47bb-0310-9956-ffa450edef68
Shai Erera 2013-07-29 14:43:03 +00:00
parent bb1164b2fc
commit 8c55fcde84
38 changed files with 184 additions and 212 deletions

View File

@ -108,6 +108,13 @@ API Changes
need to use the same config more than once, e.g. when sharing between multiple
writers, make sure to clone it before passing to each writer.
(Shai Erera, Mike McCandless)
* LUCENE-5144: StandardFacetsAccumulator renamed to OldFacetsAccumulator, and all
  associated classes were moved under o.a.l.facet.old. The intention is to remove it
  one day, once the features it covers (complements, partitions, sampling) are
  migrated to the new FacetsAggregator and FacetsAccumulator API. Also,
FacetRequest.createAggregator was replaced by OldFacetsAccumulator.createAggregator.
(Shai Erera)
Optimizations
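
A minimal migration sketch for the API change above (MyFacetRequest and MyAggregator are hypothetical names; fsp, indexReader and taxoReader are assumed to be set up elsewhere). A request that previously overrode FacetRequest.createAggregator now gets its Aggregator from an OldFacetsAccumulator subclass instead:

// The accumulator, not the request, now owns the request-to-Aggregator mapping.
OldFacetsAccumulator fa = new OldFacetsAccumulator(fsp, indexReader, taxoReader) {
  @Override
  protected Aggregator createAggregator(FacetRequest fr, FacetArrays facetArrays) {
    if (fr instanceof MyFacetRequest) { // hypothetical custom request
      return new MyAggregator(facetArrays.getIntArray()); // hypothetical aggregator
    }
    // built-in handling of count, sum-score and over-sampled requests
    return super.createAggregator(fr, facetArrays);
  }
};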

View File

@ -11,20 +11,20 @@ import java.io.IOException;
import java.util.HashMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.facet.old.Aggregator;
import org.apache.lucene.facet.old.CountingAggregator;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.old.ScoredDocIdsUtils;
import org.apache.lucene.facet.params.CategoryListParams;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.search.Aggregator;
import org.apache.lucene.facet.search.CategoryListIterator;
import org.apache.lucene.facet.search.CountFacetRequest;
import org.apache.lucene.facet.search.CountingAggregator;
import org.apache.lucene.facet.search.FacetArrays;
import org.apache.lucene.facet.search.FacetRequest;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.facet.util.PartitionsUtils;
import org.apache.lucene.facet.util.ScoredDocIdsUtils;
import org.apache.lucene.index.IndexReader;
/*
@ -159,7 +159,7 @@ public class TotalFacetCounts {
final int[][] counts = new int[(int) Math.ceil(taxonomy.getSize() /(float) partitionSize)][partitionSize];
FacetSearchParams newSearchParams = new FacetSearchParams(facetIndexingParams, DUMMY_REQ);
//createAllListsSearchParams(facetIndexingParams, this.totalCounts);
StandardFacetsAccumulator sfa = new StandardFacetsAccumulator(newSearchParams, indexReader, taxonomy) {
OldFacetsAccumulator sfa = new OldFacetsAccumulator(newSearchParams, indexReader, taxonomy) {
@Override
protected HashMap<CategoryListIterator, Aggregator> getCategoryListMap(
FacetArrays facetArrays, int partition) throws IOException {
@ -172,7 +172,7 @@ public class TotalFacetCounts {
return map;
}
};
sfa.setComplementThreshold(StandardFacetsAccumulator.DISABLE_COMPLEMENT);
sfa.setComplementThreshold(OldFacetsAccumulator.DISABLE_COMPLEMENT);
sfa.accumulate(ScoredDocIdsUtils.createAllDocsScoredDocIDs(indexReader));
return new TotalFacetCounts(taxonomy, facetIndexingParams, counts, CreationType.Computed);
}

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.search;
package org.apache.lucene.facet.old;
import java.io.IOException;
import java.util.List;
@ -7,6 +7,9 @@ import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.sampling.RandomSampler;
import org.apache.lucene.facet.sampling.Sampler;
import org.apache.lucene.facet.sampling.SamplingAccumulator;
import org.apache.lucene.facet.search.FacetArrays;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetsAccumulator;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.IndexReader;
@ -39,13 +42,13 @@ import org.apache.lucene.index.IndexReader;
*
* @lucene.experimental
*/
public final class AdaptiveFacetsAccumulator extends StandardFacetsAccumulator {
public final class AdaptiveFacetsAccumulator extends OldFacetsAccumulator {
private Sampler sampler = new RandomSampler();
/**
* Create an {@link AdaptiveFacetsAccumulator}
* @see StandardFacetsAccumulator#StandardFacetsAccumulator(FacetSearchParams, IndexReader, TaxonomyReader)
* @see OldFacetsAccumulator#OldFacetsAccumulator(FacetSearchParams, IndexReader, TaxonomyReader)
*/
public AdaptiveFacetsAccumulator(FacetSearchParams searchParams, IndexReader indexReader,
TaxonomyReader taxonomyReader) {
@ -55,7 +58,7 @@ public final class AdaptiveFacetsAccumulator extends StandardFacetsAccumulator {
/**
* Create an {@link AdaptiveFacetsAccumulator}
*
* @see StandardFacetsAccumulator#StandardFacetsAccumulator(FacetSearchParams,
* @see OldFacetsAccumulator#OldFacetsAccumulator(FacetSearchParams,
* IndexReader, TaxonomyReader, FacetArrays)
*/
public AdaptiveFacetsAccumulator(FacetSearchParams searchParams, IndexReader indexReader,
@ -73,7 +76,7 @@ public final class AdaptiveFacetsAccumulator extends StandardFacetsAccumulator {
@Override
public List<FacetResult> accumulate(ScoredDocIDs docids) throws IOException {
StandardFacetsAccumulator delegee = appropriateFacetCountingAccumulator(docids);
OldFacetsAccumulator delegee = appropriateFacetCountingAccumulator(docids);
if (delegee == this) {
return super.accumulate(docids);
@ -86,7 +89,7 @@ public final class AdaptiveFacetsAccumulator extends StandardFacetsAccumulator {
* Compute the appropriate facet accumulator to use.
* If no special/clever adaptation is possible/needed return this (self).
*/
private StandardFacetsAccumulator appropriateFacetCountingAccumulator(ScoredDocIDs docids) {
private OldFacetsAccumulator appropriateFacetCountingAccumulator(ScoredDocIDs docids) {
// Verify that searchParams permit sampling/complement/etc... otherwise do default
if (!mayComplement()) {
return this;
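
A sketch of the adaptive path, mirroring AdaptiveAccumulatorTest further down (fsp, indexReader and taxoReader assumed):

// Delegates to a sampling accumulator per accumulate() call when the search
// params permit it; otherwise falls back to plain OldFacetsAccumulator behavior.
AdaptiveFacetsAccumulator afa = new AdaptiveFacetsAccumulator(fsp, indexReader, taxoReader);
afa.setSampler(new RandomSampler()); // optional; RandomSampler is the default
FacetsCollector fc = FacetsCollector.create(afa);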

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.search;
package org.apache.lucene.facet.old;
import java.io.IOException;

View File

@ -1,8 +1,7 @@
package org.apache.lucene.facet.complements;
package org.apache.lucene.facet.old;
import java.io.IOException;
import org.apache.lucene.facet.search.CountingAggregator;
import org.apache.lucene.util.IntsRef;
/*

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.search;
package org.apache.lucene.facet.old;
import java.io.IOException;
@ -23,9 +23,9 @@ import org.apache.lucene.util.IntsRef;
*/
/**
* A CountingAggregator updates a counter array with the size of the whole
* taxonomy, counting the number of times each category appears in the given set
* of documents.
* An {@link Aggregator} which updates a counter array with the size of the
* whole taxonomy, counting the number of times each category appears in the
* given set of documents.
*
* @lucene.experimental
*/

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.search;
package org.apache.lucene.facet.old;
import java.io.IOException;
import java.util.Iterator;

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.search;
package org.apache.lucene.facet.old;
import java.io.IOException;
import java.util.ArrayList;
@ -7,8 +7,6 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.lucene.facet.complements.TotalFacetCounts;
import org.apache.lucene.facet.complements.TotalFacetCountsCache;
@ -16,11 +14,22 @@ import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.partitions.IntermediateFacetResult;
import org.apache.lucene.facet.partitions.PartitionsFacetResultsHandler;
import org.apache.lucene.facet.sampling.Sampler.OverSampledFacetRequest;
import org.apache.lucene.facet.search.CategoryListIterator;
import org.apache.lucene.facet.search.CountFacetRequest;
import org.apache.lucene.facet.search.FacetArrays;
import org.apache.lucene.facet.search.FacetRequest;
import org.apache.lucene.facet.search.FacetRequest.ResultMode;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetsAccumulator;
import org.apache.lucene.facet.search.FacetsAggregator;
import org.apache.lucene.facet.search.FacetsCollector.MatchingDocs;
import org.apache.lucene.facet.search.SumScoreFacetRequest;
import org.apache.lucene.facet.search.TaxonomyFacetsAccumulator;
import org.apache.lucene.facet.search.TopKFacetResultsHandler;
import org.apache.lucene.facet.search.TopKInEachNodeHandler;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.facet.util.PartitionsUtils;
import org.apache.lucene.facet.util.ScoredDocIdsUtils;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.IntsRef;
@ -43,30 +52,20 @@ import org.apache.lucene.util.IntsRef;
*/
/**
* Standard implementation for {@link TaxonomyFacetsAccumulator}, utilizing partitions to save on memory.
* A {@link FacetsAccumulator} which supports partitions, sampling and
* complement counting.
* <p>
* Why partitions? Because if there are say 100M categories out of which
* only the top K are required, we must first compute values for all 100M categories
* (going over all documents) and only then could we select top K.
* This is made easier on memory by working in partitions of distinct categories:
* Once the values for a partition are found, we take the top K for that
* partition and work on the next partition, then merge the top K of both,
* and so forth, thereby computing top K with RAM needs for the size of
* a single partition rather than for the size of all the 100M categories.
* <p>
* Decision on partitions size is done at indexing time, and the facet information
* for each partition is maintained separately.
* <p>
* <u>Implementation detail:</u> Since facets information of each partition is
* maintained in a separate "category list", we can be more efficient
* at search time, because only the facet info for a single partition
* need to be read while processing that partition.
* <b>NOTE:</b> this accumulator still uses the old API and will be removed
* eventually in favor of dedicated accumulators which support the above
* features over the new {@link FacetsAggregator} API. It provides
* {@link Aggregator} implementations for {@link CountFacetRequest},
* {@link SumScoreFacetRequest} and {@link OverSampledFacetRequest}. If you need
* to use it in conjunction with other facet requests, you should override
* {@link #createAggregator(FacetRequest, FacetArrays)}.
*
* @lucene.experimental
*/
public class StandardFacetsAccumulator extends TaxonomyFacetsAccumulator {
private static final Logger logger = Logger.getLogger(StandardFacetsAccumulator.class.getName());
public class OldFacetsAccumulator extends TaxonomyFacetsAccumulator {
/**
* Default threshold for using the complements optimization.
@ -100,12 +99,12 @@ public class StandardFacetsAccumulator extends TaxonomyFacetsAccumulator {
return new FacetArrays(PartitionsUtils.partitionSize(searchParams.indexingParams, taxoReader));
}
public StandardFacetsAccumulator(FacetSearchParams searchParams, IndexReader indexReader,
public OldFacetsAccumulator(FacetSearchParams searchParams, IndexReader indexReader,
TaxonomyReader taxonomyReader) {
this(searchParams, indexReader, taxonomyReader, null);
}
public StandardFacetsAccumulator(FacetSearchParams searchParams, IndexReader indexReader,
public OldFacetsAccumulator(FacetSearchParams searchParams, IndexReader indexReader,
TaxonomyReader taxonomyReader, FacetArrays facetArrays) {
super(searchParams, indexReader, taxonomyReader, facetArrays == null ? createFacetArrays(searchParams, taxonomyReader) : facetArrays);
@ -143,14 +142,8 @@ public class StandardFacetsAccumulator extends TaxonomyFacetsAccumulator {
// MultiReader, which might be problematic for several applications.
// We could, for example, base our "isCurrent" logic on something else
// than the reader's version. Need to think more deeply about it.
if (logger.isLoggable(Level.FINEST)) {
logger.log(Level.FINEST, "IndexReader used does not support completents: ", e);
}
isUsingComplements = false;
} catch (IOException e) {
if (logger.isLoggable(Level.FINEST)) {
logger.log(Level.FINEST, "Failed to load/calculate total counts (complement counting disabled): ", e);
}
// silently fail if for some reason failed to load/save from/to dir
isUsingComplements = false;
} catch (Exception e) {
@ -224,7 +217,7 @@ public class StandardFacetsAccumulator extends TaxonomyFacetsAccumulator {
}
@Override
protected PartitionsFacetResultsHandler createFacetResultsHandler(FacetRequest fr) {
public PartitionsFacetResultsHandler createFacetResultsHandler(FacetRequest fr) {
if (fr.getResultMode() == ResultMode.PER_NODE_IN_TREE) {
return new TopKInEachNodeHandler(taxonomyReader, fr, facetArrays);
} else {
@ -340,6 +333,28 @@ public class StandardFacetsAccumulator extends TaxonomyFacetsAccumulator {
return 1;
}
protected Aggregator createAggregator(FacetRequest fr, FacetArrays facetArrays) {
if (fr instanceof CountFacetRequest) {
// we rely on that, if needed, result is cleared by arrays!
int[] a = facetArrays.getIntArray();
if (isUsingComplements) {
return new ComplementCountingAggregator(a);
} else {
return new CountingAggregator(a);
}
} else if (fr instanceof SumScoreFacetRequest) {
if (isUsingComplements) {
throw new IllegalArgumentException("complements are not supported by this SumScoreFacetRequest");
} else {
return new ScoringAggregator(facetArrays.getFloatArray());
}
} else if (fr instanceof OverSampledFacetRequest) {
return createAggregator(((OverSampledFacetRequest) fr).orig, facetArrays);
} else {
throw new IllegalArgumentException("unknown Aggregator implementation for request " + fr.getClass());
}
}
/**
* Create an {@link Aggregator} and a {@link CategoryListIterator} for each
* and every {@link FacetRequest}. Generating a map, matching each
@ -359,7 +374,7 @@ public class StandardFacetsAccumulator extends TaxonomyFacetsAccumulator {
FacetIndexingParams indexingParams = searchParams.indexingParams;
for (FacetRequest facetRequest : searchParams.facetRequests) {
Aggregator categoryAggregator = facetRequest.createAggregator(isUsingComplements, facetArrays, taxonomyReader);
Aggregator categoryAggregator = createAggregator(facetRequest, facetArrays); // nocommit remove! facetRequest.createAggregator(isUsingComplements, facetArrays, taxonomyReader);
CategoryListIterator cli = indexingParams.getCategoryListParams(facetRequest.categoryPath).createCategoryListIterator(partition);
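
For reference, a usage sketch of the renamed accumulator, assembled from the test changes further down (indexReader, taxoReader and searcher are assumed to be open):

FacetSearchParams fsp = new FacetSearchParams(
    new CountFacetRequest(new CategoryPath("root", "a"), 10));
OldFacetsAccumulator sfa = new OldFacetsAccumulator(fsp, indexReader, taxoReader);
// FORCE_COMPLEMENT / DISABLE_COMPLEMENT bypass the default threshold heuristic
sfa.setComplementThreshold(OldFacetsAccumulator.FORCE_COMPLEMENT);
FacetsCollector fc = FacetsCollector.create(sfa);
searcher.search(new MatchAllDocsQuery(), fc);
List<FacetResult> results = fc.getFacetResults();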

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.search;
package org.apache.lucene.facet.old;
import java.io.IOException;
@ -25,7 +25,7 @@ import org.apache.lucene.search.DocIdSet;
* Document IDs with scores for each, driving facets accumulation. Document
* scores are optionally used in the process of facets scoring.
*
* @see StandardFacetsAccumulator#accumulate(ScoredDocIDs)
* @see OldFacetsAccumulator#accumulate(ScoredDocIDs)
* @lucene.experimental
*/
public interface ScoredDocIDs {

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.search;
package org.apache.lucene.facet.old;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more

View File

@ -1,10 +1,10 @@
package org.apache.lucene.facet.util;
package org.apache.lucene.facet.old;
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.search.ScoredDocIDsIterator;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.old.ScoredDocIDsIterator;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.search;
package org.apache.lucene.facet.old;
import java.io.IOException;

View File

@ -0,0 +1,24 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<title>Old Faceted Search API</title>
</head>
<body>
Old faceted search API, kept until complements, sampling and partitions are migrated to the new API.
</body>
</html>

View File

@ -2,13 +2,13 @@ package org.apache.lucene.facet.partitions;
import java.io.IOException;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.search.FacetArrays;
import org.apache.lucene.facet.search.FacetRequest;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetResultNode;
import org.apache.lucene.facet.search.FacetResultsHandler;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
/*
@ -103,7 +103,7 @@ public abstract class PartitionsFacetResultsHandler extends FacetResultsHandler
/**
* Label results according to settings in {@link FacetRequest}, such as
* {@link FacetRequest#getNumLabel()}. Usually invoked by
* {@link StandardFacetsAccumulator#accumulate(ScoredDocIDs)}
* {@link OldFacetsAccumulator#accumulate(ScoredDocIDs)}
*
* @param facetResult
* facet result to be labeled.

View File

@ -20,12 +20,10 @@ package org.apache.lucene.facet.range;
import java.util.List;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.search.Aggregator;
import org.apache.lucene.facet.search.FacetArrays;
import org.apache.lucene.facet.search.FacetRequest;
import org.apache.lucene.facet.search.FacetsAggregator;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
/**
* Facet request for dynamic ranges based on a
@ -49,11 +47,6 @@ public class RangeFacetRequest<T extends Range> extends FacetRequest {
this(field, (T[]) ranges.toArray(new Range[ranges.size()]));
}
@Override
public Aggregator createAggregator(boolean useComplements, FacetArrays arrays, TaxonomyReader taxonomy) {
throw new UnsupportedOperationException();
}
@Override
public FacetsAggregator createFacetsAggregator(FacetIndexingParams fip) {
return null;

View File

@ -3,9 +3,9 @@ package org.apache.lucene.facet.sampling;
import java.io.IOException;
import java.util.Random;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.search.ScoredDocIDsIterator;
import org.apache.lucene.facet.util.ScoredDocIdsUtils;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.old.ScoredDocIDsIterator;
import org.apache.lucene.facet.old.ScoredDocIdsUtils;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more

View File

@ -5,12 +5,11 @@ import java.util.Arrays;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.old.ScoredDocIDsIterator;
import org.apache.lucene.facet.old.ScoredDocIdsUtils;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.search.ScoredDocIDsIterator;
import org.apache.lucene.facet.util.ScoredDocIdsUtils;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with

View File

@ -2,9 +2,9 @@ package org.apache.lucene.facet.sampling;
import java.io.IOException;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetResultNode;
import org.apache.lucene.facet.search.ScoredDocIDs;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more

View File

@ -4,16 +4,14 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.search.Aggregator;
import org.apache.lucene.facet.search.FacetArrays;
import org.apache.lucene.facet.search.FacetRequest;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetResultNode;
import org.apache.lucene.facet.search.FacetsAggregator;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -198,15 +196,9 @@ public abstract class Sampler {
return res;
}
/**
* Wrapping a facet request for over sampling.
* Implementation detail: even if the original request is a count request, no
* statistics will be computed for it as the wrapping is not a count request.
* This is ok, as the sampling accumulator is later computing the statistics
* over the original requests.
*/
private static class OverSampledFacetRequest extends FacetRequest {
final FacetRequest orig;
/** Wrapping a facet request for over sampling. */
public static class OverSampledFacetRequest extends FacetRequest {
public final FacetRequest orig;
public OverSampledFacetRequest(FacetRequest orig, int num) {
super(orig.categoryPath, num);
this.orig = orig;
@ -221,12 +213,6 @@ public abstract class Sampler {
return orig.createFacetsAggregator(fip);
}
@Override
public Aggregator createAggregator(boolean useComplements, FacetArrays arrays, TaxonomyReader taxonomy)
throws IOException {
return orig.createAggregator(useComplements, arrays, taxonomy);
}
@Override
public FacetArraysSource getFacetArraysSource() {
return orig.getFacetArraysSource();

View File

@ -4,14 +4,14 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.partitions.PartitionsFacetResultsHandler;
import org.apache.lucene.facet.sampling.Sampler.SampleResult;
import org.apache.lucene.facet.search.FacetArrays;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetsAccumulator;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.IndexReader;
@ -38,10 +38,10 @@ import org.apache.lucene.index.IndexReader;
* Note two major differences between this class and {@link SamplingWrapper}:
* <ol>
* <li>Latter can wrap any other {@link FacetsAccumulator} while this class
* directly extends {@link StandardFacetsAccumulator}.</li>
* directly extends {@link OldFacetsAccumulator}.</li>
* <li>This class can effectively apply sampling on the complement set of
* matching documents, thereby working efficiently with the complement
* optimization - see {@link StandardFacetsAccumulator#getComplementThreshold()}
* optimization - see {@link OldFacetsAccumulator#getComplementThreshold()}
* .</li>
* </ol>
* <p>
@ -52,7 +52,7 @@ import org.apache.lucene.index.IndexReader;
* @see Sampler
* @lucene.experimental
*/
public class SamplingAccumulator extends StandardFacetsAccumulator {
public class SamplingAccumulator extends OldFacetsAccumulator {
private double samplingRatio = -1d;
private final Sampler sampler;
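
The two sampling entry points side by side, per the javadoc above (sampler, fsp, indexReader and taxoReader assumed):

// Built-in sampling; can also sample the complement of the matching set.
OldFacetsAccumulator a1 = new SamplingAccumulator(sampler, fsp, indexReader, taxoReader);
// After-the-fact wrapping of an existing OldFacetsAccumulator.
OldFacetsAccumulator a2 = new SamplingWrapper(
    new OldFacetsAccumulator(fsp, indexReader, taxoReader), sampler);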

View File

@ -32,19 +32,19 @@ public class SamplingParams {
/**
* Default ratio between size of sample to original size of document set.
* @see Sampler#getSampleSet(org.apache.lucene.facet.search.ScoredDocIDs)
* @see Sampler#getSampleSet(org.apache.lucene.facet.old.ScoredDocIDs)
*/
public static final double DEFAULT_SAMPLE_RATIO = 0.01;
/**
* Default maximum size of sample.
* @see Sampler#getSampleSet(org.apache.lucene.facet.search.ScoredDocIDs)
* @see Sampler#getSampleSet(org.apache.lucene.facet.old.ScoredDocIDs)
*/
public static final int DEFAULT_MAX_SAMPLE_SIZE = 10000;
/**
* Default minimum size of sample.
* @see Sampler#getSampleSet(org.apache.lucene.facet.search.ScoredDocIDs)
* @see Sampler#getSampleSet(org.apache.lucene.facet.old.ScoredDocIDs)
*/
public static final int DEFAULT_MIN_SAMPLE_SIZE = 100;
@ -65,7 +65,7 @@ public class SamplingParams {
/**
* Return the maxSampleSize.
* In no case should the resulting sample size exceed this value.
* @see Sampler#getSampleSet(org.apache.lucene.facet.search.ScoredDocIDs)
* @see Sampler#getSampleSet(org.apache.lucene.facet.old.ScoredDocIDs)
*/
public final int getMaxSampleSize() {
return maxSampleSize;
@ -74,7 +74,7 @@ public class SamplingParams {
/**
* Return the minSampleSize.
* In no case should the resulting sample size be smaller than this value.
* @see Sampler#getSampleSet(org.apache.lucene.facet.search.ScoredDocIDs)
* @see Sampler#getSampleSet(org.apache.lucene.facet.old.ScoredDocIDs)
*/
public final int getMinSampleSize() {
return minSampleSize;
@ -82,7 +82,7 @@ public class SamplingParams {
/**
* @return the sampleRatio
* @see Sampler#getSampleSet(org.apache.lucene.facet.search.ScoredDocIDs)
* @see Sampler#getSampleSet(org.apache.lucene.facet.old.ScoredDocIDs)
*/
public final double getSampleRatio() {
return sampleRatio;

View File

@ -4,12 +4,12 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.partitions.PartitionsFacetResultsHandler;
import org.apache.lucene.facet.sampling.Sampler.SampleResult;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
/*
@ -38,12 +38,12 @@ import org.apache.lucene.facet.taxonomy.TaxonomyReader;
*
* @lucene.experimental
*/
public class SamplingWrapper extends StandardFacetsAccumulator {
public class SamplingWrapper extends OldFacetsAccumulator {
private StandardFacetsAccumulator delegee;
private OldFacetsAccumulator delegee;
private Sampler sampler;
public SamplingWrapper(StandardFacetsAccumulator delegee, Sampler sampler) {
public SamplingWrapper(OldFacetsAccumulator delegee, Sampler sampler) {
super(delegee.searchParams, delegee.indexReader, delegee.taxonomyReader);
this.delegee = delegee;
this.sampler = sampler;

View File

@ -2,11 +2,11 @@ package org.apache.lucene.facet.sampling;
import java.io.IOException;
import org.apache.lucene.facet.old.ScoredDocIDs;
import org.apache.lucene.facet.old.ScoredDocIDsIterator;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.search.DrillDownQuery;
import org.apache.lucene.facet.search.FacetResultNode;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.search.ScoredDocIDsIterator;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.DocsEnum;

View File

@ -1,9 +1,7 @@
package org.apache.lucene.facet.search;
import org.apache.lucene.facet.complements.ComplementCountingAggregator;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -33,17 +31,6 @@ public class CountFacetRequest extends FacetRequest {
super(path, num);
}
// TODO nuke Aggregator and move this logic to StandardFacetsAccumulator -- it should only be used for counting
@Override
public Aggregator createAggregator(boolean useComplements, FacetArrays arrays, TaxonomyReader taxonomy) {
// we rely on that, if needed, result is cleared by arrays!
int[] a = arrays.getIntArray();
if (useComplements) {
return new ComplementCountingAggregator(a);
}
return new CountingAggregator(a);
}
@Override
public FacetsAggregator createFacetsAggregator(FacetIndexingParams fip) {
return CountingFacetsAggregator.create(fip.getCategoryListParams(categoryPath));

View File

@ -1,12 +1,9 @@
package org.apache.lucene.facet.search;
import java.io.IOException;
import org.apache.lucene.facet.params.CategoryListParams.OrdinalPolicy;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.range.RangeFacetRequest;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -121,26 +118,6 @@ public abstract class FacetRequest {
hashCode = categoryPath.hashCode() ^ this.numResults;
}
/**
* Create an aggregator for this facet request. Aggregator action depends on
* request definition. For a count request, it will usually increment the
* count for that facet.
*
* @param useComplements
* whether the complements optimization is being used for current
* computation.
* @param arrays
* provider for facet arrays in use for current computation.
* @param taxonomy
* reader of taxonomy in effect.
* @throws IOException If there is a low-level I/O error.
*/
public Aggregator createAggregator(boolean useComplements, FacetArrays arrays, TaxonomyReader taxonomy)
throws IOException {
throw new UnsupportedOperationException("this FacetRequest does not support this type of Aggregator anymore; " +
"you should override FacetsAccumulator to return the proper FacetsAggregator");
}
/**
* Returns the {@link FacetsAggregator} which can aggregate the categories of
* this facet request. The aggregator is expected to aggregate category values

View File

@ -4,6 +4,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.range.RangeAccumulator;
@ -72,7 +73,7 @@ public abstract class FacetsAccumulator {
public static FacetsAccumulator create(FacetSearchParams fsp, IndexReader indexReader, TaxonomyReader taxoReader,
FacetArrays arrays) {
if (fsp.indexingParams.getPartitionSize() != Integer.MAX_VALUE) {
return new StandardFacetsAccumulator(fsp, indexReader, taxoReader, arrays);
return new OldFacetsAccumulator(fsp, indexReader, taxoReader, arrays);
}
List<FacetRequest> rangeRequests = new ArrayList<FacetRequest>();

View File

@ -2,7 +2,6 @@ package org.apache.lucene.facet.search;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -39,12 +38,6 @@ public class SumScoreFacetRequest extends FacetRequest {
return new SumScoreFacetsAggregator();
}
@Override
public Aggregator createAggregator(boolean useComplements, FacetArrays arrays, TaxonomyReader taxonomy) {
assert !useComplements : "complements are not supported by this FacetRequest";
return new ScoringAggregator(arrays.getFloatArray());
}
@Override
public double getValueOf(FacetArrays arrays, int ordinal) {
return arrays.getFloatArray()[ordinal];

View File

@ -4,13 +4,13 @@ import java.io.IOException;
import java.util.List;
import org.apache.lucene.facet.FacetTestBase;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.search.CountFacetRequest;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetResultNode;
import org.apache.lucene.facet.search.FacetsCollector;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
@ -112,8 +112,8 @@ public class TestFacetsAccumulatorWithComplement extends FacetTestBase {
/** compute facets with certain facet requests and docs */
private List<FacetResult> findFacets(boolean withComplement) throws IOException {
FacetSearchParams fsp = new FacetSearchParams(fip, new CountFacetRequest(new CategoryPath("root","a"), 10));
StandardFacetsAccumulator sfa = new StandardFacetsAccumulator(fsp, indexReader, taxoReader);
sfa.setComplementThreshold(withComplement ? StandardFacetsAccumulator.FORCE_COMPLEMENT : StandardFacetsAccumulator.DISABLE_COMPLEMENT);
OldFacetsAccumulator sfa = new OldFacetsAccumulator(fsp, indexReader, taxoReader);
sfa.setComplementThreshold(withComplement ? OldFacetsAccumulator.FORCE_COMPLEMENT : OldFacetsAccumulator.DISABLE_COMPLEMENT);
FacetsCollector fc = FacetsCollector.create(sfa);
searcher.search(new MatchAllDocsQuery(), fc);

View File

@ -1,12 +1,11 @@
package org.apache.lucene.facet.search;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.LuceneTestCase.Slow;
package org.apache.lucene.facet.old;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.sampling.BaseSampleTestTopK;
import org.apache.lucene.facet.sampling.Sampler;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.LuceneTestCase.Slow;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -29,7 +28,7 @@ import org.apache.lucene.facet.taxonomy.TaxonomyReader;
public class AdaptiveAccumulatorTest extends BaseSampleTestTopK {
@Override
protected StandardFacetsAccumulator getSamplingAccumulator(Sampler sampler, TaxonomyReader taxoReader,
protected OldFacetsAccumulator getSamplingAccumulator(Sampler sampler, TaxonomyReader taxoReader,
IndexReader indexReader, FacetSearchParams searchParams) {
AdaptiveFacetsAccumulator res = new AdaptiveFacetsAccumulator(searchParams, indexReader, taxoReader);
res.setSampler(sampler);

View File

@ -1,4 +1,4 @@
package org.apache.lucene.facet.util;
package org.apache.lucene.facet.old;
import java.io.IOException;
import java.util.Random;
@ -10,8 +10,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.facet.FacetTestCase;
import org.apache.lucene.facet.search.ScoredDocIDs;
import org.apache.lucene.facet.search.ScoredDocIDsIterator;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;

View File

@ -3,18 +3,14 @@ package org.apache.lucene.facet.sampling;
import java.util.List;
import java.util.Random;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.sampling.RandomSampler;
import org.apache.lucene.facet.sampling.RepeatableSampler;
import org.apache.lucene.facet.sampling.Sampler;
import org.apache.lucene.facet.sampling.SamplingParams;
import org.apache.lucene.facet.search.BaseTestTopK;
import org.apache.lucene.facet.search.FacetRequest;
import org.apache.lucene.facet.search.FacetRequest.ResultMode;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetsCollector;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.search.FacetRequest.ResultMode;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@ -60,7 +56,7 @@ public abstract class BaseSampleTestTopK extends BaseTestTopK {
return res;
}
protected abstract StandardFacetsAccumulator getSamplingAccumulator(Sampler sampler, TaxonomyReader taxoReader,
protected abstract OldFacetsAccumulator getSamplingAccumulator(Sampler sampler, TaxonomyReader taxoReader,
IndexReader indexReader, FacetSearchParams searchParams);
/**
@ -123,8 +119,8 @@ public abstract class BaseSampleTestTopK extends BaseTestTopK {
private FacetsCollector samplingCollector(final boolean complement, final Sampler sampler,
FacetSearchParams samplingSearchParams) {
StandardFacetsAccumulator sfa = getSamplingAccumulator(sampler, taxoReader, indexReader, samplingSearchParams);
sfa.setComplementThreshold(complement ? StandardFacetsAccumulator.FORCE_COMPLEMENT : StandardFacetsAccumulator.DISABLE_COMPLEMENT);
OldFacetsAccumulator sfa = getSamplingAccumulator(sampler, taxoReader, indexReader, samplingSearchParams);
sfa.setComplementThreshold(complement ? OldFacetsAccumulator.FORCE_COMPLEMENT : OldFacetsAccumulator.DISABLE_COMPLEMENT);
return FacetsCollector.create(sfa);
}

View File

@ -6,19 +6,15 @@ import java.util.Collections;
import org.apache.lucene.document.Document;
import org.apache.lucene.facet.FacetTestCase;
import org.apache.lucene.facet.index.FacetFields;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.sampling.RandomSampler;
import org.apache.lucene.facet.sampling.Sampler;
import org.apache.lucene.facet.sampling.SamplingAccumulator;
import org.apache.lucene.facet.sampling.SamplingParams;
import org.apache.lucene.facet.search.CountFacetRequest;
import org.apache.lucene.facet.search.FacetRequest;
import org.apache.lucene.facet.search.FacetRequest.ResultMode;
import org.apache.lucene.facet.search.FacetResult;
import org.apache.lucene.facet.search.FacetResultNode;
import org.apache.lucene.facet.search.FacetsCollector;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.search.FacetRequest.ResultMode;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
@ -116,7 +112,7 @@ public class OversampleWithDepthTest extends FacetTestCase {
final SamplingParams params) throws IOException {
// a FacetsCollector with a sampling accumulator
Sampler sampler = new RandomSampler(params, random());
StandardFacetsAccumulator sfa = new SamplingAccumulator(sampler, fsp, r, tr);
OldFacetsAccumulator sfa = new SamplingAccumulator(sampler, fsp, r, tr);
FacetsCollector fcWithSampling = FacetsCollector.create(sfa);
IndexSearcher s = newSearcher(r);

View File

@ -4,12 +4,12 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.facet.FacetTestBase;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.search.CountFacetRequest;
import org.apache.lucene.facet.search.FacetResultNode;
import org.apache.lucene.facet.search.FacetsCollector;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.taxonomy.CategoryPath;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.junit.After;
@ -99,7 +99,7 @@ public class SamplerTest extends FacetTestBase {
// Make sure no complements are in action
accumulator
.setComplementThreshold(StandardFacetsAccumulator.DISABLE_COMPLEMENT);
.setComplementThreshold(OldFacetsAccumulator.DISABLE_COMPLEMENT);
FacetsCollector fc = FacetsCollector.create(accumulator);

View File

@ -1,9 +1,7 @@
package org.apache.lucene.facet.sampling;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.sampling.Sampler;
import org.apache.lucene.facet.sampling.SamplingAccumulator;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.LuceneTestCase.Slow;
@ -29,7 +27,7 @@ import org.apache.lucene.util.LuceneTestCase.Slow;
public class SamplingAccumulatorTest extends BaseSampleTestTopK {
@Override
protected StandardFacetsAccumulator getSamplingAccumulator(Sampler sampler, TaxonomyReader taxoReader,
protected OldFacetsAccumulator getSamplingAccumulator(Sampler sampler, TaxonomyReader taxoReader,
IndexReader indexReader, FacetSearchParams searchParams) {
return new SamplingAccumulator(sampler, searchParams, indexReader, taxoReader);
}

View File

@ -1,14 +1,11 @@
package org.apache.lucene.facet.sampling;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.sampling.Sampler;
import org.apache.lucene.facet.sampling.SamplingWrapper;
import org.apache.lucene.facet.search.StandardFacetsAccumulator;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -30,9 +27,9 @@ import org.apache.lucene.facet.taxonomy.TaxonomyReader;
public class SamplingWrapperTest extends BaseSampleTestTopK {
@Override
protected StandardFacetsAccumulator getSamplingAccumulator(Sampler sampler, TaxonomyReader taxoReader,
protected OldFacetsAccumulator getSamplingAccumulator(Sampler sampler, TaxonomyReader taxoReader,
IndexReader indexReader, FacetSearchParams searchParams) {
return new SamplingWrapper(new StandardFacetsAccumulator(searchParams, indexReader, taxoReader), sampler);
return new SamplingWrapper(new OldFacetsAccumulator(searchParams, indexReader, taxoReader), sampler);
}
}

View File

@ -11,6 +11,8 @@ import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.facet.FacetTestCase;
import org.apache.lucene.facet.index.FacetFields;
import org.apache.lucene.facet.old.AdaptiveFacetsAccumulator;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.CategoryListParams;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
@ -218,7 +220,7 @@ public class TestFacetsCollector extends FacetTestCase {
FacetSearchParams fsp = new FacetSearchParams(new CountFacetRequest(CategoryPath.EMPTY, 10));
final TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new StandardFacetsAccumulator(fsp, r, taxo);
final TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new OldFacetsAccumulator(fsp, r, taxo);
FacetsCollector fc = FacetsCollector.create(fa);
newSearcher(r).search(new MatchAllDocsQuery(), fc);
@ -252,7 +254,7 @@ public class TestFacetsCollector extends FacetTestCase {
FacetSearchParams fsp = new FacetSearchParams(
new CountFacetRequest(new CategoryPath("a"), 10),
new CountFacetRequest(new CategoryPath("b"), 10));
final TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new StandardFacetsAccumulator(fsp, r, taxo);
final TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new OldFacetsAccumulator(fsp, r, taxo);
final FacetsCollector fc = FacetsCollector.create(fa);
newSearcher(r).search(new MatchAllDocsQuery(), fc);
@ -284,7 +286,7 @@ public class TestFacetsCollector extends FacetTestCase {
FacetSearchParams fsp = new FacetSearchParams(
new CountFacetRequest(new CategoryPath("a"), 10),
new CountFacetRequest(new CategoryPath("b"), 10));
final TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new StandardFacetsAccumulator(fsp, r, taxo);
final TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new OldFacetsAccumulator(fsp, r, taxo);
final FacetsCollector fc = FacetsCollector.create(fa);
// this should populate the cached results, but doing search should clear the cache
fc.getFacetResults();
@ -325,7 +327,7 @@ public class TestFacetsCollector extends FacetTestCase {
// assert IntFacetResultHandler
FacetSearchParams fsp = new FacetSearchParams(new CountFacetRequest(new CategoryPath("a"), 10));
TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new StandardFacetsAccumulator(fsp, r, taxo);
TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new OldFacetsAccumulator(fsp, r, taxo);
FacetsCollector fc = FacetsCollector.create(fa);
newSearcher(r).search(new MatchAllDocsQuery(), fc);
assertTrue("invalid ordinal for child node: 0", 0 != fc.getFacetResults().get(0).getFacetResultNode().subResults.get(0).ordinal);
@ -340,7 +342,7 @@ public class TestFacetsCollector extends FacetTestCase {
}
};
} else {
fa = new StandardFacetsAccumulator(fsp, r, taxo);
fa = new OldFacetsAccumulator(fsp, r, taxo);
}
fc = FacetsCollector.create(fa);
newSearcher(r).search(new MatchAllDocsQuery(), fc);
@ -374,7 +376,7 @@ public class TestFacetsCollector extends FacetTestCase {
CountFacetRequest cfr = new CountFacetRequest(new CategoryPath("a"), 2);
cfr.setResultMode(random().nextBoolean() ? ResultMode.GLOBAL_FLAT : ResultMode.PER_NODE_IN_TREE);
FacetSearchParams fsp = new FacetSearchParams(cfr);
final TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new StandardFacetsAccumulator(fsp, r, taxo);
final TaxonomyFacetsAccumulator fa = random().nextBoolean() ? new TaxonomyFacetsAccumulator(fsp, r, taxo) : new OldFacetsAccumulator(fsp, r, taxo);
FacetsCollector fc = FacetsCollector.create(fa);
newSearcher(r).search(new MatchAllDocsQuery(), fc);
@ -415,10 +417,10 @@ public class TestFacetsCollector extends FacetTestCase {
TaxonomyFacetsAccumulator[] accumulators = new TaxonomyFacetsAccumulator[] {
new TaxonomyFacetsAccumulator(fsp, indexReader, taxoReader),
new StandardFacetsAccumulator(fsp, indexReader, taxoReader),
new OldFacetsAccumulator(fsp, indexReader, taxoReader),
new SamplingAccumulator(sampler, fsp, indexReader, taxoReader),
new AdaptiveFacetsAccumulator(fsp, indexReader, taxoReader),
new SamplingWrapper(new StandardFacetsAccumulator(fsp, indexReader, taxoReader), sampler)
new SamplingWrapper(new OldFacetsAccumulator(fsp, indexReader, taxoReader), sampler)
};
for (TaxonomyFacetsAccumulator fa : accumulators) {

View File

@ -11,6 +11,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.facet.FacetTestCase;
import org.apache.lucene.facet.index.FacetFields;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.facet.search.FacetRequest.ResultMode;
@ -150,8 +151,8 @@ public class TestTopKInEachNodeResultHandler extends FacetTestCase {
FacetSearchParams facetSearchParams = new FacetSearchParams(iParams, facetRequests);
FacetArrays facetArrays = new FacetArrays(PartitionsUtils.partitionSize(facetSearchParams.indexingParams, tr));
StandardFacetsAccumulator sfa = new StandardFacetsAccumulator(facetSearchParams, is.getIndexReader(), tr, facetArrays);
sfa.setComplementThreshold(StandardFacetsAccumulator.DISABLE_COMPLEMENT);
OldFacetsAccumulator sfa = new OldFacetsAccumulator(facetSearchParams, is.getIndexReader(), tr, facetArrays);
sfa.setComplementThreshold(OldFacetsAccumulator.DISABLE_COMPLEMENT);
FacetsCollector fc = FacetsCollector.create(sfa);
is.search(q, fc);

View File

@ -4,6 +4,7 @@ import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import org.apache.lucene.facet.old.OldFacetsAccumulator;
import org.apache.lucene.facet.params.FacetIndexingParams;
import org.apache.lucene.facet.params.FacetSearchParams;
import org.apache.lucene.search.MatchAllDocsQuery;
@ -33,8 +34,8 @@ public class TestTopKResultsHandlerRandom extends BaseTestTopK {
throws IOException {
Query q = new MatchAllDocsQuery();
FacetSearchParams facetSearchParams = searchParamsWithRequests(numResults, fip);
StandardFacetsAccumulator sfa = new StandardFacetsAccumulator(facetSearchParams, indexReader, taxoReader);
sfa.setComplementThreshold(doComplement ? StandardFacetsAccumulator.FORCE_COMPLEMENT : StandardFacetsAccumulator.DISABLE_COMPLEMENT);
OldFacetsAccumulator sfa = new OldFacetsAccumulator(facetSearchParams, indexReader, taxoReader);
sfa.setComplementThreshold(doComplement ? OldFacetsAccumulator.FORCE_COMPLEMENT : OldFacetsAccumulator.DISABLE_COMPLEMENT);
FacetsCollector fc = FacetsCollector.create(sfa);
searcher.search(q, fc);
List<FacetResult> facetResults = fc.getFacetResults();