SOLR-3960: Fixed a bug where Distributed Grouping ignored PostFilters

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1413975 13f79535-47bb-0310-9956-ffa450edef68
Chris M. Hostetter 2012-11-27 02:54:29 +00:00
parent b2c51ff89a
commit 309312aa7e
3 changed files with 37 additions and 14 deletions

CHANGES.txt

@@ -253,6 +253,9 @@ Bug Fixes
 * SOLR-4099: Allow the collection api work queue to make forward progress even
   when it's watcher is not fired for some reason. (Raintung Li via Mark Miller)
 
+* SOLR-3960: Fixed a bug where Distributed Grouping ignored PostFilters
+  (Nathan Visagan, hossman)
+
 Other Changes
 ----------------------

CommandHandler.java

@@ -23,6 +23,7 @@ import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector;
 import org.apache.lucene.util.OpenBitSet;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.search.*;
+import org.apache.solr.search.SolrIndexSearcher.ProcessedFilter;
 import org.apache.solr.search.QueryUtils;
 import org.apache.solr.search.grouping.distributed.shardresultserializer.ShardResultTransformer;
 import org.slf4j.Logger;
@@ -132,32 +133,30 @@ public class CommandHandler {
       collectors.addAll(command.create());
     }
 
-    SolrIndexSearcher.ProcessedFilter pf = searcher.getProcessedFilter(
-        queryCommand.getFilter(), queryCommand.getFilterList()
-    );
-    Filter luceneFilter = pf.filter;
+    ProcessedFilter filter = searcher.getProcessedFilter
+      (queryCommand.getFilter(), queryCommand.getFilterList());
     Query query = QueryUtils.makeQueryable(queryCommand.getQuery());
 
     if (truncateGroups) {
-      docSet = computeGroupedDocSet(query, luceneFilter, collectors);
+      docSet = computeGroupedDocSet(query, filter, collectors);
     } else if (needDocset) {
-      docSet = computeDocSet(query, luceneFilter, collectors);
+      docSet = computeDocSet(query, filter, collectors);
    } else if (!collectors.isEmpty()) {
-      searchWithTimeLimiter(query, luceneFilter, MultiCollector.wrap(collectors.toArray(new Collector[nrOfCommands])));
+      searchWithTimeLimiter(query, filter, MultiCollector.wrap(collectors.toArray(new Collector[nrOfCommands])));
     } else {
-      searchWithTimeLimiter(query, luceneFilter, null);
+      searchWithTimeLimiter(query, filter, null);
     }
   }
 
-  private DocSet computeGroupedDocSet(Query query, Filter luceneFilter, List<Collector> collectors) throws IOException {
+  private DocSet computeGroupedDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
     Command firstCommand = commands.get(0);
     AbstractAllGroupHeadsCollector termAllGroupHeadsCollector =
         TermAllGroupHeadsCollector.create(firstCommand.getKey(), firstCommand.getSortWithinGroup());
     if (collectors.isEmpty()) {
-      searchWithTimeLimiter(query, luceneFilter, termAllGroupHeadsCollector);
+      searchWithTimeLimiter(query, filter, termAllGroupHeadsCollector);
     } else {
       collectors.add(termAllGroupHeadsCollector);
-      searchWithTimeLimiter(query, luceneFilter, MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
+      searchWithTimeLimiter(query, filter, MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
     }
 
     int maxDoc = searcher.maxDoc();
@@ -165,7 +164,7 @@ public class CommandHandler {
     return new BitDocSet(new OpenBitSet(bits, bits.length));
   }
 
-  private DocSet computeDocSet(Query query, Filter luceneFilter, List<Collector> collectors) throws IOException {
+  private DocSet computeDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
     int maxDoc = searcher.maxDoc();
     DocSetCollector docSetCollector;
     if (collectors.isEmpty()) {
@@ -174,7 +173,7 @@ public class CommandHandler {
       Collector wrappedCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
       docSetCollector = new DocSetDelegateCollector(maxDoc >> 6, maxDoc, wrappedCollectors);
     }
-    searchWithTimeLimiter(query, luceneFilter, docSetCollector);
+    searchWithTimeLimiter(query, filter, docSetCollector);
 
     return docSetCollector.getDocSet();
   }
@@ -191,7 +190,9 @@ public class CommandHandler {
    * Invokes search with the specified filter and collector.
    * If a time limit has been specified then wrap the collector in the TimeLimitingCollector
    */
-  private void searchWithTimeLimiter(final Query query, final Filter luceneFilter, Collector collector) throws IOException {
+  private void searchWithTimeLimiter(final Query query,
+                                     final ProcessedFilter filter,
+                                     Collector collector) throws IOException {
     if (queryCommand.getTimeAllowed() > 0 ) {
       collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), queryCommand.getTimeAllowed());
     }
@@ -201,6 +202,12 @@ public class CommandHandler {
       collector = MultiCollector.wrap(collector, hitCountCollector);
     }
 
+    Filter luceneFilter = filter.filter;
+    if (filter.postFilter != null) {
+      filter.postFilter.setLastDelegate(collector);
+      collector = filter.postFilter;
+    }
+
     try {
       searcher.search(query, luceneFilter, collector);
     } catch (TimeLimitingCollector.TimeExceededException x) {
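
Note: the new searchWithTimeLimiter() code only consumes a PostFilter that getProcessedFilter() has already built; the DelegatingCollector it chains in comes from the filter query itself. As a rough, illustrative sketch of that contract (not part of this commit; the class name and the even-docid predicate are made up), a custom PostFilter against the Solr 4.x API looks roughly like this: getFilterCollector() returns a DelegatingCollector, CommandHandler installs the downstream collector via setLastDelegate(), and only documents forwarded through super.collect() ever reach the grouping collectors.

import java.io.IOException;

import org.apache.lucene.search.IndexSearcher;
import org.apache.solr.search.DelegatingCollector;
import org.apache.solr.search.ExtendedQueryBase;
import org.apache.solr.search.PostFilter;

// Illustrative example only: a PostFilter that lets only even (per-segment)
// docids through to whatever collector was installed via setLastDelegate().
public class EvenDocIdPostFilter extends ExtendedQueryBase implements PostFilter {

  public EvenDocIdPostFilter() {
    setCache(false); // post filters are never cached
    setCost(200);    // cost >= 100 selects the post-filtering code path
  }

  @Override
  public DelegatingCollector getFilterCollector(IndexSearcher searcher) {
    return new DelegatingCollector() {
      @Override
      public void collect(int doc) throws IOException {
        if ((doc & 1) == 0) {
          super.collect(doc); // forward to the delegate collector chain
        }
      }
    };
  }
}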

TestDistributedGrouping.java

@@ -179,6 +179,19 @@ public class TestDistributedGrouping extends BaseDistributedSearchTestCase {
     query("q", "*:*", "fq", s1 + ":a", "fl", "id," + i1, "group", "true", "group.field", i1, "sort", i1 + " asc, id asc", "group.ngroups", "true");
     query("q", "*:*", "fq", s1 + ":a", "rows", 0, "fl", "id," + i1, "group", "true", "group.field", i1, "sort", i1 + " asc, id asc", "group.ngroups", "true");
 
+    // SOLR-3960 - include a postfilter
+    for (String facet : new String[] { "false", "true"}) {
+      for (String fcache : new String[] { "", " cache=false cost=200"}) {
+        query("q", "*:*", "rows", 100, "fl", "id," + i1,
+              "group.limit", 10, "sort", i1 + " asc, id asc",
+              "group", "true", "group.field", i1,
+              "fq", "{!frange l=50 "+fcache+"}"+tlong,
+              "facet.field", t1,
+              "facet", facet
+              );
+      }
+    }
+
     ModifiableSolrParams params = new ModifiableSolrParams();
     Object[] q = {"q", "*:*", "fq", s1 + ":a", "rows", 1, "fl", "id," + i1, "group", "true", "group.field", i1, "group.limit", 10, "group.ngroups", "true"};
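
For reference, the request shape the new test exercises can also be issued from SolrJ. A minimal sketch against a hypothetical Solr 4.x core (the URL and the popularity / manu_id_s field names are placeholders, not taken from this commit): the frange filter runs as a PostFilter because it is uncached with cost >= 100, which is exactly the kind of filter a grouped distributed request used to drop before SOLR-3960.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;

public class GroupingPostFilterQueryExample {
  public static void main(String[] args) throws Exception {
    HttpSolrServer solr = new HttpSolrServer("http://localhost:8983/solr/collection1");

    SolrQuery q = new SolrQuery("*:*");
    q.setRows(100);
    // cache=false and cost >= 100 make the frange execute as a PostFilter.
    q.addFilterQuery("{!frange l=50 cache=false cost=200}popularity");
    q.set("group", true);
    q.set("group.field", "manu_id_s");
    q.set("group.limit", 10);

    QueryResponse rsp = solr.query(q);
    System.out.println(rsp.getGroupResponse().getValues());

    solr.shutdown();
  }
}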