SOLR-1111: also use lucene filters for generating docsets

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/trunk@779403 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Yonik Seeley 2009-05-28 02:23:05 +00:00
parent d54f4e703a
commit 6bccd58110
1 changed file with 11 additions and 25 deletions

View File

@@ -33,6 +33,7 @@ import org.apache.solr.core.SolrInfoMBean;
import org.apache.solr.schema.IndexSchema; import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField; import org.apache.solr.schema.SchemaField;
import org.apache.solr.request.UnInvertedField; import org.apache.solr.request.UnInvertedField;
import org.apache.solr.search.function.BoostedQuery;
import org.apache.lucene.util.OpenBitSet; import org.apache.lucene.util.OpenBitSet;
import java.io.IOException; import java.io.IOException;
@@ -619,8 +620,9 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean {
// query must be positive // query must be positive
protected DocSet getDocSetNC(Query query, DocSet filter) throws IOException { protected DocSet getDocSetNC(Query query, DocSet filter) throws IOException {
DocSetCollector collector = new DocSetCollector(maxDoc()>>6, maxDoc());
if (filter==null) { if (filter==null) {
DocSetCollector hc = new DocSetCollector(maxDoc()>>6, maxDoc());
if (query instanceof TermQuery) { if (query instanceof TermQuery) {
Term t = ((TermQuery)query).getTerm(); Term t = ((TermQuery)query).getTerm();
SolrIndexReader[] readers = reader.getLeafReaders(); SolrIndexReader[] readers = reader.getLeafReaders();
@@ -630,42 +632,26 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean {
for (int i=0; i<readers.length; i++) { for (int i=0; i<readers.length; i++) {
SolrIndexReader sir = readers[i]; SolrIndexReader sir = readers[i];
int offset = offsets[i]; int offset = offsets[i];
hc.setNextReader(sir, offset); collector.setNextReader(sir, offset);
TermDocs tdocs = sir.termDocs(t); TermDocs tdocs = sir.termDocs(t);
for(;;) { for(;;) {
int num = tdocs.read(arr, freq); int num = tdocs.read(arr, freq);
if (num==0) break; if (num==0) break;
for (int j=0; j<num; j++) { for (int j=0; j<num; j++) {
hc.collect(arr[j]); collector.collect(arr[j]);
} }
} }
tdocs.close(); tdocs.close();
} }
} else { } else {
super.search(query,null,hc); super.search(query,null,collector);
} }
return hc.getDocSet(); return collector.getDocSet();
} else { } else {
// FUTURE: if the filter is sorted by docid, could use skipTo (SkipQueryFilter) Filter luceneFilter = filter.getTopFilter();
final DocSetCollector hc = new DocSetCollector(maxDoc()>>6, maxDoc()); super.search(query, luceneFilter, collector);
final DocSet filt = filter; return collector.getDocSet();
super.search(query, null, new Collector() {
int base = 0;
public void collect(int doc) throws IOException {
doc += base;
if (filt.exists(doc)) hc.collect(doc);
}
public void setNextReader(IndexReader reader, int docBase) throws IOException {
this.base = docBase;
}
public void setScorer(Scorer scorer) throws IOException {
}
}
);
return hc.getDocSet();
} }
} }