mirror of https://github.com/apache/lucene.git
LUCENE-1818: QueryWrapperFilter using createWeight rather than weight
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@805185 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
4184f368ff
commit
a77168d3c9
|
@ -61,7 +61,8 @@ Changes in backwards compatibility policy
|
||||||
Going forward Searchable will be kept for convenience only and may
|
Going forward Searchable will be kept for convenience only and may
|
||||||
be changed between minor releases without any deprecation
|
be changed between minor releases without any deprecation
|
||||||
process. It is not recommended that you implement it, but rather extend
|
process. It is not recommended that you implement it, but rather extend
|
||||||
Searcher. (Shai Erera, Chris Hostetter, Mark Miller via Mike McCandless)
|
Searcher.
|
||||||
|
(Shai Erera, Chris Hostetter, Martin Ruckli, Mark Miller via Mike McCandless)
|
||||||
|
|
||||||
4. LUCENE-1422, LUCENE-1693: The new TokenStream API (see below) using
|
4. LUCENE-1422, LUCENE-1693: The new TokenStream API (see below) using
|
||||||
Attributes has some backwards breaks in rare cases.
|
Attributes has some backwards breaks in rare cases.
|
||||||
|
@ -320,7 +321,7 @@ API Changes
|
||||||
out of order when used with a Collector that can accept docs out of order.
|
out of order when used with a Collector that can accept docs out of order.
|
||||||
Finally, Weight#explain now also takes the top-level searcher, sub-reader
|
Finally, Weight#explain now also takes the top-level searcher, sub-reader
|
||||||
and sub-docID.
|
and sub-docID.
|
||||||
(Shai Erera, Chris Hostetter, Mark Miller via Mike McCandless)
|
(Shai Erera, Chris Hostetter, Martin Ruckli, Mark Miller via Mike McCandless)
|
||||||
|
|
||||||
25. LUCENE-1466: Changed Tokenizer.input to be a CharStream; added
|
25. LUCENE-1466: Changed Tokenizer.input to be a CharStream; added
|
||||||
CharFilter and MappingCharFilter, which allows chaining & mapping
|
CharFilter and MappingCharFilter, which allows chaining & mapping
|
||||||
|
|
|
@ -69,7 +69,7 @@ public class QueryWrapperFilter extends Filter {
|
||||||
}
|
}
|
||||||
|
|
||||||
public DocIdSet getDocIdSet(final IndexReader reader) throws IOException {
|
public DocIdSet getDocIdSet(final IndexReader reader) throws IOException {
|
||||||
final Weight weight = query.createWeight(new IndexSearcher(reader));
|
final Weight weight = query.weight(new IndexSearcher(reader));
|
||||||
return new DocIdSet() {
|
return new DocIdSet() {
|
||||||
public DocIdSetIterator iterator() throws IOException {
|
public DocIdSetIterator iterator() throws IOException {
|
||||||
return weight.scorer(reader, true, false);
|
return weight.scorer(reader, true, false);
|
||||||
|
|
|
@ -0,0 +1,53 @@
|
||||||
|
package org.apache.lucene.search;
|
||||||
|
|
||||||
|
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||||
|
import org.apache.lucene.document.Document;
|
||||||
|
import org.apache.lucene.document.Field;
|
||||||
|
import org.apache.lucene.document.Field.Index;
|
||||||
|
import org.apache.lucene.document.Field.Store;
|
||||||
|
import org.apache.lucene.index.IndexWriter;
|
||||||
|
import org.apache.lucene.index.Term;
|
||||||
|
import org.apache.lucene.search.BooleanClause.Occur;
|
||||||
|
import org.apache.lucene.store.Directory;
|
||||||
|
import org.apache.lucene.store.RAMDirectory;
|
||||||
|
import org.apache.lucene.util.LuceneTestCase;
|
||||||
|
|
||||||
|
public class TestQueryWrapperFilter extends LuceneTestCase {
|
||||||
|
|
||||||
|
public void testBasic() throws Exception {
|
||||||
|
Directory dir = new RAMDirectory();
|
||||||
|
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true,
|
||||||
|
IndexWriter.MaxFieldLength.LIMITED);
|
||||||
|
Document doc = new Document();
|
||||||
|
doc.add(new Field("field", "value", Store.NO, Index.ANALYZED));
|
||||||
|
writer.addDocument(doc);
|
||||||
|
writer.close();
|
||||||
|
|
||||||
|
TermQuery termQuery = new TermQuery(new Term("field", "value"));
|
||||||
|
|
||||||
|
// should not throw exception with primitive query
|
||||||
|
QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery);
|
||||||
|
|
||||||
|
IndexSearcher searcher = new IndexSearcher(dir, true);
|
||||||
|
TopDocs hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
|
||||||
|
assertEquals(1, hits.totalHits);
|
||||||
|
|
||||||
|
// should not throw exception with non primitive query
|
||||||
|
BooleanQuery booleanQuery = new BooleanQuery();
|
||||||
|
booleanQuery.add(termQuery, Occur.MUST);
|
||||||
|
booleanQuery.add(new TermQuery(new Term("field", "missing")),
|
||||||
|
Occur.MUST_NOT);
|
||||||
|
qwf = new QueryWrapperFilter(termQuery);
|
||||||
|
|
||||||
|
hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
|
||||||
|
assertEquals(1, hits.totalHits);
|
||||||
|
|
||||||
|
// should not throw exception with Query that doesn't implement
|
||||||
|
// Query#createWeight
|
||||||
|
qwf = new QueryWrapperFilter(new FuzzyQuery(new Term("field", "valu")));
|
||||||
|
|
||||||
|
hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
|
||||||
|
assertEquals(1, hits.totalHits);
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
Loading…
Reference in New Issue