SOLR-12337: Remove the obsolete QueryWrapperFilter

David Smiley 2018-06-01 17:12:21 -04:00
parent 1d33130fcb
commit 4888adf08a
8 changed files with 57 additions and 413 deletions

solr/CHANGES.txt

@@ -321,6 +321,10 @@ Optimizations
 * SOLR-12366: A slow "live docs" implementation was being used instead of a bitset. Affects classic faceting
   enum method, JSON Facets enum method, UnInvertedField faceting, GraphTermsQParser, JoinQParser. (David Smiley)
 
+* SOLR-12337: Remove the obsolete QueryWrapperFilter intermediate wrapper, which also removed some needless uses of
+  SolrConstantScoreQuery as well. QWF since v5.4.0 sometimes needlessly internally executed and cached the query.
+  Affects ExpandComponent, ChildDocTransformer, CurrencyFieldType, TermsQParser. (David Smiley)
+
 Other Changes
 ----------------------
 
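
The essence of the change, as a minimal before/after sketch (the class and method names below are illustrative, not from the patch): QueryWrapperFilter turned a Query into a Filter, and SolrConstantScoreQuery turned the Filter back into a constant-scoring Query; queries like TermInSetQuery already score every match with the same constant, so both wrappers can be dropped.

import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.util.BytesRef;

class QwfRemovalSketch {
  // Before: return new SolrConstantScoreQuery(new QueryWrapperFilter(
  //     new TermInSetQuery(fname, bytesRefs)));
  // After: the TermInSetQuery is returned directly; it is already constant-scoring,
  // and no per-segment QueryWrapperFilter execution/caching is involved.
  static Query makeSetQuery(String fname, BytesRef[] bytesRefs) {
    return new TermInSetQuery(fname, bytesRefs);
  }
}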

AbstractAnalyticsFieldTest.java

@@ -27,14 +27,10 @@ import java.util.Set;
 import java.util.function.Predicate;
 
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.DocIdSet;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.util.Bits;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.analytics.ExpressionFactory;
 import org.apache.solr.schema.IndexSchema;
-import org.apache.solr.search.Filter;
-import org.apache.solr.search.QueryWrapperFilter;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.util.RefCounted;
 import org.junit.AfterClass;
@@ -266,31 +262,24 @@ public class AbstractAnalyticsFieldTest extends SolrTestCaseJ4 {
   protected Set<String> collectFieldValues(AnalyticsField testField, Predicate<String> valuesFiller) throws IOException {
     StringField idField = new StringField("id");
-    Filter filter = new QueryWrapperFilter(new MatchAllDocsQuery());
     Set<String> missing = new HashSet<>();
 
     List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
-    for (int leafNum = 0; leafNum < contexts.size(); leafNum++) {
-      LeafReaderContext context = contexts.get(leafNum);
-      DocIdSet dis = filter.getDocIdSet(context, null); // solr docsets already exclude any deleted docs
-      if (dis == null) {
-        continue;
-      }
-      DocIdSetIterator disi = dis.iterator();
-      if (disi != null) {
-        testField.doSetNextReader(context);
-        idField.doSetNextReader(context);
-        int doc = disi.nextDoc();
-        while( doc != DocIdSetIterator.NO_MORE_DOCS){
-          // Add a document to the statistics being generated
-          testField.collect(doc);
-          idField.collect(doc);
-          doc = disi.nextDoc();
-          String id = idField.getString();
-          if (!valuesFiller.test(id)) {
-            missing.add(id);
-          }
-        }
-      }
+    for (LeafReaderContext context : contexts) {
+      testField.doSetNextReader(context);
+      idField.doSetNextReader(context);
+      Bits liveDocs = context.reader().getLiveDocs();
+      for (int doc = 0; doc < context.reader().maxDoc(); doc++) {
+        if (liveDocs != null && !liveDocs.get(doc)) {
+          continue;
+        }
+        // Add a document to the statistics being generated
+        testField.collect(doc);
+        idField.collect(doc);
+        String id = idField.getString();
+        if (!valuesFiller.test(id)) {
+          missing.add(id);
+        }
+      }
     }

ExpandComponent.java

@@ -24,6 +24,15 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.IntObjectHashMap;
+import com.carrotsearch.hppc.LongHashSet;
+import com.carrotsearch.hppc.LongObjectHashMap;
+import com.carrotsearch.hppc.LongObjectMap;
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
+import com.carrotsearch.hppc.cursors.LongCursor;
+import com.carrotsearch.hppc.cursors.LongObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
@@ -73,24 +82,12 @@ import org.apache.solr.search.DocIterator;
 import org.apache.solr.search.DocList;
 import org.apache.solr.search.DocSlice;
 import org.apache.solr.search.QParser;
-import org.apache.solr.search.QueryWrapperFilter;
-import org.apache.solr.search.SolrConstantScoreQuery;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.search.SortSpecParsing;
 import org.apache.solr.uninverting.UninvertingReader;
 import org.apache.solr.util.plugin.PluginInfoInitialized;
 import org.apache.solr.util.plugin.SolrCoreAware;
-
-import com.carrotsearch.hppc.IntHashSet;
-import com.carrotsearch.hppc.IntObjectHashMap;
-import com.carrotsearch.hppc.LongHashSet;
-import com.carrotsearch.hppc.LongObjectHashMap;
-import com.carrotsearch.hppc.LongObjectMap;
-import com.carrotsearch.hppc.cursors.IntObjectCursor;
-import com.carrotsearch.hppc.cursors.LongCursor;
-import com.carrotsearch.hppc.cursors.LongObjectCursor;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 
 /**
  * The ExpandComponent is designed to work with the CollapsingPostFilter.
  * The CollapsingPostFilter collapses a result set on a field.
@@ -705,7 +702,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
       bytesRefs[++index] = term.toBytesRef();
     }
 
-    return new SolrConstantScoreQuery(new QueryWrapperFilter(new TermInSetQuery(fname, bytesRefs)));
+    return new TermInSetQuery(fname, bytesRefs);
   }
 
   private Query getPointGroupQuery(SchemaField sf,
@@ -720,7 +717,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
       values.add(numericToString(ft, cursor.value));
     }
 
-    return new SolrConstantScoreQuery(new QueryWrapperFilter(sf.getType().getSetQuery(null, sf, values)));
+    return sf.getType().getSetQuery(null, sf, values);
   }
 
   private String numericToString(FieldType fieldType, long val) {
@@ -750,7 +747,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
       IntObjectCursor<BytesRef> cursor = it.next();
       bytesRefs[++index] = cursor.value;
     }
 
-    return new SolrConstantScoreQuery(new QueryWrapperFilter(new TermInSetQuery(fname, bytesRefs)));
+    return new TermInSetQuery(fname, bytesRefs);
   }

ChildDocTransformerFactory.java

@@ -27,8 +27,8 @@ import org.apache.lucene.search.join.BitSetProducer;
 import org.apache.lucene.search.join.QueryBitSetProducer;
 import org.apache.lucene.search.join.ToChildBlockJoinQuery;
 import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.DocsStreamer;
@@ -38,10 +38,9 @@ import org.apache.solr.schema.SchemaField;
 import org.apache.solr.search.DocIterator;
 import org.apache.solr.search.DocList;
 import org.apache.solr.search.QParser;
-import org.apache.solr.search.QueryWrapperFilter;
-import org.apache.solr.search.SyntaxError;
 import org.apache.solr.search.SolrDocumentFetcher;
 import org.apache.solr.search.SolrReturnFields;
+import org.apache.solr.search.SyntaxError;
 
 /**
  *
@@ -81,7 +80,11 @@ public class ChildDocTransformerFactory extends TransformerFactory {
     BitSetProducer parentsFilter = null;
     try {
       Query parentFilterQuery = QParser.getParser( parentFilter, req).getQuery();
-      parentsFilter = new QueryBitSetProducer(new QueryWrapperFilter(parentFilterQuery));
+      //TODO shouldn't we try to use the Solr filter cache, and then ideally implement
+      //  BitSetProducer over that?
+      //  DocSet parentDocSet = req.getSearcher().getDocSet(parentFilterQuery);
+      //  then return BitSetProducer with custom BitSet impl accessing the docSet
+      parentsFilter = new QueryBitSetProducer(parentFilterQuery);
     } catch (SyntaxError syntaxError) {
       throw new SolrException( ErrorCode.BAD_REQUEST, "Failed to create correct parent filter query" );
     }
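
A note on the hunk above: QueryBitSetProducer accepts any Query directly, so the QueryWrapperFilter indirection added nothing here. A minimal sketch of the resulting pattern (the marker field name and class are hypothetical, not from the patch):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.search.join.QueryBitSetProducer;

class ParentFilterSketch {
  // Builds the parent-document bitset used for block-join traversal;
  // QueryBitSetProducer caches one BitSet per leaf reader.
  static BitSetProducer parentsFilter() {
    return new QueryBitSetProducer(new TermQuery(new Term("isParent", "true")));
  }
}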

CurrencyFieldType.java

@@ -33,16 +33,14 @@ import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
-import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.response.TextResponseWriter;
-import org.apache.solr.search.Filter;
 import org.apache.solr.search.QParser;
-import org.apache.solr.search.QueryWrapperFilter;
-import org.apache.solr.search.SolrConstantScoreQuery;
 import org.apache.solr.search.function.ValueSourceRangeFilter;
 import org.apache.solr.uninverting.UninvertingReader.Type;
 import org.slf4j.Logger;
@@ -337,17 +335,15 @@ public class CurrencyFieldType extends FieldType implements SchemaAware, Resourc
         (p2 != null) ? p2.getCurrencyCode() : defaultCurrency;
 
     // ValueSourceRangeFilter doesn't check exists(), so we have to
-    final Filter docsWithValues = new QueryWrapperFilter(new DocValuesFieldExistsQuery(getAmountField(field).getName()));
-    final Filter vsRangeFilter = new ValueSourceRangeFilter
+    final Query docsWithValues = new DocValuesFieldExistsQuery(getAmountField(field).getName());
+    final Query vsRangeFilter = new ValueSourceRangeFilter
         (new RawCurrencyValueSource(field, currencyCode, parser),
          p1 == null ? null : p1.getAmount() + "",
          p2 == null ? null : p2.getAmount() + "",
          minInclusive, maxInclusive);
-    final BooleanQuery.Builder docsInRange = new BooleanQuery.Builder();
-    docsInRange.add(docsWithValues, Occur.FILTER);
-    docsInRange.add(vsRangeFilter, Occur.FILTER);
-
-    return new SolrConstantScoreQuery(new QueryWrapperFilter(docsInRange.build()));
+    return new ConstantScoreQuery(new BooleanQuery.Builder()
+        .add(docsWithValues, Occur.FILTER)
+        .add(vsRangeFilter, Occur.FILTER).build());
   }
 
   @Override
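
The replacement pattern above in standalone form, for reference: FILTER clauses match documents without contributing to the score, and the outer ConstantScoreQuery pins the score of every hit. A sketch with a hypothetical docValues field name and an arbitrary inner range query:

import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.Query;

class ConstantScoreConjunctionSketch {
  // Matches documents that have the docValues field AND satisfy the range query,
  // with no scoring work in either clause.
  static Query existsAndInRange(String dvField, Query rangeQuery) {
    return new ConstantScoreQuery(new BooleanQuery.Builder()
        .add(new DocValuesFieldExistsQuery(dvField), Occur.FILTER)
        .add(rangeQuery, Occur.FILTER)
        .build());
  }
}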

QueryWrapperFilter.java

@@ -1,106 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
/**
* Constrains search results to only match those which also match a provided
* query.
*
* <p> This could be used, for example, with a {@link org.apache.solr.legacy.LegacyNumericRangeQuery} on a suitably
* formatted date field to implement date filtering. One could re-use a single
* CachingWrapperFilter(QueryWrapperFilter) that matches, e.g., only documents modified
* within the last week. This would only need to be reconstructed once per day.
*/
public class QueryWrapperFilter extends Filter {
private final Query query;
/** Constructs a filter which only matches documents matching
* <code>query</code>.
*/
public QueryWrapperFilter(Query query) {
if (query == null)
throw new NullPointerException("Query may not be null");
this.query = query;
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
return new BoostQuery(new ConstantScoreQuery(query), 0f);
}
/** returns the inner Query */
public final Query getQuery() {
return query;
}
@Override
public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) throws IOException {
// get a private context that is used to rewrite, createWeight and score eventually
final LeafReaderContext privateContext = context.reader().getContext();
final IndexSearcher searcher = new IndexSearcher(privateContext);
final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
DocIdSet set = new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
Scorer scorer = weight.scorer(privateContext);
return scorer == null ? null : scorer.iterator();
}
@Override
public long ramBytesUsed() {
return 0L;
}
};
return BitsFilteredDocIdSet.wrap(set, acceptDocs);
}
@Override
public String toString(String field) {
return "QueryWrapperFilter(" + query.toString(field) + ")";
}
@Override
public boolean equals(Object o) {
return sameClassAs(o) &&
equalsTo(getClass().cast(o));
}
private boolean equalsTo(QueryWrapperFilter other) {
return query.equals(other.query);
}
@Override
public int hashCode() {
return query.hashCode();
}
}

TermsQParserPlugin.java

@@ -24,6 +24,7 @@ import org.apache.lucene.index.Term;
 import org.apache.lucene.search.AutomatonQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocValuesTermsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
@@ -61,35 +62,35 @@ public class TermsQParserPlugin extends QParserPlugin {
   private static enum Method {
     termsFilter {
       @Override
-      Filter makeFilter(String fname, BytesRef[] bytesRefs) {
-        return new QueryWrapperFilter(new TermInSetQuery(fname, bytesRefs));
+      Query makeFilter(String fname, BytesRef[] bytesRefs) {
+        return new TermInSetQuery(fname, bytesRefs);// constant scores
       }
     },
     booleanQuery {
       @Override
-      Filter makeFilter(String fname, BytesRef[] byteRefs) {
+      Query makeFilter(String fname, BytesRef[] byteRefs) {
         BooleanQuery.Builder bq = new BooleanQuery.Builder();
         for (BytesRef byteRef : byteRefs) {
           bq.add(new TermQuery(new Term(fname, byteRef)), BooleanClause.Occur.SHOULD);
         }
-        return new QueryWrapperFilter(bq.build());
+        return new ConstantScoreQuery(bq.build());
       }
     },
     automaton {
       @Override
-      Filter makeFilter(String fname, BytesRef[] byteRefs) {
+      Query makeFilter(String fname, BytesRef[] byteRefs) {
         Automaton union = Automata.makeStringUnion(Arrays.asList(byteRefs));
-        return new QueryWrapperFilter(new AutomatonQuery(new Term(fname), union));
+        return new AutomatonQuery(new Term(fname), union);//constant scores
       }
     },
     docValuesTermsFilter {//on 4x this is FieldCacheTermsFilter but we use the 5x name any way
       @Override
-      Filter makeFilter(String fname, BytesRef[] byteRefs) {
-        return new QueryWrapperFilter(new DocValuesTermsQuery(fname, byteRefs));
+      Query makeFilter(String fname, BytesRef[] byteRefs) {
+        return new DocValuesTermsQuery(fname, byteRefs);//constant scores
       }
     };
 
-    abstract Filter makeFilter(String fname, BytesRef[] byteRefs);
+    abstract Query makeFilter(String fname, BytesRef[] byteRefs);
   }
 
   @Override
@@ -103,6 +104,7 @@ public class TermsQParserPlugin extends QParserPlugin {
         String qstr = localParams.get(QueryParsing.V);//never null
         Method method = Method.valueOf(localParams.get(METHOD, Method.termsFilter.name()));
         //TODO pick the default method based on various heuristics from benchmarks
+        //TODO pick the default using FieldType.getSetQuery
 
         //if space then split on all whitespace & trim, otherwise strictly interpret
         final boolean sepIsSpace = separator.equals(" ");
@@ -134,7 +136,7 @@ public class TermsQParserPlugin extends QParserPlugin {
           bytesRefs[i] = term.toBytesRef();
         }
 
-        return new SolrConstantScoreQuery(method.makeFilter(fname, bytesRefs));
+        return method.makeFilter(fname, bytesRefs);
       }
     };
   }

TestQueryWrapperFilter.java

@@ -1,241 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
import java.io.IOException;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomApproximationQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
public class TestQueryWrapperFilter extends LuceneTestCase {
// a filter for which other queries don't have special rewrite rules
private static class FilterWrapper extends Filter {
final Filter in;
FilterWrapper(Filter in) {
this.in = in;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
return in.getDocIdSet(context, acceptDocs);
}
@Override
public String toString(String field) {
return in.toString(field);
}
@Override
public boolean equals(Object other) {
return sameClassAs(other) &&
Objects.equals(in, getClass().cast(other).in);
}
@Override
public int hashCode() {
return 31 * classHash() + in.hashCode();
}
}
public void testBasic() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(newTextField("field", "value", Field.Store.NO));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
TermQuery termQuery = new TermQuery(new Term("field", "value"));
// should not throw exception with primitive query
QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery);
IndexSearcher searcher = newSearcher(reader);
TopDocs hits = searcher.search(qwf, 10);
assertEquals(1, hits.totalHits);
hits = searcher.search(new FilterWrapper(qwf), 10);
assertEquals(1, hits.totalHits);
// should not throw exception with complex primitive query
BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
booleanQuery.add(termQuery, Occur.MUST);
booleanQuery.add(new TermQuery(new Term("field", "missing")),
Occur.MUST_NOT);
qwf = new QueryWrapperFilter(termQuery);
hits = searcher.search(qwf, 10);
assertEquals(1, hits.totalHits);
hits = searcher.search(new FilterWrapper(qwf), 10);
assertEquals(1, hits.totalHits);
// should not throw exception with non primitive Query (doesn't implement
// Query#createWeight)
qwf = new QueryWrapperFilter(new FuzzyQuery(new Term("field", "valu")));
hits = searcher.search(qwf, 10);
assertEquals(1, hits.totalHits);
hits = searcher.search(new FilterWrapper(qwf), 10);
assertEquals(1, hits.totalHits);
// test a query with no hits
termQuery = new TermQuery(new Term("field", "not_exist"));
qwf = new QueryWrapperFilter(termQuery);
hits = searcher.search(qwf, 10);
assertEquals(0, hits.totalHits);
hits = searcher.search(new FilterWrapper(qwf), 10);
assertEquals(0, hits.totalHits);
reader.close();
dir.close();
}
public void testRandom() throws Exception {
final Directory d = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random(), d);
w.w.getConfig().setMaxBufferedDocs(17);
final int numDocs = atLeast(100);
final Set<String> aDocs = new HashSet<>();
for(int i=0;i<numDocs;i++) {
final Document doc = new Document();
final String v;
if (random().nextInt(5) == 4) {
v = "a";
aDocs.add(""+i);
} else {
v = "b";
}
final Field f = newStringField("field", v, Field.Store.NO);
doc.add(f);
doc.add(newStringField("id", ""+i, Field.Store.YES));
w.addDocument(doc);
}
final int numDelDocs = atLeast(10);
for(int i=0;i<numDelDocs;i++) {
final String delID = ""+random().nextInt(numDocs);
w.deleteDocuments(new Term("id", delID));
aDocs.remove(delID);
}
final IndexReader r = w.getReader();
w.close();
final TopDocs hits = newSearcher(r).search(new QueryWrapperFilter(new TermQuery(new Term("field", "a"))),
numDocs);
assertEquals(aDocs.size(), hits.totalHits);
for(ScoreDoc sd: hits.scoreDocs) {
assertTrue(aDocs.contains(r.document(sd.doc).get("id")));
}
r.close();
d.close();
}
public void testThousandDocuments() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
doc.add(newStringField("field", English.intToEnglish(i), Field.Store.NO));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = newSearcher(reader);
for (int i = 0; i < 1000; i++) {
TermQuery termQuery = new TermQuery(new Term("field", English.intToEnglish(i)));
QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery);
TopDocs td = searcher.search(qwf, 10);
assertEquals(1, td.totalHits);
}
reader.close();
dir.close();
}
public void testScore() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new StringField("foo", "bar", Store.NO));
writer.addDocument(doc);
writer.commit();
final IndexReader reader = writer.getReader();
writer.close();
final IndexSearcher searcher = new IndexSearcher(reader);
final Query query = new QueryWrapperFilter(new TermQuery(new Term("foo", "bar")));
final TopDocs topDocs = searcher.search(query, 1);
assertEquals(1, topDocs.totalHits);
assertEquals(0f, topDocs.scoreDocs[0].score, 0f);
reader.close();
dir.close();
}
public void testQueryWrapperFilterPropagatesApproximations() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new StringField("foo", "bar", Store.NO));
writer.addDocument(doc);
writer.commit();
final IndexReader reader = writer.getReader();
writer.close();
final IndexSearcher searcher = new IndexSearcher(reader);
searcher.setQueryCache(null); // to still have approximations
final Query query = new QueryWrapperFilter(new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()));
final Weight weight = searcher.createWeight(searcher.rewrite(query), RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertNotNull(scorer.twoPhaseIterator());
reader.close();
dir.close();
}
public void testBasics() {
org.apache.lucene.search.QueryUtils.check(new QueryWrapperFilter(new TermQuery(new Term("foo", "bar"))));
}
}