LUCENE-4903: Add AssertingScorer.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1466709 13f79535-47bb-0310-9956-ffa450edef68
Adrien Grand 2013-04-10 22:13:13 +00:00
parent 6e24f5adf4
commit 24376b1bd7
49 changed files with 486 additions and 205 deletions
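The wrapper this commit adds is not visible in the extracted hunks; only its use shows up below (org.apache.lucene.search.AssertingIndexSearcher.AssertingScorer and its getIn() accessor in TestBooleanScorer). As a rough, hypothetical sketch of that kind of delegating scorer — assuming nothing about the real class beyond getIn() and the Lucene 4.x Scorer API — it might look like:

import java.io.IOException;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

// Hypothetical sketch only; the actual AssertingScorer added by this commit
// lives inside AssertingIndexSearcher and may check more than shown here.
class DelegatingAssertingScorer extends Scorer {
  private final Scorer in;

  DelegatingAssertingScorer(Weight weight, Scorer in) {
    super(weight);
    this.in = in;
  }

  // Tests unwrap the delegate via getIn(), as TestBooleanScorer does below.
  Scorer getIn() {
    return in;
  }

  @Override
  public float score() throws IOException {
    assert docID() != -1 && docID() != NO_MORE_DOCS : "score() called while unpositioned";
    return in.score();
  }

  @Override
  public int freq() throws IOException {
    return in.freq();
  }

  @Override
  public int docID() {
    return in.docID();
  }

  @Override
  public int nextDoc() throws IOException {
    assert in.docID() != NO_MORE_DOCS : "nextDoc() called after the iterator was exhausted";
    return in.nextDoc();
  }

  @Override
  public int advance(int target) throws IOException {
    assert target > in.docID() : "advance() target must be beyond the current doc";
    return in.advance(target);
  }

  @Override
  public long cost() {
    return in.cost();
  }
}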

View File

@ -240,11 +240,11 @@ public class FilteredQuery extends Query {
// optimization: we are topScorer and collect directly using short-circuited algo
@Override
public final void score(Collector collector) throws IOException {
int primDoc = primaryNext();
int secDoc = secondary.advance(primDoc);
// the normalization trick already applies the boost of this query,
// so we can use the wrapped scorer directly:
collector.setScorer(scorer);
int primDoc = primaryNext();
int secDoc = secondary.advance(primDoc);
for (;;) {
if (primDoc == secDoc) {
// Check if the scorer is exhausted, only before collecting.

View File

@ -58,6 +58,7 @@ public abstract class Scorer extends DocsEnum {
* @param collector The collector to which all matching documents are passed.
*/
public void score(Collector collector) throws IOException {
assert docID() == -1 || docID() == NO_MORE_DOCS; // not started
collector.setScorer(this);
int doc;
while ((doc = nextDoc()) != NO_MORE_DOCS) {
@ -80,11 +81,11 @@ public abstract class Scorer extends DocsEnum {
* @return true if more matching documents may remain.
*/
public boolean score(Collector collector, int max, int firstDocID) throws IOException {
assert docID() == firstDocID;
collector.setScorer(this);
int doc = firstDocID;
while (doc < max) {
int doc;
for (doc = firstDocID; doc < max; doc = nextDoc()) {
collector.collect(doc);
doc = nextDoc();
}
return doc != NO_MORE_DOCS;
}
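
The asserts added above pin down the bulk-scoring contract: score(Collector) must be called on a scorer that has not been started, while score(Collector, max, firstDocID) requires the scorer to already be positioned on firstDocID and leaves it on the first doc at or beyond max. A hedged sketch of a caller that respects that contract (scoreInWindows and WINDOW are illustrative names, not part of this commit):

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

class BulkScoreSketch {
  private static final int WINDOW = 2048; // arbitrary chunk size for this sketch

  // Scores one segment in fixed-size windows, positioning the scorer before each
  // call so that the new "docID() == firstDocID" assert holds.
  static void scoreInWindows(Weight weight, AtomicReaderContext context, Collector collector) throws IOException {
    Scorer scorer = weight.scorer(context, true, true, context.reader().getLiveDocs());
    if (scorer == null) {
      return; // no matching documents in this segment
    }
    collector.setNextReader(context);
    int doc = scorer.nextDoc(); // the scorer is now positioned on its first match
    while (doc != DocIdSetIterator.NO_MORE_DOCS) {
      if (!scorer.score(collector, doc + WINDOW, doc)) {
        break; // the scorer reported that it is exhausted
      }
      doc = scorer.docID(); // first doc at or beyond the previous window
    }
  }
}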

View File

@ -57,7 +57,7 @@ public class TestDemo extends LuceneTestCase {
// Now search the index:
IndexReader ireader = DirectoryReader.open(directory); // read-only=true
IndexSearcher isearcher = new IndexSearcher(ireader);
IndexSearcher isearcher = newSearcher(ireader);
assertEquals(1, isearcher.search(new TermQuery(new Term("fieldname", longTerm)), 1).totalHits);
Query query = new TermQuery(new Term("fieldname", "text"));

View File

@ -55,7 +55,7 @@ public class TestSearch extends LuceneTestCase {
IndexReader reader = DirectoryReader.open(directory);
try {
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
@ -136,7 +136,7 @@ public class TestSearch extends LuceneTestCase {
writer.close();
IndexReader reader = DirectoryReader.open(directory);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = null;

View File

@ -90,7 +90,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
// try a search without OR
IndexReader reader = DirectoryReader.open(directory);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
Query query = new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY));
out.println("Query: " + query.toString(PRIORITY_FIELD));
@ -106,7 +106,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
checkHits(hits, MAX_DOCS, searcher);
// try a new search with OR
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = null;
BooleanQuery booleanQuery = new BooleanQuery();

View File

@ -97,7 +97,7 @@ public class TestPerFieldDocValuesFormat extends BaseDocValuesFormatTestCase {
// Now search the index:
IndexReader ireader = DirectoryReader.open(directory); // read-only=true
IndexSearcher isearcher = new IndexSearcher(ireader);
IndexSearcher isearcher = newSearcher(ireader);
assertEquals(1, isearcher.search(new TermQuery(new Term("fieldname", longTerm)), 1).totalHits);
Query query = new TermQuery(new Term("fieldname", "text"));

View File

@ -238,7 +238,7 @@ public class Test2BTerms extends LuceneTestCase {
private void testSavedTerms(IndexReader r, List<BytesRef> terms) throws IOException {
System.out.println("TEST: run " + terms.size() + " terms on reader=" + r);
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
Collections.shuffle(terms);
TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null);
boolean failed = false;

View File

@ -362,7 +362,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
//Query query = parser.parse("handle:1");
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
_TestUtil.checkIndex(dir);
@ -522,7 +522,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// make sure searching sees right # hits
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
StoredDocument d = searcher.getIndexReader().document(hits[0].doc);
assertEquals("wrong first document", "0", d.get("id"));
@ -535,7 +535,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
writer.close();
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 44, hits.length);
d = searcher.doc(hits[0].doc);
@ -547,7 +547,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
public void changeIndexNoAdds(Random random, Directory dir) throws IOException {
// make sure searching sees right # hits
DirectoryReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 34, hits.length);
StoredDocument d = searcher.doc(hits[0].doc);
@ -560,7 +560,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
writer.close();
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 34, hits.length);
doTestHits(hits, 34, searcher.getIndexReader());
@ -852,7 +852,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
Directory dir = oldIndexDirs.get(name);
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
for (int id=10; id<15; id++) {
ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;

View File

@ -689,7 +689,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
assertEquals(3*(N+1)+1, policy.numOnCommit);
IndexReader rwReader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(rwReader);
IndexSearcher searcher = newSearcher(rwReader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(0, hits.length);

View File

@ -60,7 +60,7 @@ public class TestForTooMuchCloning extends LuceneTestCase {
//System.out.println("merge clone count=" + cloneCount);
assertTrue("too many calls to IndexInput.clone during merging: " + dir.getInputCloneCount(), cloneCount < 500);
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = newSearcher(r);
// MTQ that matches all terms so the AUTO_REWRITE should
// cutover to filter rewrite and reuse a single DocsEnum

View File

@ -458,7 +458,7 @@ public class TestIndexWriter extends LuceneTestCase {
writer.close();
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
int totalHits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1).totalHits;
assertEquals(n*100, totalHits);
reader.close();
@ -489,7 +489,7 @@ public class TestIndexWriter extends LuceneTestCase {
Term searchTerm = new Term("field", "aaa");
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
reader.close();
@ -511,7 +511,7 @@ public class TestIndexWriter extends LuceneTestCase {
}
writer.close();
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
reader.close();
@ -589,7 +589,7 @@ public class TestIndexWriter extends LuceneTestCase {
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
reader.close();

View File

@ -51,7 +51,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
Term searchTerm = new Term("content", "aaa");
DirectoryReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
reader.close();
@ -64,7 +64,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
TestIndexWriter.addDoc(writer);
}
IndexReader r = DirectoryReader.open(dir);
searcher = new IndexSearcher(r);
searcher = newSearcher(r);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
r.close();
@ -76,7 +76,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
assertFalse("reader should not be current now", reader.isCurrent());
IndexReader r = DirectoryReader.open(dir);
searcher = new IndexSearcher(r);
searcher = newSearcher(r);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
r.close();
@ -102,7 +102,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
Term searchTerm = new Term("content", "aaa");
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
reader.close();
@ -116,7 +116,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
writer.deleteDocuments(searchTerm);
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
reader.close();
@ -127,7 +127,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
reader.close();
@ -148,7 +148,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
TestIndexWriter.addDoc(writer);
}
IndexReader r = DirectoryReader.open(dir);
searcher = new IndexSearcher(r);
searcher = newSearcher(r);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
r.close();
@ -156,7 +156,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
writer.close();
IndexReader r = DirectoryReader.open(dir);
searcher = new IndexSearcher(r);
searcher = newSearcher(r);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("didn't see changes after close", 218, hits.length);
r.close();

View File

@ -406,7 +406,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
private int getHitCount(Directory dir, Term term) throws IOException {
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
int hitCount = searcher.search(new TermQuery(term), null, 1000).totalHits;
reader.close();
return hitCount;

View File

@ -1394,7 +1394,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
final IndexReader r = w.getReader();
w.close();
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = newSearcher(r);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("content", "silly"));
pq.add(new Term("content", "content"));
@ -1474,7 +1474,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
final IndexReader r = w.getReader();
w.close();
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = newSearcher(r);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("content", "silly"));
pq.add(new Term("content", "content"));

View File

@ -282,7 +282,7 @@ public class TestIndexableField extends LuceneTestCase {
final IndexReader r = w.getReader();
w.close();
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = newSearcher(r);
int counter = 0;
for(int id=0;id<NUM_DOCS;id++) {
if (VERBOSE) {

View File

@ -18,6 +18,7 @@ package org.apache.lucene.index;
*/
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
@ -281,7 +282,7 @@ public class TestOmitTf extends LuceneTestCase {
* Verify the index
*/
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("noTf", term);
@ -299,8 +300,15 @@ public class TestOmitTf extends LuceneTestCase {
try {
searcher.search(pq, 10);
fail("did not hit expected exception");
} catch (IllegalStateException ise) {
// expected
} catch (Exception e) {
Throwable cause = e;
// If the searcher uses an executor service, the IllegalStateException is wrapped into other exceptions
while (cause.getCause() != null) {
cause = cause.getCause();
}
if (!(cause instanceof IllegalStateException)) {
throw new AssertionError("Expected an IllegalStateException", e);
} // else OK because positions are not indexed
}
searcher.search(q1,

View File

@ -112,7 +112,7 @@ public class TestRollingUpdates extends LuceneTestCase {
r = w.getReader(applyDeletions);
if (applyDeletions) {
s = new IndexSearcher(r);
s = newSearcher(r);
} else {
s = null;
}

View File

@ -104,7 +104,7 @@ public class TestStressIndexing extends LuceneTestCase {
public void doWork() throws Throwable {
for (int i=0; i<100; i++) {
IndexReader ir = DirectoryReader.open(directory);
IndexSearcher is = new IndexSearcher(ir);
IndexSearcher is = newSearcher(ir);
ir.close();
}
count += 100;

View File

@ -318,7 +318,7 @@ public class TestStressNRT extends LuceneTestCase {
}
// sreq = req("wt","json", "q","id:"+Integer.toString(id), "omitHeader","true");
IndexSearcher searcher = new IndexSearcher(r);
IndexSearcher searcher = newSearcher(r);
Query q = new TermQuery(new Term("id",Integer.toString(id)));
TopDocs results = searcher.search(q, 10);

View File

@ -164,7 +164,7 @@ public class TestBooleanQuery extends LuceneTestCase {
query.add(wildcardQuery, BooleanClause.Occur.MUST_NOT);
MultiReader multireader = new MultiReader(reader1, reader2);
IndexSearcher searcher = new IndexSearcher(multireader);
IndexSearcher searcher = newSearcher(multireader);
assertEquals(0, searcher.search(query, 10).totalHits);
final ExecutorService es = Executors.newCachedThreadPool(new NamedThreadFactory("NRT search threads"));
@ -307,7 +307,7 @@ public class TestBooleanQuery extends LuceneTestCase {
writer.close();
IndexReader indexReader = DirectoryReader.open(directory);
IndexSearcher searcher = new IndexSearcher(indexReader);
IndexSearcher searcher = newSearcher(indexReader);
BooleanQuery query = new BooleanQuery();
SpanQuery sq1 = new SpanTermQuery(new Term(FIELD, "clockwork"));

View File

@ -61,7 +61,7 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
writer.addDocument(doc("nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override

View File

@ -28,6 +28,7 @@ import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.AssertingIndexSearcher.AssertingScorer;
import org.apache.lucene.search.BooleanQuery.BooleanWeight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@ -152,13 +153,12 @@ public class TestBooleanScorer extends LuceneTestCase
final int[] count = new int[1];
s.search(q, new Collector() {
private Scorer scorer;
@Override
public void setScorer(Scorer scorer) {
// Make sure we got BooleanScorer:
this.scorer = scorer;
assertEquals("Scorer is implemented by wrong class", BooleanScorer.class.getName() + "$BucketScorer", scorer.getClass().getName());
final Class<?> clazz = scorer instanceof AssertingScorer ? ((AssertingScorer) scorer).getIn().getClass() : scorer.getClass();
assertEquals("Scorer is implemented by wrong class", BooleanScorer.class.getName() + "$BucketScorer", clazz.getName());
}
@Override

View File

@ -58,7 +58,7 @@ public class TestConjunctions extends LuceneTestCase {
writer.addDocument(doc("nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
searcher.setSimilarity(new TFSimilarity());
}

View File

@ -95,7 +95,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = newSearcher(reader);
searcher = new IndexSearcher(reader);
// set a similarity that does not normalize our boost away
searcher.setSimilarity(new DefaultSimilarity() {
@ -145,7 +145,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
Filter filterB = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "b"))));
Query query = new ConstantScoreQuery(filterB);
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
assertEquals(1, s.search(query, filterB, 1).totalHits); // Query for field:b, Filter field:b
Filter filterA = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "a"))));

View File

@ -47,7 +47,7 @@ public class TestFieldValueFilter extends LuceneTestCase {
}
IndexReader reader = DirectoryReader.open(directory);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TopDocs search = searcher.search(new TermQuery(new Term("all", "test")),
new FieldValueFilter("some", true), docs);
assertEquals(search.totalHits, numDocsNoValue);
@ -74,7 +74,7 @@ public class TestFieldValueFilter extends LuceneTestCase {
}
}
IndexReader reader = DirectoryReader.open(directory);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TopDocs search = searcher.search(new TermQuery(new Term("all", "test")),
new FieldValueFilter("some"), docs);
assertEquals(search.totalHits, numDocsWithValue);

View File

@ -78,7 +78,7 @@ public class TestFilteredSearch extends LuceneTestCase {
IndexReader reader = DirectoryReader.open(directory);
IndexSearcher indexSearcher = new IndexSearcher(reader);
IndexSearcher indexSearcher = newSearcher(reader);
ScoreDoc[] hits = indexSearcher.search(booleanQuery, filter, 1000).scoreDocs;
assertEquals("Number of matched documents", 1, hits.length);
reader.close();

View File

@ -376,7 +376,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
writer.addDocument(doc);
IndexReader r = writer.getReader();
writer.close();
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
MultiPhraseQuery mpq = new MultiPhraseQuery();
//mpq.setSlop(1);
@ -474,7 +474,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
writer.addDocument(doc);
IndexReader r = DirectoryReader.open(writer,false);
writer.close();
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
if (VERBOSE) {
System.out.println("QUERY=" + q);

View File

@ -39,6 +39,7 @@ import org.apache.lucene.index.ThreadedIndexingAndSearchingTestCase;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NRTCachingDirectory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.ThreadInterruptedException;
@ -402,7 +403,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase {
final SearcherFactory theEvilOne = new SearcherFactory() {
@Override
public IndexSearcher newSearcher(IndexReader ignored) {
return new IndexSearcher(other);
return LuceneTestCase.newSearcher(other);
}
};

View File

@ -327,7 +327,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
writer.close();
IndexReader r = DirectoryReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
Query q=NumericRangeQuery.newIntRange("int", null, null, true, true);
TopDocs topDocs = s.search(q, 10);

View File

@ -352,7 +352,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
writer.close();
IndexReader r = DirectoryReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
Query q=NumericRangeQuery.newLongRange("long", null, null, true, true);
TopDocs topDocs = s.search(q, 10);

View File

@ -113,7 +113,7 @@ public class TestQueryWrapperFilter extends LuceneTestCase {
final IndexReader r = w.getReader();
w.close();
final TopDocs hits = new IndexSearcher(r).search(new MatchAllDocsQuery(),
final TopDocs hits = newSearcher(r).search(new MatchAllDocsQuery(),
new QueryWrapperFilter(new TermQuery(new Term("field", "a"))),
numDocs);
assertEquals(aDocs.size(), hits.totalHits);

View File

@ -57,7 +57,7 @@ public class TestSameScoresWithThreads extends LuceneTestCase {
//System.out.println("numDocs=" + r.numDocs());
w.close();
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = newSearcher(r);
Terms terms = MultiFields.getFields(r).terms("body");
int termCount = 0;
TermsEnum termsEnum = terms.iterator(null);

View File

@ -53,7 +53,7 @@ public class TestScorerPerf extends LuceneTestCase {
iw.addDocument(new Document());
iw.close();
r = DirectoryReader.open(d);
s = new IndexSearcher(r);
s = newSearcher(r);
}
public void createRandomTerms(int nDocs, int nTerms, double power, Directory dir) throws Exception {
@ -366,7 +366,7 @@ public class TestScorerPerf extends LuceneTestCase {
RAMDirectory dir = new RAMDirectory();
if (VERBOSE) System.out.println("Creating index");
createRandomTerms(100000,25,.5, dir);
s = new IndexSearcher(dir, true);
s = newSearcher(dir, true);
if (VERBOSE) System.out.println("Starting performance test");
for (int i=0; i<bigIter; i++) {
long start = System.currentTimeMillis();
@ -383,7 +383,7 @@ public class TestScorerPerf extends LuceneTestCase {
RAMDirectory dir = new RAMDirectory();
if (VERBOSE) System.out.println("Creating index");
createRandomTerms(100000,25,.2, dir);
s = new IndexSearcher(dir, true);
s = newSearcher(dir, true);
if (VERBOSE) System.out.println("Starting performance test");
for (int i=0; i<bigIter; i++) {
long start = System.currentTimeMillis();
@ -401,7 +401,7 @@ public class TestScorerPerf extends LuceneTestCase {
RAMDirectory dir = new RAMDirectory();
if (VERBOSE) System.out.println("Creating index");
createRandomTerms(100000,25,2,dir);
s = new IndexSearcher(dir, true);
s = newSearcher(dir, true);
if (VERBOSE) System.out.println("Starting performance test");
for (int i=0; i<bigIter; i++) {
long start = System.currentTimeMillis();

View File

@ -39,6 +39,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.ThreadedIndexingAndSearchingTestCase;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.NamedThreadFactory;
import org.apache.lucene.util._TestUtil;
@ -361,7 +362,7 @@ public class TestSearcherManager extends ThreadedIndexingAndSearchingTestCase {
final SearcherFactory theEvilOne = new SearcherFactory() {
@Override
public IndexSearcher newSearcher(IndexReader ignored) {
return new IndexSearcher(other);
return LuceneTestCase.newSearcher(other);
}
};

View File

@ -70,7 +70,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -98,7 +98,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -125,7 +125,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -151,7 +151,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -179,7 +179,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -206,7 +206,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -232,7 +232,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(SortField.FIELD_DOC);
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -258,7 +258,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField(null, SortField.Type.DOC, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -284,7 +284,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort();
TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), 10, sort);
@ -314,7 +314,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField(null, SortField.Type.SCORE, true));
TopDocs actual = searcher.search(new TermQuery(new Term("value", "foo")), 10, sort);
@ -346,7 +346,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.BYTE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -375,7 +375,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.BYTE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -404,7 +404,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortField("value", SortField.Type.BYTE);
sortField.setMissingValue(Byte.MAX_VALUE);
Sort sort = new Sort(sortField);
@ -436,7 +436,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.BYTE, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -466,7 +466,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.SHORT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -495,7 +495,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.SHORT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -524,7 +524,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortField("value", SortField.Type.SHORT);
sortField.setMissingValue(Short.MAX_VALUE);
Sort sort = new Sort(sortField);
@ -556,7 +556,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.SHORT, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -586,7 +586,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.INT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -615,7 +615,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.INT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -644,7 +644,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortField("value", SortField.Type.INT);
sortField.setMissingValue(Integer.MAX_VALUE);
Sort sort = new Sort(sortField);
@ -676,7 +676,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -706,7 +706,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -735,7 +735,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -764,7 +764,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortField("value", SortField.Type.LONG);
sortField.setMissingValue(Long.MAX_VALUE);
Sort sort = new Sort(sortField);
@ -796,7 +796,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.LONG, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -826,7 +826,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -855,7 +855,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -884,7 +884,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortField("value", SortField.Type.FLOAT);
sortField.setMissingValue(Float.MAX_VALUE);
Sort sort = new Sort(sortField);
@ -916,7 +916,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -949,7 +949,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -982,7 +982,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -1015,7 +1015,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortField("value", SortField.Type.DOUBLE);
sortField.setMissingValue(Double.MAX_VALUE);
Sort sort = new Sort(sortField);
@ -1051,7 +1051,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -1109,7 +1109,7 @@ public class TestSort extends LuceneTestCase {
SortField.FIELD_DOC);
// this should not throw AIOOBE or RuntimeEx
IndexReader reader = DirectoryReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
searcher.search(new MatchAllDocsQuery(), null, 500, sort);
reader.close();
indexStore.close();
@ -1151,7 +1151,7 @@ public class TestSort extends LuceneTestCase {
/** test sorts when there's nothing in the index */
public void testEmptyIndex() throws Exception {
IndexSearcher empty = new IndexSearcher(new MultiReader());
IndexSearcher empty = newSearcher(new MultiReader());
Query query = new TermQuery(new Term("contents", "foo"));
Sort sort = new Sort();
@ -1453,7 +1453,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -1474,7 +1474,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
TopDocs expected = searcher.search(new TermQuery(new Term("value", "foo")), 10);
@ -1503,7 +1503,7 @@ public class TestSort extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
// tievalue, then value
Sort sort = new Sort(new SortField("tievalue", SortField.Type.STRING),
new SortField("value", SortField.Type.STRING));

View File

@ -64,7 +64,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -93,7 +93,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -122,7 +122,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -151,7 +151,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -180,7 +180,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -209,7 +209,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -242,7 +242,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.BYTE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -276,7 +276,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.BYTE, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -310,7 +310,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.SHORT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -344,7 +344,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.SHORT, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -378,7 +378,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.INT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -412,7 +412,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -446,7 +446,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -480,7 +480,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.LONG, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -514,7 +514,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -548,7 +548,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -586,7 +586,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
@ -625,7 +625,7 @@ public class TestSortDocValues extends LuceneTestCase {
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);

View File

@ -56,21 +56,21 @@ public class TestTermRangeQuery extends LuceneTestCase {
Query query = TermRangeQuery.newStringRange("content", "A", "C", false, false);
initializeIndex(new String[] {"A", "B", "C", "D"});
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D, only B in range", 1, hits.length);
reader.close();
initializeIndex(new String[] {"A", "B", "D"});
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,D, only B in range", 1, hits.length);
reader.close();
addDoc("C");
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("C added, still only B in range", 1, hits.length);
reader.close();
@ -81,21 +81,21 @@ public class TestTermRangeQuery extends LuceneTestCase {
initializeIndex(new String[]{"A", "B", "C", "D"});
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D - A,B,C in range", 3, hits.length);
reader.close();
initializeIndex(new String[]{"A", "B", "D"});
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,D - A and B in range", 2, hits.length);
reader.close();
addDoc("C");
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("C added - A, B, C in range", 3, hits.length);
reader.close();
@ -104,7 +104,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
public void testAllDocs() throws Exception {
initializeIndex(new String[]{"A", "B", "C", "D"});
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TermRangeQuery query = new TermRangeQuery("content", null, null, true, true);
Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "content");
assertFalse(query.getTermsEnum(terms) instanceof TermRangeTermsEnum);
@ -128,7 +128,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
initializeIndex(new String[]{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"});
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TermRangeQuery query = TermRangeQuery.newStringRange("content", "B", "J", true, true);
checkBooleanTerms(searcher, query, "B", "C", "D", "E", "F", "G", "H", "I", "J");
@ -276,7 +276,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
false, false);
initializeIndex(new String[] {"A", "B", "", "C", "D"}, analyzer);
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
int numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,C,D => A, B & <empty string> are in range", 3, numHits);
@ -286,7 +286,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
reader.close();
initializeIndex(new String[] {"A", "B", "", "D"}, analyzer);
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,D => A, B & <empty string> are in range", 3, numHits);
@ -295,7 +295,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
reader.close();
addDoc("C");
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("C added, still A, B & <empty string> are in range", 3, numHits);
@ -311,7 +311,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
Query query = TermRangeQuery.newStringRange("content", null, "C", true, true);
initializeIndex(new String[]{"A", "B", "","C", "D"}, analyzer);
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
int numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,C,D => A,B,<empty string>,C in range", 4, numHits);
@ -320,7 +320,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
reader.close();
initializeIndex(new String[]{"A", "B", "", "D"}, analyzer);
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,D - A, B and <empty string> in range", 3, numHits);
@ -329,7 +329,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
reader.close();
addDoc("C");
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("C added => A,B,<empty string>,C in range", 4, numHits);

View File

@ -140,7 +140,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
Scorer ts = weight.scorer(context, true, true, context.reader().getLiveDocs());
Scorer ts = weight.scorer(context, true, false, context.reader().getLiveDocs());
assertTrue("next did not return a doc",
ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue("score is not correct", ts.score() == 1.6931472f);
@ -159,7 +159,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
Scorer ts = weight.scorer(context, true, true, context.reader().getLiveDocs());
Scorer ts = weight.scorer(context, true, false, context.reader().getLiveDocs());
assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
// The next doc should be doc 5
assertTrue("doc should be number 5", ts.docID() == 5);

View File

@ -70,7 +70,7 @@ public class TestWildcard
public void testTermWithoutWildcard() throws IOException {
Directory indexStore = getIndexStore("field", new String[]{"nowildcard", "nowildcardx"});
IndexReader reader = DirectoryReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
MultiTermQuery wq = new WildcardQuery(new Term("field", "nowildcard"));
assertMatches(searcher, wq, 1);
@ -108,7 +108,7 @@ public class TestWildcard
public void testEmptyTerm() throws IOException {
Directory indexStore = getIndexStore("field", new String[]{"nowildcard", "nowildcardx"});
IndexReader reader = DirectoryReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
MultiTermQuery wq = new WildcardQuery(new Term("field", ""));
wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
@ -128,7 +128,7 @@ public class TestWildcard
public void testPrefixTerm() throws IOException {
Directory indexStore = getIndexStore("field", new String[]{"prefix", "prefixx"});
IndexReader reader = DirectoryReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
MultiTermQuery wq = new WildcardQuery(new Term("field", "prefix*"));
assertMatches(searcher, wq, 2);
@ -151,7 +151,7 @@ public class TestWildcard
Directory indexStore = getIndexStore("body", new String[]
{"metal", "metals"});
IndexReader reader = DirectoryReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
Query query1 = new TermQuery(new Term("body", "metal"));
Query query2 = new WildcardQuery(new Term("body", "metal*"));
Query query3 = new WildcardQuery(new Term("body", "m*tal"));
@ -193,7 +193,7 @@ public class TestWildcard
Directory indexStore = getIndexStore("body", new String[]
{"metal", "metals", "mXtals", "mXtXls"});
IndexReader reader = DirectoryReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
Query query1 = new WildcardQuery(new Term("body", "m?tal"));
Query query2 = new WildcardQuery(new Term("body", "metal?"));
Query query3 = new WildcardQuery(new Term("body", "metals?"));
@ -218,7 +218,7 @@ public class TestWildcard
Directory indexStore = getIndexStore("field",
new String[]{"foo*bar", "foo??bar", "fooCDbar", "fooSOMETHINGbar", "foo\\"});
IndexReader reader = DirectoryReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// without escape: matches foo??bar, fooCDbar, foo*bar, and fooSOMETHINGbar
WildcardQuery unescaped = new WildcardQuery(new Term("field", "foo*bar"));
@ -355,7 +355,7 @@ public class TestWildcard
iw.close();
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// test queries that must find all
for (Query q : matchAll) {

View File

@ -229,7 +229,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
new MaxPayloadFunction(), false);
IndexReader reader = DirectoryReader.open(directory);
IndexSearcher theSearcher = new IndexSearcher(reader);
IndexSearcher theSearcher = newSearcher(reader);
theSearcher.setSimilarity(new FullSimilarity());
TopDocs hits = searcher.search(query, null, 100);
assertTrue("hits is null and it shouldn't be", hits != null);

View File

@ -349,7 +349,7 @@ public class TestLockFactory extends LuceneTestCase {
for(int i=0;i<this.numIteration;i++) {
try{
reader = DirectoryReader.open(dir);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
} catch (Exception e) {
hitException = true;
System.out.println("Stress Test Index Searcher: create hit unexpected exception: " + e.toString());

View File

@ -72,7 +72,7 @@ public class TestNRTCachingDirectory extends LuceneTestCase {
}
}
assertEquals(1+docCount, r.numDocs());
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = newSearcher(r);
// Just make sure search can run; we can't assert
// totHits since it could be 0
TopDocs hits = s.search(new TermQuery(new Term("body", "the")), 10);

View File

@ -78,7 +78,7 @@ public class TestWindowsMMap extends LuceneTestCase {
.setOpenMode(OpenMode.CREATE));
writer.commit();
IndexReader reader = DirectoryReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
int num = atLeast(1000);
for(int dx = 0; dx < num; dx ++) {

View File

@ -868,7 +868,7 @@ public class TestFSTs extends LuceneTestCase {
// turn writer into reader:
final IndexReader r = w.getReader();
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = newSearcher(r);
w.close();
final List<String> allIDsList = new ArrayList<String>(allIDs);
@ -997,7 +997,7 @@ public class TestFSTs extends LuceneTestCase {
if (VERBOSE) {
System.out.println("TEST: got reader=" + r);
}
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
w.close();
final List<String> allTermsList = new ArrayList<String>(allTerms);

View File

@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
@ -200,6 +201,23 @@ class TermsIncludingScoreQuery extends Query {
this.cost = cost;
}
@Override
public void score(Collector collector) throws IOException {
score(collector, NO_MORE_DOCS, nextDocOutOfOrder());
}
@Override
public boolean score(Collector collector, int max, int firstDocID)
throws IOException {
assert collector.acceptsDocsOutOfOrder();
collector.setScorer(this);
int doc;
for (doc = firstDocID; doc < max; doc = nextDocOutOfOrder()) {
collector.collect(doc);
}
return doc != NO_MORE_DOCS;
}
@Override
public float score() throws IOException {
return scores[ords[scoreUpto]];
@ -214,8 +232,7 @@ class TermsIncludingScoreQuery extends Query {
return docsEnum != null ? docsEnum.docID() : DocIdSetIterator.NO_MORE_DOCS;
}
@Override
public int nextDoc() throws IOException {
int nextDocOutOfOrder() throws IOException {
if (docsEnum != null) {
int docId = docsEnum.nextDoc();
if (docId == DocIdSetIterator.NO_MORE_DOCS) {
@ -239,6 +256,11 @@ class TermsIncludingScoreQuery extends Query {
return docsEnum.nextDoc();
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException("nextDoc() isn't supported because doc ids are emitted out of order");
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException("advance() isn't supported because doc ids are emitted out of order");
@ -247,7 +269,7 @@ class TermsIncludingScoreQuery extends Query {
private int advanceForExplainOnly(int target) throws IOException {
int docId;
do {
docId = nextDoc();
docId = nextDocOutOfOrder();
if (docId < target) {
int tempDocId = docsEnum.advance(target);
if (tempDocId == target) {
@ -286,7 +308,7 @@ class TermsIncludingScoreQuery extends Query {
}
@Override
public int nextDoc() throws IOException {
int nextDocOutOfOrder() throws IOException {
if (docsEnum != null) {
int docId;
do {

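In the TermsIncludingScoreQuery hunk above, the scorer emits document ids out of order, so the patch renames the iteration method to nextDocOutOfOrder(), makes nextDoc() and advance() throw UnsupportedOperationException, and adds score(Collector) / score(Collector, int, int) overrides that drive collection themselves and assert collector.acceptsDocsOutOfOrder(). A collector is only legal for such a scorer if it opts in to out-of-order delivery; a small sketch of one (the class name is illustrative, not part of the patch):

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;

final class CountingOutOfOrderCollector extends Collector {
  int count;

  @Override
  public void setScorer(Scorer scorer) throws IOException {
    // scores are not needed for counting, so the scorer is ignored
  }

  @Override
  public void collect(int doc) throws IOException {
    // doc ids may arrive in any order; only count them
    count++;
  }

  @Override
  public void setNextReader(AtomicReaderContext context) throws IOException {
  }

  @Override
  public boolean acceptsDocsOutOfOrder() {
    // returning true is what satisfies the new assert in score(Collector, int, int)
    return true;
  }
}

AssertingCollector, further down in this commit, checks the same contract from the other side: when the wrapped collector does not accept out-of-order documents, it asserts that collected doc ids are strictly increasing.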
View File

@ -500,7 +500,7 @@ public class TestBlockJoin extends LuceneTestCase {
final IndexSearcher s = newSearcher(r);
final IndexSearcher joinS = newSearcher(joinR);
final IndexSearcher joinS = new IndexSearcher(joinR);
final Filter parentsFilter = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("isParent", "x"))));
@ -1074,7 +1074,7 @@ public class TestBlockJoin extends LuceneTestCase {
IndexReader r = w.getReader();
w.close();
IndexSearcher s = newSearcher(r);
IndexSearcher s = new IndexSearcher(r);
// Create a filter that defines "parent" documents in the index - in this case resumes
Filter parentsFilter = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));

View File

@ -223,14 +223,25 @@ public class AssertingAtomicReader extends FilterAtomicReader {
}
static enum DocsEnumState { START, ITERATING, FINISHED };
static class AssertingDocsEnum extends FilterDocsEnum {
public static class AssertingDocsEnum extends FilterDocsEnum {
private DocsEnumState state = DocsEnumState.START;
private int doc;
public AssertingDocsEnum(DocsEnum in) {
this(in, true);
}
public AssertingDocsEnum(DocsEnum in, boolean failOnUnsupportedDocID) {
super(in);
int docid = in.docID();
assert docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS : "invalid initial doc id: " + docid;
try {
int docid = in.docID();
assert docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS : in.getClass() + ": invalid initial doc id: " + docid;
} catch (UnsupportedOperationException e) {
if (failOnUnsupportedDocID) {
throw e;
}
}
doc = -1;
}
@ -238,7 +249,7 @@ public class AssertingAtomicReader extends FilterAtomicReader {
public int nextDoc() throws IOException {
assert state != DocsEnumState.FINISHED : "nextDoc() called after NO_MORE_DOCS";
int nextDoc = super.nextDoc();
assert nextDoc > doc : "backwards nextDoc from " + doc + " to " + nextDoc;
assert nextDoc > doc : "backwards nextDoc from " + doc + " to " + nextDoc + " " + in;
if (nextDoc == DocIdSetIterator.NO_MORE_DOCS) {
state = DocsEnumState.FINISHED;
} else {

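The new two-argument constructor exists because some delegates legitimately cannot answer docID() before iteration has started: a top-level BooleanScorer ("BS1" in a comment later in this commit), for example, throws UnsupportedOperationException there, and the AssertingScorer added below wraps such scorers with failOnUnsupportedDocID set to false. A minimal sketch of that calling convention (the helper class is illustrative only, not part of the patch):

import org.apache.lucene.index.AssertingAtomicReader;
import org.apache.lucene.index.DocsEnum;

final class DocsEnumAssertions {
  // Sketch only: mirrors how the AssertingScorer introduced later in this commit
  // wraps its delegate. Non-top scorers must obey the "docID() is -1 or
  // NO_MORE_DOCS before iteration" contract; known top scorers are let off.
  static DocsEnum wrap(DocsEnum in, boolean isTopScorer) {
    return new AssertingAtomicReader.AssertingDocsEnum(in, /* failOnUnsupportedDocID = */ !isTopScorer);
  }
}

In AssertingScorer itself the flag is computed as topScorer == TopScorer.NO, which is the same condition written from the other direction.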
View File

@ -17,20 +17,28 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.io.IOException;
import java.lang.ref.WeakReference;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.WeakHashMap;
import java.util.concurrent.ExecutorService;
import org.apache.lucene.index.AssertingAtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.VirtualMethod;
import org.apache.lucene.util._TestUtil;
/**
/**
* Helper class that adds some extra checks to ensure correct
* usage of {@code IndexSearcher} and {@code Weight}.
* TODO: Extend this by more checks, that's just a start.
*/
public class AssertingIndexSearcher extends IndexSearcher {
final Random random;
@ -58,58 +66,279 @@ public class AssertingIndexSearcher extends IndexSearcher {
@Override
public Weight createNormalizedWeight(Query query) throws IOException {
final Weight w = super.createNormalizedWeight(query);
return new Weight() {
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
return w.explain(context, doc);
}
@Override
public Query getQuery() {
return w.getQuery();
}
return new AssertingWeight(random, w) {
@Override
public void normalize(float norm, float topLevelBoost) {
throw new IllegalStateException("Weight already normalized.");
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
Scorer scorer = w.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
if (scorer != null) {
// check that scorer obeys disi contract for docID() before next()/advance
try {
int docid = scorer.docID();
assert docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS;
} catch (UnsupportedOperationException ignored) {
// from a top-level BS1
assert topScorer;
}
}
return scorer;
}
@Override
public float getValueForNormalization() {
throw new IllegalStateException("Weight already normalized.");
}
@Override
public boolean scoresDocsOutOfOrder() {
// TODO: if this returns false, we should wrap
// Scorer with AssertingScorer that confirms docIDs
// are in order?
return w.scoresDocsOutOfOrder();
}
};
}
@Override
protected Query wrapFilter(Query query, Filter filter) {
if (random.nextBoolean())
return super.wrapFilter(query, filter);
return (filter == null) ? query : new FilteredQuery(query, filter, _TestUtil.randomFilterStrategy(random));
}
@Override
protected void search(List<AtomicReaderContext> leaves, Weight weight, Collector collector) throws IOException {
super.search(leaves, AssertingWeight.wrap(random, weight), collector);
}
static class AssertingWeight extends Weight {
static Weight wrap(Random random, Weight other) {
return other instanceof AssertingWeight ? other : new AssertingWeight(random, other);
}
final Random random;
final Weight in;
AssertingWeight(Random random, Weight in) {
this.random = random;
this.in = in;
}
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
return in.explain(context, doc);
}
@Override
public Query getQuery() {
return in.getQuery();
}
@Override
public float getValueForNormalization() throws IOException {
return in.getValueForNormalization();
}
@Override
public void normalize(float norm, float topLevelBoost) {
in.normalize(norm, topLevelBoost);
}
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
// if the caller asks for in-order scoring or if the weight does not support
// out-of-order scoring then collection will have to happen in-order.
final boolean inOrder = scoreDocsInOrder || !scoresDocsOutOfOrder();
final Scorer inScorer = in.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
return AssertingScorer.wrap(new Random(random.nextLong()), inScorer, topScorer, inOrder);
}
@Override
public boolean scoresDocsOutOfOrder() {
return in.scoresDocsOutOfOrder();
}
}
enum TopScorer {
YES, NO, UNKNOWN;
}
public static class AssertingScorer extends Scorer {
private static final VirtualMethod<Scorer> SCORE_COLLECTOR = new VirtualMethod<Scorer>(Scorer.class, "score", Collector.class);
private static final VirtualMethod<Scorer> SCORE_COLLECTOR_RANGE = new VirtualMethod<Scorer>(Scorer.class, "score", Collector.class, int.class, int.class);
// we need to track scorers using a weak hash map because otherwise we
// could lose references because of e.g.
// AssertingScorer.score(Collector) which needs to delegate to work correctly
private static Map<Scorer, WeakReference<AssertingScorer>> ASSERTING_INSTANCES = Collections.synchronizedMap(new WeakHashMap<Scorer, WeakReference<AssertingScorer>>());
private static Scorer wrap(Random random, Scorer other, TopScorer topScorer, boolean inOrder) {
if (other == null || other instanceof AssertingScorer) {
return other;
}
final AssertingScorer assertScorer = new AssertingScorer(random, other, topScorer, inOrder);
ASSERTING_INSTANCES.put(other, new WeakReference<AssertingScorer>(assertScorer));
return assertScorer;
}
static Scorer wrap(Random random, Scorer other, boolean topScorer, boolean inOrder) {
return wrap(random, other, topScorer ? TopScorer.YES : TopScorer.NO, inOrder);
}
static Scorer getAssertingScorer(Random random, Scorer other) {
if (other == null || other instanceof AssertingScorer) {
return other;
}
final WeakReference<AssertingScorer> assertingScorerRef = ASSERTING_INSTANCES.get(other);
final AssertingScorer assertingScorer = assertingScorerRef == null ? null : assertingScorerRef.get();
if (assertingScorer == null) {
// can happen in case of memory pressure or if
// scorer1.score(collector) calls
// collector.setScorer(scorer2) with scorer1 != scorer2, such as
// BooleanScorer. In that case we can't enable all assertions
return new AssertingScorer(random, other, TopScorer.UNKNOWN, false);
} else {
return assertingScorer;
}
}
final Random random;
final Scorer in;
final AssertingAtomicReader.AssertingDocsEnum docsEnumIn;
final TopScorer topScorer;
final boolean inOrder;
final boolean canCallNextDoc;
private AssertingScorer(Random random, Scorer in, TopScorer topScorer, boolean inOrder) {
super(in.weight);
this.random = random;
this.in = in;
this.topScorer = topScorer;
this.inOrder = inOrder;
this.docsEnumIn = new AssertingAtomicReader.AssertingDocsEnum(in, topScorer == TopScorer.NO);
this.canCallNextDoc = topScorer != TopScorer.YES // not a top scorer
|| !SCORE_COLLECTOR_RANGE.isOverriddenAsOf(in.getClass()) // the default impl relies upon nextDoc()
|| !SCORE_COLLECTOR.isOverriddenAsOf(in.getClass()); // the default impl relies upon nextDoc()
}
public Scorer getIn() {
return in;
}
boolean iterating() {
switch (docID()) {
case -1:
case NO_MORE_DOCS:
return false;
default:
return true;
}
}
@Override
public float score() throws IOException {
assert iterating();
final float score = in.score();
assert !Float.isNaN(score);
return score;
}
@Override
public void score(Collector collector) throws IOException {
assert topScorer != TopScorer.NO;
if (SCORE_COLLECTOR.isOverriddenAsOf(this.in.getClass())) {
if (random.nextBoolean()) {
try {
final boolean remaining = in.score(collector, DocsEnum.NO_MORE_DOCS, in.nextDoc());
assert !remaining;
} catch (UnsupportedOperationException e) {
in.score(collector);
}
} else {
in.score(collector);
}
} else {
// score(Collector) has not been overridden, use the super method in
// order to benefit from all assertions
super.score(collector);
}
}
@Override
public boolean score(Collector collector, int max, int firstDocID) throws IOException {
assert topScorer != TopScorer.NO;
if (SCORE_COLLECTOR_RANGE.isOverriddenAsOf(this.in.getClass())) {
return in.score(collector, max, firstDocID);
} else {
// score(Collector,int,int) has not been overridden, use the super
// method in order to benefit from all assertions
return super.score(collector, max, firstDocID);
}
}
@Override
public Collection<ChildScorer> getChildren() {
return in.getChildren();
}
@Override
public int freq() throws IOException {
assert iterating();
return in.freq();
}
@Override
public int docID() {
return in.docID();
}
@Override
public int nextDoc() throws IOException {
assert canCallNextDoc : "top scorers should not call nextDoc()";
return docsEnumIn.nextDoc();
}
@Override
public int advance(int target) throws IOException {
assert canCallNextDoc : "top scorers should not call advance(target)";
return docsEnumIn.advance(target);
}
@Override
public long cost() {
return in.cost();
}
}
static class AssertingCollector extends Collector {
static Collector wrap(Random random, Collector other, boolean inOrder) {
return other instanceof AssertingCollector ? other : new AssertingCollector(random, other, inOrder);
}
final Random random;
final Collector in;
final boolean inOrder;
int lastCollected;
AssertingCollector(Random random, Collector in, boolean inOrder) {
this.random = random;
this.in = in;
this.inOrder = inOrder;
lastCollected = -1;
}
@Override
public void setScorer(Scorer scorer) throws IOException {
in.setScorer(AssertingScorer.getAssertingScorer(random, scorer));
}
@Override
public void collect(int doc) throws IOException {
if (inOrder || !acceptsDocsOutOfOrder()) {
assert doc > lastCollected : "Out of order : " + lastCollected + " " + doc;
}
in.collect(doc);
lastCollected = doc;
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
lastCollected = -1;
}
@Override
public boolean acceptsDocsOutOfOrder() {
return in.acceptsDocsOutOfOrder();
}
}
}
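The wrapping chain introduced in this file is: newSearcher(...) hands back an AssertingIndexSearcher; createNormalizedWeight(...) and the protected search(...) wrap Weights in AssertingWeight; AssertingWeight.scorer(...) wraps the returned Scorer in AssertingScorer; and AssertingCollector.setScorer(...) resolves a delegate scorer back to its asserting wrapper through the weak map, so that a delegating scorer such as BooleanScorer, which may pass a different scorer to collector.setScorer(...) during bulk scoring, still ends up asserted without the map keeping scorers alive. One subtle piece is the VirtualMethod probe that decides whether nextDoc()/advance() may be called on a top scorer: if the delegate overrides neither bulk score(...) method, the default implementations in Scorer iterate via nextDoc(), so those calls stay legal. A small standalone restatement of that check (the class name is illustrative, assumed for this sketch):

import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.VirtualMethod;

final class OverrideProbe {
  // Same reflection trick AssertingScorer relies on: VirtualMethod reports whether
  // a Scorer subclass overrides the bulk-scoring entry points, which decides
  // whether the asserting wrapper may still call nextDoc()/advance() on a top scorer.
  private static final VirtualMethod<Scorer> SCORE_COLLECTOR =
      new VirtualMethod<Scorer>(Scorer.class, "score", Collector.class);
  private static final VirtualMethod<Scorer> SCORE_COLLECTOR_RANGE =
      new VirtualMethod<Scorer>(Scorer.class, "score", Collector.class, int.class, int.class);

  static boolean usesDefaultBulkScoring(Class<? extends Scorer> clazz) {
    // If neither bulk method is overridden, the defaults in Scorer iterate via
    // nextDoc(), so iterating through the asserting wrapper is safe.
    return !SCORE_COLLECTOR.isOverriddenAsOf(clazz) && !SCORE_COLLECTOR_RANGE.isOverriddenAsOf(clazz);
  }
}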

View File

@ -1237,7 +1237,7 @@ public abstract class LuceneTestCase extends Assert {
* Create a new searcher over the reader. This searcher might randomly use
* threads.
*/
public static IndexSearcher newSearcher(IndexReader r) throws IOException {
public static IndexSearcher newSearcher(IndexReader r) {
return newSearcher(r, true);
}
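Dropping the throws IOException clause here lets newSearcher(...) be called from test code that cannot, or would rather not, declare checked exceptions; as the next hunk shows, the checked exceptions from maybeWrapReader(...) and _TestUtil.checkReader(...) are converted into AssertionError so a failure still surfaces loudly. A hedged sketch of that conversion pattern in isolation (the helper below is illustrative, not part of LuceneTestCase):

import java.io.IOException;

final class RethrowAsAssertion {
  interface IoCall<T> {
    T call() throws IOException;
  }

  // Run an I/O operation inside a method whose signature must stay free of
  // checked exceptions; any IOException becomes an AssertionError, matching
  // the pattern used in the newSearcher(...) hunk below.
  static <T> T run(IoCall<T> call) {
    try {
      return call.call();
    } catch (IOException e) {
      throw new AssertionError(e);
    }
  }
}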
@ -1246,18 +1246,26 @@ public abstract class LuceneTestCase extends Assert {
* threads. if <code>maybeWrap</code> is true, this searcher might wrap the
* reader with one that returns null for getSequentialSubReaders.
*/
public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) {
Random random = random();
if (usually()) {
if (maybeWrap) {
r = maybeWrapReader(r);
try {
r = maybeWrapReader(r);
} catch (IOException e) {
throw new AssertionError(e);
}
}
// TODO: this whole check is a coverage hack, we should move it to tests for various filterreaders.
// ultimately whatever you do will be checkIndex'd at the end anyway.
if (random.nextInt(500) == 0 && r instanceof AtomicReader) {
// TODO: not useful to check DirectoryReader (redundant with checkindex)
// but maybe sometimes run this on the other crazy readers maybeWrapReader creates?
_TestUtil.checkReader(r);
try {
_TestUtil.checkReader(r);
} catch (IOException e) {
throw new AssertionError(e);
}
}
IndexSearcher ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getContext());
ret.setSimilarity(classEnvRule.similarity);