LUCENE-2751: add LuceneTestCase.newSearcher; use this to get an IndexSearcher that randomly uses threads, etc.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1066691 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Muir 2011-02-02 23:27:25 +00:00
parent 2635ac0cf7
commit dde8fc7020
103 changed files with 335 additions and 203 deletions

View File

@ -70,7 +70,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final IndexReader indexReader = IndexReader.open(directory, true);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = new IndexSearcher(indexReader);
final IndexSearcher indexSearcher = newSearcher(indexReader);
try {
final PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term(FIELD, "fox"));
@ -114,7 +114,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final IndexReader indexReader = IndexReader.open(directory, true);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = new IndexSearcher(indexReader);
final IndexSearcher indexSearcher = newSearcher(indexReader);
try {
final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
new SpanTermQuery(new Term(FIELD, "fox")),
@ -184,7 +184,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final IndexReader indexReader = IndexReader.open(directory, true);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = new IndexSearcher(indexReader);
final IndexSearcher indexSearcher = newSearcher(indexReader);
try {
final PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term(FIELD, "did"));
@ -227,7 +227,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final IndexReader indexReader = IndexReader.open(directory, true);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = new IndexSearcher(indexReader);
final IndexSearcher indexSearcher = newSearcher(indexReader);
try {
final PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term(FIELD, "did"));
@ -268,7 +268,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final IndexReader indexReader = IndexReader.open(directory, true);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = new IndexSearcher(indexReader);
final IndexSearcher indexSearcher = newSearcher(indexReader);
try {
final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
new SpanTermQuery(new Term(FIELD, "did")),

View File

@ -113,7 +113,7 @@ public class TokenSourcesTest extends LuceneTestCase {
final IndexReader indexReader = IndexReader.open(directory, true);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = new IndexSearcher(indexReader);
final IndexSearcher indexSearcher = newSearcher(indexReader);
try {
final DisjunctionMaxQuery query = new DisjunctionMaxQuery(1);
query.add(new SpanTermQuery(new Term(FIELD, "{fox}")));
@ -159,7 +159,7 @@ public class TokenSourcesTest extends LuceneTestCase {
final IndexReader indexReader = IndexReader.open(directory, true);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = new IndexSearcher(indexReader);
final IndexSearcher indexSearcher = newSearcher(indexReader);
try {
final DisjunctionMaxQuery query = new DisjunctionMaxQuery(1);
query.add(new SpanTermQuery(new Term(FIELD, "{fox}")));

View File

@ -37,7 +37,7 @@ public class TestEmptyIndex extends LuceneTestCase {
InstantiatedIndex ii = new InstantiatedIndex();
IndexReader r = new InstantiatedIndexReader(ii);
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
TopDocs td = s.search(new TermQuery(new Term("foo", "bar")), 1);

View File

@ -36,7 +36,7 @@ public class TestRealTime extends LuceneTestCase {
InstantiatedIndex index = new InstantiatedIndex();
InstantiatedIndexReader reader = new InstantiatedIndexReader(index);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
InstantiatedIndexWriter writer = new InstantiatedIndexWriter(index);
Document doc;

View File

@ -72,7 +72,7 @@ public class ChainedFilterTest extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
// query for everything to make life easier
BooleanQuery bq = new BooleanQuery();
@ -194,7 +194,7 @@ public class ChainedFilterTest extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
Query query = new TermQuery(new Term("none", "none"));

View File

@ -61,7 +61,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher =new IndexSearcher(reader);
searcher =newSearcher(reader);
}

View File

@ -51,7 +51,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
addDoc(writer, "johnathon smythe","6");
reader = writer.getReader();
writer.close();
searcher=new IndexSearcher(reader);
searcher=newSearcher(reader);
}
@Override

View File

@ -51,7 +51,7 @@ public class TestRegexQuery extends LuceneTestCase {
writer.addDocument(doc);
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override

View File

@ -53,7 +53,7 @@ public class TestMoreLikeThis extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override

View File

@ -1278,11 +1278,12 @@ public class TestQPHelper extends LuceneTestCase {
doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = IndexReader.open(w, true);
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
Query q = new StandardQueryParser(new CannedAnalyzer()).parse("\"a\"", "field");
assertTrue(q instanceof MultiPhraseQuery);
assertEquals(1, s.search(q, 10).totalHits);
s.close();
r.close();
w.close();
dir.close();

View File

@ -73,7 +73,7 @@ public class TestParser extends LuceneTestCase {
d.close();
writer.close();
reader=IndexReader.open(dir, true);
searcher=new IndexSearcher(reader);
searcher=newSearcher(reader);
}

View File

@ -671,7 +671,7 @@ public class TestExternalCodecs extends LuceneTestCase {
testTermsOrder(r);
assertEquals(NUM_DOCS-1, r.numDocs());
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
assertEquals(NUM_DOCS-1, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits);
assertEquals(NUM_DOCS-1, s.search(new TermQuery(new Term("field2", "pulsing")), 1).totalHits);
r.close();
@ -682,7 +682,7 @@ public class TestExternalCodecs extends LuceneTestCase {
r = IndexReader.open(w, true);
assertEquals(NUM_DOCS-2, r.maxDoc());
assertEquals(NUM_DOCS-2, r.numDocs());
s = new IndexSearcher(r);
s = newSearcher(r);
assertEquals(NUM_DOCS-2, s.search(new TermQuery(new Term("field1", "standard")), 1).totalHits);
assertEquals(NUM_DOCS-2, s.search(new TermQuery(new Term("field2", "pulsing")), 1).totalHits);
assertEquals(1, s.search(new TermQuery(new Term("id", "76")), 1).totalHits);

View File

@ -156,7 +156,7 @@ public class TestDocument extends LuceneTestCase {
writer.addDocument(makeDocumentWithFields());
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// search for something that does exists
Query query = new TermQuery(new Term("keyword", "test1"));
@ -238,7 +238,7 @@ public class TestDocument extends LuceneTestCase {
writer.addDocument(doc);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
Query query = new TermQuery(new Term("keyword", "test"));

View File

@ -409,7 +409,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// make sure we can do delete & setNorm against this segment:
IndexReader reader = IndexReader.open(dir, false);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
Term searchTerm = new Term("id", "6");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("wrong delete count", 1, delCount);

View File

@ -362,7 +362,7 @@ public class TestCodecs extends LuceneTestCase {
private ScoreDoc[] search(final IndexWriter writer, final Query q, final int n) throws IOException {
final IndexReader reader = writer.getReader();
final IndexSearcher searcher = new IndexSearcher(reader);
final IndexSearcher searcher = newSearcher(reader);
try {
return searcher.search(q, null, n).scoreDocs;
}

View File

@ -652,7 +652,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
IndexReader reader = IndexReader.open(dir, policy, false);
reader.deleteDocument(3*i+1);
reader.setNorm(4*i+1, "content", conf.getSimilarityProvider().get("content").encodeNormValue(2.0F));
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(16*(1+i), hits.length);
// this is a commit
@ -696,7 +696,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
// Work backwards in commits on what the expected
// count should be.
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
if (i > 1) {
if (i % 2 == 0) {
@ -772,7 +772,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
IndexReader reader = IndexReader.open(dir, policy, false);
reader.deleteDocument(3);
reader.setNorm(5, "content", conf.getSimilarityProvider().get("content").encodeNormValue(2.0F));
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(16, hits.length);
// this is a commit
@ -807,7 +807,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
// Work backwards in commits on what the expected
// count should be.
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(expectedCount, hits.length);
searcher.close();

View File

@ -900,7 +900,7 @@ public class TestIndexReader extends LuceneTestCase
{
IndexReader r = IndexReader.open(startDir);
IndexSearcher searcher = new IndexSearcher(r);
IndexSearcher searcher = newSearcher(r);
ScoreDoc[] hits = null;
try {
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
@ -908,6 +908,7 @@ public class TestIndexReader extends LuceneTestCase
e.printStackTrace();
fail("exception when init searching: " + e);
}
searcher.close();
r.close();
}
@ -1023,7 +1024,7 @@ public class TestIndexReader extends LuceneTestCase
}
*/
IndexSearcher searcher = new IndexSearcher(newReader);
IndexSearcher searcher = newSearcher(newReader);
ScoreDoc[] hits = null;
try {
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;

View File

@ -773,14 +773,14 @@ public class TestIndexReaderReopen extends LuceneTestCase {
// not synchronized
IndexReader refreshed = r.reopen();
IndexSearcher searcher = new IndexSearcher(refreshed);
IndexSearcher searcher = newSearcher(refreshed);
ScoreDoc[] hits = searcher.search(
new TermQuery(new Term("field1", "a" + rnd.nextInt(refreshed.maxDoc()))),
null, 1000).scoreDocs;
if (hits.length > 0) {
searcher.doc(hits[0].doc);
}
searcher.close();
if (refreshed != r) {
refreshed.close();
}

View File

@ -2817,7 +2817,7 @@ public class TestIndexWriter extends LuceneTestCase {
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
@ -2833,6 +2833,7 @@ public class TestIndexWriter extends LuceneTestCase {
assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
s.close();
r.close();
w.optimize();
}

View File

@ -565,7 +565,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
+ e);
}
IndexSearcher searcher = new IndexSearcher(newReader);
IndexSearcher searcher = newSearcher(newReader);
ScoreDoc[] hits = null;
try {
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;

View File

@ -177,7 +177,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
IndexReader reader = IndexReader.open(startDir, true);
assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 57, hits.length);
searcher.close();
@ -360,7 +360,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
}
}
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
try {
hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
} catch (IOException e) {

View File

@ -718,8 +718,9 @@ public class TestIndexWriterReader extends LuceneTestCase {
// reader should remain usable even after IndexWriter is closed:
assertEquals(100, r.numDocs());
Query q = new TermQuery(new Term("indexname", "test"));
assertEquals(100, new IndexSearcher(r).search(q, 10).totalHits);
IndexSearcher searcher = newSearcher(r);
assertEquals(100, searcher.search(q, 10).totalHits);
searcher.close();
try {
r.reopen();
fail("failed to hit AlreadyClosedException");
@ -785,7 +786,9 @@ public class TestIndexWriterReader extends LuceneTestCase {
r = r2;
}
Query q = new TermQuery(new Term("indexname", "test"));
final int count = new IndexSearcher(r).search(q, 10).totalHits;
IndexSearcher searcher = newSearcher(r);
final int count = searcher.search(q, 10).totalHits;
searcher.close();
assertTrue(count >= lastCount);
lastCount = count;
}
@ -800,7 +803,9 @@ public class TestIndexWriterReader extends LuceneTestCase {
r = r2;
}
Query q = new TermQuery(new Term("indexname", "test"));
final int count = new IndexSearcher(r).search(q, 10).totalHits;
IndexSearcher searcher = newSearcher(r);
final int count = searcher.search(q, 10).totalHits;
searcher.close();
assertTrue(count >= lastCount);
assertEquals(0, excs.size());
@ -873,7 +878,9 @@ public class TestIndexWriterReader extends LuceneTestCase {
r = r2;
}
Query q = new TermQuery(new Term("indexname", "test"));
sum += new IndexSearcher(r).search(q, 10).totalHits;
IndexSearcher searcher = newSearcher(r);
sum += searcher.search(q, 10).totalHits;
searcher.close();
}
for(int i=0;i<NUM_THREAD;i++) {
@ -886,8 +893,9 @@ public class TestIndexWriterReader extends LuceneTestCase {
r = r2;
}
Query q = new TermQuery(new Term("indexname", "test"));
sum += new IndexSearcher(r).search(q, 10).totalHits;
IndexSearcher searcher = newSearcher(r);
sum += searcher.search(q, 10).totalHits;
searcher.close();
assertTrue("no documents found at all", sum > 0);
assertEquals(0, excs.size());
@ -973,10 +981,11 @@ public class TestIndexWriterReader extends LuceneTestCase {
setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
@Override
public void warm(IndexReader r) throws IOException {
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
TopDocs hits = s.search(new TermQuery(new Term("foo", "bar")), 10);
assertEquals(20, hits.totalHits);
didWarm.set(true);
s.close();
}
}).
setMergePolicy(newLogMergePolicy(10))

View File

@ -98,7 +98,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
SegmentReader reader = getOnlySegmentReader(IndexReader.open(directory, false));
this.searcher = new IndexSearcher(reader);
this.searcher = newSearcher(reader);
}
private ScoreDoc[] search() throws IOException {
@ -126,7 +126,9 @@ public class TestLazyProxSkipping extends LuceneTestCase {
// test whether only the minimum amount of seeks()
// are performed
performTest(5);
searcher.close();
performTest(10);
searcher.close();
}
public void testSeek() throws IOException {

View File

@ -88,7 +88,9 @@ public class TestNRTThreads extends LuceneTestCase {
}
}
sum += new IndexSearcher(reader).search(new TermQuery(new Term("body", "united")), 10).totalHits;
IndexSearcher searcher = newSearcher(reader);
sum += searcher.search(new TermQuery(new Term("body", "united")), 10).totalHits;
searcher.close();
if (VERBOSE) {
System.out.println("TEST: warm visited " + sum + " fields");
@ -352,7 +354,7 @@ public class TestNRTThreads extends LuceneTestCase {
}
final IndexReader r2 = writer.getReader();
final IndexSearcher s = new IndexSearcher(r2);
final IndexSearcher s = newSearcher(r2);
boolean doFail = false;
for(String id : delIDs) {
final TopDocs hits = s.search(new TermQuery(new Term("id", id)), 1);
@ -384,6 +386,7 @@ public class TestNRTThreads extends LuceneTestCase {
assertFalse(writer.anyNonBulkMerges);
writer.close(false);
_TestUtil.checkIndex(dir);
s.close();
dir.close();
_TestUtil.rmDir(tempDir);
docs.close();
@ -398,7 +401,7 @@ public class TestNRTThreads extends LuceneTestCase {
}
private void smokeTestReader(IndexReader r) throws Exception {
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
runQuery(s, new TermQuery(new Term("body", "united")));
runQuery(s, new TermQuery(new Term("titleTokenized", "states")));
PhraseQuery pq = new PhraseQuery();

View File

@ -47,7 +47,9 @@ public class TestParallelReader extends LuceneTestCase {
@Override
public void tearDown() throws Exception {
single.getIndexReader().close();
single.close();
parallel.getIndexReader().close();
parallel.close();
dir.close();
dir1.close();
dir2.close();
@ -267,7 +269,7 @@ public class TestParallelReader extends LuceneTestCase {
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
return new IndexSearcher(pr);
return newSearcher(pr);
}
private Directory getDir1(Random random) throws IOException {

View File

@ -227,7 +227,7 @@ public class TestPerFieldCodecSupport extends LuceneTestCase {
}
IndexReader reader = IndexReader.open(dir, null, true,
IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, codecs);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TopDocs search = searcher.search(new TermQuery(t), num + 10);
assertEquals(num, search.totalHits);
searcher.close();

View File

@ -1142,10 +1142,11 @@ public class TestQueryParser extends LuceneTestCase {
w.addDocument(doc);
IndexReader r = IndexReader.open(w, true);
w.close();
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "f", a);
Query q = qp.parse("\"wizard of ozzy\"");
assertEquals(1, s.search(q, 1).totalHits);
s.close();
r.close();
dir.close();
}

View File

@ -100,11 +100,13 @@ public class CheckHits {
for (int i = -1; i < 2; i++) {
actual.clear();
QueryUtils.wrapUnderlyingReader
(random, searcher, i).search(query, c);
IndexSearcher s = QueryUtils.wrapUnderlyingReader
(random, searcher, i);
s.search(query, c);
Assert.assertEquals("Wrap Reader " + i + ": " +
query.toString(defaultFieldName),
correct, actual);
s.close();
}
}

View File

@ -21,6 +21,7 @@ import org.apache.lucene.search.Weight.ScorerContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;
import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT;
@ -114,9 +115,13 @@ public class QueryUtils {
checkFirstSkipTo(q1,s);
checkSkipTo(q1,s);
if (wrap) {
check(random, q1, wrapUnderlyingReader(random, s, -1), false);
check(random, q1, wrapUnderlyingReader(random, s, 0), false);
check(random, q1, wrapUnderlyingReader(random, s, +1), false);
IndexSearcher wrapped;
check(random, q1, wrapped = wrapUnderlyingReader(random, s, -1), false);
wrapped.close();
check(random, q1, wrapped = wrapUnderlyingReader(random, s, 0), false);
wrapped.close();
check(random, q1, wrapped = wrapUnderlyingReader(random, s, +1), false);
wrapped.close();
}
checkExplanations(q1,s);
checkSerialization(q1,s);
@ -158,7 +163,7 @@ public class QueryUtils {
IndexReader.open(makeEmptyIndex(random, 0), true),
0 < edge ? r : IndexReader.open(makeEmptyIndex(random, 0), true))
};
IndexSearcher out = new IndexSearcher(new MultiReader(readers));
IndexSearcher out = LuceneTestCase.newSearcher(new MultiReader(readers));
out.setSimilarityProvider(s.getSimilarityProvider());
return out;
}
@ -318,7 +323,7 @@ public class QueryUtils {
// previous reader, hits NO_MORE_DOCS
if (lastReader[0] != null) {
final IndexReader previousReader = lastReader[0];
IndexSearcher indexSearcher = new IndexSearcher(previousReader);
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
Weight w = q.weight(indexSearcher);
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
if (scorer != null) {
@ -326,6 +331,7 @@ public class QueryUtils {
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
}
leafPtr++;
indexSearcher.close();
}
lastReader[0] = context.reader;
assert readerContextArray[leafPtr].reader == context.reader;
@ -343,13 +349,14 @@ public class QueryUtils {
// confirm that skipping beyond the last doc, on the
// previous reader, hits NO_MORE_DOCS
final IndexReader previousReader = lastReader[0];
IndexSearcher indexSearcher = new IndexSearcher(previousReader);
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
Weight w = q.weight(indexSearcher);
Scorer scorer = w.scorer((AtomicReaderContext)previousReader.getTopReaderContext(), ScorerContext.def());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
}
indexSearcher.close();
}
}
}
@ -400,13 +407,14 @@ public class QueryUtils {
// previous reader, hits NO_MORE_DOCS
if (lastReader[0] != null) {
final IndexReader previousReader = lastReader[0];
IndexSearcher indexSearcher = new IndexSearcher(previousReader);
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
Weight w = q.weight(indexSearcher);
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
}
indexSearcher.close();
leafPtr++;
}
@ -423,13 +431,14 @@ public class QueryUtils {
// confirm that skipping beyond the last doc, on the
// previous reader, hits NO_MORE_DOCS
final IndexReader previousReader = lastReader[0];
IndexSearcher indexSearcher = new IndexSearcher(previousReader);
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
Weight w = q.weight(indexSearcher);
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
}
indexSearcher.close();
}
}
}

View File

@ -62,7 +62,7 @@ public class TestAutomatonQuery extends LuceneTestCase {
+ " with numbers 1234 5678.9 and letter b");
writer.addDocument(doc);
reader = writer.getReader();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
writer.close();
}

View File

@ -82,7 +82,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
field.setValue("\uFFFD\uFFFD");
writer.addDocument(doc);
reader = writer.getReader();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
writer.close();
}

View File

@ -92,7 +92,7 @@ public class TestBoolean2 extends LuceneTestCase {
w.addDocument(doc);
}
reader = w.getReader();
bigSearcher = new IndexSearcher(reader);
bigSearcher = newSearcher(reader);
w.close();
}

View File

@ -65,7 +65,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
}
r = w.getReader();
s = new IndexSearcher(r);
s = newSearcher(r);
w.close();
//System.out.println("Set up " + getName());
}

View File

@ -154,7 +154,7 @@ public class TestBooleanOr extends LuceneTestCase {
reader = writer.getReader();
//
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
writer.close();
}

View File

@ -73,7 +73,7 @@ public class TestBooleanQuery extends LuceneTestCase {
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
BooleanQuery q = new BooleanQuery();
q.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
@ -120,6 +120,7 @@ public class TestBooleanQuery extends LuceneTestCase {
dmq.add(pq);
assertEquals(1, s.search(dmq, 10).totalHits);
s.close();
r.close();
w.close();
dir.close();

View File

@ -56,9 +56,10 @@ public class TestBooleanScorer extends LuceneTestCase
query.add(booleanQuery1, BooleanClause.Occur.MUST);
query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT);
IndexSearcher indexSearcher = new IndexSearcher(ir);
IndexSearcher indexSearcher = newSearcher(ir);
ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs;
assertEquals("Number of matched documents", 2, hits.length);
indexSearcher.close();
ir.close();
directory.close();
}
@ -74,7 +75,7 @@ public class TestBooleanScorer extends LuceneTestCase
writer.commit();
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
BooleanWeight weight = (BooleanWeight) new BooleanQuery().createWeight(searcher);
Scorer[] scorers = new Scorer[] {new Scorer(weight) {
private int doc = -1;

View File

@ -48,7 +48,7 @@ public class TestCachingSpanFilter extends LuceneTestCase {
// but we use .reopen on this reader below and expect to
// (must) get an NRT reader:
IndexReader reader = IndexReader.open(writer.w, true);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// add a doc, refresh the reader, and check that its there
Document doc = new Document();
@ -56,7 +56,8 @@ public class TestCachingSpanFilter extends LuceneTestCase {
writer.addDocument(doc);
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
assertEquals("Should find a hit...", 1, docs.totalHits);
@ -76,7 +77,8 @@ public class TestCachingSpanFilter extends LuceneTestCase {
writer.deleteDocuments(new Term("id", "1"));
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
@ -90,7 +92,8 @@ public class TestCachingSpanFilter extends LuceneTestCase {
writer.addDocument(doc);
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
@ -108,7 +111,8 @@ public class TestCachingSpanFilter extends LuceneTestCase {
// that had no new deletions
reader = refreshReader(reader);
assertTrue(reader != oldReader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
int missCount = filter.missCount;
docs = searcher.search(constantScore, 1);
assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
@ -118,7 +122,8 @@ public class TestCachingSpanFilter extends LuceneTestCase {
writer.deleteDocuments(new Term("id", "1"));
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
@ -132,6 +137,7 @@ public class TestCachingSpanFilter extends LuceneTestCase {
// entry:
assertTrue(oldReader != null);
searcher.close();
writer.close();
reader.close();
dir.close();

View File

@ -170,7 +170,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
// but we use .reopen on this reader below and expect to
// (must) get an NRT reader:
IndexReader reader = IndexReader.open(writer.w, true);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// add a doc, refresh the reader, and check that its there
Document doc = new Document();
@ -178,7 +178,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.addDocument(doc);
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
TopDocs docs = searcher.search(new MatchAllDocsQuery(), 1);
assertEquals("Should find a hit...", 1, docs.totalHits);
@ -198,7 +199,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.deleteDocuments(new Term("id", "1"));
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
@ -213,7 +215,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.addDocument(doc);
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
@ -232,7 +235,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
// that had no change to deletions
reader = refreshReader(reader);
assertTrue(reader != oldReader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
int missCount = filter.missCount;
docs = searcher.search(constantScore, 1);
assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
@ -242,7 +246,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.deleteDocuments(new Term("id", "1"));
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
missCount = filter.missCount;
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
@ -257,7 +262,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.addDocument(doc);
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
assertEquals("[query + filter] Should find a hit...", 1, docs.totalHits);
@ -269,7 +275,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.deleteDocuments(new Term("id", "1"));
reader = refreshReader(reader);
searcher = new IndexSearcher(reader);
searcher.close();
searcher = newSearcher(reader);
docs = searcher.search(new MatchAllDocsQuery(), filter, 1);
assertEquals("[query + filter] Should *not* find a hit...", 0, docs.totalHits);
@ -287,6 +294,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
// entry:
assertTrue(oldReader != null);
searcher.close();
reader.close();
writer.close();
dir.close();

View File

@ -36,6 +36,12 @@ public class TestComplexExplanations extends TestExplanations {
super.setUp();
searcher.setSimilarityProvider(createQnorm1Similarity());
}
@Override
public void tearDown() throws Exception {
searcher.close();
super.tearDown();
}
// must be static for weight serialization tests
private static DefaultSimilarity createQnorm1Similarity() {

View File

@ -94,7 +94,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
// set a similarity that does not normalize our boost away
searcher.setSimilarityProvider(new DefaultSimilarity() {

View File

@ -57,7 +57,7 @@ public class TestDateFilter extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// filter that should preserve matches
// DateFilter df1 = DateFilter.Before("datefield", now);
@ -98,6 +98,7 @@ public class TestDateFilter extends LuceneTestCase {
result = searcher.search(query2, df2, 1000).scoreDocs;
assertEquals(0, result.length);
searcher.close();
reader.close();
indexStore.close();
}
@ -123,7 +124,7 @@ public class TestDateFilter extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// filter that should preserve matches
// DateFilter df1 = DateFilter.After("datefield", now);
@ -165,6 +166,7 @@ public class TestDateFilter extends LuceneTestCase {
result = searcher.search(query2, df2, 1000).scoreDocs;
assertEquals(0, result.length);
searcher.close();
reader.close();
indexStore.close();
}

View File

@ -28,7 +28,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
@ -78,7 +77,7 @@ public class TestDateSort extends LuceneTestCase {
}
public void testReverseDateSort() throws Exception {
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
Sort sort = new Sort(new SortField(DATE_TIME_FIELD, SortField.STRING, true));

View File

@ -149,7 +149,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
r = new SlowMultiReaderWrapper(writer.getReader());
writer.close();
s = new IndexSearcher(r);
s = newSearcher(r);
s.setSimilarityProvider(sim);
}

View File

@ -65,7 +65,7 @@ public class TestDocBoost extends LuceneTestCase {
final float[] scores = new float[4];
new IndexSearcher(reader).search
newSearcher(reader).search
(new TermQuery(new Term("field", "word")),
new Collector() {
private int base = 0;

View File

@ -109,7 +109,7 @@ public class TestDocIdSet extends LuceneTestCase {
writer.close();
// First verify the document is searchable.
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
Assert.assertEquals(1, searcher.search(new MatchAllDocsQuery(), 10).totalHits);
// Now search w/ a Filter which returns a null DocIdSet

View File

@ -53,7 +53,7 @@ public class TestElevationComparator extends LuceneTestCase {
IndexReader r = IndexReader.open(writer, true);
writer.close();
IndexSearcher searcher = new IndexSearcher(r);
IndexSearcher searcher = newSearcher(r);
runTest(searcher, true);
runTest(searcher, false);

View File

@ -77,7 +77,7 @@ public class TestExplanations extends LuceneTestCase {
}
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
protected String[] docFields = {

View File

@ -44,7 +44,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testRangeFilterId() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int medId = ((maxId - minId) / 2);
@ -122,14 +122,14 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
result = search.search(q,FieldCacheRangeFilter.newStringRange("id",medIP,medIP,T,T), numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
search.close();
}
@Test
public void testFieldCacheRangeFilterRand() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
@ -185,6 +185,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
assertEquals("max,max,T,T", 1, result.length);
result = search.search(q,FieldCacheRangeFilter.newStringRange("rand",maxRP,null,T,F), numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
search.close();
}
// byte-ranges cannot be tested, because all ranges are too big for bytes, need an extra range for that
@ -193,7 +194,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterShorts() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int numDocs = reader.numDocs();
int medId = ((maxId - minId) / 2);
@ -277,13 +278,14 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
assertEquals("overflow special case", 0, result.length);
result = search.search(q,FieldCacheRangeFilter.newShortRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
assertEquals("inverse range", 0, result.length);
search.close();
}
@Test
public void testFieldCacheRangeFilterInts() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int numDocs = reader.numDocs();
int medId = ((maxId - minId) / 2);
@ -368,13 +370,14 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
assertEquals("overflow special case", 0, result.length);
result = search.search(q,FieldCacheRangeFilter.newIntRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
assertEquals("inverse range", 0, result.length);
search.close();
}
@Test
public void testFieldCacheRangeFilterLongs() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int numDocs = reader.numDocs();
int medId = ((maxId - minId) / 2);
@ -459,6 +462,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
assertEquals("overflow special case", 0, result.length);
result = search.search(q,FieldCacheRangeFilter.newLongRange("id",maxIdO,minIdO,T,T), numDocs).scoreDocs;
assertEquals("inverse range", 0, result.length);
search.close();
}
// float and double tests are a bit minimalistic, but its complicated, because missing precision
@ -467,7 +471,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterFloats() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int numDocs = reader.numDocs();
Float minIdO = Float.valueOf(minId + .5f);
@ -490,13 +494,14 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
assertEquals("infinity special case", 0, result.length);
result = search.search(q,FieldCacheRangeFilter.newFloatRange("id",null,Float.valueOf(Float.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
assertEquals("infinity special case", 0, result.length);
search.close();
}
@Test
public void testFieldCacheRangeFilterDoubles() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int numDocs = reader.numDocs();
Double minIdO = Double.valueOf(minId + .5);
@ -519,6 +524,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
assertEquals("infinity special case", 0, result.length);
result = search.search(q,FieldCacheRangeFilter.newDoubleRange("id",null, Double.valueOf(Double.NEGATIVE_INFINITY),F,F), numDocs).scoreDocs;
assertEquals("infinity special case", 0, result.length);
search.close();
}
// test using a sparse index (with deleted docs).
@ -539,7 +545,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
writer.close();
IndexReader reader = IndexReader.open(dir, true);
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
assertTrue(reader.hasDeletions());
ScoreDoc[] result;
@ -559,6 +565,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
assertEquals("find all", 11, result.length);
search.close();
reader.close();
dir.close();
}

View File

@ -47,7 +47,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase {
IndexReader reader = w.getReader();
w.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
int numDocs = reader.numDocs();
ScoreDoc[] results;
MatchAllDocsQuery q = new MatchAllDocsQuery();
@ -68,6 +68,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase {
results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs;
assertEquals("Must match 2", 2, results.length);
searcher.close();
reader.close();
rd.close();
}

View File

@ -81,7 +81,7 @@ public class TestFilteredQuery extends LuceneTestCase {
reader = writer.getReader();
writer.close ();
searcher = new IndexSearcher (reader);
searcher = newSearcher(reader);
query = new TermQuery (new Term ("field", "three"));
filter = newStaticFilterB();
}

View File

@ -50,7 +50,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("ddddd", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
writer.close();
FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0);
@ -198,7 +198,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("segment", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
writer.close();
FuzzyQuery query;
@ -309,7 +309,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
IndexReader ir2 = writer2.getReader();
MultiReader mr = new MultiReader(ir1, ir2);
IndexSearcher searcher = new IndexSearcher(mr);
IndexSearcher searcher = newSearcher(mr);
FuzzyQuery fq = new FuzzyQuery(new Term("field", "z123456"), 1f, 0, 2);
TopDocs docs = searcher.search(fq, 2);
assertEquals(5, docs.totalHits); // 5 docs, from the a and b's
@ -330,7 +330,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("segment", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
writer.close();
Query query;
@ -368,7 +368,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("Lucenne", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
writer.close();
FuzzyQuery query = new FuzzyQuery(new Term("field", "lucene"));
@ -413,7 +413,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
Query q = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer).parse( "giga~0.9" );
// 3. search
IndexSearcher searcher = new IndexSearcher(r);
IndexSearcher searcher = newSearcher(r);
ScoreDoc[] hits = searcher.search(q, 10).scoreDocs;
assertEquals(1, hits.length);
assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field"));
@ -435,7 +435,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("test", w);
addDoc("working", w);
IndexReader reader = w.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
w.close();
QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer());

View File

@ -91,7 +91,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase {
}
IndexReader r = writer.getReader();
IndexSearcher searcher = new IndexSearcher(r);
IndexSearcher searcher = newSearcher(r);
writer.close();
String line;
while ((line = reader.readLine()) != null) {

View File

@ -47,7 +47,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase {
iw.close();
IndexReader ir = IndexReader.open(dir, false);
IndexSearcher is = new IndexSearcher(ir);
IndexSearcher is = newSearcher(ir);
ScoreDoc[] hits;
// assert with norms scoring turned off
@ -93,7 +93,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase {
assertEquals(1, hits.length);
// delete a document:
is.getIndexReader().deleteDocument(0);
ir.deleteDocument(0);
hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs;
assertEquals(2, hits.length);

View File

@ -53,7 +53,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
add("piccadilly circus", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// search for "blueberry pi*":
MultiPhraseQuery query1 = new MultiPhraseQuery();
@ -142,12 +142,13 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
IndexReader r = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(r);
IndexSearcher searcher = newSearcher(r);
MultiPhraseQuery q = new MultiPhraseQuery();
q.add(new Term("body", "blueberry"));
q.add(new Term("body", "chocolate"));
q.add(new Term[] {new Term("body", "pie"), new Term("body", "tart")});
assertEquals(2, searcher.search(q, 1).totalHits);
searcher.close();
r.close();
indexStore.close();
}
@ -171,7 +172,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
add("blue raspberry pie", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// This query will be equivalent to +body:pie +body:"blue*"
BooleanQuery q = new BooleanQuery();
q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);
@ -202,7 +203,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
add("a note", "note", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// This query will be equivalent to +type:note +body:"a t*"
BooleanQuery q = new BooleanQuery();
@ -229,7 +230,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
add("a note", "note", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
MultiPhraseQuery q = new MultiPhraseQuery();
q.add(new Term("body", "a"));
@ -294,7 +295,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {
add("a note", "note", writer);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
searcher.setSimilarityProvider(new DefaultSimilarity() {
@Override

View File

@ -151,7 +151,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
ScoreDoc[] result;
@ -174,13 +174,14 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
result[i].score);
}
search.close();
}
@Test
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
// test for correct application of query normalization
// must use a non score normalizing method for this.
@ -246,13 +247,14 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
Assert.assertEquals(0, hits[0].doc);
Assert.assertEquals(1, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
search.close();
}
@Test
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
// docs with more terms in range get higher scores
@ -277,6 +279,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
actual[i].doc);
}
search.close();
}
@Test
@ -284,7 +287,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int medId = ((maxId - minId) / 2);
@ -405,6 +408,8 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT), null, numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
search.close();
}
@Test
@ -412,7 +417,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int medId = ((maxId - minId) / 2);
@ -489,6 +494,8 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
result = search.search(csrq("id", medIP, medIP, T, T, c), null, numDocs).scoreDocs;
assertEquals("med,med,T,T,c", 1, result.length);
search.close();
}
@Test
@ -496,7 +503,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
// NOTE: uses index build in *super* setUp
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
@ -552,6 +559,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
result = search.search(csrq("rand", maxRP, null, T, F), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
search.close();
}
@Test
@ -560,7 +568,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
String minRP = pad(unsignedIndexDir.minR);
String maxRP = pad(unsignedIndexDir.maxR);
@ -617,6 +625,8 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
assertEquals("max,max,T,T,c", 1, result.length);
result = search.search(csrq("rand", maxRP, null, T, F, c), null, numDocs).scoreDocs;
assertEquals("max,nul,T,T,c", 1, result.length);
search.close();
}
@Test
@ -636,7 +646,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
@ -681,7 +691,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));

View File

@ -61,17 +61,17 @@ public class TestMultiTermQueryRewrites extends LuceneTestCase {
writer.close(); swriter1.close(); swriter2.close();
reader = IndexReader.open(dir, true);
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
multiReader = new MultiReader(new IndexReader[] {
IndexReader.open(sdir1, true), IndexReader.open(sdir2, true)
}, true);
multiSearcher = new IndexSearcher(multiReader);
multiSearcher = newSearcher(multiReader);
multiReaderDupls = new MultiReader(new IndexReader[] {
IndexReader.open(sdir1, true), IndexReader.open(dir, true)
}, true);
multiSearcherDupls = new IndexSearcher(multiReaderDupls);
multiSearcherDupls = newSearcher(multiReaderDupls);
}
@AfterClass

View File

@ -59,7 +59,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher=new IndexSearcher(reader);
IndexSearcher searcher=newSearcher(reader);
num = 50 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
int lower=random.nextInt(Integer.MAX_VALUE);

View File

@ -44,7 +44,7 @@ public class TestNot extends LuceneTestCase {
writer.addDocument(d1);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer());
Query query = parser.parse("a NOT b");
//System.out.println(query);

View File

@ -89,7 +89,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
}
reader = writer.getReader();
searcher=new IndexSearcher(reader);
searcher=newSearcher(reader);
writer.close();
}

View File

@ -89,7 +89,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
writer.addDocument(doc);
}
reader = writer.getReader();
searcher=new IndexSearcher(reader);
searcher=newSearcher(reader);
writer.close();
}

View File

@ -65,7 +65,7 @@ public class TestPhrasePrefixQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// PhrasePrefixQuery query1 = new PhrasePrefixQuery();
MultiPhraseQuery query1 = new MultiPhraseQuery();

View File

@ -86,7 +86,7 @@ public class TestPhraseQuery extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
query = new PhraseQuery();
}
@ -221,7 +221,7 @@ public class TestPhraseQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// valid exact phrase query
PhraseQuery query = new PhraseQuery();
@ -262,7 +262,7 @@ public class TestPhraseQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term("source", "marketing"));
@ -301,7 +301,7 @@ public class TestPhraseQuery extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
termQuery = new TermQuery(new Term("contents","woo"));
phraseQuery = new PhraseQuery();
@ -352,7 +352,7 @@ public class TestPhraseQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "firstname"));
query.add(new Term("field", "lastname"));
@ -649,7 +649,7 @@ public class TestPhraseQuery extends LuceneTestCase {
}
IndexReader reader = w.getReader();
IndexSearcher s = new IndexSearcher(reader);
IndexSearcher s = newSearcher(reader);
w.close();
// now search

View File

@ -96,7 +96,7 @@ public class TestPositionIncrement extends LuceneTestCase {
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
DocsAndPositionsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
MultiFields.getDeletedDocs(searcher.getIndexReader()),
@ -264,7 +264,7 @@ public class TestPositionIncrement extends LuceneTestCase {
// only one doc has "a"
assertEquals(DocsAndPositionsEnum.NO_MORE_DOCS, tp.nextDoc());
IndexSearcher is = new IndexSearcher(readerFromWriter);
IndexSearcher is = newSearcher(readerFromWriter);
SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));

View File

@ -74,7 +74,7 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
writer.commit();
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Weight fake = new TermQuery(new Term("fake", "weight")).createWeight(searcher);
Scorer s = new SimpleScorer(fake);
TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(scores.length, true);

View File

@ -48,7 +48,7 @@ public class TestPrefixFilter extends LuceneTestCase {
// PrefixFilter combined with ConstantScoreQuery
PrefixFilter filter = new PrefixFilter(new Term("category", "/Computers"));
Query query = new ConstantScoreQuery(filter);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(4, hits.length);

View File

@ -75,7 +75,7 @@ public class TestPrefixInBooleanQuery extends LuceneTestCase {
}
reader = writer.getReader();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
writer.close();
}

View File

@ -47,7 +47,7 @@ public class TestPrefixQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
PrefixQuery query = new PrefixQuery(new Term("category", "/Computers"));
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("All documents in /Computers category and below", 3, hits.length);

View File

@ -65,7 +65,7 @@ public class TestPrefixRandom extends LuceneTestCase {
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
writer.close();
}

View File

@ -43,7 +43,7 @@ public class TestQueryWrapperFilter extends LuceneTestCase {
// should not throw exception with primitive query
QueryWrapperFilter qwf = new QueryWrapperFilter(termQuery);
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), qwf, 10);
assertEquals(1, hits.totalHits);
hits = searcher.search(new MatchAllDocsQuery(), new CachingWrapperFilter(qwf), 10);

View File

@ -54,7 +54,7 @@ public class TestRegexpQuery extends LuceneTestCase {
writer.addDocument(doc);
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override

View File

@ -62,7 +62,7 @@ public class TestRegexpRandom extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
private char N() {

View File

@ -82,7 +82,7 @@ public class TestRegexpRandom2 extends LuceneTestCase {
}
reader = writer.getReader();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
writer.close();
}

View File

@ -104,7 +104,7 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase {
writer.commit();
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
IndexSearcher searcher = newSearcher(ir);
Weight fake = new TermQuery(new Term("fake", "weight")).createWeight(searcher);
Scorer s = new SimpleScorer(fake);
ScoreCachingCollector scc = new ScoreCachingCollector(scores.length);

View File

@ -63,7 +63,7 @@ public class TestSearchWithThreads extends LuceneTestCase {
final long endTime = System.currentTimeMillis();
if (VERBOSE) System.out.println("BUILD took " + (endTime-startTime));
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = newSearcher(r);
final AtomicBoolean failed = new AtomicBoolean();
final AtomicLong netSearch = new AtomicLong();

View File

@ -80,7 +80,7 @@ public class TestSimilarity extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
searcher.setSimilarityProvider(new SimpleSimilarity());
Term a = new Term("field", "a");

View File

@ -56,7 +56,7 @@ public class TestSimilarityProvider extends LuceneTestCase {
iw.addDocument(doc);
reader = iw.getReader();
iw.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
searcher.setSimilarityProvider(sim);
}

View File

@ -121,7 +121,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TopDocs td = searcher.search(query,null,10);
//System.out.println("slop: "+slop+" query: "+query+" doc: "+doc+" Expecting number of hits: "+expectedNumResults+" maxScore="+td.getMaxScore());
assertEquals("slop: "+slop+" query: "+query+" doc: "+doc+" Wrong number of hits", expectedNumResults, td.totalHits);

View File

@ -144,7 +144,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
}
IndexReader reader = writer.getReader();
writer.close ();
IndexSearcher s = new IndexSearcher (reader);
IndexSearcher s = newSearcher(reader);
s.setDefaultFieldSortScoring(true, true);
return s;
}
@ -1061,12 +1061,13 @@ public class TestSort extends LuceneTestCase implements Serializable {
IndexReader r = IndexReader.open(w, true);
w.close();
IndexSearcher s = new IndexSearcher(r);
IndexSearcher s = newSearcher(r);
TopDocs hits = s.search(new TermQuery(new Term("t", "1")), null, 10, new Sort(new SortField("f", SortField.STRING)));
assertEquals(2, hits.totalHits);
// null sorts first
assertEquals(1, hits.scoreDocs[0].doc);
assertEquals(0, hits.scoreDocs[1].doc);
s.close();
r.close();
dir.close();
}
@ -1105,10 +1106,11 @@ public class TestSort extends LuceneTestCase implements Serializable {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TotalHitCountCollector c = new TotalHitCountCollector();
searcher.search(new MatchAllDocsQuery(), null, c);
assertEquals(5, c.getTotalHits());
searcher.close();
reader.close();
indexStore.close();
}

View File

@ -54,7 +54,7 @@ public class TestSubScorerFreqs extends LuceneTestCase {
w.addDocument(doc);
}
s = new IndexSearcher(w.getReader());
s = newSearcher(w.getReader());
w.close();
}

View File

@ -44,7 +44,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
public void testRangeFilterId() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
int medId = ((maxId - minId) / 2);
@ -141,13 +141,14 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
search.close();
}
@Test
public void testRangeFilterIdCollating() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
Collator c = Collator.getInstance(Locale.ENGLISH);
@ -243,13 +244,15 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
numHits = search.search(q,
new TermRangeFilter("id", medIP, medIP, T, T, c), 1000).totalHits;
assertEquals("med,med,T,T", 1, numHits);
search.close();
}
@Test
public void testRangeFilterRand() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
@ -320,6 +323,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
search.close();
}
@Test
@ -327,7 +331,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
Collator c = Collator.getInstance(Locale.ENGLISH);
@ -398,6 +402,8 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
numHits = search.search(q,
new TermRangeFilter("rand", maxRP, null, T, F, c), 1000).totalHits;
assertEquals("max,nul,T,T", 1, numHits);
search.close();
}
@Test
@ -417,7 +423,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
Query q = new TermQuery(new Term("body", "body"));
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
@ -461,7 +467,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher search = new IndexSearcher(reader);
IndexSearcher search = newSearcher(reader);
Query q = new TermQuery(new Term("body", "body"));
Collator collator = Collator.getInstance(new Locale("da", "dk"));

View File

@ -57,7 +57,7 @@ public class TestTermScorer extends LuceneTestCase {
}
indexReader = new SlowMultiReaderWrapper(writer.getReader());
writer.close();
indexSearcher = new IndexSearcher(indexReader);
indexSearcher = newSearcher(indexReader);
}
@Override

View File

@ -71,7 +71,7 @@ public class TestTermVectors extends LuceneTestCase {
}
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override
@ -246,7 +246,7 @@ public class TestTermVectors extends LuceneTestCase {
writer.addDocument(testDoc4);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher knownSearcher = new IndexSearcher(reader);
IndexSearcher knownSearcher = newSearcher(reader);
FieldsEnum fields = MultiFields.getFields(knownSearcher.reader).iterator();
DocsEnum docs = null;
@ -378,7 +378,7 @@ public class TestTermVectors extends LuceneTestCase {
}
IndexReader reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
Query query = new TermQuery(new Term("field", "hundred"));
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@ -414,7 +414,7 @@ public class TestTermVectors extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
Query query = new TermQuery(new Term("field", "one"));
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;

View File

@ -82,7 +82,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
}
reader = iw.getReader();
iw.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
String qtxt = "one";
// start from 1, so that the 0th doc never matches

View File

@ -95,7 +95,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
private TopDocsCollector<ScoreDoc> doSearch(int numResults) throws IOException {
Query q = new MatchAllDocsQuery();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(numResults);
searcher.search(q, tdc);
searcher.close();

View File

@ -47,7 +47,7 @@ public class TestTopScoreDocCollector extends LuceneTestCase {
// the clause instead of BQ.
bq.setMinimumNumberShouldMatch(1);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
for (int i = 0; i < inOrder.length; i++) {
TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(3, inOrder[i]);
assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName());

View File

@ -61,7 +61,7 @@ public class TestWildcardRandom extends LuceneTestCase {
}
reader = writer.getReader();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
writer.close();
}

View File

@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SimilarityProvider;
@ -129,7 +130,7 @@ public class PayloadHelper {
reader = IndexReader.open(writer, true);
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = LuceneTestCase.newSearcher(reader);
searcher.setSimilarityProvider(similarity);
return searcher;
}

View File

@ -117,7 +117,7 @@ public class TestPayloadNearQuery extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
searcher.setSimilarityProvider(similarity);
}

View File

@ -124,7 +124,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
searcher.setSimilarityProvider(similarity);
}

View File

@ -77,7 +77,7 @@ public class TestBasics extends LuceneTestCase {
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
writer.close();
}

View File

@ -112,7 +112,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
field("last", "jones") }));
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override

View File

@ -65,7 +65,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
}
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
protected String[] docFields = {

View File

@ -178,6 +178,7 @@ public class TestPayloadSpans extends LuceneTestCase {
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), nestedSpanNearQuery);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 2, new int[]{3,3});
searcher.close();
closeIndexReader.close();
directory.close();
}
@ -210,6 +211,7 @@ public class TestPayloadSpans extends LuceneTestCase {
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 1, new int[]{3});
searcher.close();
closeIndexReader.close();
directory.close();
}
@ -247,6 +249,7 @@ public class TestPayloadSpans extends LuceneTestCase {
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), nestedSpanNearQuery);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 2, new int[]{8, 8});
searcher.close();
closeIndexReader.close();
directory.close();
}
@ -262,7 +265,7 @@ public class TestPayloadSpans extends LuceneTestCase {
writer.addDocument(doc);
IndexReader reader = writer.getReader();
IndexSearcher is = new IndexSearcher(reader);
IndexSearcher is = newSearcher(reader);
writer.close();
SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
@ -285,6 +288,7 @@ public class TestPayloadSpans extends LuceneTestCase {
assertEquals(2, payloadSet.size());
assertTrue(payloadSet.contains("a:Noise:10"));
assertTrue(payloadSet.contains("k:Noise:11"));
is.close();
reader.close();
directory.close();
}
@ -299,7 +303,7 @@ public class TestPayloadSpans extends LuceneTestCase {
doc.add(new Field("content", new StringReader("a b a d k f a h i k a k")));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
IndexSearcher is = new IndexSearcher(reader);
IndexSearcher is = newSearcher(reader);
writer.close();
SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
@ -321,6 +325,7 @@ public class TestPayloadSpans extends LuceneTestCase {
assertEquals(2, payloadSet.size());
assertTrue(payloadSet.contains("a:Noise:10"));
assertTrue(payloadSet.contains("k:Noise:11"));
is.close();
reader.close();
directory.close();
}
@ -335,7 +340,7 @@ public class TestPayloadSpans extends LuceneTestCase {
doc.add(new Field("content", new StringReader("j k a l f k k p a t a k l k t a")));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
IndexSearcher is = new IndexSearcher(reader);
IndexSearcher is = newSearcher(reader);
writer.close();
SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
@ -363,6 +368,7 @@ public class TestPayloadSpans extends LuceneTestCase {
}
assertTrue(payloadSet.contains("a:Noise:10"));
assertTrue(payloadSet.contains("k:Noise:11"));
is.close();
reader.close();
directory.close();
}
@ -378,7 +384,7 @@ public class TestPayloadSpans extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getTopReaderContext());
@ -389,6 +395,7 @@ public class TestPayloadSpans extends LuceneTestCase {
if(VERBOSE)
System.out.println(new String(bytes));
}
searcher.close();
reader.close();
directory.close();
}
@ -443,7 +450,7 @@ public class TestPayloadSpans extends LuceneTestCase {
closeIndexReader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(closeIndexReader);
IndexSearcher searcher = newSearcher(closeIndexReader);
return searcher;
}

View File

@ -48,7 +48,7 @@ public class TestSpanFirstQuery extends LuceneTestCase {
writer.addDocument(doc2);
IndexReader reader = writer.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// user queries on "starts-with quick"
SpanQuery sfq = new SpanFirstQuery(new SpanTermQuery(new Term("field", "quick")), 1);

View File

@ -53,7 +53,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase {
iw.addDocument(doc);
reader = iw.getReader();
iw.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override

View File

@ -61,7 +61,7 @@ public class TestSpans extends LuceneTestCase {
}
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override
@ -486,7 +486,7 @@ public class TestSpans extends LuceneTestCase {
// Get searcher
final IndexReader reader = IndexReader.open(dir, true);
final IndexSearcher searcher = new IndexSearcher(reader);
final IndexSearcher searcher = newSearcher(reader);
// Control (make sure docs indexed)
assertEquals(2, hitCount(searcher, "the"));
@ -499,6 +499,7 @@ public class TestSpans extends LuceneTestCase {
searcher.search(createSpan(0, true,
new SpanQuery[] {createSpan(4, false, "chased", "cat"),
createSpan("ate")}), 10).totalHits);
searcher.close();
reader.close();
dir.close();
}

View File

@ -66,7 +66,7 @@ public class TestSpansAdvanced extends LuceneTestCase {
addDocument(writer, "4", "I think it should work.");
reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
searcher = newSearcher(reader);
}
@Override

View File

@ -57,7 +57,7 @@ public class TestSpansAdvanced2 extends TestSpansAdvanced {
writer.close();
// re-open the searcher since we added more docs
searcher2 = new IndexSearcher(reader2);
searcher2 = newSearcher(reader2);
}
@Override

View File

@ -271,7 +271,7 @@ public class TestBufferedIndexInput extends LuceneTestCase {
assertEquals(reader.docFreq(bbb), 37);
dir.tweakBufferSizes();
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(bbb), null, 1000).scoreDocs;
dir.tweakBufferSizes();
assertEquals(35, hits.length);

View File

@ -82,7 +82,7 @@ public class TestRAMDirectory extends LuceneTestCase {
assertEquals(docsToAdd, reader.numDocs());
// open search zo check if all doc's are there
IndexSearcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = newSearcher(reader);
// search for all documents
for (int i = 0; i < docsToAdd; i++) {

Some files were not shown because too many files have changed in this diff Show More