LUCENE-3571: nuke IndexSearcher(Directory)

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1202657 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Muir 2011-11-16 12:19:41 +00:00
parent 5408d4d36c
commit 598920d7bd
52 changed files with 397 additions and 189 deletions

View File

@ -741,6 +741,9 @@ API Changes
* LUCENE-3574: Deprecate outdated constants in org.apache.lucene.util.Constants
and add new ones for Java 6 and Java 7. (Uwe Schindler)
* LUCENE-3571: Deprecate IndexSearcher(Directory). Use the constructors
that take IndexReader instead. (Robert Muir)
New Features
* LUCENE-3448: Added FixedBitSet.and(other/DISI), andNot(other/DISI).

View File

@ -27,6 +27,7 @@ import java.util.Date;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@ -85,7 +86,8 @@ public class SearchFiles {
}
}
IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(index)));
IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)));
IndexSearcher searcher = new IndexSearcher(reader);
Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
BufferedReader in = null;
@ -130,6 +132,7 @@ public class SearchFiles {
}
}
searcher.close();
reader.close();
}
/**

View File

@ -38,6 +38,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.xml.CorePlusExtensionsParser;
@ -145,6 +146,8 @@ public class FormBasedXmlQueryDemo extends HttpServlet {
writer.close();
//open searcher
searcher = new IndexSearcher(rd, true);
// this example never closes its reader!
IndexReader reader = IndexReader.open(rd);
searcher = new IndexSearcher(reader);
}
}

View File

@ -87,7 +87,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
phraseQuery.add(new Term(FIELD_NAME, "long"));
query = phraseQuery;
searcher = new IndexSearcher(ramDir, true);
searcher = new IndexSearcher(reader);
TopDocs hits = searcher.search(query, 10);
QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
@ -330,7 +330,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void testSpanRegexQuery() throws Exception {
query = new SpanOrQuery(new SpanMultiTermQueryWrapper<RegexpQuery>(new RegexpQuery(new Term(FIELD_NAME, "ken.*"))));
searcher = new IndexSearcher(ramDir, true);
searcher = new IndexSearcher(reader);
hits = searcher.search(query, 100);
int maxNumFragmentsRequired = 2;
@ -354,7 +354,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void testRegexQuery() throws Exception {
query = new RegexpQuery(new Term(FIELD_NAME, "ken.*"));
searcher = new IndexSearcher(ramDir, true);
searcher = new IndexSearcher(reader);
hits = searcher.search(query, 100);
int maxNumFragmentsRequired = 2;
@ -379,7 +379,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void testNumericRangeQuery() throws Exception {
// doesn't currently highlight, but make sure it doesn't cause exception either
query = NumericRangeQuery.newIntRange(NUMERIC_FIELD_NAME, 2, 6, true, true);
searcher = new IndexSearcher(ramDir, true);
searcher = new IndexSearcher(reader);
hits = searcher.search(query, 100);
int maxNumFragmentsRequired = 2;
@ -754,7 +754,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
query = new WildcardQuery(new Term(FIELD_NAME, "ken*"));
((WildcardQuery)query).setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
searcher = new IndexSearcher(ramDir, true);
searcher = new IndexSearcher(reader);
// can't rewrite ConstantScore if you want to highlight it -
// it rewrites to ConstantScoreQuery which cannot be highlighted
// query = unReWrittenQuery.rewrite(reader);
@ -1272,7 +1272,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
numHighlights = 0;
// test to show how rewritten query can still be used
if (searcher != null) searcher.close();
searcher = new IndexSearcher(ramDir, true);
searcher = new IndexSearcher(reader);
Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
BooleanQuery query = new BooleanQuery();
@ -1649,7 +1649,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
private void searchIndex() throws IOException, InvalidTokenOffsetsException {
Query query = new TermQuery(new Term("t_text1", "random"));
IndexSearcher searcher = new IndexSearcher( dir, true );
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
// This scorer can return negative idf -> null fragment
Scorer scorer = new QueryTermScorer( query, searcher.getIndexReader(), "t_text1" );
// This scorer doesn't use idf (patch version)
@ -1664,6 +1665,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
assertEquals("more <B>random</B> words for second field", result);
}
searcher.close();
reader.close();
}
/*
@ -1702,7 +1704,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void doSearching(Query unReWrittenQuery) throws Exception {
if (searcher != null) searcher.close();
searcher = new IndexSearcher(ramDir, true);
searcher = new IndexSearcher(reader);
// for any multi-term queries to work (prefix, wildcard, range,fuzzy etc)
// you must use a rewritten query!
query = unReWrittenQuery.rewrite(reader);

View File

@ -32,6 +32,7 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsFormat;
@ -128,7 +129,8 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
* Run all queries against both the RAMDirectory and MemoryIndex, ensuring they are the same.
*/
public void assertAllQueries(MemoryIndex memory, Directory ramdir, Analyzer analyzer) throws Exception {
IndexSearcher ram = new IndexSearcher(ramdir);
IndexReader reader = IndexReader.open(ramdir);
IndexSearcher ram = new IndexSearcher(reader);
IndexSearcher mem = memory.createSearcher();
QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "foo", analyzer);
for (String query : queries) {
@ -137,6 +139,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
assertEquals(ramDocs.totalHits, memDocs.totalHits);
}
ram.close();
reader.close();
mem.close();
}

View File

@ -127,7 +127,8 @@ public class TestFieldNormModifier extends LuceneTestCase {
public void testGoodCases() throws Exception {
IndexSearcher searcher = new IndexSearcher(store, true);
IndexReader reader = IndexReader.open(store);
IndexSearcher searcher = new IndexSearcher(reader);
final float[] scores = new float[NUM_DOCS];
float lastScore = 0.0f;
@ -154,6 +155,7 @@ public class TestFieldNormModifier extends LuceneTestCase {
}
});
searcher.close();
reader.close();
lastScore = Float.MAX_VALUE;
for (int i = 0; i < NUM_DOCS; i++) {
@ -167,7 +169,8 @@ public class TestFieldNormModifier extends LuceneTestCase {
fnm.reSetNorms("field");
// new norm (with default similarity) should put longer docs first
searcher = new IndexSearcher(store, true);
reader = IndexReader.open(store);
searcher = new IndexSearcher(reader);
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
private int docBase = 0;
private Scorer scorer;
@ -189,6 +192,7 @@ public class TestFieldNormModifier extends LuceneTestCase {
}
});
searcher.close();
reader.close();
lastScore = 0.0f;
for (int i = 0; i < NUM_DOCS; i++) {
@ -215,7 +219,8 @@ public class TestFieldNormModifier extends LuceneTestCase {
// verify that we still get documents in the same order as originally
IndexSearcher searcher = new IndexSearcher(store, true);
IndexReader reader = IndexReader.open(store);
IndexSearcher searcher = new IndexSearcher(reader);
final float[] scores = new float[NUM_DOCS];
float lastScore = 0.0f;
@ -241,6 +246,7 @@ public class TestFieldNormModifier extends LuceneTestCase {
}
});
searcher.close();
reader.close();
lastScore = scores[0];
for (int i = 0; i < NUM_DOCS; i++) {

View File

@ -135,7 +135,8 @@ public class TestLengthNormModifier extends LuceneTestCase {
float lastScore = 0.0f;
// default similarity should put docs with shorter length first
searcher = new IndexSearcher(store, false);
IndexReader reader = IndexReader.open(store, false);
searcher = new IndexSearcher(reader);
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
private int docBase = 0;
private Scorer scorer;
@ -157,6 +158,7 @@ public class TestLengthNormModifier extends LuceneTestCase {
}
});
searcher.close();
reader.close();
lastScore = Float.MAX_VALUE;
for (int i = 0; i < NUM_DOCS; i++) {
@ -183,7 +185,8 @@ public class TestLengthNormModifier extends LuceneTestCase {
fnm.reSetNorms("field");
// new norm (with default similarity) should put longer docs first
searcher = new IndexSearcher(store, false);
reader = IndexReader.open(store, false);
searcher = new IndexSearcher(reader);
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
private int docBase = 0;
private Scorer scorer;
@ -205,6 +208,7 @@ public class TestLengthNormModifier extends LuceneTestCase {
}
});
searcher.close();
reader.close();
lastScore = 0.0f;
for (int i = 0; i < NUM_DOCS; i++) {

View File

@ -24,6 +24,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@ -71,7 +72,8 @@ public class TestSpanRegexQuery extends LuceneTestCase {
writer.forceMerge(1);
writer.close();
IndexSearcher searcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
IndexSearcher searcher = new IndexSearcher(reader);
SpanQuery srq = new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery(new Term("field", "aut.*")));
SpanFirstQuery sfq = new SpanFirstQuery(srq, 1);
// SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {srq, stq}, 6,
@ -79,6 +81,7 @@ public class TestSpanRegexQuery extends LuceneTestCase {
int numHits = searcher.search(sfq, null, 1000).totalHits;
assertEquals(1, numHits);
searcher.close();
reader.close();
directory.close();
}

View File

@ -26,6 +26,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
@ -191,7 +192,8 @@ public class TestCartesian extends LuceneTestCase {
public void testAntiM() throws IOException, InvalidGeoException {
searcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
searcher = new IndexSearcher(reader);
final double miles = 2800.0;
// Hawaii
@ -259,10 +261,12 @@ public class TestCartesian extends LuceneTestCase {
lastDistance = geo_distance;
}
searcher.close();
reader.close();
}
public void testPoleFlipping() throws IOException, InvalidGeoException {
searcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
searcher = new IndexSearcher(reader);
final double miles = 3500.0;
lat = 41.6032207;
@ -329,10 +333,12 @@ public class TestCartesian extends LuceneTestCase {
lastDistance = geo_distance;
}
searcher.close();
reader.close();
}
public void testRange() throws IOException, InvalidGeoException {
searcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
searcher = new IndexSearcher(reader);
final double[] milesToTest = new double[] {6.0, 0.5, 0.001, 0.0};
final int[] expected = new int[] {7, 1, 0, 0};
@ -399,12 +405,14 @@ public class TestCartesian extends LuceneTestCase {
}
}
searcher.close();
reader.close();
}
public void testGeoHashRange() throws IOException, InvalidGeoException {
searcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
searcher = new IndexSearcher(reader);
final double[] milesToTest = new double[] {6.0, 0.5, 0.001, 0.0};
final int[] expected = new int[] {7, 1, 0, 0};
@ -469,5 +477,6 @@ public class TestCartesian extends LuceneTestCase {
}
}
searcher.close();
reader.close();
}
}

View File

@ -41,7 +41,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
import org.apache.lucene.search.similarities.SimilarityProvider;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NIOFSDirectory; // javadoc
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@ -76,7 +75,6 @@ import org.apache.lucene.util.ThreadInterruptedException;
*/
public class IndexSearcher implements Closeable {
final IndexReader reader; // package private for testing!
private boolean closeReader;
// NOTE: these members might change in incompatible ways
// in the next release
@ -105,34 +103,9 @@ public class IndexSearcher implements Closeable {
/** The SimilarityProvider implementation used by this searcher. */
private SimilarityProvider similarityProvider = defaultProvider;
/** Creates a searcher searching the index in the named
* directory, with readOnly=true
* @param path directory where IndexReader will be opened
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public IndexSearcher(Directory path) throws CorruptIndexException, IOException {
this(IndexReader.open(path, true), true, null);
}
/** Creates a searcher searching the index in the named
* directory. You should pass readOnly=true, since it
* gives much better concurrent performance, unless you
* intend to do write operations (delete documents or
* change norms) with the underlying IndexReader.
* @param path directory where IndexReader will be opened
* @param readOnly if true, the underlying IndexReader
* will be opened readOnly
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public IndexSearcher(Directory path, boolean readOnly) throws CorruptIndexException, IOException {
this(IndexReader.open(path, readOnly), true, null);
}
/** Creates a searcher searching the provided index. */
public IndexSearcher(IndexReader r) {
this(r, false, null);
this(r, null);
}
/** Runs searches for each segment separately, using the
@ -147,7 +120,7 @@ public class IndexSearcher implements Closeable {
*
* @lucene.experimental */
public IndexSearcher(IndexReader r, ExecutorService executor) {
this(r, false, executor);
this(r.getTopReaderContext(), executor);
}
/**
@ -167,7 +140,12 @@ public class IndexSearcher implements Closeable {
* @lucene.experimental
*/
public IndexSearcher(ReaderContext context, ExecutorService executor) {
this(context, false, executor);
assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader;
reader = context.reader;
this.executor = executor;
this.readerContext = context;
leafContexts = ReaderUtil.leaves(context);
this.leafSlices = executor == null ? null : slices(leafContexts);
}
/**
@ -178,22 +156,7 @@ public class IndexSearcher implements Closeable {
* @lucene.experimental
*/
public IndexSearcher(ReaderContext context) {
this(context, (ExecutorService) null);
}
// convenience ctor for other IR based ctors
private IndexSearcher(IndexReader reader, boolean closeReader, ExecutorService executor) {
this(reader.getTopReaderContext(), closeReader, executor);
}
private IndexSearcher(ReaderContext context, boolean closeReader, ExecutorService executor) {
assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader;
reader = context.reader;
this.executor = executor;
this.closeReader = closeReader;
this.readerContext = context;
leafContexts = ReaderUtil.leaves(context);
this.leafSlices = executor == null ? null : slices(leafContexts);
this(context, null);
}
/**
@ -236,17 +199,8 @@ public class IndexSearcher implements Closeable {
return similarityProvider;
}
/**
* Note that the underlying IndexReader is not closed, if
* IndexSearcher was constructed with IndexSearcher(IndexReader r).
* If the IndexReader was supplied implicitly by specifying a directory, then
* the IndexReader is closed.
*/
@Override
public void close() throws IOException {
if (closeReader) {
reader.close();
}
}
/** @lucene.internal */

View File

@ -61,7 +61,8 @@ to check if the results are what we expect):</p>
iwriter.close();
// Now search the index:
IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
IndexReader ireader = IndexReader.open(directory); // read-only=true
IndexSearcher isearcher = new IndexSearcher(ireader);
// Parse a simple query that searches for "text":
QueryParser parser = new QueryParser("fieldname", analyzer);
Query query = parser.parse("text");
@ -73,6 +74,7 @@ to check if the results are what we expect):</p>
assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
}
isearcher.close();
ireader.close();
directory.close();</pre>
<!-- = END of automatically generated HTML code = -->
<!-- ======================================================== -->

View File

@ -85,7 +85,8 @@ public abstract class CollationTestBase extends LuceneTestCase {
doc.add(new Field("body", "body", StringField.TYPE_STORED));
writer.addDocument(doc);
writer.close();
IndexSearcher searcher = new IndexSearcher(ramDir, true);
IndexReader reader = IndexReader.open(ramDir);
IndexSearcher searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("body","body"));
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
@ -102,6 +103,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
assertEquals("The index Term should be included.", 1, result.length);
searcher.close();
reader.close();
}
public void testFarsiRangeQueryCollating(Analyzer analyzer, BytesRef firstBeg,
@ -119,7 +121,8 @@ public abstract class CollationTestBase extends LuceneTestCase {
doc.add(new Field("content", "\u0633\u0627\u0628", TextField.TYPE_STORED));
writer.addDocument(doc);
writer.close();
IndexSearcher searcher = new IndexSearcher(ramDir, true);
IndexReader reader = IndexReader.open(ramDir);
IndexSearcher searcher = new IndexSearcher(reader);
Query query = new TermRangeQuery("content", firstBeg, firstEnd, true, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@ -129,6 +132,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, hits.length);
searcher.close();
reader.close();
}
public void testFarsiTermRangeQuery(Analyzer analyzer, BytesRef firstBeg,
@ -218,7 +222,8 @@ public abstract class CollationTestBase extends LuceneTestCase {
}
writer.forceMerge(1);
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
Sort sort = new Sort();
Query queryX = new TermQuery(new Term ("contents", "x"));
@ -235,6 +240,8 @@ public abstract class CollationTestBase extends LuceneTestCase {
sort.setSort(new SortField("Denmark", SortField.Type.STRING));
assertMatches(searcher, queryY, sort, dkResult);
searcher.close();
reader.close();
}
// Make sure the documents returned by the search match the expected list

View File

@ -419,9 +419,6 @@ public class CheckHits {
* @see ExplanationAsserter
*/
public static class ExplanationAssertingSearcher extends IndexSearcher {
public ExplanationAssertingSearcher(Directory d) throws IOException {
super(d, true);
}
public ExplanationAssertingSearcher(IndexReader r) throws IOException {
super(r);
}

View File

@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.*;
@ -53,7 +54,8 @@ public class TestDemo extends LuceneTestCase {
iwriter.close();
// Now search the index:
IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
IndexReader ireader = IndexReader.open(directory); // read-only=true
IndexSearcher isearcher = new IndexSearcher(ireader);
assertEquals(1, isearcher.search(new TermQuery(new Term("fieldname", longTerm)), 1).totalHits);
Query query = new TermQuery(new Term("fieldname", "text"));
@ -72,6 +74,7 @@ public class TestDemo extends LuceneTestCase {
assertEquals(1, isearcher.search(phraseQuery, null, 1).totalHits);
isearcher.close();
ireader.close();
directory.close();
}
}

View File

@ -99,7 +99,8 @@ public class TestSearch extends LuceneTestCase {
}
writer.close();
IndexSearcher searcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = null;
@ -122,6 +123,7 @@ public class TestSearch extends LuceneTestCase {
}
}
searcher.close();
reader.close();
directory.close();
}

View File

@ -98,7 +98,8 @@ public class TestSearchForDuplicates extends LuceneTestCase {
writer.close();
// try a search without OR
IndexSearcher searcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
IndexSearcher searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term(PRIORITY_FIELD, HIGH_PRIORITY));
out.println("Query: " + query.toString(PRIORITY_FIELD));
@ -117,7 +118,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
searcher.close();
// try a new search with OR
searcher = new IndexSearcher(directory, true);
searcher = new IndexSearcher(reader);
hits = null;
BooleanQuery booleanQuery = new BooleanQuery();
@ -130,6 +131,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
checkHits(hits, MAX_DOCS, searcher);
searcher.close();
reader.close();
directory.close();
}

View File

@ -281,8 +281,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
//Query query = parser.parse("handle:1");
Directory dir = newFSDirectory(indexDir);
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = searcher.getIndexReader();
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
_TestUtil.checkIndex(dir);
@ -336,6 +336,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
assertEquals(34, hits.length);
searcher.close();
reader.close();
dir.close();
}
@ -366,15 +367,17 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
writer.close();
// make sure searching sees right # hits
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
Document d = searcher.getIndexReader().document(hits[0].doc);
assertEquals("wrong first document", "21", d.get("id"));
doTestHits(hits, 44, searcher.getIndexReader());
searcher.close();
reader.close();
// make sure we can do delete & setNorm against this segment:
IndexReader reader = IndexReader.open(dir, false);
reader = IndexReader.open(dir, false);
searcher = newSearcher(reader);
Term searchTerm = new Term("id", "6");
int delCount = reader.deleteDocuments(searchTerm);
@ -385,26 +388,30 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
searcher.close();
// make sure they "took":
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir, true);
searcher = new IndexSearcher(reader);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 43, hits.length);
d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
doTestHits(hits, 43, searcher.getIndexReader());
searcher.close();
reader.close();
// fully merge
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.forceMerge(1);
writer.close();
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 43, hits.length);
d = searcher.doc(hits[0].doc);
doTestHits(hits, 43, searcher.getIndexReader());
assertEquals("wrong first document", "22", d.get("id"));
searcher.close();
reader.close();
dir.close();
}
@ -414,15 +421,17 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
Directory dir = newFSDirectory(oldIndexDir);
// make sure searching sees right # hits
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 34, hits.length);
Document d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "21", d.get("id"));
searcher.close();
reader.close();
// make sure we can do a delete & setNorm against this segment:
IndexReader reader = IndexReader.open(dir, false);
reader = IndexReader.open(dir, false);
Term searchTerm = new Term("id", "6");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("wrong delete count", 1, delCount);
@ -431,26 +440,30 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
reader.close();
// make sure they "took":
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 33, hits.length);
d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
doTestHits(hits, 33, searcher.getIndexReader());
searcher.close();
reader.close();
// fully merge
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.forceMerge(1);
writer.close();
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 33, hits.length);
d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
doTestHits(hits, 33, searcher.getIndexReader());
searcher.close();
reader.close();
dir.close();
}
@ -686,7 +699,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
File oldIndexDir = _TestUtil.getTempDir(oldNames[i]);
_TestUtil.unzip(getDataFile("index." + oldNames[i] + ".zip"), oldIndexDir);
Directory dir = newFSDirectory(oldIndexDir);
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
for (int id=10; id<15; id++) {
ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
@ -719,6 +733,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
}
searcher.close();
reader.close();
dir.close();
_TestUtil.rmDir(oldIndexDir);
}

View File

@ -683,7 +683,8 @@ public class TestDeletionPolicy extends LuceneTestCase {
assertEquals(2*(N+1)+1, policy.numOnInit);
assertEquals(2*(N+2) - (wasFullyMerged ? 1:0), policy.numOnCommit);
IndexSearcher searcher = new IndexSearcher(dir, false);
IndexReader rwReader = IndexReader.open(dir, false);
IndexSearcher searcher = new IndexSearcher(rwReader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(176, hits.length);
@ -694,6 +695,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
int expectedCount = 176;
searcher.close();
rwReader.close();
for(int i=0;i<N+1;i++) {
if (VERBOSE) {
System.out.println("TEST: i=" + i);
@ -801,7 +803,8 @@ public class TestDeletionPolicy extends LuceneTestCase {
assertEquals(3*(N+1), policy.numOnInit);
assertEquals(3*(N+1)+1, policy.numOnCommit);
IndexSearcher searcher = new IndexSearcher(dir, false);
IndexReader rwReader = IndexReader.open(dir, false);
IndexSearcher searcher = new IndexSearcher(rwReader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(0, hits.length);
@ -812,6 +815,9 @@ public class TestDeletionPolicy extends LuceneTestCase {
dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
int expectedCount = 0;
searcher.close();
rwReader.close();
for(int i=0;i<N+1;i++) {
try {
IndexReader reader = IndexReader.open(dir, true);

View File

@ -452,10 +452,12 @@ public class TestIndexWriter extends LuceneTestCase {
}
writer.close();
IndexSearcher searcher = new IndexSearcher(dir, false);
IndexReader reader = IndexReader.open(dir, false);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
assertEquals(300, hits.length);
searcher.close();
reader.close();
dir.close();
}
@ -482,10 +484,12 @@ public class TestIndexWriter extends LuceneTestCase {
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
IndexReader reader = IndexReader.open(dir, false);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
reader.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10));
@ -503,12 +507,14 @@ public class TestIndexWriter extends LuceneTestCase {
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir, false);
reader = IndexReader.open(dir, false);
searcher = new IndexSearcher(reader);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
reader.close();
IndexReader reader = IndexReader.open(dir, true);
reader = IndexReader.open(dir, true);
reader.close();
dir.close();
@ -578,15 +584,16 @@ public class TestIndexWriter extends LuceneTestCase {
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
IndexReader reader = IndexReader.open(dir, false);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
searcher.close();
reader.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE));
writer.close();
searcher.close();
dir.close();
}
@ -985,7 +992,8 @@ public class TestIndexWriter extends LuceneTestCase {
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir, false);
IndexReader r = IndexReader.open(dir, false);
IndexSearcher s = new IndexSearcher(r);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
@ -1008,6 +1016,7 @@ public class TestIndexWriter extends LuceneTestCase {
w.close();
s.close();
r.close();
dir.close();
}

View File

@ -51,22 +51,26 @@ public class TestIndexWriterCommit extends LuceneTestCase {
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
IndexReader reader = IndexReader.open(dir, false);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
reader.close();
IndexReader reader = IndexReader.open(dir, true);
reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
TestIndexWriter.addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
IndexReader r = IndexReader.open(dir, false);
searcher = new IndexSearcher(r);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
r.close();
assertTrue("reader should have still been current", reader.isCurrent());
}
@ -74,10 +78,12 @@ public class TestIndexWriterCommit extends LuceneTestCase {
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir, false);
IndexReader r = IndexReader.open(dir, false);
searcher = new IndexSearcher(r);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
r.close();
reader.close();
dir.close();
}
@ -99,10 +105,12 @@ public class TestIndexWriterCommit extends LuceneTestCase {
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir, false);
IndexReader reader = IndexReader.open(dir, false);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
reader.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
@ -112,20 +120,24 @@ public class TestIndexWriterCommit extends LuceneTestCase {
// Delete all docs:
writer.deleteDocuments(searchTerm);
searcher = new IndexSearcher(dir, false);
reader = IndexReader.open(dir, false);
searcher = new IndexSearcher(reader);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
reader.close();
// Now, close the writer:
writer.rollback();
TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
searcher = new IndexSearcher(dir, false);
reader = IndexReader.open(dir, false);
searcher = new IndexSearcher(reader);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
reader.close();
// Now make sure we can re-open the index, add docs,
// and all is good:
@ -140,17 +152,21 @@ public class TestIndexWriterCommit extends LuceneTestCase {
for(int j=0;j<17;j++) {
TestIndexWriter.addDoc(writer);
}
searcher = new IndexSearcher(dir, false);
IndexReader r = IndexReader.open(dir, false);
searcher = new IndexSearcher(r);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
searcher.close();
r.close();
}
writer.close();
searcher = new IndexSearcher(dir, false);
IndexReader r = IndexReader.open(dir, false);
searcher = new IndexSearcher(r);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
r.close();
dir.close();
}

View File

@ -400,9 +400,11 @@ public class TestIndexWriterDelete extends LuceneTestCase {
}
private int getHitCount(Directory dir, Term term) throws IOException {
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
int hitCount = searcher.search(new TermQuery(term), null, 1000).totalHits;
searcher.close();
reader.close();
return hitCount;
}

View File

@ -286,7 +286,8 @@ public class TestOmitTf extends LuceneTestCase {
/*
* Verify the index
*/
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarityProvider(new SimpleSimilarityProvider());
Term a = new Term("noTf", term);
@ -401,6 +402,7 @@ public class TestOmitTf extends LuceneTestCase {
assertEquals(15, CountingHitCollector.getCount());
searcher.close();
reader.close();
dir.close();
}

View File

@ -102,8 +102,12 @@ public class TestStressIndexing extends LuceneTestCase {
@Override
public void doWork() throws Throwable {
for (int i=0; i<100; i++)
(new IndexSearcher(directory, true)).close();
for (int i=0; i<100; i++) {
IndexReader ir = IndexReader.open(directory, true);
IndexSearcher is = new IndexSearcher(ir);
is.close();
ir.close();
}
count += 100;
}
}

View File

@ -45,6 +45,7 @@ public class TestBoolean2 extends LuceneTestCase {
private static IndexSearcher searcher;
private static IndexSearcher bigSearcher;
private static IndexReader reader;
private static IndexReader littleReader;
private static int NUM_EXTRA_DOCS = 6000;
public static final String field = "field";
@ -62,7 +63,8 @@ public class TestBoolean2 extends LuceneTestCase {
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(directory, true);
littleReader = IndexReader.open(directory);
searcher = new IndexSearcher(littleReader);
// Make big index
dir2 = new MockDirectoryWrapper(random, new RAMDirectory(directory, IOContext.DEFAULT));
@ -101,11 +103,13 @@ public class TestBoolean2 extends LuceneTestCase {
public static void afterClass() throws Exception {
searcher.close();
reader.close();
littleReader.close();
dir2.close();
directory.close();
bigSearcher.close();
searcher = null;
reader = null;
littleReader = null;
dir2 = null;
directory = null;
bigSearcher = null;

View File

@ -25,6 +25,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@ -77,10 +78,12 @@ public class TestFilteredSearch extends LuceneTestCase {
booleanQuery.add(new TermQuery(new Term(FIELD, "36")), BooleanClause.Occur.SHOULD);
IndexSearcher indexSearcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
IndexSearcher indexSearcher = new IndexSearcher(reader);
ScoreDoc[] hits = indexSearcher.search(booleanQuery, filter, 1000).scoreDocs;
assertEquals("Number of matched documents", 1, hits.length);
indexSearcher.close();
reader.close();
}
catch (IOException e) {
fail(e.getMessage());

View File

@ -288,7 +288,8 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
writer.addDocument(doc);
writer.close();
IndexSearcher s = new IndexSearcher(dir);
IndexReader r = IndexReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
Query q=NumericRangeQuery.newIntRange("int", null, null, true, true);
TopDocs topDocs = s.search(q, 10);
@ -315,6 +316,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
s.close();
r.close();
dir.close();
}

View File

@ -308,7 +308,8 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
writer.addDocument(doc);
writer.close();
IndexSearcher s = new IndexSearcher(dir);
IndexReader r = IndexReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
Query q=NumericRangeQuery.newLongRange("long", null, null, true, true);
TopDocs topDocs = s.search(q, 10);
@ -335,6 +336,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
s.close();
r.close();
dir.close();
}

View File

@ -7,6 +7,7 @@ import org.apache.lucene.util.LuceneTestCase;
import java.util.BitSet;
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
@ -39,8 +40,10 @@ public class TestScorerPerf extends LuceneTestCase {
BitSet[] sets;
Term[] terms;
IndexSearcher s;
IndexReader r;
Directory d;
// TODO: this should be setUp()....
public void createDummySearcher() throws Exception {
// Create a dummy index with nothing in it.
// This could possibly fail if Lucene starts checking for docid ranges...
@ -48,7 +51,8 @@ public class TestScorerPerf extends LuceneTestCase {
IndexWriter iw = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
iw.addDocument(new Document());
iw.close();
s = new IndexSearcher(d, true);
r = IndexReader.open(d);
s = new IndexSearcher(r);
}
public void createRandomTerms(int nDocs, int nTerms, double power, Directory dir) throws Exception {
@ -320,6 +324,7 @@ public class TestScorerPerf extends LuceneTestCase {
doConjunctions(atLeast(10000), atLeast(5));
doNestedConjunctions(atLeast(10000), atLeast(3), atLeast(3));
s.close();
r.close();
d.close();
}

View File

@ -61,7 +61,8 @@ public class TestSetNorm extends LuceneTestCase {
// check that searches are ordered by this boost
final float[] scores = new float[4];
IndexSearcher is = new IndexSearcher(store, true);
IndexReader ir = IndexReader.open(store);
IndexSearcher is = new IndexSearcher(ir);
is.search
(new TermQuery(new Term("field", "word")),
new Collector() {
@ -85,6 +86,7 @@ public class TestSetNorm extends LuceneTestCase {
}
});
is.close();
ir.close();
float lastScore = 0.0f;
for (int i = 0; i < 4; i++) {

View File

@ -258,7 +258,8 @@ public class TestSort extends LuceneTestCase {
//writer.forceMerge(1);
//System.out.println(writer.getSegmentCount());
writer.close();
return new IndexSearcher (indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
return new IndexSearcher (reader);
}
public String getRandomNumberString(int num, int low, int high) {
@ -557,6 +558,7 @@ public class TestSort extends LuceneTestCase {
System.out.println("topn field1(field2)(docID):\n" + buff);
}
assertFalse("Found sort results out of order", fail);
searcher.getIndexReader().close();
searcher.close();
}
@ -1303,9 +1305,11 @@ public class TestSort extends LuceneTestCase {
new SortField("string", SortField.Type.STRING),
SortField.FIELD_DOC );
// this should not throw AIOOBE or RuntimeEx
IndexSearcher searcher = new IndexSearcher(indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
searcher.search(new MatchAllDocsQuery(), null, 500, sort);
searcher.close();
reader.close();
indexStore.close();
}

View File

@ -26,6 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
@ -54,49 +55,62 @@ public class TestTermRangeQuery extends LuceneTestCase {
public void testExclusive() throws Exception {
Query query = TermRangeQuery.newStringRange("content", "A", "C", false, false);
initializeIndex(new String[] {"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D, only B in range", 1, hits.length);
searcher.close();
reader.close();
initializeIndex(new String[] {"A", "B", "D"});
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,D, only B in range", 1, hits.length);
searcher.close();
reader.close();
addDoc("C");
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("C added, still only B in range", 1, hits.length);
searcher.close();
reader.close();
}
public void testInclusive() throws Exception {
Query query = TermRangeQuery.newStringRange("content", "A", "C", true, true);
initializeIndex(new String[]{"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D - A,B,C in range", 3, hits.length);
searcher.close();
reader.close();
initializeIndex(new String[]{"A", "B", "D"});
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,D - A and B in range", 2, hits.length);
searcher.close();
reader.close();
addDoc("C");
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("C added - A, B, C in range", 3, hits.length);
searcher.close();
reader.close();
}
public void testAllDocs() throws Exception {
initializeIndex(new String[]{"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
TermRangeQuery query = new TermRangeQuery("content", null, null, true, true);
Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "content");
assertFalse(query.getTermsEnum(terms) instanceof TermRangeTermsEnum);
@ -112,6 +126,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
assertTrue(query.getTermsEnum(terms) instanceof TermRangeTermsEnum);
assertEquals(3, searcher.search(query, null, 1000).scoreDocs.length);
searcher.close();
reader.close();
}
/** This test should not be here, but it tests the fuzzy query rewrite mode (TOP_TERMS_SCORING_BOOLEAN_REWRITE)
@ -119,7 +134,8 @@ public class TestTermRangeQuery extends LuceneTestCase {
public void testTopTermsRewrite() throws Exception {
initializeIndex(new String[]{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"});
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
TermRangeQuery query = TermRangeQuery.newStringRange("content", "B", "J", true, true);
checkBooleanTerms(searcher, query, "B", "C", "D", "E", "F", "G", "H", "I", "J");
@ -131,6 +147,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
BooleanQuery.setMaxClauseCount(savedClauseCount);
}
searcher.close();
reader.close();
}
private void checkBooleanTerms(IndexSearcher searcher, TermRangeQuery query, String... terms) throws IOException {
@ -267,7 +284,8 @@ public class TestTermRangeQuery extends LuceneTestCase {
Query query = TermRangeQuery.newStringRange("content", null, "C",
false, false);
initializeIndex(new String[] {"A", "B", "", "C", "D"}, analyzer);
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
int numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,C,D => A, B & <empty string> are in range", 3, numHits);
@ -275,22 +293,27 @@ public class TestTermRangeQuery extends LuceneTestCase {
//assertEquals("A,B,<empty string>,C,D => A, B & <empty string> are in range", 2, hits.length());
searcher.close();
reader.close();
initializeIndex(new String[] {"A", "B", "", "D"}, analyzer);
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,D => A, B & <empty string> are in range", 3, numHits);
// until Lucene-38 is fixed, use this assert:
//assertEquals("A,B,<empty string>,D => A, B & <empty string> are in range", 2, hits.length());
searcher.close();
reader.close();
addDoc("C");
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("C added, still A, B & <empty string> are in range", 3, numHits);
// until Lucene-38 is fixed, use this assert
//assertEquals("C added, still A, B & <empty string> are in range", 2, hits.length());
searcher.close();
reader.close();
}
// LUCENE-38
@ -299,28 +322,34 @@ public class TestTermRangeQuery extends LuceneTestCase {
Analyzer analyzer = new SingleCharAnalyzer();
Query query = TermRangeQuery.newStringRange("content", null, "C", true, true);
initializeIndex(new String[]{"A", "B", "","C", "D"}, analyzer);
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
int numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,C,D => A,B,<empty string>,C in range", 4, numHits);
// until Lucene-38 is fixed, use this assert
//assertEquals("A,B,<empty string>,C,D => A,B,<empty string>,C in range", 3, hits.length());
searcher.close();
reader.close();
initializeIndex(new String[]{"A", "B", "", "D"}, analyzer);
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,D - A, B and <empty string> in range", 3, numHits);
// until Lucene-38 is fixed, use this assert
//assertEquals("A,B,<empty string>,D => A, B and <empty string> in range", 2, hits.length());
searcher.close();
reader.close();
addDoc("C");
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
numHits = searcher.search(query, null, 1000).totalHits;
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("C added => A,B,<empty string>,C in range", 4, numHits);
// until Lucene-38 is fixed, use this assert
//assertEquals("C added => A,B,<empty string>,C in range", 3, hits.length());
searcher.close();
reader.close();
}
}

View File

@ -22,6 +22,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
@ -67,7 +68,8 @@ public class TestWildcard
*/
public void testTermWithoutWildcard() throws IOException {
Directory indexStore = getIndexStore("field", new String[]{"nowildcard", "nowildcardx"});
IndexSearcher searcher = new IndexSearcher(indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
MultiTermQuery wq = new WildcardQuery(new Term("field", "nowildcard"));
assertMatches(searcher, wq, 1);
@ -96,6 +98,7 @@ public class TestWildcard
assertTrue(q instanceof ConstantScoreQuery);
assertEquals(q.getBoost(), wq.getBoost(), 0.1);
searcher.close();
reader.close();
indexStore.close();
}
@ -104,7 +107,8 @@ public class TestWildcard
*/
public void testEmptyTerm() throws IOException {
Directory indexStore = getIndexStore("field", new String[]{"nowildcard", "nowildcardx"});
IndexSearcher searcher = new IndexSearcher(indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
MultiTermQuery wq = new WildcardQuery(new Term("field", ""));
wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
@ -113,6 +117,7 @@ public class TestWildcard
assertTrue(q instanceof BooleanQuery);
assertEquals(0, ((BooleanQuery) q).clauses().size());
searcher.close();
reader.close();
indexStore.close();
}
@ -123,7 +128,8 @@ public class TestWildcard
*/
public void testPrefixTerm() throws IOException {
Directory indexStore = getIndexStore("field", new String[]{"prefix", "prefixx"});
IndexSearcher searcher = new IndexSearcher(indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
MultiTermQuery wq = new WildcardQuery(new Term("field", "prefix*"));
assertMatches(searcher, wq, 2);
@ -135,6 +141,7 @@ public class TestWildcard
assertFalse(wq.getTermsEnum(terms) instanceof PrefixTermsEnum);
assertFalse(wq.getTermsEnum(terms).getClass().getSimpleName().contains("AutomatonTermsEnum"));
searcher.close();
reader.close();
indexStore.close();
}
@ -145,7 +152,8 @@ public class TestWildcard
throws IOException {
Directory indexStore = getIndexStore("body", new String[]
{"metal", "metals"});
IndexSearcher searcher = new IndexSearcher(indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
Query query1 = new TermQuery(new Term("body", "metal"));
Query query2 = new WildcardQuery(new Term("body", "metal*"));
Query query3 = new WildcardQuery(new Term("body", "m*tal"));
@ -174,6 +182,7 @@ public class TestWildcard
assertMatches(searcher, new WildcardQuery(new Term("body", "*tal")), 1);
assertMatches(searcher, new WildcardQuery(new Term("body", "*tal*")), 2);
searcher.close();
reader.close();
indexStore.close();
}
@ -186,7 +195,8 @@ public class TestWildcard
throws IOException {
Directory indexStore = getIndexStore("body", new String[]
{"metal", "metals", "mXtals", "mXtXls"});
IndexSearcher searcher = new IndexSearcher(indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
Query query1 = new WildcardQuery(new Term("body", "m?tal"));
Query query2 = new WildcardQuery(new Term("body", "metal?"));
Query query3 = new WildcardQuery(new Term("body", "metals?"));
@ -201,6 +211,7 @@ public class TestWildcard
assertMatches(searcher, query5, 0);
assertMatches(searcher, query6, 1); // Query: 'meta??' matches 'metals' not 'metal'
searcher.close();
reader.close();
indexStore.close();
}
@ -210,7 +221,8 @@ public class TestWildcard
public void testEscapes() throws Exception {
Directory indexStore = getIndexStore("field",
new String[]{"foo*bar", "foo??bar", "fooCDbar", "fooSOMETHINGbar", "foo\\"});
IndexSearcher searcher = new IndexSearcher(indexStore, true);
IndexReader reader = IndexReader.open(indexStore);
IndexSearcher searcher = new IndexSearcher(reader);
// without escape: matches foo??bar, fooCDbar, foo*bar, and fooSOMETHINGbar
WildcardQuery unescaped = new WildcardQuery(new Term("field", "foo*bar"));
@ -233,6 +245,7 @@ public class TestWildcard
assertMatches(searcher, atEnd, 1);
searcher.close();
reader.close();
indexStore.close();
}
@ -347,7 +360,8 @@ public class TestWildcard
}
iw.close();
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
// test queries that must find all
for (Query q : matchAll) {
@ -386,6 +400,7 @@ public class TestWildcard
}
searcher.close();
reader.close();
dir.close();
}
}

View File

@ -232,7 +232,8 @@ public class TestPayloadTermQuery extends LuceneTestCase {
PayloadTermQuery query = new PayloadTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy"),
new MaxPayloadFunction(), false);
IndexSearcher theSearcher = new IndexSearcher(directory, true);
IndexReader reader = IndexReader.open(directory);
IndexSearcher theSearcher = new IndexSearcher(reader);
theSearcher.setSimilarityProvider(new DefaultSimilarityProvider() {
@Override
public Similarity get(String field) {
@ -271,6 +272,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
count++;
}
theSearcher.close();
reader.close();
}
public void testNoMatch() throws Exception {

View File

@ -26,6 +26,7 @@ import java.util.Map;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
@ -339,11 +340,13 @@ public class TestLockFactory extends LuceneTestCase {
}
@Override
public void run() {
IndexReader reader = null;
IndexSearcher searcher = null;
Query query = new TermQuery(new Term("content", "aaa"));
for(int i=0;i<this.numIteration;i++) {
try{
searcher = new IndexSearcher(dir, false);
reader = IndexReader.open(dir, false);
searcher = new IndexSearcher(reader);
} catch (Exception e) {
hitException = true;
System.out.println("Stress Test Index Searcher: create hit unexpected exception: " + e.toString());
@ -361,6 +364,7 @@ public class TestLockFactory extends LuceneTestCase {
// System.out.println(hits.length() + " total results");
try {
searcher.close();
reader.close();
} catch (IOException e) {
hitException = true;
System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());

View File

@ -25,6 +25,7 @@ import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@ -78,7 +79,8 @@ public class TestWindowsMMap extends LuceneTestCase {
TEST_VERSION_CURRENT, analyzer)
.setOpenMode(OpenMode.CREATE));
writer.commit();
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
int num = atLeast(1000);
for(int dx = 0; dx < num; dx ++) {
@ -89,6 +91,7 @@ public class TestWindowsMMap extends LuceneTestCase {
}
searcher.close();
reader.close();
writer.close();
rmDir(dirPath);
}

View File

@ -32,18 +32,20 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
private RAMDirectory directory;
private Directory directory;
private IndexSearcher searcher;
private IndexReader reader;
@Override
public void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
@ -54,7 +56,16 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
writer.close();
searcher = new IndexSearcher(directory, true);
reader = IndexReader.open(directory);
searcher = new IndexSearcher(reader);
}
@Override
public void tearDown() throws Exception {
searcher.close();
reader.close();
directory.close();
super.tearDown();
}
/*

View File

@ -29,19 +29,21 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
/**
* A test class for ShingleAnalyzerWrapper as regards queries and scoring.
*/
public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
public IndexSearcher searcher;
private Analyzer analyzer;
private IndexSearcher searcher;
private IndexReader reader;
private Directory directory;
/**
* Set up a new index in RAM with three test phrases and the supplied Analyzer.
@ -50,9 +52,12 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
* @return an indexSearcher on the test index.
* @throws Exception if an error occurs with index writer or searcher
*/
public IndexSearcher setUpSearcher(Analyzer analyzer) throws Exception {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
@Override
public void setUp() throws Exception {
super.setUp();
analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 2);
directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc;
doc = new Document();
@ -69,7 +74,16 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
writer.close();
return new IndexSearcher(dir, true);
reader = IndexReader.open(directory);
searcher = new IndexSearcher(reader);
}
@Override
public void tearDown() throws Exception {
searcher.close();
reader.close();
directory.close();
super.tearDown();
}
protected void compareRanks(ScoreDoc[] hits, int[] ranks) throws Exception {
@ -83,9 +97,6 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
* This shows how to construct a phrase query containing shingles.
*/
public void testShingleAnalyzerWrapperPhraseQuery() throws Exception {
Analyzer analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 2);
searcher = setUpSearcher(analyzer);
PhraseQuery q = new PhraseQuery();
TokenStream ts = analyzer.tokenStream("content", new StringReader("this sentence"));
@ -112,9 +123,6 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
* in the right order and adjacent to each other.
*/
public void testShingleAnalyzerWrapperBooleanQuery() throws Exception {
Analyzer analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 2);
searcher = setUpSearcher(analyzer);
BooleanQuery q = new BooleanQuery();
TokenStream ts = analyzer.tokenStream("content", new StringReader("test sentence"));

View File

@ -20,6 +20,7 @@ package org.apache.lucene.benchmark.quality.trec;
import org.apache.lucene.benchmark.quality.utils.SimpleQQParser;
import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
import org.apache.lucene.benchmark.quality.*;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;
@ -53,7 +54,8 @@ public class QueryDriver {
SubmissionReport submitLog = new SubmissionReport(new PrintWriter(args[2]), "lucene");
FSDirectory dir = FSDirectory.open(new File(args[3]));
String fieldSpec = args.length == 5 ? args[4] : "T"; // default to Title-only if not specified.
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
int maxResults = 1000;
String docNameField = "docname";
@ -86,5 +88,8 @@ public class QueryDriver {
// print an avarage sum of the results
QualityStats avg = QualityStats.average(stats);
avg.log("SUMMARY", 2, logger, " ");
searcher.close();
reader.close();
dir.close();
}
}

View File

@ -29,6 +29,7 @@ import org.apache.lucene.benchmark.byTask.tasks.CreateIndexTask;
import org.apache.lucene.benchmark.byTask.tasks.TaskSequence;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
@ -87,10 +88,12 @@ public class DocMakerTest extends BenchmarkTestCase {
tasks.addTask(new CloseIndexTask(runData));
tasks.doLogic();
IndexSearcher searcher = new IndexSearcher(runData.getDirectory(), true);
IndexReader reader = IndexReader.open(runData.getDirectory());
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs td = searcher.search(new TermQuery(new Term("key", "value")), 10);
assertEquals(numExpectedResults, td.totalHits);
searcher.close();
reader.close();
}
private Document createTestNormsDocument(boolean setNormsProp,

View File

@ -38,6 +38,7 @@ import org.apache.lucene.benchmark.byTask.tasks.CreateIndexTask;
import org.apache.lucene.benchmark.byTask.tasks.TaskSequence;
import org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
@ -140,7 +141,8 @@ public class LineDocSourceTest extends BenchmarkTestCase {
tasks.addTask(new CloseIndexTask(runData));
tasks.doLogic();
IndexSearcher searcher = new IndexSearcher(runData.getDirectory(), true);
IndexReader reader = IndexReader.open(runData.getDirectory());
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs td = searcher.search(new TermQuery(new Term("body", "body")), 10);
assertEquals(numAdds, td.totalHits);
assertNotNull(td.scoreDocs[0]);
@ -151,6 +153,7 @@ public class LineDocSourceTest extends BenchmarkTestCase {
assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField));
searcher.close();
reader.close();
}
/* Tests LineDocSource with a bzip2 input stream. */

View File

@ -22,6 +22,7 @@ import org.apache.lucene.benchmark.quality.trec.TrecJudge;
import org.apache.lucene.benchmark.quality.trec.TrecTopicsReader;
import org.apache.lucene.benchmark.quality.utils.SimpleQQParser;
import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
@ -68,7 +69,8 @@ public class TestQualityRun extends BenchmarkTestCase {
judge.validateData(qqs, logger);
Directory dir = newFSDirectory(new File(getWorkDir(),"index"));
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
QualityQueryParser qqParser = new SimpleQQParser("title","body");
QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
@ -134,6 +136,7 @@ public class TestQualityRun extends BenchmarkTestCase {
}
searcher.close();
reader.close();
dir.close();
}

View File

@ -29,6 +29,7 @@ import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
/**
@ -198,7 +199,8 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
final Query q = new CustomExternalQuery(q1);
log(q);
IndexSearcher s = new IndexSearcher(dir, true);
IndexReader r = IndexReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
TopDocs hits = s.search(q, 1000);
assertEquals(N_DOCS, hits.totalHits);
for(int i=0;i<N_DOCS;i++) {
@ -207,11 +209,13 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
assertEquals("doc=" + doc, (float) 1+(4*doc) % N_DOCS, score, 0.0001);
}
s.close();
r.close();
}
@Test
public void testRewrite() throws Exception {
final IndexSearcher s = new IndexSearcher(dir, true);
IndexReader r = IndexReader.open(dir);
final IndexSearcher s = new IndexSearcher(r);
Query q = new TermQuery(new Term(TEXT_FIELD, "first"));
CustomScoreQuery original = new CustomScoreQuery(q);
@ -229,13 +233,15 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
assertEquals(s.search(q,1).totalHits, s.search(rewritten,1).totalHits);
s.close();
r.close();
}
// Test that FieldScoreQuery returns docs with expected score.
private void doTestCustomScore(ValueSource valueSource, double dboost) throws Exception {
float boost = (float) dboost;
FunctionQuery functionQuery = new FunctionQuery(valueSource);
IndexSearcher s = new IndexSearcher(dir, true);
IndexReader r = IndexReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
// regular (boolean) query.
BooleanQuery q1 = new BooleanQuery();
@ -285,6 +291,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
h1, h2CustomNeutral, h3CustomMul, h4CustomAdd, h5CustomMulAdd,
q1, q2CustomNeutral, q3CustomMul, q4CustomAdd, q5CustomMulAdd);
s.close();
r.close();
}
// verify results are as expected.

View File

@ -17,6 +17,7 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.ByteFieldSource;
@ -80,7 +81,8 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
// Test that FieldScoreQuery returns docs in expected order.
private void doTestRank (ValueSource valueSource) throws Exception {
FunctionQuery functionQuery = new FunctionQuery(valueSource);
IndexSearcher s = new IndexSearcher(dir, true);
IndexReader r = IndexReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
log("test: "+ functionQuery);
QueryUtils.check(random, functionQuery,s);
ScoreDoc[] h = s.search(functionQuery, null, 1000).scoreDocs;
@ -94,6 +96,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
prevID = resID;
}
s.close();
r.close();
}
/** Test that FieldScoreQuery of Type.BYTE returns the expected scores. */
@ -128,7 +131,8 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
// Test that FieldScoreQuery returns docs with expected score.
private void doTestExactScore (ValueSource valueSource) throws Exception {
FunctionQuery functionQuery = new FunctionQuery(valueSource);
IndexSearcher s = new IndexSearcher(dir, true);
IndexReader r = IndexReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
TopDocs td = s.search(functionQuery,null,1000);
assertEquals("All docs should be matched!",N_DOCS,td.totalHits);
ScoreDoc sd[] = td.scoreDocs;
@ -140,6 +144,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
assertEquals("score of " + id + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA);
}
s.close();
r.close();
}
}

View File

@ -18,6 +18,7 @@ package org.apache.lucene.queries.function;
*/
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.queries.function.valuesource.OrdFieldSource;
import org.apache.lucene.queries.function.valuesource.ReverseOrdFieldSource;
@ -62,7 +63,8 @@ public class TestOrdValues extends FunctionTestSetup {
// Test that queries based on reverse/ordFieldScore scores correctly
private void doTestRank(String field, boolean inOrder) throws CorruptIndexException, Exception {
IndexSearcher s = new IndexSearcher(dir, true);
IndexReader r = IndexReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
ValueSource vs;
if (inOrder) {
vs = new OrdFieldSource(field);
@ -91,6 +93,7 @@ public class TestOrdValues extends FunctionTestSetup {
prevID = resID;
}
s.close();
r.close();
}
/**
@ -112,7 +115,8 @@ public class TestOrdValues extends FunctionTestSetup {
// Test that queries based on reverse/ordFieldScore returns docs with expected score.
private void doTestExactScore(String field, boolean inOrder) throws CorruptIndexException, Exception {
IndexSearcher s = new IndexSearcher(dir, true);
IndexReader r = IndexReader.open(dir);
IndexSearcher s = new IndexSearcher(r);
ValueSource vs;
if (inOrder) {
vs = new OrdFieldSource(field);
@ -136,6 +140,7 @@ public class TestOrdValues extends FunctionTestSetup {
assertTrue("id of result " + i + " shuould be " + expectedId + " != " + score, expectedId.equals(id));
}
s.close();
r.close();
}
// LUCENE-1250

View File

@ -25,6 +25,7 @@ import java.util.Map;
import org.apache.lucene.analysis.*;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.IndexSearcher;
@ -290,10 +291,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
new MultiFieldQueryParser(TEST_VERSION_CURRENT, new String[] {"body"}, analyzer);
mfqp.setDefaultOperator(QueryParser.Operator.AND);
Query q = mfqp.parse("the footest");
IndexSearcher is = new IndexSearcher(ramDir, true);
IndexReader ir = IndexReader.open(ramDir);
IndexSearcher is = new IndexSearcher(ir);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
is.close();
ir.close();
ramDir.close();
}

View File

@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
@ -42,6 +43,7 @@ public class TestComplexPhraseQuery extends LuceneTestCase {
new DocData("jackson waits tom", "4") };
private IndexSearcher searcher;
private IndexReader reader;
String defaultFieldName = "name";
@ -120,12 +122,14 @@ public class TestComplexPhraseQuery extends LuceneTestCase {
w.addDocument(doc);
}
w.close();
searcher = new IndexSearcher(rd, true);
reader = IndexReader.open(rd);
searcher = new IndexSearcher(reader);
}
@Override
public void tearDown() throws Exception {
searcher.close();
reader.close();
rd.close();
super.tearDown();
}

View File

@ -24,6 +24,7 @@ import java.util.Map;
import org.apache.lucene.analysis.*;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
@ -328,10 +329,12 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
mfqp.setAnalyzer(analyzer);
mfqp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND);
Query q = mfqp.parse("the footest", null);
IndexSearcher is = new IndexSearcher(ramDir, true);
IndexReader ir = IndexReader.open(ramDir);
IndexSearcher is = new IndexSearcher(ir);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
is.close();
ir.close();
ramDir.close();
}

View File

@ -19,6 +19,7 @@ package org.apache.lucene.queryparser.surround.query;
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Collector;
@ -121,11 +122,13 @@ public class BooleanQueryTst {
/* if (verbose) System.out.println("Lucene: " + query.toString()); */
TestCollector tc = new TestCollector();
IndexSearcher searcher = new IndexSearcher(dBase.getDb(), true);
IndexReader reader = IndexReader.open(dBase.getDb());
IndexSearcher searcher = new IndexSearcher(reader);
try {
searcher.search(query, tc);
} finally {
searcher.close();
reader.close();
}
tc.checkNrHits();
}

View File

@ -20,6 +20,7 @@ package org.apache.lucene.queryparser.xml;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@ -46,6 +47,7 @@ public class TestQueryTemplateManager extends LuceneTestCase {
private CoreParser builder;
private final Analyzer analyzer = new MockAnalyzer(random);
private IndexSearcher searcher;
private IndexReader reader;
private Directory dir;
//A collection of documents' field values for use in our tests
@ -147,7 +149,8 @@ public class TestQueryTemplateManager extends LuceneTestCase {
}
w.forceMerge(1);
w.close();
searcher = new IndexSearcher(dir, true);
reader = IndexReader.open(dir);
searcher = new IndexSearcher(reader);
//initialize the parser
builder = new CorePlusExtensionsParser("artist", analyzer);
@ -157,6 +160,7 @@ public class TestQueryTemplateManager extends LuceneTestCase {
@Override
public void tearDown() throws Exception {
searcher.close();
reader.close();
dir.close();
super.tearDown();
}

View File

@ -638,6 +638,7 @@ public class SpellChecker implements java.io.Closeable {
ensureOpen();
closed = true;
if (searcher != null) {
searcher.getIndexReader().close();
searcher.close();
}
searcher = null;
@ -653,10 +654,12 @@ public class SpellChecker implements java.io.Closeable {
final IndexSearcher indexSearcher = createSearcher(dir);
synchronized (searcherLock) {
if(closed){
indexSearcher.getIndexReader().close();
indexSearcher.close();
throw new AlreadyClosedException("Spellchecker has been closed");
}
if (searcher != null) {
searcher.getIndexReader().close();
searcher.close();
}
// set the spellindex in the sync block - ensure consistency.
@ -673,7 +676,7 @@ public class SpellChecker implements java.io.Closeable {
*/
// for testing purposes
IndexSearcher createSearcher(final Directory dir) throws IOException{
return new IndexSearcher(dir, true);
return new IndexSearcher(IndexReader.open(dir));
}
/**

View File

@ -27,6 +27,7 @@ import java.io.Writer;
import java.net.URL;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;
@ -829,11 +830,13 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
assertEquals(1, files.length);
File snapDir = files[0];
Directory dir = new SimpleFSDirectory(snapDir.getAbsoluteFile());
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
assertEquals(nDocs, hits.totalHits);
searcher.close();
reader.close();
dir.close();
AbstractSolrTestCase.recurseDelete(snapDir); // clean up the snap dir
}

View File

@ -25,6 +25,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.*;
@ -193,9 +194,10 @@ public class TestSort extends SolrTestCaseJ4 {
iw.close();
IndexSearcher searcher = new IndexSearcher(dir, true);
IndexReader reader = IndexReader.open(dir);
IndexSearcher searcher = new IndexSearcher(reader);
// System.out.println("segments="+searcher.getIndexReader().getSequentialSubReaders().length);
assertTrue(searcher.getIndexReader().getSequentialSubReaders().length > 1);
assertTrue(reader.getSequentialSubReaders().length > 1);
for (int i=0; i<qiter; i++) {
Filter filt = new Filter() {
@ -301,6 +303,7 @@ public class TestSort extends SolrTestCaseJ4 {
}
}
searcher.close();
reader.close();
}
dir.close();