mirror of https://github.com/apache/lucene.git
LUCENE-9376: Fix or suppress 20 resource leak precommit warnings in lucene/search
parent 9c066f60f1
commit 21b08d5cab
@@ -286,6 +286,9 @@ Build
 * Upgrade forbiddenapis to version 3.0. (Uwe Schindler)
 
+* LUCENE-9376: Fix or suppress 20 resource leak precommit warnings in lucene/search
+  (Andras Salamon via Erick Erickson)
+
 ======================= Lucene 8.5.1 =======================
 
 Bug Fixes
@@ -411,7 +411,6 @@ public class TestFuzzyQuery extends LuceneTestCase {
 
   public void testGiga() throws Exception {
 
-    MockAnalyzer analyzer = new MockAnalyzer(random());
     Directory index = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), index);
 
@@ -443,6 +442,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
     assertEquals(1, hits.length);
     assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field"));
     r.close();
+    w.close();
     index.close();
   }
 
@@ -561,6 +561,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
       w.addDocument(doc);
     }
     DirectoryReader r = w.getReader();
+    w.close();
     //System.out.println("TEST: reader=" + r);
     IndexSearcher s = newSearcher(r);
     int iters = atLeast(200);
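Note: the added w.close() leans on Lucene's near-real-time reader semantics, where a reader obtained from a writer stays a valid point-in-time snapshot after the writer closes. A minimal, self-contained sketch of that lifecycle (class and method names are illustrative, not from this patch):

    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.store.ByteBuffersDirectory;
    import org.apache.lucene.store.Directory;

    class CloseWriterEarly {
      static void searchOnce() throws Exception {
        try (Directory dir = new ByteBuffersDirectory();
             WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer()) {
          IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(analyzer));
          w.addDocument(new Document());
          DirectoryReader r = DirectoryReader.open(w); // NRT snapshot of the index
          w.close(); // safe: the reader outlives the writer
          try {
            IndexSearcher s = new IndexSearcher(r);
            // ... run queries against s ...
          } finally {
            r.close();
          }
        }
      }
    }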
@@ -638,7 +639,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
       }
     }
 
-    IOUtils.close(r, w, dir);
+    IOUtils.close(r, dir);
   }
 
   private static class TermAndScore implements Comparable<TermAndScore> {
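With w now closed right after getReader() above, it is dropped from the final IOUtils.close(...) call. IOUtils.close(Closeable...) closes every argument even when an earlier close() throws, rethrows the first exception afterwards, and skips nulls. A tiny sketch (the wrapper method is hypothetical):

    import java.io.Closeable;
    import java.io.IOException;
    import org.apache.lucene.util.IOUtils;

    class CloseTogether {
      // Unlike sequential close() calls, IOUtils.close still attempts the
      // remaining resources when an earlier close() throws, then rethrows
      // the first exception it saw. Null arguments are skipped.
      static void closeAll(Closeable reader, Closeable dir) throws IOException {
        IOUtils.close(reader, dir);
      }
    }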
@@ -181,14 +181,17 @@ public class TestLRUQueryCache extends LuceneTestCase {
       thread.join();
     }
 
-    if (error.get() != null) {
-      throw error.get();
-    }
-    queryCache.assertConsistent();
-    mgr.close();
-    w.close();
-    dir.close();
-    queryCache.assertConsistent();
+    try {
+      if (error.get() != null) {
+        throw error.get();
+      }
+      queryCache.assertConsistent();
+    } finally {
+      mgr.close();
+      w.close();
+      dir.close();
+      queryCache.assertConsistent();
+    }
   }
 
   public void testLRUEviction() throws Exception {
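This hunk is the recurring shape of the non-trivial fixes in this commit: assertions that may throw move into a try block, and every close() moves into finally, so a failing assertion can no longer leak the manager, writer, or directory. The shape in isolation, with hypothetical names:

    import java.io.Closeable;
    import java.io.IOException;

    class CheckThenClose {
      // The check may throw; the resources are closed regardless.
      static void run(Runnable check, Closeable first, Closeable second) throws IOException {
        try {
          check.run();
        } finally {
          // A production version would close the second resource even if the
          // first close() throws, e.g. via IOUtils.close(first, second).
          first.close();
          second.close();
        }
      }
    }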
@@ -119,6 +119,7 @@ public class TestSameScoresWithThreads extends LuceneTestCase {
         thread.join();
       }
     }
+    docs.close();
     r.close();
     dir.close();
   }
@@ -310,6 +310,7 @@ public class TestSearcherManager extends ThreadedIndexingAndSearchingTestCase {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         new MockAnalyzer(random())).setMergeScheduler(new ConcurrentMergeScheduler()));
+    @SuppressWarnings("resource")
     SearcherManager sm = new SearcherManager(writer, false, false, new SearcherFactory());
     writer.addDocument(new Document());
     writer.commit();
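Here the warning is suppressed rather than fixed: the SearcherManager is intentionally left open at this point, presumably released elsewhere in the test, and the annotation on the declaration silences exactly that one resource. A minimal illustration of the placement (hypothetical code, not from the test):

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;

    class OwnershipTransfer {
      // The stream is intentionally returned open; the caller closes it.
      // Annotating the declaration suppresses only this warning, not others
      // in the enclosing method or class.
      static InputStream open() {
        @SuppressWarnings("resource")
        InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3});
        return in;
      }
    }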
@@ -21,6 +21,7 @@ import java.io.IOException;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.CompositeReaderContext;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FilterDirectoryReader;
 import org.apache.lucene.index.FilterLeafReader;
@@ -47,9 +48,13 @@ public class TestTermQuery extends LuceneTestCase {
     QueryUtils.checkUnequal(
         new TermQuery(new Term("foo", "bar")),
         new TermQuery(new Term("foo", "baz")));
+    final CompositeReaderContext context;
+    try (MultiReader multiReader = new MultiReader()) {
+      context = multiReader.getContext();
+    }
     QueryUtils.checkEqual(
         new TermQuery(new Term("foo", "bar")),
-        new TermQuery(new Term("foo", "bar"), TermStates.build(new MultiReader().getContext(), new Term("foo", "bar"), true)));
+        new TermQuery(new Term("foo", "bar"), TermStates.build(context, new Term("foo", "bar"), true)));
   }
 
   public void testCreateWeightDoesNotSeekIfScoresAreNotNeeded() throws IOException {
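The old code built TermStates from an unreferenced new MultiReader() that could never be closed; the fix scopes the reader in try-with-resources and keeps only its CompositeReaderContext. The idiom in isolation (real Lucene API, simplified method):

    import java.io.IOException;
    import org.apache.lucene.index.CompositeReaderContext;
    import org.apache.lucene.index.MultiReader;

    class ScopedContext {
      // The reader lives only long enough to hand out its context; the
      // context itself is a plain value object and needs no closing.
      static CompositeReaderContext emptyContext() throws IOException {
        final CompositeReaderContext context;
        try (MultiReader multiReader = new MultiReader()) {
          context = multiReader.getContext();
        }
        return context;
      }
    }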
@@ -61,6 +61,7 @@ import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InPlaceMergeSorter;
 
 /**
@@ -643,6 +644,7 @@ public class UnifiedHighlighter {
 
       batchDocIdx += fieldValsByDoc.size();
     }
+    IOUtils.close(indexReaderWithTermVecCache);
     assert docIdIter.docID() == DocIdSetIterator.NO_MORE_DOCS
         || docIdIter.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
 
@@ -1362,24 +1362,25 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
       public void run() throws Exception {
         HashMap<String,String> synonyms = new HashMap<>();
         synonyms.put("football", "soccer,footie");
-        Analyzer analyzer = new SynonymAnalyzer(synonyms);
+        try (Analyzer analyzer = new SynonymAnalyzer(synonyms)) {
 
-        String s = "football-soccer in the euro 2004 footie competition";
+          String s = "football-soccer in the euro 2004 footie competition";
 
-        BooleanQuery.Builder query = new BooleanQuery.Builder();
-        query.add(new TermQuery(new Term("bookid", "football")), Occur.SHOULD);
-        query.add(new TermQuery(new Term("bookid", "soccer")), Occur.SHOULD);
-        query.add(new TermQuery(new Term("bookid", "footie")), Occur.SHOULD);
+          BooleanQuery.Builder query = new BooleanQuery.Builder();
+          query.add(new TermQuery(new Term("bookid", "football")), Occur.SHOULD);
+          query.add(new TermQuery(new Term("bookid", "soccer")), Occur.SHOULD);
+          query.add(new TermQuery(new Term("bookid", "footie")), Occur.SHOULD);
 
-        Highlighter highlighter = getHighlighter(query.build(), null, HighlighterTest.this);
+          Highlighter highlighter = getHighlighter(query.build(), null, HighlighterTest.this);
 
-        // Get 3 best fragments and separate with a "..."
-        TokenStream tokenStream = analyzer.tokenStream(null, s);
+          // Get 3 best fragments and separate with a "..."
+          TokenStream tokenStream = analyzer.tokenStream(null, s);
 
-        String result = highlighter.getBestFragments(tokenStream, s, 3, "...");
-        String expectedResult = "<B>football</B>-<B>soccer</B> in the euro 2004 <B>footie</B> competition";
-        assertTrue("overlapping analyzer should handle highlights OK, expected:" + expectedResult
-            + " actual:" + result, expectedResult.equals(result));
+          String result = highlighter.getBestFragments(tokenStream, s, 3, "...");
+          String expectedResult = "<B>football</B>-<B>soccer</B> in the euro 2004 <B>footie</B> competition";
+          assertTrue("overlapping analyzer should handle highlights OK, expected:" + expectedResult
+              + " actual:" + result, expectedResult.equals(result));
+        }
       }
 
     };
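Most of this hunk is reindentation; the substantive change is the first line, which puts the Analyzer into try-with-resources. Analyzer implements Closeable because it caches per-thread TokenStream components. A standalone sketch, with a stock analyzer standing in for the test's SynonymAnalyzer:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;

    class AnalyzerLifecycle {
      static int countTokens(String text) throws Exception {
        try (Analyzer analyzer = new WhitespaceAnalyzer();
             TokenStream ts = analyzer.tokenStream("field", text)) {
          ts.reset();               // mandatory before the first incrementToken()
          int count = 0;
          while (ts.incrementToken()) {
            count++;
          }
          ts.end();                 // finalize offsets before close()
          return count;
        }
      }
    }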
@@ -391,6 +391,7 @@ public class TokenSourcesTest extends BaseTokenStreamTestCase {
       if (startOffsets[i] == startOffsets[i-1]) {
         if (VERBOSE)
           System.out.println("Skipping test because can't easily validate random token-stream is correct.");
+        rTokenStream.close();
         return;
       }
     }
@@ -438,6 +439,7 @@ public class TokenSourcesTest extends BaseTokenStreamTestCase {
 
     reader.close();
     dir.close();
+    rTokenStream.close();
   }
 
   public void testMaxStartOffsetConsistency() throws IOException {
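These two hunks close rTokenStream on both exit paths: the early return that skips the test and the normal end of the method. Closing by hand before each return works but is easy to miss; the hazard in miniature (generic names, not the test code):

    import java.io.IOException;
    import java.io.StringReader;

    class EarlyReturnLeak {
      static String readOrSkip(boolean skip) throws IOException {
        StringReader reader = new StringReader("data");
        if (skip) {
          reader.close(); // without this line the early return leaks the reader
          return null;
        }
        char[] buf = new char[4];
        int n = reader.read(buf);
        reader.close();
        return new String(buf, 0, n);
      }
    }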
@@ -105,17 +105,18 @@ public class HighlightCustomQueryTest extends LuceneTestCase {
    */
   private String highlightField(Query query, String fieldName,
       String text) throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new MockAnalyzer(random(), MockTokenizer.SIMPLE,
-        true, MockTokenFilter.ENGLISH_STOPSET).tokenStream(fieldName, text);
-    // Assuming "<B>", "</B>" used to highlight
-    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
-    MyQueryScorer scorer = new MyQueryScorer(query, fieldName, FIELD_NAME);
-    Highlighter highlighter = new Highlighter(formatter, scorer);
-    highlighter.setTextFragmenter(new SimpleFragmenter(Integer.MAX_VALUE));
+    try (MockAnalyzer mockAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE,true,
+        MockTokenFilter.ENGLISH_STOPSET); TokenStream tokenStream = mockAnalyzer.tokenStream(fieldName, text)) {
+      // Assuming "<B>", "</B>" used to highlight
+      SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
+      MyQueryScorer scorer = new MyQueryScorer(query, fieldName, FIELD_NAME);
+      Highlighter highlighter = new Highlighter(formatter, scorer);
+      highlighter.setTextFragmenter(new SimpleFragmenter(Integer.MAX_VALUE));
 
-    String rv = highlighter.getBestFragments(tokenStream, text, 1,
-        "(FIELD TEXT TRUNCATED)");
-    return rv.length() == 0 ? text : rv;
+      String rv = highlighter.getBestFragments(tokenStream, text, 1,
+          "(FIELD TEXT TRUNCATED)");
+      return rv.length() == 0 ? text : rv;
+    }
   }
 
   public static class MyWeightedSpanTermExtractor extends
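Two resources share one try-with-resources header here; resources close in reverse declaration order, so the TokenStream derived from the analyzer closes before the analyzer that produced it. The same two-resource form with plain JDK types (a hypothetical file-reading example):

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;

    class TwoResources {
      static String firstLine(String path) throws IOException {
        // Declared left to right, closed right to left: the BufferedReader
        // closes before the FileReader it wraps.
        try (FileReader file = new FileReader(path);
             BufferedReader reader = new BufferedReader(file)) {
          return reader.readLine();
        }
      }
    }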
@@ -444,6 +444,7 @@ public class TestTermAutomatonQuery extends LuceneTestCase {
     }
 
     IndexReader r = w.getReader();
+    w.close();
     IndexSearcher s = newSearcher(r);
 
     // Used to match ANY using MultiPhraseQuery:
@@ -561,7 +562,7 @@ public class TestTermAutomatonQuery extends LuceneTestCase {
       }
     }
 
-    IOUtils.close(w, r, dir, analyzer);
+    IOUtils.close(r, dir, analyzer);
   }
 
   private Set<String> toDocIDs(IndexSearcher s, TopDocs hits) throws IOException {
@@ -192,6 +192,7 @@ public class TestFreeTextSuggester extends LuceneTestCase {
       }
     }
     analyzer.close();
+    lfd.close();
   }
 
   // Make sure you can suggest based only on unigram model:
@@ -50,7 +50,6 @@ public class TestSuggestStopFilter extends BaseTokenStreamTestCase {
     Tokenizer stream = new MockTokenizer();
     stream.setReader(new StringReader("go to "));
     TokenStream filter = new SuggestStopFilter(stream, stopWords);
-    filter = new SuggestStopFilter(stream, stopWords);
     assertTokenStreamContents(filter,
         new String[] {"go"},
         new int[] {0},
@@ -69,8 +68,6 @@ public class TestSuggestStopFilter extends BaseTokenStreamTestCase {
     Tokenizer stream = new MockTokenizer();
     stream.setReader(new StringReader("go to school"));
     TokenStream filter = new SuggestStopFilter(stream, stopWords);
-
-    filter = new SuggestStopFilter(stream, stopWords);
     assertTokenStreamContents(filter,
         new String[] {"go", "school"},
         new int[] {0, 6},
@@ -89,8 +86,6 @@ public class TestSuggestStopFilter extends BaseTokenStreamTestCase {
     Tokenizer stream = new MockTokenizer();
     stream.setReader(new StringReader("go to a the school"));
     TokenStream filter = new SuggestStopFilter(stream, stopWords);
-
-    filter = new SuggestStopFilter(stream, stopWords);
     assertTokenStreamContents(filter,
         new String[] { "go", "school" },
         new int[] {0, 12},
@@ -109,8 +104,6 @@ public class TestSuggestStopFilter extends BaseTokenStreamTestCase {
     Tokenizer stream = new MockTokenizer();
     stream.setReader(new StringReader("go to a the"));
     TokenStream filter = new SuggestStopFilter(stream, stopWords);
-
-    filter = new SuggestStopFilter(stream, stopWords);
     assertTokenStreamContents(filter,
         new String[] { "go", "the"},
         new int[] {0, 8},
@@ -129,8 +122,6 @@ public class TestSuggestStopFilter extends BaseTokenStreamTestCase {
     Tokenizer stream = new MockTokenizer();
     stream.setReader(new StringReader("go to a the "));
     TokenStream filter = new SuggestStopFilter(stream, stopWords);
-
-    filter = new SuggestStopFilter(stream, stopWords);
     assertTokenStreamContents(filter,
         new String[] { "go"},
         new int[] {0},
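All five TestSuggestStopFilter hunks delete the same copy-paste bug: the filter was constructed twice over the same tokenizer, orphaning the first SuggestStopFilter without a close(). The leak pattern reduced to plain JDK types:

    import java.io.StringReader;

    class DoubleConstruction {
      static void leaky() {
        StringReader reader = new StringReader("go to ");
        reader = new StringReader("go to "); // BUG: orphans the first instance
        reader.close();                      // only the second one is ever closed
      }
    }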
@@ -761,7 +761,7 @@ public class TestSuggestField extends LuceneTestCase {
       }
       assertTrue("at least one of the entries should have the score", matched);
     }
-
+    lineFileDocs.close();
     reader.close();
     iw.close();
   }
@@ -550,8 +550,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase {
   private final class ChangeIndices extends Thread {
     @Override
     public void run() {
-      try {
-        final LineFileDocs docs = new LineFileDocs(random());
+      try (final LineFileDocs docs = new LineFileDocs(random())) {
         int numDocs = 0;
         while (System.nanoTime() < endTimeNanos) {
           final int what = random().nextInt(3);
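The final fix turns a plain try block into try-with-resources, so LineFileDocs closes on every exit from the loop, exceptional or not. The general rewrite, sketched with a generic AutoCloseable rather than the test's types:

    import java.util.function.Supplier;

    class BindInTryHeader {
      // Before: try { AutoCloseable docs = open.get(); ... } -- an exception in
      // the body leaks docs. Binding it in the try header guarantees the close.
      static void run(Supplier<AutoCloseable> open) throws Exception {
        try (AutoCloseable docs = open.get()) {
          // ... loop over docs ...
        }
      }
    }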