mirror of https://github.com/apache/lucene.git
LUCENE-4918: Highlighter closes the given IndexReader if QueryScorer is used with an external IndexReader.
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1465961 13f79535-47bb-0310-9956-ffa450edef68
parent ccf99d3e6c
commit 75c03eb548
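
The bug in brief: when a QueryScorer is constructed with an external (caller-owned) IndexReader, the highlighter's term extraction closed that reader as a side effect, so any later use of it failed. A minimal, self-contained sketch of the pre-fix failure mode against the Lucene 4.x API (illustrative names and setup, not part of this commit):

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.*;
    import org.apache.lucene.search.*;
    import org.apache.lucene.search.highlight.*;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class ExternalReaderBug {
      public static void main(String[] args) throws Exception {
        StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_42);
        RAMDirectory directory = new RAMDirectory();
        IndexWriter writer = new IndexWriter(directory,
            new IndexWriterConfig(Version.LUCENE_42, analyzer));
        Document doc = new Document();
        String text = "John Kennedy has been shot";
        doc.add(new TextField("field", text, Field.Store.YES));
        writer.addDocument(doc);
        writer.close();

        DirectoryReader reader = DirectoryReader.open(directory); // caller-owned
        IndexSearcher searcher = new IndexSearcher(reader);
        Query query = new TermQuery(new Term("field", "kennedy"));

        QueryScorer scorer = new QueryScorer(query, reader, "field"); // external reader
        Highlighter highlighter = new Highlighter(scorer);
        TokenStream ts = analyzer.tokenStream("field", new StringReader(text));
        System.out.println(highlighter.getBestFragment(ts, text));

        // Before this fix, WeightedSpanTermExtractor closed 'reader' above in
        // its finally block, so this call threw AlreadyClosedException;
        // after the fix it works:
        searcher.search(query, 10);
        reader.close(); // the caller, not the highlighter, closes it
      }
    }
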
@@ -232,6 +232,9 @@ Bug Fixes
 * LUCENE-4913: FacetResultNode.ordinal was always 0 when all children
   are returned. (Mike McCandless)
 
+* LUCENE-4918: Highlighter closes the given IndexReader if QueryScorer
+  is used with an external IndexReader. (Simon Willnauer, Sirvan Yahyaei)
+
 Documentation
 
 * LUCENE-4841: Added example SimpleSortedSetFacetsExample to show how
@@ -69,7 +69,7 @@ public class WeightedSpanTermExtractor {
   private boolean cachedTokenStream;
   private boolean wrapToCaching = true;
   private int maxDocCharsToAnalyze;
-  private AtomicReader reader = null;
+  private AtomicReader internalReader = null;
 
 
   public WeightedSpanTermExtractor() {
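
The rename is the heart of the fix: in the 4.x sources, getWeightedSpanTermsWithScores also takes an IndexReader parameter named reader, so inside that method the parameter shadowed the field and the cleanup code closed the caller's reader instead of the extractor's own. A self-contained illustration of the shadowing hazard (generic Java, not Lucene code):

    import java.io.Closeable;
    import java.io.IOException;

    class ShadowingHazard {
      private Closeable reader = null; // internal resource (pre-fix field name)

      // The parameter shadows the field for the whole method body.
      void extract(Closeable reader) throws IOException {
        try {
          // ... do some work ...
        } finally {
          // Resolves to the PARAMETER, i.e. the caller's resource, not the
          // field. Renaming the field to 'internalReader' removes exactly
          // this ambiguity.
          reader.close();
        }
      }
    }
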
@@ -350,7 +350,7 @@ public class WeightedSpanTermExtractor {
   }
 
   protected AtomicReaderContext getLeafContext() throws IOException {
-    if (reader == null) {
+    if (internalReader == null) {
       if(wrapToCaching && !(tokenStream instanceof CachingTokenFilter)) {
         assert !cachedTokenStream;
         tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
@@ -361,9 +361,9 @@ public class WeightedSpanTermExtractor {
       tokenStream.reset();
       final IndexSearcher searcher = indexer.createSearcher();
       // MEM index has only atomic ctx
-      reader = new DelegatingAtomicReader(((AtomicReaderContext)searcher.getTopReaderContext()).reader());
+      internalReader = new DelegatingAtomicReader(((AtomicReaderContext)searcher.getTopReaderContext()).reader());
     }
-    return reader.getContext();
+    return internalReader.getContext();
   }
 
   /*
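
For context on why closing internalReader is always safe: when no leaf context exists yet, getLeafContext() builds a transient in-memory index over the token stream ("MEM index has only atomic ctx") and wraps its single atomic reader in DelegatingAtomicReader. internalReader therefore only ever refers to a reader the extractor created itself, never to one passed in by the caller.
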
@@ -468,7 +468,7 @@ public class WeightedSpanTermExtractor {
     try {
       extract(query, terms);
     } finally {
-      IOUtils.close(reader);
+      IOUtils.close(internalReader);
     }
 
     return terms;
|
@ -516,7 +516,7 @@ public class WeightedSpanTermExtractor {
|
||||||
weightedSpanTerm.weight *= idf;
|
weightedSpanTerm.weight *= idf;
|
||||||
}
|
}
|
||||||
} finally {
|
} finally {
|
||||||
IOUtils.close(reader);
|
IOUtils.close(internalReader);
|
||||||
}
|
}
|
||||||
|
|
||||||
return terms;
|
return terms;
|
||||||
|
|
|
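
With both close sites now naming internalReader, the lifecycle contract is the usual one: whoever opens a reader closes it. A sketch of the post-fix caller side (abridged; 'directory', 'analyzer', 'query', and 'text' as in the example above):

    DirectoryReader reader = DirectoryReader.open(directory);
    try {
      QueryScorer scorer = new QueryScorer(query, reader, "field");
      Highlighter highlighter = new Highlighter(scorer);
      highlighter.getBestFragment(
          analyzer.tokenStream("field", new StringReader(text)), text);
      // 'reader' is still open here; the highlighter no longer closes it.
    } finally {
      reader.close(); // caller-owned, caller-closed
    }
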
@@ -459,6 +459,31 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter {
         numHighlights == 5);
   }
 
+  public void testExternalReader() throws Exception {
+    query = new RegexpQuery(new Term(FIELD_NAME, "ken.*"));
+    searcher = new IndexSearcher(reader);
+    hits = searcher.search(query, 100);
+    int maxNumFragmentsRequired = 2;
+
+    QueryScorer scorer = new QueryScorer(query, reader, FIELD_NAME);
+    Highlighter highlighter = new Highlighter(this, scorer);
+
+    for (int i = 0; i < hits.totalHits; i++) {
+      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+
+      highlighter.setTextFragmenter(new SimpleFragmenter(40));
+
+      String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
+          "...");
+      if (VERBOSE) System.out.println("\t" + result);
+    }
+
+    assertTrue(reader.docFreq(new Term(FIELD_NAME, "hello")) > 0);
+    assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
+        numHighlights == 5);
+  }
+
   public void testNumericRangeQuery() throws Exception {
     // doesn't currently highlight, but make sure it doesn't cause exception either
     query = NumericRangeQuery.newIntRange(NUMERIC_FIELD_NAME, 2, 6, true, true);
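
The new testExternalReader exercises exactly this path: the QueryScorer is built with the suite's shared reader, and after the highlighting loop the assertion reader.docFreq(new Term(FIELD_NAME, "hello")) > 0 uses that same reader again. Before the fix, that call would have hit an already-closed reader.
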