LUCENE-1800: reusable token streams for query parser

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@803664 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Yonik Seeley 2009-08-12 18:53:49 +00:00
parent bd6bb85bb4
commit f79712ce82
3 changed files with 16 additions and 2 deletions

View File

@@ -727,6 +727,8 @@ Optimizations
     strings, the StringHelper.intern() interface was added with a
     default implementation that uses a lockless cache.
     (Earwin Burrfoot, yonik)
+
+13. LUCENE-1800: QueryParser should use reusable TokenStreams. (yonik)
 
 Documentation

View File

@@ -537,7 +537,13 @@ public class QueryParser implements QueryParserConstants {
     // Use the analyzer to get all the tokens, and then build a TermQuery,
     // PhraseQuery, or nothing based on the term count
-    TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
+    TokenStream source;
+    try {
+      source = analyzer.reusableTokenStream(field, new StringReader(queryText));
+      source.reset();
+    } catch (IOException e) {
+      source = analyzer.tokenStream(field, new StringReader(queryText));
+    }
     CachingTokenFilter buffer = new CachingTokenFilter(source);
     TermAttribute termAtt = null;
     PositionIncrementAttribute posIncrAtt = null;

View File

@@ -562,7 +562,13 @@ public class QueryParser {
     // Use the analyzer to get all the tokens, and then build a TermQuery,
     // PhraseQuery, or nothing based on the term count
-    TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
+    TokenStream source;
+    try {
+      source = analyzer.reusableTokenStream(field, new StringReader(queryText));
+      source.reset();
+    } catch (IOException e) {
+      source = analyzer.tokenStream(field, new StringReader(queryText));
+    }
     CachingTokenFilter buffer = new CachingTokenFilter(source);
     TermAttribute termAtt = null;
     PositionIncrementAttribute posIncrAtt = null;