LUCENE-3455: Moved remaining Analysis consumers over to using reusableTokenStream

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1176191 13f79535-47bb-0310-9956-ffa450edef68
Christopher John Male 2011-09-27 04:07:38 +00:00
parent 33893d0e88
commit fe3982c746
7 changed files with 72 additions and 48 deletions
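Every hunk below converges on the same TokenStream consumer contract: obtain the stream via reusableTokenStream, reset() it, iterate with incrementToken(), then end() and close() it, reporting failures instead of swallowing them. A minimal sketch of that lifecycle, assuming a trunk-era (pre-4.0) Analyzer; the ReusableStreamSketch class and consume() helper are illustrative only, not part of this commit:

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

class ReusableStreamSketch {
  // Hypothetical helper showing the full lifecycle the converted consumers follow.
  static void consume(Analyzer analyzer, String field, String text) throws IOException {
    TokenStream source = analyzer.reusableTokenStream(field, new StringReader(text));
    CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
    source.reset();                            // required before the first incrementToken()
    while (source.incrementToken()) {
      System.out.println(termAtt.toString());  // one token per iteration
    }
    source.end();                              // records the final offset state
    source.close();                            // releases the underlying Reader
  }
}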

@@ -58,6 +58,18 @@ public class ParseException extends Exception {
specialConstructor = false;
}
/**
* Creates a new ParseException which is wrapping another Throwable with an
* additional message
*
* @param message Message for the Exception
* @param throwable Wrapped Throwable
*/
public ParseException(String message, Throwable throwable) {
super(message, throwable);
specialConstructor = false;
}
/**
* This variable determines which constructor was used to create
* this object and thereby affects the semantics of the
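Because the new constructor delegates to super(message, throwable), the analysis failure stays on the standard exception chain and callers can recover it via getCause(). A small hypothetical call site (parser and userInput are illustrative names, not from this commit):

try {
  Query query = parser.parse(userInput);
} catch (ParseException pe) {
  Throwable cause = pe.getCause();  // the wrapped IOException, or null if none
  // log or report the root cause rather than just the parser message
}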

@@ -477,28 +477,25 @@ public abstract class QueryParserBase {
source = analyzer.reusableTokenStream(field, new StringReader(queryText));
source.reset();
} catch (IOException e) {
source = analyzer.tokenStream(field, new StringReader(queryText));
throw new ParseException("Unable to initialize TokenStream to analyze query text", e);
}
CachingTokenFilter buffer = new CachingTokenFilter(source);
TermToBytesRefAttribute termAtt = null;
PositionIncrementAttribute posIncrAtt = null;
int numTokens = 0;
boolean success = false;
try {
buffer.reset();
success = true;
} catch (IOException e) {
// success==false if we hit an exception
throw new ParseException("Unable to initialize TokenStream to analyze query text", e);
}
if (success) {
if (buffer.hasAttribute(TermToBytesRefAttribute.class)) {
termAtt = buffer.getAttribute(TermToBytesRefAttribute.class);
}
if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
}
}
int positionCount = 0;
boolean severalTokensAtSamePosition = false;
@@ -529,7 +526,7 @@ public abstract class QueryParserBase {
source.close();
}
catch (IOException e) {
// ignore
throw new ParseException("Cannot close TokenStream analyzing query text", e);
}
BytesRef bytes = termAtt == null ? null : termAtt.getBytesRef();
@@ -789,7 +786,7 @@ public abstract class QueryParserBase {
source = analyzer.reusableTokenStream(field, new StringReader(part));
source.reset();
} catch (IOException e) {
source = analyzer.tokenStream(field, new StringReader(part));
throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e);
}
TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
@@ -808,7 +805,9 @@ public abstract class QueryParserBase {
try {
source.end();
source.close();
} catch (IOException ignored) {}
} catch (IOException e) {
throw new RuntimeException("Unable to end & close TokenStream after analyzing range part: " + part, e);
}
return new BytesRef(bytes);
}
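The same reshaping repeats in the Solr FieldTypes below. In outline (a condensed sketch, not the literal diff): the old code caught the IOException and quietly fell back to a fresh, non-reused tokenStream, whereas the new code propagates the failure so a broken analyzer surfaces at query time instead of silently degrading:

// Before (sketch): silent fallback on initialization failure
//   try { source = analyzer.reusableTokenStream(field, reader); source.reset(); }
//   catch (IOException e) { source = analyzer.tokenStream(field, reader); }

// After (sketch): fail fast with the cause attached
try {
  source = analyzer.reusableTokenStream(field, new StringReader(part));
  source.reset();
} catch (IOException e) {
  throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e);
}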

@@ -195,7 +195,7 @@ public class ICUCollationField extends FieldType {
source = analyzer.reusableTokenStream(field, new StringReader(part));
source.reset();
} catch (IOException e) {
source = analyzer.tokenStream(field, new StringReader(part));
throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e);
}
TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
@@ -212,8 +212,11 @@ public class ICUCollationField extends FieldType {
}
try {
source.end();
source.close();
} catch (IOException ignored) {}
} catch (IOException e) {
throw new RuntimeException("Unable to end & close TokenStream after analyzing range part: " + part, e);
}
return new BytesRef(bytes);
}

@@ -140,20 +140,25 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase {
* @param analyzer The analyzer to use.
*/
protected Set<BytesRef> getQueryTokenSet(String query, Analyzer analyzer) {
try {
final Set<BytesRef> tokens = new HashSet<BytesRef>();
final TokenStream tokenStream = analyzer.tokenStream("", new StringReader(query));
final TokenStream tokenStream = analyzer.reusableTokenStream("", new StringReader(query));
final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
final BytesRef bytes = bytesAtt.getBytesRef();
try {
tokenStream.reset();
while (tokenStream.incrementToken()) {
bytesAtt.fillBytesRef();
tokens.add(new BytesRef(bytes));
}
tokenStream.end();
tokenStream.close();
return tokens;
} catch (IOException ioe) {
throw new RuntimeException("Error occured while iterating over tokenstream", ioe);
}
return tokens;
}
/**
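A hypothetical caller of getQueryTokenSet(), e.g. a subclass checking whether an analyzed token also occurs in the query; the analyzer choice and strings are illustrative:

Set<BytesRef> queryTokens = getQueryTokenSet("quick brown fox",
    new WhitespaceAnalyzer(Version.LUCENE_40));
boolean matches = queryTokens.contains(new BytesRef("fox"));  // true under whitespace analysis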

@@ -217,7 +217,7 @@ public class CollationField extends FieldType {
source = analyzer.reusableTokenStream(field, new StringReader(part));
source.reset();
} catch (IOException e) {
source = analyzer.tokenStream(field, new StringReader(part));
throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e);
}
TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
@@ -234,8 +234,11 @@ public class CollationField extends FieldType {
}
try {
source.end();
source.close();
} catch (IOException ignored) {}
} catch (IOException e) {
throw new RuntimeException("Unable to end & close TokenStream after analyzing range part: " + part, e);
}
return new BytesRef(bytes);
}

@@ -112,28 +112,25 @@ public class TextField extends FieldType {
source = analyzer.reusableTokenStream(field, new StringReader(queryText));
source.reset();
} catch (IOException e) {
source = analyzer.tokenStream(field, new StringReader(queryText));
throw new RuntimeException("Unable to initialize TokenStream to analyze query text", e);
}
CachingTokenFilter buffer = new CachingTokenFilter(source);
CharTermAttribute termAtt = null;
PositionIncrementAttribute posIncrAtt = null;
int numTokens = 0;
boolean success = false;
try {
buffer.reset();
success = true;
} catch (IOException e) {
// success==false if we hit an exception
throw new RuntimeException("Unable to initialize TokenStream to analyze query text", e);
}
if (success) {
if (buffer.hasAttribute(CharTermAttribute.class)) {
termAtt = buffer.getAttribute(CharTermAttribute.class);
}
if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
}
}
int positionCount = 0;
boolean severalTokensAtSamePosition = false;

@@ -37,12 +37,14 @@ import java.io.IOException;
*
* @since solr 1.3
**/
class SimpleQueryConverter extends SpellingQueryConverter{
class SimpleQueryConverter extends SpellingQueryConverter {
@Override
public Collection<Token> convert(String origQuery) {
try {
Collection<Token> result = new HashSet<Token>();
WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_40);
TokenStream ts = analyzer.tokenStream("", new StringReader(origQuery));
TokenStream ts = analyzer.reusableTokenStream("", new StringReader(origQuery));
// TODO: support custom attributes
CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
@@ -51,9 +53,9 @@ class SimpleQueryConverter extends SpellingQueryConverter{
PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
try {
ts.reset();
while (ts.incrementToken()){
while (ts.incrementToken()) {
Token tok = new Token();
tok.copyBuffer(termAtt.buffer(), 0, termAtt.length());
tok.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
@@ -63,9 +65,12 @@ class SimpleQueryConverter extends SpellingQueryConverter{
tok.setType(typeAtt.type());
result.add(tok);
}
ts.end();
ts.close();
return result;
} catch (IOException e) {
throw new RuntimeException(e);
}
return result;
}
}
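A minimal usage sketch; SimpleQueryConverter is package-private, so this assumes same-package access, and the input string is illustrative:

SimpleQueryConverter converter = new SimpleQueryConverter();
Collection<Token> tokens = converter.convert("quick brown");
for (Token token : tokens) {
  System.out.println(token + " [" + token.startOffset() + "-" + token.endOffset() + "]");
}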