LUCENE-4971: fix NPE in AnalyzingSuggester when there are too many graph expansions

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1495206 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2013-06-20 21:55:08 +00:00
parent 543d33f941
commit b3d70e8f69
3 changed files with 34 additions and 4 deletions

CHANGES.txt

@@ -173,6 +173,9 @@ Bug Fixes
   when the sum of those shapes contain the query shape but none do individually.
   A flag was added to use the original faster algorithm. (David Smiley)
 
+* LUCENE-4971: Fixed NPE in AnalyzingSuggester when there are too many
+  graph expansions. (Alexey Kudinov via Mike McCandless)
+
 Optimizations
 
 * LUCENE-4936: Improve numeric doc values compression in case all values share

SpecialOperations.java

@@ -219,7 +219,7 @@ final public class SpecialOperations {
   /**
    * Returns the set of accepted strings, assuming that at most
    * <code>limit</code> strings are accepted. If more than <code>limit</code>
-   * strings are accepted, null is returned. If <code>limit</code>&lt;0, then
+   * strings are accepted, the first limit strings found are returned. If <code>limit</code>&lt;0, then
    * the limit is infinite.
    */
   public static Set<IntsRef> getFiniteStrings(Automaton a, int limit) {
@@ -227,11 +227,9 @@ final public class SpecialOperations {
     if (a.isSingleton()) {
       if (limit > 0) {
         strings.add(Util.toUTF32(a.singleton, new IntsRef()));
-      } else {
-        return null;
       }
     } else if (!getFiniteStrings(a.initial, new HashSet<State>(), strings, new IntsRef(), limit)) {
-      return null;
+      return strings;
     }
     return strings;
   }
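
The contract change in getFiniteStrings, shown standalone (my sketch, not code from this commit; the class name FiniteStringsSketch is hypothetical, and it assumes the Lucene 4.x automaton API, where BasicAutomata.makeString and BasicOperations.union build a small automaton):

import java.util.Set;

import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.BasicAutomata;
import org.apache.lucene.util.automaton.BasicOperations;
import org.apache.lucene.util.automaton.SpecialOperations;

public class FiniteStringsSketch {
  public static void main(String[] args) {
    // An automaton that accepts exactly two strings.
    Automaton union = BasicOperations.union(
        BasicAutomata.makeString("foo"),
        BasicAutomata.makeString("bar"));

    // limit = 1 is lower than the number of accepted strings.
    // Before this commit: getFiniteStrings returned null here, and callers
    // that did not expect null (e.g. AnalyzingSuggester) hit an NPE.
    // After: it returns the strings found before the limit was exceeded.
    Set<IntsRef> strings = SpecialOperations.getFiniteStrings(union, 1);
    System.out.println("not null: " + (strings != null));
  }
}

The point of the fix: for limit > 0 the method now degrades to a partial result instead of signalling overflow with null, which callers did not check for.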

AnalyzingSuggesterTest.java

@@ -1164,4 +1164,33 @@ public class AnalyzingSuggesterTest extends LuceneTestCase {
     assertEquals("[isla de muerta/8, i love lucy/7]", suggester.lookup("i", false, 3).toString());
     assertEquals("[i love lucy/7]", suggester.lookup("i ", false, 3).toString());
   }
+
+  public void testTooManyExpansions() throws Exception {
+
+    final Analyzer a = new Analyzer() {
+      @Override
+      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.SIMPLE, true);
+
+        return new TokenStreamComponents(tokenizer) {
+          @Override
+          public TokenStream getTokenStream() {
+            Token a = new Token("a", 0, 1);
+            a.setPositionIncrement(1);
+            Token b = new Token("b", 0, 1);
+            b.setPositionIncrement(0);
+            return new CannedTokenStream(new Token[] {a, b});
+          }
+
+          @Override
+          protected void setReader(final Reader reader) throws IOException {
+          }
+        };
+      }
+    };
+
+    AnalyzingSuggester suggester = new AnalyzingSuggester(a, a, 0, 256, 1);
+    suggester.build(new TermFreqArrayIterator(new TermFreq[] {new TermFreq("a", 1)}));
+    assertEquals("[a/1]", suggester.lookup("a", false, 1).toString());
+  }
 }
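
Why this test trips the limit (inferred from the test setup, not stated in the commit): the token "b" has a position increment of 0, so it stacks on "a" and the analyzed form is a token graph with two finite paths, while the suggester is built with maxGraphExpansions = 1 (the last constructor argument). A minimal sketch of the same situation outside the suggester, assuming the Lucene 4.x TokenStreamToAutomaton and the test-framework CannedTokenStream (the class name GraphExpansionSketch is hypothetical):

import java.util.Set;

import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStreamToAutomaton;
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.SpecialOperations;

public class GraphExpansionSketch {
  public static void main(String[] args) throws Exception {
    // Two tokens at the same position: "b" stacks on "a" (posInc == 0),
    // so the token stream describes a graph with two finite paths.
    Token a = new Token("a", 0, 1);
    a.setPositionIncrement(1);
    Token b = new Token("b", 0, 1);
    b.setPositionIncrement(0);

    Automaton graph = new TokenStreamToAutomaton()
        .toAutomaton(new CannedTokenStream(a, b));

    // Two paths but a limit of one expansion: before this commit the
    // overflow was reported as null (the NPE trigger); now the strings
    // found so far come back instead.
    Set<IntsRef> paths = SpecialOperations.getFiniteStrings(graph, 1);
    System.out.println("paths != null: " + (paths != null));
  }
}

Before this fix, the limit overflow made getFiniteStrings return null, which AnalyzingSuggester then dereferenced; that is the NPE this test guards against.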