LUCENE-6657: don't throw NPE when a given segment never saw all terms used in the TermAutomatonQuery (TAQ)

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1689133 13f79535-47bb-0310-9956-ffa450edef68
commit db01548109
parent 7b2973f408
Author: Michael McCandless
Date:   2015-07-04 10:14:27 +00:00

2 changed files with 54 additions and 3 deletions

TermAutomatonQuery.java

@@ -34,7 +34,6 @@ import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.search.spans.SpanNearQuery;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.Operations;
@@ -386,20 +385,25 @@ public class TermAutomatonQuery extends Query {
       // Initialize the enums; null for a given slot means that term didn't appear in this reader
       EnumAndScorer[] enums = new EnumAndScorer[idToTerm.size()];
+      boolean any = false;
       for(Map.Entry<Integer,TermContext> ent : termStates.entrySet()) {
         TermContext termContext = ent.getValue();
         assert termContext.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termContext.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
         BytesRef term = idToTerm.get(ent.getKey());
         TermState state = termContext.get(context.ord);
         if (state != null) {
           TermsEnum termsEnum = context.reader().terms(field).iterator();
           termsEnum.seekExact(term, state);
           enums[ent.getKey()] = new EnumAndScorer(ent.getKey(), termsEnum.postings(null, PostingsEnum.POSITIONS));
+          any = true;
         }
       }

-      return new TermAutomatonScorer(this, enums, anyTermID, idToTerm, similarity.simScorer(stats, context));
+      if (any) {
+        return new TermAutomatonScorer(this, enums, anyTermID, idToTerm, similarity.simScorer(stats, context));
+      } else {
+        return null;
+      }
     }

     @Override
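
Context for the change above: in Lucene, returning null from Weight.scorer is the established signal that no document in the segment can match, and the per-leaf search loop simply skips such segments. A minimal sketch of that caller-side contract follows; the helper class and method names are illustrative assumptions (not part of this patch), using the trunk-at-the-time signatures where Weight.scorer takes only a LeafReaderContext and Scorer extends DocIdSetIterator:

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;

    class NullScorerSketch {
      // Hypothetical helper: counts matches by hand to show why a null
      // scorer (rather than a scorer over an all-null enums[]) is safe.
      static long countHits(IndexReader reader, Weight weight) throws IOException {
        long hits = 0;
        for (LeafReaderContext leaf : reader.leaves()) {
          Scorer scorer = weight.scorer(leaf);
          if (scorer == null) {
            // Segment contains none of the query's terms; with this fix the
            // TermAutomatonQuery weight takes this branch instead of NPE-ing.
            continue;
          }
          while (scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            hits++;
          }
        }
        return hits;
      }
    }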

TestTermAutomatonQuery.java

@@ -738,4 +738,51 @@ public class TestTermAutomatonQuery extends LuceneTestCase {
     r.close();
     dir.close();
   }
+
+  public void testTermDoesNotExist() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newTextField("field", "x y z", Field.Store.NO));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    IndexSearcher s = newSearcher(r);
+
+    TokenStream ts = new CannedTokenStream(new Token[] {
+        token("a", 1, 1),
+      });
+
+    TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts);
+    // System.out.println("DOT: " + q.toDot());
+    assertEquals(0, s.search(q, 1).totalHits);
+
+    w.close();
+    r.close();
+    dir.close();
+  }
+
+  public void testOneTermDoesNotExist() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    Document doc = new Document();
+    doc.add(newTextField("field", "x y z", Field.Store.NO));
+    w.addDocument(doc);
+
+    IndexReader r = w.getReader();
+    IndexSearcher s = newSearcher(r);
+
+    TokenStream ts = new CannedTokenStream(new Token[] {
+        token("a", 1, 1),
+        token("x", 1, 1),
+      });
+
+    TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts);
+    // System.out.println("DOT: " + q.toDot());
+    assertEquals(0, s.search(q, 1).totalHits);
+
+    w.close();
+    r.close();
+    dir.close();
+  }
 }
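
As a usage note, TokenStreamToTermAutomatonQuery (used in the new tests) is only one way to build this query; TermAutomatonQuery also exposes a direct state/transition builder API. Below is a hedged sketch of a companion test written against that API; the test name is hypothetical and not part of this commit, and the index setup mirrors the tests above:

    public void testDirectAutomatonOneTermMissing() throws Exception {
      Directory dir = newDirectory();
      RandomIndexWriter w = new RandomIndexWriter(random(), dir);
      Document doc = new Document();
      doc.add(newTextField("field", "x y z", Field.Store.NO));
      w.addDocument(doc);

      IndexReader r = w.getReader();
      IndexSearcher s = newSearcher(r);

      // Accepts the single-token strings "a" or "x"; "a" is never indexed.
      TermAutomatonQuery q = new TermAutomatonQuery("field");
      int init = q.createState();
      int end = q.createState();
      q.setAccept(end, true);
      q.addTransition(init, end, "a"); // absent term
      q.addTransition(init, end, "x"); // present term
      q.finish();                      // required: compiles the automaton

      // The one document contains "x"; segments containing neither term now
      // get a null scorer (and are skipped) instead of triggering the NPE.
      assertEquals(1, s.search(q, 1).totalHits);

      w.close();
      r.close();
      dir.close();
    }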