Caching a MinDocQuery can lead to wrong results. (#25909)
Queries are supposed to be cacheable per segment, yet the matches of this query also depend on how many documents exist in the preceding segments.
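
To illustrate the problem (this sketch is not part of the change): MinDocQuery matches every document whose global doc ID is at least minDoc, so the first match inside a segment is derived from that segment's docBase, which in turn depends on how many documents the preceding segments hold.

// Illustration only, not code from this commit: the first segment-local match
// depends on docBase, i.e. on the segments that come before this one.
class MinDocIllustration {
    static int firstSegmentLocalMatch(int minDoc, int docBase, int segmentMaxDoc) {
        int segmentMinDoc = Math.max(0, minDoc - docBase);
        return segmentMinDoc < segmentMaxDoc ? segmentMinDoc : -1; // -1: no match in this segment
    }

    public static void main(String[] args) {
        // Same segment (50 docs), same query (minDoc = 42), different preceding segments:
        System.out.println(firstSegmentLocalMatch(42, 40, 50)); // docBase 40 -> first match is local doc 2
        System.out.println(firstSegmentLocalMatch(42, 10, 50)); // docBase 10 -> first match is local doc 32
        // A per-segment cache entry computed with the first docBase is wrong under the second.
    }
}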
parent 876c7e0400
commit 1cd5e3413d
@@ -19,6 +19,7 @@
 
 package org.apache.lucene.queries;
 
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.ConstantScoreScorer;
 import org.apache.lucene.search.ConstantScoreWeight;
@@ -35,16 +36,26 @@ import java.util.Objects;
  * to a configured doc ID. */
 public final class MinDocQuery extends Query {
 
+    // Matching documents depend on the sequence of segments that the index reader
+    // wraps. Yet matches must be cacheable per-segment, so we need to incorporate
+    // the reader id in the identity of the query so that a cache entry may only
+    // be reused if this query is run against the same index reader.
+    private final Object readerId;
     private final int minDoc;
 
     /** Sole constructor. */
     public MinDocQuery(int minDoc) {
+        this(minDoc, null);
+    }
+
+    MinDocQuery(int minDoc, Object readerId) {
         this.minDoc = minDoc;
+        this.readerId = readerId;
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(classHash(), minDoc);
+        return Objects.hash(classHash(), minDoc, readerId);
     }
 
     @Override
@@ -53,11 +64,24 @@ public final class MinDocQuery extends Query {
             return false;
         }
         MinDocQuery that = (MinDocQuery) obj;
-        return minDoc == that.minDoc;
+        return minDoc == that.minDoc && Objects.equals(readerId, that.readerId);
     }
 
     @Override
+    public Query rewrite(IndexReader reader) throws IOException {
+        if (Objects.equals(reader.getContext().id(), readerId) == false) {
+            return new MinDocQuery(minDoc, reader.getContext().id());
+        }
+        return this;
+    }
+
+    @Override
     public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+        if (readerId == null) {
+            throw new IllegalStateException("Rewrite first");
+        } else if (Objects.equals(searcher.getIndexReader().getContext().id(), readerId) == false) {
+            throw new IllegalStateException("Executing against a different reader than the query has been rewritten against");
+        }
         return new ConstantScoreWeight(this, boost) {
             @Override
             public Scorer scorer(LeafReaderContext context) throws IOException {
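
The hunks above change MinDocQuery itself; the hunks below update MinDocQueryTests accordingly. A hedged usage sketch of the new contract (not part of this diff; assumes an existing Directory `dir` with indexed documents): IndexSearcher rewrites a query before building its Weight, and that rewrite is what binds MinDocQuery to the reader id, so the "Rewrite first" check in createWeight only bites callers that build Weights by hand.

// Hedged sketch, not part of this diff. Assumes an existing Directory `dir`.
try (IndexReader reader = DirectoryReader.open(dir)) {
    IndexSearcher searcher = new IndexSearcher(reader);
    Query rewritten = searcher.rewrite(new MinDocQuery(42)); // captures reader.getContext().id()
    TopDocs hits = searcher.search(rewritten, 10);
    // Rewriting the same MinDocQuery against a different reader yields an unequal query,
    // so a query-cache entry keyed on the old instance can no longer be reused by mistake.
}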
@@ -21,8 +21,10 @@ package org.apache.lucene.queries;
 
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryUtils;
 import org.apache.lucene.store.Directory;
 import org.elasticsearch.test.ESTestCase;
@@ -38,6 +40,19 @@ public class MinDocQueryTests extends ESTestCase {
         QueryUtils.check(query1);
         QueryUtils.checkEqual(query1, query2);
         QueryUtils.checkUnequal(query1, query3);
+
+        MinDocQuery query4 = new MinDocQuery(42, new Object());
+        MinDocQuery query5 = new MinDocQuery(42, new Object());
+        QueryUtils.checkUnequal(query4, query5);
     }
 
+    public void testRewrite() throws Exception {
+        IndexReader reader = new MultiReader();
+        MinDocQuery query = new MinDocQuery(42);
+        Query rewritten = query.rewrite(reader);
+        QueryUtils.checkUnequal(query, rewritten);
+        Query rewritten2 = rewritten.rewrite(reader);
+        assertSame(rewritten, rewritten2);
+    }
+
     public void testRandom() throws IOException {
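
One further check that would fit here (a sketch, not part of this diff): rewriting the same query against two distinct readers should also produce unequal queries, since each rewrite captures its own reader id.

public void testRewriteAgainstDifferentReaders() throws Exception {
    // Sketch, not part of this diff: each MultiReader has its own context id,
    // so the two rewritten queries must not be equal (and must not share cache entries).
    MinDocQuery query = new MinDocQuery(42);
    Query rewritten1 = query.rewrite(new MultiReader());
    Query rewritten2 = query.rewrite(new MultiReader());
    QueryUtils.checkUnequal(rewritten1, rewritten2);
}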